net/rds/ib_rdma.c
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device    *device;
        struct rds_ib_mr_pool   *pool;
        struct ib_fmr           *fmr;
        struct list_head        list;
        unsigned int            remap_count;

        struct scatterlist      *sg;
        unsigned int            sg_len;
        u64                     *dma;
        int                     sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex            flush_lock;             /* serialize fmr invalidate */
        struct work_struct      flush_worker;           /* flush worker */

        spinlock_t              list_lock;              /* protect variables below */
        atomic_t                item_count;             /* total # of MRs */
        atomic_t                dirty_count;            /* # of dirty MRs */
        struct list_head        drop_list;              /* MRs that have reached their max_maps limit */
        struct list_head        free_list;              /* unused MRs */
        struct list_head        clean_list;             /* unused & unmapped MRs */
        atomic_t                free_pinned;            /* memory pinned by free MRs */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        struct ib_fmr_attr      fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

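/*
 * Find the rds_ib_device that has the given IP address bound to it, by
 * walking the global rds_ib_devices list and each device's ipaddr_list
 * under the per-device spinlock.  Returns NULL if no device matches.
 */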
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                spin_lock_irq(&rds_ibdev->spinlock);
                list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                spin_unlock_irq(&rds_ibdev->spinlock);
                                return rds_ibdev;
                        }
                }
                spin_unlock_irq(&rds_ibdev->spinlock);
        }

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr, *next;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del(&i_ipaddr->list);
                        kfree(i_ipaddr);
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);
}

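/*
 * Bind an IP address to this device, first removing any existing
 * binding of the same address to another device.
 */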
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (rds_ibdev_old)
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

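/*
 * Attach a connection to its device: take it off the global
 * ib_nodev_conns list, put it on the device's conn_list and record
 * the device in the connection's transport data.
 */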
int rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the ib_nodev_conns list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        ic->rds_ibdev = rds_ibdev;

        return 0;
}

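/*
 * Destroy every connection that is not yet attached to a device.  The
 * list is spliced onto a local list first so that rds_conn_destroy()
 * is not called with the lock held and interrupts disabled;
 * rds_ib_remove_conns() below does the same for one device's list.
 */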
void rds_ib_remove_nodev_conns(void)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&ib_nodev_conns_lock);
        list_splice(&ib_nodev_conns, &tmp_list);
        INIT_LIST_HEAD(&ib_nodev_conns);
        spin_unlock_irq(&ib_nodev_conns_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
                if (ic->conn->c_passive)
                        rds_conn_destroy(ic->conn->c_passive);
                rds_conn_destroy(ic->conn);
        }
}

void rds_ib_remove_conns(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_ibdev->spinlock);
        list_splice(&rds_ibdev->conn_list, &tmp_list);
        INIT_LIST_HEAD(&rds_ibdev->conn_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
                if (ic->conn->c_passive)
                        rds_conn_destroy(ic->conn->c_passive);
                rds_conn_destroy(ic->conn);
        }
}

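/*
 * Create the per-device FMR pool.  The FMR attributes and the pool's
 * hard/soft limits are derived from the values cached on the
 * rds_ib_device (fmr_max_remaps, fmr_page_shift, max_fmrs).
 */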
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->drop_list);
        INIT_LIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        spin_lock_init(&pool->list_lock);
        INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * Once we go past max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        flush_workqueue(rds_wq);
        rds_ib_flush_mr_pool(pool, 1);
        BUG_ON(atomic_read(&pool->item_count));
        BUG_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

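/*
 * Fast path for MR allocation: grab an unused, already unmapped MR
 * from the clean list if one is available.
 */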
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&pool->list_lock, flags);
        if (!list_empty(&pool->clean_list)) {
                ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
                list_del_init(&ibmr->list);
        }
        spin_unlock_irqrestore(&pool->list_lock, flags);

        return ibmr;
}

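/*
 * Allocate an FMR-backed MR.  Reuse a clean MR if possible; otherwise
 * allocate a fresh one as long as we stay within max_items.  When the
 * pool is full, flush dirty MRs and retry a couple of times before
 * giving up with -EAGAIN.
 */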
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0);
        }

        ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

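/*
 * DMA-map the caller's scatterlist and map the resulting pages into
 * the FMR.  Only the first entry may start, and only the last entry
 * may end, at an address that is not aligned to the FMR page size;
 * the page list handed to ib_map_phys_fmr() is built from the DMA
 * addresses rounded down to fmr_page_mask.
 */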
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
               struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                 DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~rds_ibdev->fmr_page_mask) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> rds_ibdev->fmr_page_shift;
        if (page_cnt > fmr_message_size)
                return -EINVAL;

        dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
                        dma_pages[page_cnt++] =
                                (dma_addr & rds_ibdev->fmr_page_mask) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                                   dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - the MR was remapped, so we can safely tear
         * down the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

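/*
 * Sync the MR's scatterlist for CPU or device access.  This is exposed
 * to the generic RDS code through the transport's sync_mr operation.
 */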
void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

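/*
 * Undo the MR's DMA mapping and release the pinned pages.  Pages are
 * unconditionally marked dirty since we cannot tell a read-only MR
 * from a writable one yet (see the FIXME below).
 */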
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
        struct rds_ib_mr *ibmr, *next;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned long flags;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

        mutex_lock(&pool->flush_lock);

        spin_lock_irqsave(&pool->list_lock, flags);
        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list. */
        list_splice_init(&pool->free_list, &unmap_list);
        list_splice_init(&pool->drop_list, &unmap_list);
        if (free_all)
                list_splice_init(&pool->clean_list, &unmap_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, list)
                list_add(&ibmr->fmr->list, &fmr_list);
        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
                ncleaned++;
        }

        spin_lock_irqsave(&pool->list_lock, flags);
        list_splice(&unmap_list, &pool->clean_list);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

        rds_ib_flush_mr_pool(pool, 0);
}

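/*
 * Called when the owner of an MR is done with it.  The MR is not torn
 * down here; it is put on the pool's free or drop list and reclaimed
 * later by the flush worker, or flushed synchronously if the caller
 * asked for invalidation and we are not in interrupt context.
 */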
void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        unsigned long flags;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        spin_lock_irqsave(&pool->list_lock, flags);
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                list_add(&ibmr->list, &pool->drop_list);
        else
                list_add(&ibmr->list, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);
        spin_unlock_irqrestore(&pool->list_lock, flags);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
         || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_work(rds_wq, &pool->flush_worker);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time. */
                        queue_work(rds_wq, &pool->flush_worker);
                }
        }
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0);
        }
}

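/*
 * Register a user buffer for RDMA: find the device bound to the
 * socket's source address, allocate an FMR from its pool, map the
 * scatterlist and return the rkey to the caller.  This is the entry
 * point used by the generic RDS RDMA code via the transport's get_mr
 * operation.
 */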
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr))
                return ibmr;

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;

 out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        return ibmr;
}