drivers/rapidio/devices/rio_mport_cdev.c
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>

#include "../rio.h"

#define DRV_NAME        "rio_mport"
#define DRV_PREFIX      DRV_NAME ": "
#define DEV_NAME        "rio_mport"
#define DRV_VERSION     "1.0.0"

/* Debug output filtering masks */
enum {
        DBG_NONE        = 0,
        DBG_INIT        = BIT(0), /* driver init */
        DBG_EXIT        = BIT(1), /* driver exit */
        DBG_MPORT       = BIT(2), /* mport add/remove */
        DBG_RDEV        = BIT(3), /* RapidIO device add/remove */
        DBG_DMA         = BIT(4), /* DMA transfer messages */
        DBG_MMAP        = BIT(5), /* mapping messages */
        DBG_IBW         = BIT(6), /* inbound window */
        DBG_EVENT       = BIT(7), /* event handling messages */
        DBG_OBW         = BIT(8), /* outbound window messages */
        DBG_DBELL       = BIT(9), /* doorbell messages */
        DBG_ALL         = ~0,
};

#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)          \
        do {                                    \
                if (DBG_##level & dbg_level)    \
                        pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
        } while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
                no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

#define rmcd_warn(fmt, arg...) \
        pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
        pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)

MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

#ifdef DEBUG
static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
#endif

/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
        void            *ib_base;
        dma_addr_t      ib_phys;
        u32             ib_size;
        u64             ib_rio_base;
        bool            ib_map;
        struct file     *filp;
};

/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
        MAP_INBOUND,
        MAP_OUTBOUND,
        MAP_DMA,
};

struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
        u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
        u64 size;
        struct kref ref; /* refcount of vmas sharing the mapping */
        struct file *filp;
};

struct rio_mport_dma_map {
        int valid;
        u64 length;
        void *vaddr;
        dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS      16
#define MPORT_EVENT_DEPTH       10

/*
 * mport_dev - driver-specific structure that represents mport device
 * @active     mport device status flag
 * @node       list node to maintain list of registered mports
 * @cdev       character device
 * @dev        associated device object
 * @mport      associated subsystem's master port device object
 * @buf_mutex  lock for buffer handling
 * @file_mutex lock for open files list
 * @file_list  list of open files on given mport
 * @properties properties of this mport
 * @doorbells  list of inbound doorbell filters
 * @db_lock    lock for doorbell filter list
 * @portwrites queue of inbound portwrites
 * @pw_lock    lock for port write queue
 * @mappings   queue for memory mappings
 * @dma_chan   DMA channel associated with this device
 * @dma_ref    reference counter for the default DMA channel
 * @comp       completion signaled when the default DMA channel is released
 */
struct mport_dev {
        atomic_t                active;
        struct list_head        node;
        struct cdev             cdev;
        struct device           dev;
        struct rio_mport        *mport;
        struct mutex            buf_mutex;
        struct mutex            file_mutex;
        struct list_head        file_list;
        struct rio_mport_properties     properties;
        struct list_head                doorbells;
        spinlock_t                      db_lock;
        struct list_head                portwrites;
        spinlock_t                      pw_lock;
        struct list_head        mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan *dma_chan;
        struct kref     dma_ref;
        struct completion comp;
#endif
};

/*
 * mport_cdev_priv - data structure specific to individual file object
 *                   associated with an open device
 * @md            master port character device object
 * @async_queue   asynchronous notification queue
 * @list          file objects tracking list
 * @db_filters    inbound doorbell filters for this descriptor
 * @pw_filters    portwrite filters for this descriptor
 * @event_fifo    event fifo for this descriptor
 * @event_rx_wait wait queue for this descriptor
 * @fifo_lock     lock for event_fifo
 * @event_mask    event mask for this descriptor
 * @dmach         DMA engine channel allocated for specific file object
 * @async_list    list of pending asynchronous DMA requests
 * @pend_list     list of timed out or interrupted synchronous DMA requests
 * @req_lock      lock for async_list and pend_list
 * @dma_lock      lock serializing DMA channel allocation and release
 * @dma_ref       reference counter for the allocated DMA channel
 * @comp          completion signaled when the DMA channel is released
 */
struct mport_cdev_priv {
        struct mport_dev        *md;
        struct fasync_struct    *async_queue;
        struct list_head        list;
        struct list_head        db_filters;
        struct list_head        pw_filters;
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
        u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
        struct list_head        pend_list;
        spinlock_t              req_lock;
        struct mutex            dma_lock;
        struct kref             dma_ref;
        struct completion       comp;
#endif
};

/*
 * rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node   node in mport device's list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual portwrite filter
 */
struct rio_mport_pw_filter {
        struct list_head md_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_pw_filter filter;
};

/*
 * rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node node in mport device's doorbell list
 * @priv_node node in private file object's list
 * @priv      reference to private data
 * @filter    actual doorbell filter
 */
struct rio_mport_db_filter {
        struct list_head data_node;
        struct list_head priv_node;
        struct mport_cdev_priv *priv;
        struct rio_doorbell_filter filter;
};

static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;

static struct workqueue_struct *dma_wq;

static void mport_release_mapping(struct kref *ref);

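/*
 * rio_mport_maint_rd() - read from local or remote RapidIO maintenance space
 * @priv: driver private data
 * @arg:  user-space pointer to a struct rio_mport_maint_io descriptor
 * @local: non-zero to access registers of the local mport instead of a
 *         remote device
 */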
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length/sizeof(u32);
        offset = maint_io.offset;

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_read_config_32(mport,
                                offset, &buffer[i]);
                else
                        ret = rio_mport_read_config_32(mport, maint_io.rioid,
                                maint_io.hopcount, offset, &buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

        if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
                                   buffer, maint_io.length)))
                ret = -EFAULT;
out:
        vfree(buffer);
        return ret;
}

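/*
 * rio_mport_maint_wr() - write to local or remote RapidIO maintenance space
 * @priv: driver private data
 * @arg:  user-space pointer to a struct rio_mport_maint_io descriptor
 * @local: non-zero to access registers of the local mport instead of a
 *         remote device
 */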
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                              int local)
{
        struct rio_mport *mport = priv->md->mport;
        struct rio_mport_maint_io maint_io;
        u32 *buffer;
        u32 offset;
        size_t length;
        int ret = -EINVAL, i;

        if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
                return -EFAULT;

        if ((maint_io.offset % 4) ||
            (maint_io.length == 0) || (maint_io.length % 4) ||
            (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;

        buffer = vmalloc(maint_io.length);
        if (buffer == NULL)
                return -ENOMEM;
        length = maint_io.length;

        if (unlikely(copy_from_user(buffer,
                        (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }

        offset = maint_io.offset;
        length /= sizeof(u32);

        for (i = 0; i < length; i++) {
                if (local)
                        ret = __rio_local_write_config_32(mport,
                                                          offset, buffer[i]);
                else
                        ret = rio_mport_write_config_32(mport, maint_io.rioid,
                                                        maint_io.hopcount,
                                                        offset, buffer[i]);
                if (ret)
                        goto out;

                offset += 4;
        }

out:
        vfree(buffer);
        return ret;
}


/*
 * Inbound/outbound memory mapping functions
 */
static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
                                  u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
        if (ret < 0)
                goto err_map_outb;

        map->dir = MAP_OUTBOUND;
        map->rioid = rioid;
        map->rio_addr = raddr;
        map->size = size;
        map->phys_addr = *paddr;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        list_add_tail(&map->node, &md->mappings);
        return 0;
err_map_outb:
        kfree(map);
        return ret;
}

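/*
 * rio_mport_get_outbound_mapping() - reuse an existing outbound window when
 * an exact (rioid, rio_addr, size) match is found, reject requests that
 * partially overlap an existing window, and create a new mapping otherwise.
 */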
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
                               u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_OUTBOUND)
                        continue;
                if (rioid == map->rioid &&
                    raddr == map->rio_addr && size == map->size) {
                        *paddr = map->phys_addr;
                        err = 0;
                        break;
                } else if (rioid == map->rioid &&
                           raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }

        /* If not found, create new */
        if (err == -ENOMEM)
                err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
                                                size, paddr);
        mutex_unlock(&md->buf_mutex);
        return err;
}

static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *data = priv->md;
        struct rio_mmap map;
        dma_addr_t paddr;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
                   map.rioid, map.rio_addr, map.length);

        ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
                                             map.rio_addr, map.length, &paddr);
        if (ret < 0) {
                rmcd_error("Failed to set OBW err=%d", ret);
                return ret;
        }

        map.handle = paddr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
}

/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 *
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by the mapping routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        rmcd_debug(OBW, "h=0x%llx", handle);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                rmcd_debug(OBW, "kref_put h=0x%llx", handle);
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}

/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg:  device ID
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u16 hdid;

        if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;

        md->mport->host_deviceid = hdid;
        md->properties.hdid = hdid;
        rio_local_set_device_id(md->mport, hdid);

        rmcd_debug(MPORT, "Set host device Id to %d", hdid);

        return 0;
}

/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg:  component tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 comptag;

        if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;

        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

        rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

        return 0;
}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE

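/*
 * mport_dma_req - DMA transfer request tracking structure. A request either
 * pins a user-space buffer (page_list/nr_pages) or takes a reference to a
 * kernel-space mapping (map); dma_req_free() releases whichever was taken
 * together with the DMA channel reference.
 */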
struct mport_dma_req {
        struct list_head node;
        struct file *filp;
        struct mport_cdev_priv *priv;
        enum rio_transfer_sync sync;
        struct sg_table sgt;
        struct page **page_list;
        unsigned int nr_pages;
        struct rio_mport_mapping *map;
        struct dma_chan *dmach;
        enum dma_data_direction dir;
        dma_cookie_t cookie;
        enum dma_status status;
        struct completion req_comp;
};

struct mport_faf_work {
        struct work_struct work;
        struct mport_dma_req *req;
};

static void mport_release_def_dma(struct kref *dma_ref)
{
        struct mport_dev *md =
                        container_of(dma_ref, struct mport_dev, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
        rio_release_dma(md->dma_chan);
        md->dma_chan = NULL;
}

static void mport_release_dma(struct kref *dma_ref)
{
        struct mport_cdev_priv *priv =
                        container_of(dma_ref, struct mport_cdev_priv, dma_ref);

        rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
        complete(&priv->comp);
}

static void dma_req_free(struct mport_dma_req *req)
{
        struct mport_cdev_priv *priv = req->priv;
        unsigned int i;

        dma_unmap_sg(req->dmach->device->dev,
                     req->sgt.sgl, req->sgt.nents, req->dir);
        sg_free_table(&req->sgt);
        if (req->page_list) {
                for (i = 0; i < req->nr_pages; i++)
                        put_page(req->page_list[i]);
                kfree(req->page_list);
        }

        if (req->map) {
                mutex_lock(&req->map->md->buf_mutex);
                kref_put(&req->map->ref, mport_release_mapping);
                mutex_unlock(&req->map->md->buf_mutex);
        }

        kref_put(&priv->dma_ref, mport_release_dma);

        kfree(req);
}

static void dma_xfer_callback(void *param)
{
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_cdev_priv *priv = req->priv;

        req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
                                               NULL, NULL);
        complete(&req->req_comp);
}

static void dma_faf_cleanup(struct work_struct *_work)
{
        struct mport_faf_work *work = container_of(_work,
                                                struct mport_faf_work, work);
        struct mport_dma_req *req = work->req;

        dma_req_free(req);
        kfree(work);
}

static void dma_faf_callback(void *param)
{
        struct mport_dma_req *req = (struct mport_dma_req *)param;
        struct mport_faf_work *work;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return;

        INIT_WORK(&work->work, dma_faf_cleanup);
        work->req = req;
        queue_work(dma_wq, &work->work);
}

/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or NULL/ERR_PTR() on failure. The caller must check a
 * non-NULL return value with the IS_ERR() macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
        struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
        enum dma_ctrl_flags flags)
{
        struct rio_dma_data tx_data;

        tx_data.sg = sgt->sgl;
        tx_data.sg_len = nents;
        tx_data.rio_addr_u = 0;
        tx_data.rio_addr = transfer->rio_addr;
        if (dir == DMA_MEM_TO_DEV) {
                switch (transfer->method) {
                case RIO_EXCHANGE_NWRITE:
                        tx_data.wr_type = RDW_ALL_NWRITE;
                        break;
                case RIO_EXCHANGE_NWRITE_R_ALL:
                        tx_data.wr_type = RDW_ALL_NWRITE_R;
                        break;
                case RIO_EXCHANGE_NWRITE_R:
                        tx_data.wr_type = RDW_LAST_NWRITE_R;
                        break;
                case RIO_EXCHANGE_DEFAULT:
                        tx_data.wr_type = RDW_DEFAULT;
                        break;
                default:
                        return ERR_PTR(-EINVAL);
                }
        }

        return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}

/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
        mutex_lock(&priv->dma_lock);
        if (!priv->dmach) {
                priv->dmach = rio_request_mport_dma(priv->md->mport);
                if (!priv->dmach) {
                        /* Use default DMA channel if available */
                        if (priv->md->dma_chan) {
                                priv->dmach = priv->md->dma_chan;
                                kref_get(&priv->md->dma_ref);
                        } else {
                                rmcd_error("Failed to get DMA channel");
                                mutex_unlock(&priv->dma_lock);
                                return -ENODEV;
                        }
                } else if (!priv->md->dma_chan) {
                        /* Register default DMA channel if we do not have one */
                        priv->md->dma_chan = priv->dmach;
                        kref_init(&priv->md->dma_ref);
                        rmcd_debug(DMA, "Register DMA_chan %d as default",
                                   priv->dmach->chan_id);
                }

                kref_init(&priv->dma_ref);
                init_completion(&priv->comp);
        }

        kref_get(&priv->dma_ref);
        mutex_unlock(&priv->dma_lock);
        return 0;
}

static void put_dma_channel(struct mport_cdev_priv *priv)
{
        kref_put(&priv->dma_ref, mport_release_dma);
}

/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
                          struct rio_transfer_io *xfer,
                          enum rio_transfer_sync sync, int nents)
{
        struct mport_cdev_priv *priv;
        struct sg_table *sgt;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;
        unsigned long tmo = msecs_to_jiffies(dma_timeout);
        enum dma_transfer_direction dir;
        long wret;
        int ret = 0;

        priv = req->priv;
        sgt = &req->sgt;

        chan = priv->dmach;
        dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

        rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
                   current->comm, task_pid_nr(current),
                   dev_name(&chan->dev->device),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");

        /* Initialize DMA transaction request */
        tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
                           DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

        if (!tx) {
                rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        xfer->rio_addr, xfer->length);
                ret = -EIO;
                goto err_out;
        } else if (IS_ERR(tx)) {
                ret = PTR_ERR(tx);
                rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        xfer->rio_addr, xfer->length);
                goto err_out;
        }

        if (sync == RIO_TRANSFER_FAF)
                tx->callback = dma_faf_callback;
        else
                tx->callback = dma_xfer_callback;
        tx->callback_param = req;

        req->dmach = chan;
        req->sync = sync;
        req->status = DMA_IN_PROGRESS;
        init_completion(&req->req_comp);

        cookie = dmaengine_submit(tx);
        req->cookie = cookie;

        rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
                   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);

        if (dma_submit_error(cookie)) {
                rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
                           cookie, xfer->rio_addr, xfer->length);
                ret = -EIO;
                goto err_out;
        }

        dma_async_issue_pending(chan);

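        /*
         * Completion handling depends on the synchronization mode: ASYNC
         * queues the request for a later rio_mport_wait_for_async_dma()
         * call and returns the cookie, FAF (fire-and-forget) returns
         * immediately and leaves cleanup to dma_faf_callback(), while SYNC
         * waits here for completion, timeout or signal.
         */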
        if (sync == RIO_TRANSFER_ASYNC) {
                spin_lock(&priv->req_lock);
                list_add_tail(&req->node, &priv->async_list);
                spin_unlock(&priv->req_lock);
                return cookie;
        } else if (sync == RIO_TRANSFER_FAF)
                return 0;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
                       current->comm, task_pid_nr(current),
                       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -ETIMEDOUT;
        } else if (wret == -ERESTARTSYS) {
                /* wait_for_completion was interrupted by a signal but DMA may
                 * still be in progress
                 */
                rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
                        current->comm, task_pid_nr(current),
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
                return -EINTR;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion was signaled with error */
                rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
                        current->comm, task_pid_nr(current),
                        (dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
                        cookie, req->status, ret);
                ret = -EIO;
        }

err_out:
        return ret;
}

/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *                               DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
{
        struct mport_cdev_priv *priv = filp->private_data;
        unsigned long nr_pages = 0;
        struct page **page_list = NULL;
        struct mport_dma_req *req;
        struct mport_dev *md = priv->md;
        struct dma_chan *chan;
        int i, ret;
        int nents;

        if (xfer->length == 0)
                return -EINVAL;
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        ret = get_dma_channel(priv);
        if (ret) {
                kfree(req);
                return ret;
        }

        /*
         * If parameter loc_addr != NULL, we are transferring data from/to
         * a data buffer allocated in user-space: lock in memory user-space
         * buffer pages and build an SG table for DMA transfer request
         *
         * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
         * used for DMA data transfers: build single entry SG table using
         * offset within the internal buffer specified by handle parameter.
         */
        if (xfer->loc_addr) {
                unsigned int offset;
                long pinned;

                offset = lower_32_bits(offset_in_page(xfer->loc_addr));
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

                page_list = kmalloc_array(nr_pages,
                                          sizeof(*page_list), GFP_KERNEL);
                if (page_list == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

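                /*
                 * Pin the user buffer pages in memory; pages must be
                 * writable when data is received from the device
                 * (DMA_FROM_DEVICE).
                 */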
                pinned = get_user_pages_fast(
                                (unsigned long)xfer->loc_addr & PAGE_MASK,
                                nr_pages, dir == DMA_FROM_DEVICE, page_list);

                if (pinned != nr_pages) {
                        if (pinned < 0) {
                                rmcd_error("get_user_pages_fast err=%ld",
                                           pinned);
                                nr_pages = 0;
                        } else {
                                rmcd_error("pinned %ld out of %ld pages",
                                           pinned, nr_pages);
                                /* Unpin only the pages actually pinned */
                                nr_pages = pinned;
                        }
                        ret = -EFAULT;
                        goto err_pg;
                }

                ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
                                        offset, xfer->length, GFP_KERNEL);
                if (ret) {
                        rmcd_error("sg_alloc_table failed with err=%d", ret);
                        goto err_pg;
                }

                req->page_list = page_list;
                req->nr_pages = nr_pages;
        } else {
                dma_addr_t baddr;
                struct rio_mport_mapping *map;

                baddr = (dma_addr_t)xfer->handle;

                mutex_lock(&md->buf_mutex);
                list_for_each_entry(map, &md->mappings, node) {
                        if (baddr >= map->phys_addr &&
                            baddr < (map->phys_addr + map->size)) {
                                kref_get(&map->ref);
                                req->map = map;
                                break;
                        }
                }
                mutex_unlock(&md->buf_mutex);

                if (req->map == NULL) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                if (xfer->length + xfer->offset > map->size) {
                        ret = -EINVAL;
                        goto err_req;
                }

                ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
                if (unlikely(ret)) {
                        rmcd_error("sg_alloc_table failed for internal buf");
                        goto err_req;
                }

                sg_set_buf(req->sgt.sgl,
                           map->virt_addr + (baddr - map->phys_addr) +
                                xfer->offset, xfer->length);
        }

        req->dir = dir;
        req->filp = filp;
        req->priv = priv;
        chan = priv->dmach;

        nents = dma_map_sg(chan->device->dev,
                           req->sgt.sgl, req->sgt.nents, dir);
        if (nents == 0) {
                rmcd_error("Failed to map SG list");
                ret = -EFAULT;
                goto err_pg;
        }

        ret = do_dma_request(req, xfer, sync, nents);

        if (ret >= 0) {
                if (sync == RIO_TRANSFER_SYNC)
                        goto sync_out;
                return ret; /* return ASYNC cookie */
        }

        if (ret == -ETIMEDOUT || ret == -EINTR) {
                /*
                 * This can happen only in case of SYNC transfer.
                 * Do not free unfinished request structure immediately.
                 * Place it into pending list and deal with it later
                 */
                spin_lock(&priv->req_lock);
                list_add_tail(&req->node, &priv->pend_list);
                spin_unlock(&priv->req_lock);
                return ret;
        }

        rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
sync_out:
        dma_unmap_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir);
        sg_free_table(&req->sgt);
err_pg:
        if (page_list) {
                for (i = 0; i < nr_pages; i++)
                        put_page(page_list[i]);
                kfree(page_list);
        }
err_req:
        if (req->map) {
                mutex_lock(&md->buf_mutex);
                kref_put(&req->map->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
        }
        put_dma_channel(priv);
        kfree(req);
        return ret;
}

static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct rio_transaction transaction;
        struct rio_transfer_io *transfer;
        enum dma_data_direction dir;
        int i, ret = 0;

        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;

        if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;

        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;

        transfer = vmalloc(transaction.count * sizeof(*transfer));
        if (!transfer)
                return -ENOMEM;

        if (unlikely(copy_from_user(transfer,
                                    (void __user *)(uintptr_t)transaction.block,
                                    transaction.count * sizeof(*transfer)))) {
                ret = -EFAULT;
                goto out_free;
        }

        dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
                                        DMA_FROM_DEVICE : DMA_TO_DEVICE;
        for (i = 0; i < transaction.count && ret == 0; i++)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                        transaction.sync, dir, &transfer[i]);

        if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
                                  transfer,
                                  transaction.count * sizeof(*transfer))))
                ret = -EFAULT;

out_free:
        vfree(transfer);

        return ret;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv;
        struct mport_dev *md;
        struct rio_async_tx_wait w_param;
        struct mport_dma_req *req;
        dma_cookie_t cookie;
        unsigned long tmo;
        long wret;
        int found = 0;
        int ret;

        priv = (struct mport_cdev_priv *)filp->private_data;
        md = priv->md;

        if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
                return -EFAULT;

        cookie = w_param.token;
        if (w_param.timeout)
                tmo = msecs_to_jiffies(w_param.timeout);
        else /* Use default DMA timeout */
                tmo = msecs_to_jiffies(dma_timeout);

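        /*
         * Take the request off the async queue while waiting; it is placed
         * back on timeout or signal delivery so that a later wait call can
         * pick it up again.
         */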
        spin_lock(&priv->req_lock);
        list_for_each_entry(req, &priv->async_list, node) {
                if (req->cookie == cookie) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        spin_unlock(&priv->req_lock);

        if (!found)
                return -EAGAIN;

        wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

        if (wret == 0) {
                /* Timeout on wait occurred */
                rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
                       current->comm, task_pid_nr(current),
                       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -ETIMEDOUT;
                goto err_tmo;
        } else if (wret == -ERESTARTSYS) {
                /* wait_for_completion was interrupted by a signal but DMA may
                 * still be in progress
                 */
                rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
                        current->comm, task_pid_nr(current),
                        (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
                ret = -EINTR;
                goto err_tmo;
        }

        if (req->status != DMA_COMPLETE) {
                /* DMA transaction completion signaled with transfer error */
                rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
                        current->comm, task_pid_nr(current),
                        (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
                        req->status);
                ret = -EIO;
        } else
                ret = 0;

        if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
                dma_req_free(req);

        return ret;

err_tmo:
        /* Return request back into async queue */
        spin_lock(&priv->req_lock);
        list_add_tail(&req->node, &priv->async_list);
        spin_unlock(&priv->req_lock);
        return ret;
}

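/*
 * rio_mport_create_dma_mapping() - allocate a DMA coherent buffer and
 * register it in the mport device's mapping list
 * @md:   mport device
 * @filp: file pointer that owns the buffer
 * @size: buffer size in bytes
 * @mapping: returned mapping descriptor
 */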
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
                        u64 size, struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                kfree(map);
                return -ENOMEM;
        }

        map->dir = MAP_DMA;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;

        return 0;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_dma_mem map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
        if (ret)
                return ret;

        map.dma_handle = mapping->phys_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
                return -EFAULT;
        }

        return 0;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_DMA && map->phys_addr == handle &&
                    map->filp == filp) {
                        kref_put(&map->ref, mport_release_mapping);
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (ret == -EFAULT) {
                rmcd_debug(DMA, "ERR no matching mapping");
                return ret;
        }

        return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
        return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */

/*
 * Inbound/outbound memory mapping functions
 */

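/*
 * rio_mport_create_inbound_mapping() - allocate a DMA coherent buffer and
 * map it into the RapidIO inbound address space
 * @raddr: requested RapidIO address, or RIO_MAP_ANY_ADDR to place the
 *         window at the buffer's physical address
 */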
static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
                                u64 raddr, u64 size,
                                struct rio_mport_mapping **mapping)
{
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;

        /* rio_map_inb_region() accepts u32 size */
        if (size > 0xffffffff)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;

        map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
                                            &map->phys_addr, GFP_KERNEL);
        if (map->virt_addr == NULL) {
                ret = -ENOMEM;
                goto err_dma_alloc;
        }

        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
        ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;

        map->dir = MAP_INBOUND;
        map->rio_addr = raddr;
        map->size = size;
        map->filp = filp;
        map->md = md;
        kref_init(&map->ref);
        mutex_lock(&md->buf_mutex);
        list_add_tail(&map->node, &md->mappings);
        mutex_unlock(&md->buf_mutex);
        *mapping = map;
        return 0;

err_map_inb:
        dma_free_coherent(mport->dev.parent, size,
                          map->virt_addr, map->phys_addr);
err_dma_alloc:
        kfree(map);
        return ret;
}

static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
                              u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
{
        struct rio_mport_mapping *map;
        int err = -ENOMEM;

        if (raddr == RIO_MAP_ANY_ADDR)
                goto get_new;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry(map, &md->mappings, node) {
                if (map->dir != MAP_INBOUND)
                        continue;
                if (raddr == map->rio_addr && size == map->size) {
                        /* allow exact match only */
                        *mapping = map;
                        err = 0;
                        break;
                } else if (raddr < (map->rio_addr + map->size - 1) &&
                           (raddr + size) > map->rio_addr) {
                        err = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        if (err != -ENOMEM)
                return err;
get_new:
        /* not found, create new */
        return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}

static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        struct rio_mmap map;
        struct rio_mport_mapping *mapping = NULL;
        int ret;

        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
        if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
                                            map.length, &mapping);
        if (ret)
                return ret;

        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;

        if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
                        kref_put(&mapping->ref, mport_release_mapping);
                        mutex_unlock(&md->buf_mutex);
                }
                return -EFAULT;
        }

        return 0;
}

/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                    previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the call
 * @arg:  buffer handle returned by allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
        struct mport_cdev_priv *priv = filp->private_data;
        struct mport_dev *md = priv->md;
        u64 handle;
        struct rio_mport_mapping *map, *_map;

        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;

        if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;

        mutex_lock(&md->buf_mutex);
        list_for_each_entry_safe(map, _map, &md->mappings, node) {
                if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
                        if (map->filp == filp) {
                                map->filp = NULL;
                                kref_put(&map->ref, mport_release_mapping);
                        }
                        break;
                }
        }
        mutex_unlock(&md->buf_mutex);

        return 0;
}

/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg:  port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
        struct mport_dev *md = priv->md;
        u32 port_idx = md->mport->index;

        rmcd_debug(MPORT, "port_index=%d", port_idx);

        if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
                return -EFAULT;

        return 0;
}

static int rio_mport_add_event(struct mport_cdev_priv *priv,
                               struct rio_event *event)
{
        int overflow;

        if (!(priv->event_mask & event->header))
                return -EACCES;

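        /*
         * Check for free space and insert the event under fifo_lock so that
         * concurrent doorbell and port-write handlers cannot interleave
         * partial events.
         */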
        spin_lock(&priv->fifo_lock);
        overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
                || kfifo_in(&priv->event_fifo, (unsigned char *)event,
                        sizeof(*event)) != sizeof(*event);
        spin_unlock(&priv->fifo_lock);

        wake_up_interruptible(&priv->event_rx_wait);

        if (overflow) {
                dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
                return -EBUSY;
        }

        return 0;
}

static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
                                       u16 src, u16 dst, u16 info)
{
        struct mport_dev *data = dev_id;
        struct mport_cdev_priv *priv;
        struct rio_mport_db_filter *db_filter;
        struct rio_event event;
        int handled;

        event.header = RIO_DOORBELL;
        event.u.doorbell.rioid = src;
        event.u.doorbell.payload = info;

        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
                if ((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                     db_filter->filter.rioid == src) &&
                    info >= db_filter->filter.low &&
                    info <= db_filter->filter.high) {
                        priv = db_filter->priv;
                        rio_mport_add_event(priv, &event);
                        handled = 1;
                }
        }
        spin_unlock(&data->db_lock);

        if (!handled)
                dev_warn(&data->dev,
                        "%s: spurious DB received from 0x%x, info=0x%04x\n",
                        __func__, src, info);
}

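/*
 * rio_mport_add_db_filter() - register an inbound doorbell range filter
 * for the calling file descriptor
 * @priv: driver private data
 * @arg:  user-space pointer to a struct rio_doorbell_filter
 */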
static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
                                   void __user *arg)
{
        struct mport_dev *md = priv->md;
        struct rio_mport_db_filter *db_filter;
        struct rio_doorbell_filter filter;
        unsigned long flags;
        int ret;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        if (filter.low > filter.high)
                return -EINVAL;

        ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
                                    rio_mport_doorbell_handler);
        if (ret) {
                rmcd_error("%s failed to register IBDB, err=%d",
                           dev_name(&md->dev), ret);
                return ret;
        }

        db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
        if (db_filter == NULL) {
                rio_release_inb_dbell(md->mport, filter.low, filter.high);
                return -ENOMEM;
        }

        db_filter->filter = filter;
        db_filter->priv = priv;
        spin_lock_irqsave(&md->db_lock, flags);
        list_add_tail(&db_filter->priv_node, &priv->db_filters);
        list_add_tail(&db_filter->data_node, &md->doorbells);
        spin_unlock_irqrestore(&md->db_lock, flags);

        return 0;
}

static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
        list_del(&db_filter->data_node);
        list_del(&db_filter->priv_node);
        kfree(db_filter);
}

static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
                                      void __user *arg)
{
        struct rio_mport_db_filter *db_filter;
        struct rio_doorbell_filter filter;
        unsigned long flags;
        int ret = -EINVAL;

        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;

        if (filter.low > filter.high)
                return -EINVAL;

        spin_lock_irqsave(&priv->md->db_lock, flags);
        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
                if (db_filter->filter.rioid == filter.rioid &&
                    db_filter->filter.low == filter.low &&
                    db_filter->filter.high == filter.high) {
                        rio_mport_delete_db_filter(db_filter);
                        ret = 0;
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->md->db_lock, flags);

        if (!ret)
                rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

        return ret;
}

1557 static int rio_mport_match_pw(union rio_pw_msg *msg,
1558                               struct rio_pw_filter *filter)
1559 {
1560         if ((msg->em.comptag & filter->mask) < filter->low ||
1561             (msg->em.comptag & filter->mask) > filter->high)
1562                 return 0;
1563         return 1;
1564 }
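
/*
 * Example: with mask = 0xffff0000, low = 0x00010000 and high = 0x0001ffff,
 * a port-write carrying comptag 0x00012345 matches, because
 * 0x00012345 & 0xffff0000 = 0x00010000 lies inside [low, high], while
 * comptag 0x00022345 does not (its masked value 0x00020000 is above high).
 */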
1565
1566 static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
1567                                 union rio_pw_msg *msg, int step)
1568 {
1569         struct mport_dev *md = context;
1570         struct mport_cdev_priv *priv;
1571         struct rio_mport_pw_filter *pw_filter;
1572         struct rio_event event;
1573         int handled;
1574
1575         event.header = RIO_PORTWRITE;
1576         memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
1577
1578         handled = 0;
1579         spin_lock(&md->pw_lock);
1580         list_for_each_entry(pw_filter, &md->portwrites, md_node) {
1581                 if (rio_mport_match_pw(msg, &pw_filter->filter)) {
1582                         priv = pw_filter->priv;
1583                         rio_mport_add_event(priv, &event);
1584                         handled = 1;
1585                 }
1586         }
1587         spin_unlock(&md->pw_lock);
1588
1589         if (!handled) {
1590                 printk_ratelimited(KERN_WARNING DRV_NAME
1591                         ": mport%d received spurious PW from 0x%08x\n",
1592                         mport->id, msg->em.comptag);
1593         }
1594
1595         return 0;
1596 }
1597
1598 static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
1599                                    void __user *arg)
1600 {
1601         struct mport_dev *md = priv->md;
1602         struct rio_mport_pw_filter *pw_filter;
1603         struct rio_pw_filter filter;
1604         unsigned long flags;
1605         int hadd = 0;
1606
1607         if (copy_from_user(&filter, arg, sizeof(filter)))
1608                 return -EFAULT;
1609
1610         pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
1611         if (pw_filter == NULL)
1612                 return -ENOMEM;
1613
1614         pw_filter->filter = filter;
1615         pw_filter->priv = priv;
1616         spin_lock_irqsave(&md->pw_lock, flags);
1617         if (list_empty(&md->portwrites))
1618                 hadd = 1;
1619         list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
1620         list_add_tail(&pw_filter->md_node, &md->portwrites);
1621         spin_unlock_irqrestore(&md->pw_lock, flags);
1622
1623         if (hadd) {
1624                 int ret;
1625
1626                 ret = rio_add_mport_pw_handler(md->mport, md,
1627                                                rio_mport_pw_handler);
1628                 if (ret) {
1629                         dev_err(&md->dev,
1630                                 "%s: failed to add IB_PW handler, err=%d\n",
1631                                 __func__, ret);
                             /* Unwind the list insertion above so a failed
                              * handler registration does not leave a stale
                              * filter behind.
                              */
                             spin_lock_irqsave(&md->pw_lock, flags);
                             rio_mport_delete_pw_filter(pw_filter);
                             spin_unlock_irqrestore(&md->pw_lock, flags);
1632                         return ret;
1633                 }
1634                 rio_pw_enable(md->mport, 1);
1635         }
1636
1637         return 0;
1638 }
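
/*
 * Example (illustrative user-space sketch): enabling port-write delivery
 * for all components whose tag falls in 0x00010000-0x0001ffff, matching
 * the mask semantics shown in rio_mport_match_pw() above. Any additional
 * UAPI fields of struct rio_pw_filter are left zeroed.
 *
 *	struct rio_pw_filter pwf = {
 *		.mask = 0xffff0000,
 *		.low  = 0x00010000,
 *		.high = 0x0001ffff,
 *	};
 *
 *	if (ioctl(fd, RIO_ENABLE_PORTWRITE_RANGE, &pwf))
 *		perror("RIO_ENABLE_PORTWRITE_RANGE");
 */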
1639
1640 static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
1641 {
1642         list_del(&pw_filter->md_node);
1643         list_del(&pw_filter->priv_node);
1644         kfree(pw_filter);
1645 }
1646
1647 static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
1648                                      struct rio_pw_filter *b)
1649 {
1650         if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
1651                 return 1;
1652         return 0;
1653 }
1654
1655 static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
1656                                       void __user *arg)
1657 {
1658         struct mport_dev *md = priv->md;
1659         struct rio_mport_pw_filter *pw_filter;
1660         struct rio_pw_filter filter;
1661         unsigned long flags;
1662         int ret = -EINVAL;
1663         int hdel = 0;
1664
1665         if (copy_from_user(&filter, arg, sizeof(filter)))
1666                 return -EFAULT;
1667
1668         spin_lock_irqsave(&md->pw_lock, flags);
1669         list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
1670                 if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
1671                         rio_mport_delete_pw_filter(pw_filter);
1672                         ret = 0;
1673                         break;
1674                 }
1675         }
1676
1677         if (list_empty(&md->portwrites))
1678                 hdel = 1;
1679         spin_unlock_irqrestore(&md->pw_lock, flags);
1680
1681         if (hdel) {
1682                 rio_del_mport_pw_handler(md->mport, priv->md,
1683                                          rio_mport_pw_handler);
1684                 rio_pw_enable(md->mport, 0);
1685         }
1686
1687         return ret;
1688 }
1689
1690 /*
1691  * rio_release_dev - release routine for kernel RIO device object
1692  * @dev: kernel device object associated with a RIO device structure
1693  *
1694  * Frees the RIO device structure associated with the given kernel
1695  * device object.
1696  */
1697 static void rio_release_dev(struct device *dev)
1698 {
1699         struct rio_dev *rdev;
1700
1701         rdev = to_rio_dev(dev);
1702         pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
1703         kfree(rdev);
1704 }
1705
1706
1707 static void rio_release_net(struct device *dev)
1708 {
1709         struct rio_net *net;
1710
1711         net = to_rio_net(dev);
1712         rmcd_debug(RDEV, "net_%d", net->id);
1713         kfree(net);
1714 }
1715
1716
1717 /*
1718  * rio_mport_add_riodev - creates a kernel RIO device object
1719  *
1720  * Allocates a RIO device data structure and initializes required fields based
1721  * on the device's configuration space contents.
1722  * If the device has switch capabilities, a switch-specific portion is
1723  * allocated and configured.
1724  */
1725 static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1726                                    void __user *arg)
1727 {
1728         struct mport_dev *md = priv->md;
1729         struct rio_rdev_info dev_info;
1730         struct rio_dev *rdev;
1731         struct rio_switch *rswitch = NULL;
1732         struct rio_mport *mport;
1733         size_t size;
1734         u32 rval;
1735         u32 swpinfo = 0;
1736         u16 destid;
1737         u8 hopcount;
1738         int err;
1739
1740         if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1741                 return -EFAULT;
1742
1743         rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
1744                    dev_info.comptag, dev_info.destid, dev_info.hopcount);
1745
1746         if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
1747                 rmcd_debug(RDEV, "device %s already exists", dev_info.name);
1748                 return -EEXIST;
1749         }
1750
1751         size = sizeof(*rdev);
1752         mport = md->mport;
1753         destid = dev_info.destid;
1754         hopcount = dev_info.hopcount;
1755
1756         if (rio_mport_read_config_32(mport, destid, hopcount,
1757                                      RIO_PEF_CAR, &rval))
1758                 return -EIO;
1759
1760         if (rval & RIO_PEF_SWITCH) {
1761                 rio_mport_read_config_32(mport, destid, hopcount,
1762                                          RIO_SWP_INFO_CAR, &swpinfo);
1763                 size += (RIO_GET_TOTAL_PORTS(swpinfo) *
1764                          sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
1765         }
1766
1767         rdev = kzalloc(size, GFP_KERNEL);
1768         if (rdev == NULL)
1769                 return -ENOMEM;
1770
1771         if (mport->net == NULL) {
1772                 struct rio_net *net;
1773
1774                 net = rio_alloc_net(mport);
1775                 if (!net) {
1776                         err = -ENOMEM;
1777                         rmcd_debug(RDEV, "failed to allocate net object");
1778                         goto cleanup;
1779                 }
1780
1781                 net->id = mport->id;
1782                 net->hport = mport;
1783                 dev_set_name(&net->dev, "rnet_%d", net->id);
1784                 net->dev.parent = &mport->dev;
1785                 net->dev.release = rio_release_net;
1786                 err = rio_add_net(net);
1787                 if (err) {
1788                         rmcd_debug(RDEV, "failed to register net, err=%d", err);
1789                         kfree(net);
1790                         goto cleanup;
1791                 }
1792         }
1793
1794         rdev->net = mport->net;
1795         rdev->pef = rval;
1796         rdev->swpinfo = swpinfo;
1797         rio_mport_read_config_32(mport, destid, hopcount,
1798                                  RIO_DEV_ID_CAR, &rval);
1799         rdev->did = rval >> 16;
1800         rdev->vid = rval & 0xffff;
1801         rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
1802                                  &rdev->device_rev);
1803         rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
1804                                  &rval);
1805         rdev->asm_did = rval >> 16;
1806         rdev->asm_vid = rval & 0xffff;
1807         rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
1808                                  &rval);
1809         rdev->asm_rev = rval >> 16;
1810
1811         if (rdev->pef & RIO_PEF_EXT_FEATURES) {
1812                 rdev->efptr = rval & 0xffff;
1813                 rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
1814                                                 hopcount, &rdev->phys_rmap);
1815
1816                 rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
1817                                                 hopcount, RIO_EFB_ERR_MGMNT);
1818         }
1819
1820         rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
1821                                  &rdev->src_ops);
1822         rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
1823                                  &rdev->dst_ops);
1824
1825         rdev->comp_tag = dev_info.comptag;
1826         rdev->destid = destid;
1827         /* hopcount is stored as specified by the caller, regardless of EP or SW */
1828         rdev->hopcount = hopcount;
1829
1830         if (rdev->pef & RIO_PEF_SWITCH) {
1831                 rswitch = rdev->rswitch;
1832                 rswitch->route_table = NULL;
1833         }
1834
1835         if (strlen(dev_info.name))
1836                 dev_set_name(&rdev->dev, "%s", dev_info.name);
1837         else if (rdev->pef & RIO_PEF_SWITCH)
1838                 dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
1839                              rdev->comp_tag & RIO_CTAG_UDEVID);
1840         else
1841                 dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
1842                              rdev->comp_tag & RIO_CTAG_UDEVID);
1843
1844         INIT_LIST_HEAD(&rdev->net_list);
1845         rdev->dev.parent = &mport->net->dev;
1846         rio_attach_device(rdev);
1847         rdev->dev.release = rio_release_dev;
1848
1849         if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
1850                 rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
1851                                    0, 0xffff);
1852         err = rio_add_device(rdev);
1853         if (err)
1854                 goto cleanup;
1855         rio_dev_get(rdev);
1856
1857         return 0;
1858 cleanup:
1859         kfree(rdev);
1860         return err;
1861 }
1862
1863 static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1864 {
1865         struct rio_rdev_info dev_info;
1866         struct rio_dev *rdev = NULL;
1867         struct device  *dev;
1868         struct rio_mport *mport;
1869         struct rio_net *net;
1870
1871         if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1872                 return -EFAULT;
1873
1874         mport = priv->md->mport;
1875
1876         /* If device name is specified, removal by name has priority */
1877         if (strlen(dev_info.name)) {
1878                 dev = bus_find_device_by_name(&rio_bus_type, NULL,
1879                                               dev_info.name);
1880                 if (dev)
1881                         rdev = to_rio_dev(dev);
1882         } else {
1883                 do {
1884                         rdev = rio_get_comptag(dev_info.comptag, rdev);
1885                         if (rdev && rdev->dev.parent == &mport->net->dev &&
1886                             rdev->destid == dev_info.destid &&
1887                             rdev->hopcount == dev_info.hopcount)
1888                                 break;
1889                 } while (rdev);
1890         }
1891
1892         if (!rdev) {
1893                 rmcd_debug(RDEV,
1894                         "device name:%s ct:0x%x did:0x%x hc:0x%x not found",
1895                         dev_info.name, dev_info.comptag, dev_info.destid,
1896                         dev_info.hopcount);
1897                 return -ENODEV;
1898         }
1899
1900         net = rdev->net;
1901         rio_dev_put(rdev);
1902         rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
1903
1904         if (list_empty(&net->devices)) {
1905                 rio_free_net(net);
1906                 mport->net = NULL;
1907         }
1908
1909         return 0;
1910 }
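
/*
 * Example (illustrative user-space sketch): an enumerator creating and
 * later deleting a kernel object for an endpoint it has discovered. All
 * numeric values here are assumptions for illustration only.
 *
 *	struct rio_rdev_info info = {
 *		.comptag  = 0x10005,
 *		.destid   = 0x0005,
 *		.hopcount = 0xff,	// direct access, no switch hops
 *	};
 *	info.name[0] = '\0';		// empty name: driver generates one
 *
 *	if (ioctl(fd, RIO_DEV_ADD, &info))
 *		perror("RIO_DEV_ADD");
 *	...
 *	ioctl(fd, RIO_DEV_DEL, &info);
 */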
1911
1912 /*
1913  * Mport cdev management
1914  */
1915
1916 /*
1917  * mport_cdev_open() - Open character device (mport)
1918  */
1919 static int mport_cdev_open(struct inode *inode, struct file *filp)
1920 {
1921         int ret;
1922         int minor = iminor(inode);
1923         struct mport_dev *chdev;
1924         struct mport_cdev_priv *priv;
1925
1926         /* Test for valid device */
1927         if (minor >= RIO_MAX_MPORTS) {
1928                 rmcd_error("Invalid minor device number");
1929                 return -EINVAL;
1930         }
1931
1932         chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
1933
1934         rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
1935
1936         if (atomic_read(&chdev->active) == 0)
1937                 return -ENODEV;
1938
1939         get_device(&chdev->dev);
1940
1941         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1942         if (!priv) {
1943                 put_device(&chdev->dev);
1944                 return -ENOMEM;
1945         }
1946
1947         priv->md = chdev;
1948
1949         mutex_lock(&chdev->file_mutex);
1950         list_add_tail(&priv->list, &chdev->file_list);
1951         mutex_unlock(&chdev->file_mutex);
1952
1953         INIT_LIST_HEAD(&priv->db_filters);
1954         INIT_LIST_HEAD(&priv->pw_filters);
1955         spin_lock_init(&priv->fifo_lock);
1956         init_waitqueue_head(&priv->event_rx_wait);
1957         ret = kfifo_alloc(&priv->event_fifo,
1958                           sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
1959                           GFP_KERNEL);
1960         if (ret < 0) {
1961                 dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
1962                 ret = -ENOMEM;
1963                 goto err_fifo;
1964         }
1965
1966 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1967         INIT_LIST_HEAD(&priv->async_list);
1968         INIT_LIST_HEAD(&priv->pend_list);
1969         spin_lock_init(&priv->req_lock);
1970         mutex_init(&priv->dma_lock);
1971 #endif
1972
1973         filp->private_data = priv;
1974         goto out;
1975 err_fifo:
1976         kfree(priv);
1977 out:
1978         return ret;
1979 }
1980
1981 static int mport_cdev_fasync(int fd, struct file *filp, int mode)
1982 {
1983         struct mport_cdev_priv *priv = filp->private_data;
1984
1985         return fasync_helper(fd, filp, mode, &priv->async_queue);
1986 }
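
/*
 * Example (illustrative user-space sketch): requesting SIGIO delivery on
 * incoming events via the standard fcntl(2) asynchronous-notification
 * interface, which ends up in mport_cdev_fasync() above.
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 */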
1987
1988 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1989 static void mport_cdev_release_dma(struct file *filp)
1990 {
1991         struct mport_cdev_priv *priv = filp->private_data;
1992         struct mport_dev *md;
1993         struct mport_dma_req *req, *req_next;
1994         unsigned long tmo = msecs_to_jiffies(dma_timeout);
1995         long wret;
1996         LIST_HEAD(list);
1997
1998         rmcd_debug(EXIT, "from filp=%p %s(%d)",
1999                    filp, current->comm, task_pid_nr(current));
2000
2001         if (!priv->dmach) {
2002                 rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
2003                 return;
2004         }
2005
2006         md = priv->md;
2007
2008         flush_workqueue(dma_wq);
2009
2010         spin_lock(&priv->req_lock);
2011         if (!list_empty(&priv->async_list)) {
2012                 rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
2013                            filp, current->comm, task_pid_nr(current));
2014                 list_splice_init(&priv->async_list, &list);
2015         }
2016         spin_unlock(&priv->req_lock);
2017
2018         if (!list_empty(&list)) {
2019                 rmcd_debug(EXIT, "temp list not empty");
2020                 list_for_each_entry_safe(req, req_next, &list, node) {
2021                         rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
2022                                    req->filp, req->cookie,
2023                                    completion_done(&req->req_comp) ? "yes" : "no");
2024                         list_del(&req->node);
2025                         dma_req_free(req);
2026                 }
2027         }
2028
2029         if (!list_empty(&priv->pend_list)) {
2030                 rmcd_debug(EXIT, "Free pending DMA requests for filp=%p %s(%d)",
2031                            filp, current->comm, task_pid_nr(current));
2032                 list_for_each_entry_safe(req,
2033                                          req_next, &priv->pend_list, node) {
2034                         rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
2035                                    req->filp, req->cookie,
2036                                    completion_done(&req->req_comp) ? "yes" : "no");
2037                         list_del(&req->node);
2038                         dma_req_free(req);
2039                 }
2040         }
2041
2042         put_dma_channel(priv);
2043         wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
2044
2045         if (wret <= 0) {
2046                 rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
2047                         current->comm, task_pid_nr(current), wret);
2048         }
2049
2050         spin_lock(&priv->req_lock);
2051
2052         if (!list_empty(&priv->pend_list)) {
2053                 rmcd_debug(EXIT, "ATTN: pending DMA requests, filp=%p %s(%d)",
2054                            filp, current->comm, task_pid_nr(current));
2055         }
2056
2057         spin_unlock(&priv->req_lock);
2058
2059         if (priv->dmach != priv->md->dma_chan) {
2060                 rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
2061                            filp, current->comm, task_pid_nr(current));
2062                 rio_release_dma(priv->dmach);
2063         } else {
2064                 rmcd_debug(EXIT, "Adjust default DMA channel refcount");
2065                 kref_put(&md->dma_ref, mport_release_def_dma);
2066         }
2067
2068         priv->dmach = NULL;
2069 }
2070 #else
2071 #define mport_cdev_release_dma(filp) do {} while (0)
2072 #endif
2073
2074 /*
2075  * mport_cdev_release() - Release character device
2076  */
2077 static int mport_cdev_release(struct inode *inode, struct file *filp)
2078 {
2079         struct mport_cdev_priv *priv = filp->private_data;
2080         struct mport_dev *chdev;
2081         struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
2082         struct rio_mport_db_filter *db_filter, *db_filter_next;
2083         struct rio_mport_mapping *map, *_map;
2084         unsigned long flags;
2085
2086         rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2087
2088         chdev = priv->md;
2089         mport_cdev_release_dma(filp);
2090
2091         priv->event_mask = 0;
2092
2093         spin_lock_irqsave(&chdev->pw_lock, flags);
2094         if (!list_empty(&priv->pw_filters)) {
2095                 list_for_each_entry_safe(pw_filter, pw_filter_next,
2096                                          &priv->pw_filters, priv_node)
2097                         rio_mport_delete_pw_filter(pw_filter);
2098         }
2099         spin_unlock_irqrestore(&chdev->pw_lock, flags);
2100
2101         spin_lock_irqsave(&chdev->db_lock, flags);
2102         list_for_each_entry_safe(db_filter, db_filter_next,
2103                                  &priv->db_filters, priv_node) {
2104                 rio_mport_delete_db_filter(db_filter);
2105         }
2106         spin_unlock_irqrestore(&chdev->db_lock, flags);
2107
2108         kfifo_free(&priv->event_fifo);
2109
2110         mutex_lock(&chdev->buf_mutex);
2111         list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2112                 if (map->filp == filp) {
2113                         rmcd_debug(EXIT, "release mapping %p filp=%p",
2114                                    map->virt_addr, filp);
2115                         kref_put(&map->ref, mport_release_mapping);
2116                 }
2117         }
2118         mutex_unlock(&chdev->buf_mutex);
2119
2120         mport_cdev_fasync(-1, filp, 0);
2121         filp->private_data = NULL;
2122         mutex_lock(&chdev->file_mutex);
2123         list_del(&priv->list);
2124         mutex_unlock(&chdev->file_mutex);
2125         put_device(&chdev->dev);
2126         kfree(priv);
2127         return 0;
2128 }
2129
2130 /*
2131  * mport_cdev_ioctl() - IOCTLs for character device
2132  */
2133 static long mport_cdev_ioctl(struct file *filp,
2134                 unsigned int cmd, unsigned long arg)
2135 {
2136         int err = -EINVAL;
2137         struct mport_cdev_priv *data = filp->private_data;
2138         struct mport_dev *md = data->md;
2139
2140         if (atomic_read(&md->active) == 0)
2141                 return -ENODEV;
2142
2143         switch (cmd) {
2144         case RIO_MPORT_MAINT_READ_LOCAL:
2145                 return rio_mport_maint_rd(data, (void __user *)arg, 1);
2146         case RIO_MPORT_MAINT_WRITE_LOCAL:
2147                 return rio_mport_maint_wr(data, (void __user *)arg, 1);
2148         case RIO_MPORT_MAINT_READ_REMOTE:
2149                 return rio_mport_maint_rd(data, (void __user *)arg, 0);
2150         case RIO_MPORT_MAINT_WRITE_REMOTE:
2151                 return rio_mport_maint_wr(data, (void __user *)arg, 0);
2152         case RIO_MPORT_MAINT_HDID_SET:
2153                 return maint_hdid_set(data, (void __user *)arg);
2154         case RIO_MPORT_MAINT_COMPTAG_SET:
2155                 return maint_comptag_set(data, (void __user *)arg);
2156         case RIO_MPORT_MAINT_PORT_IDX_GET:
2157                 return maint_port_idx_get(data, (void __user *)arg);
2158         case RIO_MPORT_GET_PROPERTIES:
2159                 md->properties.hdid = md->mport->host_deviceid;
2160                 if (copy_to_user((void __user *)arg, &(md->properties),
2161                                  sizeof(md->properties)))
2162                         return -EFAULT;
2163                 return 0;
2164         case RIO_ENABLE_DOORBELL_RANGE:
2165                 return rio_mport_add_db_filter(data, (void __user *)arg);
2166         case RIO_DISABLE_DOORBELL_RANGE:
2167                 return rio_mport_remove_db_filter(data, (void __user *)arg);
2168         case RIO_ENABLE_PORTWRITE_RANGE:
2169                 return rio_mport_add_pw_filter(data, (void __user *)arg);
2170         case RIO_DISABLE_PORTWRITE_RANGE:
2171                 return rio_mport_remove_pw_filter(data, (void __user *)arg);
2172         case RIO_SET_EVENT_MASK:
2173                 data->event_mask = (u32)arg;
2174                 return 0;
2175         case RIO_GET_EVENT_MASK:
2176                 if (copy_to_user((void __user *)arg, &data->event_mask,
2177                                     sizeof(u32)))
2178                         return -EFAULT;
2179                 return 0;
2180         case RIO_MAP_OUTBOUND:
2181                 return rio_mport_obw_map(filp, (void __user *)arg);
2182         case RIO_MAP_INBOUND:
2183                 return rio_mport_map_inbound(filp, (void __user *)arg);
2184         case RIO_UNMAP_OUTBOUND:
2185                 return rio_mport_obw_free(filp, (void __user *)arg);
2186         case RIO_UNMAP_INBOUND:
2187                 return rio_mport_inbound_free(filp, (void __user *)arg);
2188         case RIO_ALLOC_DMA:
2189                 return rio_mport_alloc_dma(filp, (void __user *)arg);
2190         case RIO_FREE_DMA:
2191                 return rio_mport_free_dma(filp, (void __user *)arg);
2192         case RIO_WAIT_FOR_ASYNC:
2193                 return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2194         case RIO_TRANSFER:
2195                 return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2196         case RIO_DEV_ADD:
2197                 return rio_mport_add_riodev(data, (void __user *)arg);
2198         case RIO_DEV_DEL:
2199                 return rio_mport_del_riodev(data, (void __user *)arg);
2200         default:
2201                 break;
2202         }
2203
2204         return err;
2205 }
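
/*
 * Example (illustrative user-space sketch): querying mport properties and
 * then restricting event delivery to doorbells. Using RIO_DOORBELL as the
 * event-mask bit mirrors its use as rio_event.header in this driver and
 * is an assumption about the UAPI header.
 *
 *	struct rio_mport_properties props;
 *
 *	if (ioctl(fd, RIO_MPORT_GET_PROPERTIES, &props) == 0)
 *		printf("mport %d, hdid %d\n", props.id, props.hdid);
 *
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 */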
2206
2207 /*
2208  * mport_release_mapping - free mapping resources and info structure
2209  * @ref: a pointer to the kref within struct rio_mport_mapping
2210  *
2211  * NOTE: Shall be called while holding buf_mutex.
2212  */
2213 static void mport_release_mapping(struct kref *ref)
2214 {
2215         struct rio_mport_mapping *map =
2216                         container_of(ref, struct rio_mport_mapping, ref);
2217         struct rio_mport *mport = map->md->mport;
2218
2219         rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2220                    map->dir, map->virt_addr,
2221                    &map->phys_addr, mport->name);
2222
2223         list_del(&map->node);
2224
2225         switch (map->dir) {
2226         case MAP_INBOUND:
2227                 rio_unmap_inb_region(mport, map->phys_addr);
                     /* fall through - the inbound window is backed by a
                      * coherent DMA buffer that must be freed as well
                      */
2228         case MAP_DMA:
2229                 dma_free_coherent(mport->dev.parent, map->size,
2230                                   map->virt_addr, map->phys_addr);
2231                 break;
2232         case MAP_OUTBOUND:
2233                 rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2234                 break;
2235         }
2236         kfree(map);
2237 }
2238
2239 static void mport_mm_open(struct vm_area_struct *vma)
2240 {
2241         struct rio_mport_mapping *map = vma->vm_private_data;
2242
2243         rmcd_debug(MMAP, "%pad", &map->phys_addr);
2244         kref_get(&map->ref);
2245 }
2246
2247 static void mport_mm_close(struct vm_area_struct *vma)
2248 {
2249         struct rio_mport_mapping *map = vma->vm_private_data;
2250
2251         rmcd_debug(MMAP, "%pad", &map->phys_addr);
2252         mutex_lock(&map->md->buf_mutex);
2253         kref_put(&map->ref, mport_release_mapping);
2254         mutex_unlock(&map->md->buf_mutex);
2255 }
2256
2257 static const struct vm_operations_struct vm_ops = {
2258         .open = mport_mm_open,
2259         .close = mport_mm_close,
2260 };
2261
2262 static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2263 {
2264         struct mport_cdev_priv *priv = filp->private_data;
2265         struct mport_dev *md;
2266         size_t size = vma->vm_end - vma->vm_start;
2267         dma_addr_t baddr;
2268         unsigned long offset;
2269         int found = 0, ret;
2270         struct rio_mport_mapping *map;
2271
2272         rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2273                    (unsigned int)size, vma->vm_pgoff);
2274
2275         md = priv->md;
2276         baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2277
2278         mutex_lock(&md->buf_mutex);
2279         list_for_each_entry(map, &md->mappings, node) {
2280                 if (baddr >= map->phys_addr &&
2281                     baddr < (map->phys_addr + map->size)) {
2282                         found = 1;
2283                         break;
2284                 }
2285         }
2286         mutex_unlock(&md->buf_mutex);
2287
2288         if (!found)
2289                 return -ENOMEM;
2290
2291         offset = baddr - map->phys_addr;
2292
2293         if (size + offset > map->size)
2294                 return -EINVAL;
2295
2296         vma->vm_pgoff = offset >> PAGE_SHIFT;
2297         rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2298
2299         if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
2300                 ret = dma_mmap_coherent(md->mport->dev.parent, vma,
2301                                 map->virt_addr, map->phys_addr, map->size);
2302         else if (map->dir == MAP_OUTBOUND) {
2303                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2304                 ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2305         } else {
2306                 rmcd_error("Attempt to mmap unsupported mapping type");
2307                 ret = -EIO;
2308         }
2309
2310         if (!ret) {
2311                 vma->vm_private_data = map;
2312                 vma->vm_ops = &vm_ops;
2313                 mport_mm_open(vma);
2314         } else {
2315                 rmcd_error("MMAP exit with err=%d", ret);
2316         }
2317
2318         return ret;
2319 }
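
/*
 * Example (illustrative user-space sketch): allocating a DMA buffer and
 * mapping it. The mmap offset must be the physical address of an existing
 * mapping (see the search over md->mappings above), so the handle filled
 * in by RIO_ALLOC_DMA is passed as the file offset. Struct field names
 * are assumptions based on the UAPI header.
 *
 *	struct rio_dma_mem dbuf = { .length = 2 * 1024 * 1024 };
 *
 *	if (ioctl(fd, RIO_ALLOC_DMA, &dbuf))
 *		perror("RIO_ALLOC_DMA");
 *	void *p = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, dbuf.dma_handle);
 */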
2320
2321 static unsigned int mport_cdev_poll(struct file *filp, poll_table *wait)
2322 {
2323         struct mport_cdev_priv *priv = filp->private_data;
2324
2325         poll_wait(filp, &priv->event_rx_wait, wait);
2326         if (kfifo_len(&priv->event_fifo))
2327                 return POLLIN | POLLRDNORM;
2328
2329         return 0;
2330 }
2331
2332 static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2333                         loff_t *ppos)
2334 {
2335         struct mport_cdev_priv *priv = filp->private_data;
2336         int copied;
2337         ssize_t ret;
2338
2339         if (!count)
2340                 return 0;
2341
2342         if (kfifo_is_empty(&priv->event_fifo) &&
2343             (filp->f_flags & O_NONBLOCK))
2344                 return -EAGAIN;
2345
2346         if (count % sizeof(struct rio_event))
2347                 return -EINVAL;
2348
2349         ret = wait_event_interruptible(priv->event_rx_wait,
2350                                         kfifo_len(&priv->event_fifo) != 0);
2351         if (ret)
2352                 return ret;
2353
2354         while (ret < count) {
2355                 if (kfifo_to_user(&priv->event_fifo, buf,
2356                       sizeof(struct rio_event), &copied))
2357                         return -EFAULT;
                     /* FIFO drained before the requested count was reached:
                      * return what was copied instead of spinning
                      */
                     if (!copied)
                             break;
2358                 ret += copied;
2359                 buf += copied;
2360         }
2361
2362         return ret;
2363 }
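
/*
 * Example (illustrative user-space sketch): waiting for events with
 * poll(2) and draining the queue one event at a time. Assumes the file
 * was opened with O_NONBLOCK so read() stops once the FIFO is empty;
 * reads must be a multiple of sizeof(struct rio_event).
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	struct rio_event ev;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *			if (ev.header == RIO_DOORBELL)
 *				printf("DB 0x%04x from 0x%04x\n",
 *				       ev.u.doorbell.payload,
 *				       ev.u.doorbell.rioid);
 *		}
 *	}
 */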
2364
2365 static ssize_t mport_write(struct file *filp, const char __user *buf,
2366                          size_t count, loff_t *ppos)
2367 {
2368         struct mport_cdev_priv *priv = filp->private_data;
2369         struct rio_mport *mport = priv->md->mport;
2370         struct rio_event event;
2371         int len, ret;
2372
2373         if (!count)
2374                 return 0;
2375
2376         if (count % sizeof(event))
2377                 return -EINVAL;
2378
2379         len = 0;
2380         while ((count - len) >= (int)sizeof(event)) {
2381                 if (copy_from_user(&event, buf, sizeof(event)))
2382                         return -EFAULT;
2383
2384                 if (event.header != RIO_DOORBELL)
2385                         return -EINVAL;
2386
2387                 ret = rio_mport_send_doorbell(mport,
2388                                               event.u.doorbell.rioid,
2389                                               event.u.doorbell.payload);
2390                 if (ret < 0)
2391                         return ret;
2392
2393                 len += sizeof(event);
2394                 buf += sizeof(event);
2395         }
2396
2397         return len;
2398 }
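
/*
 * Example (illustrative user-space sketch): sending a doorbell to device
 * 0x0005 by writing a single RIO_DOORBELL event, the only event type
 * accepted by mport_write() above.
 *
 *	struct rio_event ev = {
 *		.header = RIO_DOORBELL,
 *		.u.doorbell.rioid = 0x0005,
 *		.u.doorbell.payload = 0x1001,
 *	};
 *
 *	if (write(fd, &ev, sizeof(ev)) != sizeof(ev))
 *		perror("doorbell write");
 */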
2399
2400 static const struct file_operations mport_fops = {
2401         .owner          = THIS_MODULE,
2402         .open           = mport_cdev_open,
2403         .release        = mport_cdev_release,
2404         .poll           = mport_cdev_poll,
2405         .read           = mport_read,
2406         .write          = mport_write,
2407         .mmap           = mport_cdev_mmap,
2408         .fasync         = mport_cdev_fasync,
2409         .unlocked_ioctl = mport_cdev_ioctl
2410 };
2411
2412 /*
2413  * Character device management
2414  */
2415
2416 static void mport_device_release(struct device *dev)
2417 {
2418         struct mport_dev *md;
2419
2420         rmcd_debug(EXIT, "%s", dev_name(dev));
2421         md = container_of(dev, struct mport_dev, dev);
2422         kfree(md);
2423 }
2424
2425 /*
2426  * mport_cdev_add() - Create mport_dev from rio_mport
2427  * @mport:      RapidIO master port
2428  */
2429 static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
2430 {
2431         int ret = 0;
2432         struct mport_dev *md;
2433         struct rio_mport_attr attr;
2434
2435         md = kzalloc(sizeof(*md), GFP_KERNEL);
2436         if (!md) {
2437                 rmcd_error("Unable to allocate a device object");
2438                 return NULL;
2439         }
2440
2441         md->mport = mport;
2442         mutex_init(&md->buf_mutex);
2443         mutex_init(&md->file_mutex);
2444         INIT_LIST_HEAD(&md->file_list);
2445
2446         device_initialize(&md->dev);
2447         md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
2448         md->dev.class = dev_class;
2449         md->dev.parent = &mport->dev;
2450         md->dev.release = mport_device_release;
2451         dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
2452         atomic_set(&md->active, 1);
2453
2454         cdev_init(&md->cdev, &mport_fops);
2455         md->cdev.owner = THIS_MODULE;
2456
2457         ret = cdev_device_add(&md->cdev, &md->dev);
2458         if (ret) {
2459                 rmcd_error("Failed to register mport %d (err=%d)",
2460                        mport->id, ret);
2461                 goto err_cdev;
2462         }
2463
2464         INIT_LIST_HEAD(&md->doorbells);
2465         spin_lock_init(&md->db_lock);
2466         INIT_LIST_HEAD(&md->portwrites);
2467         spin_lock_init(&md->pw_lock);
2468         INIT_LIST_HEAD(&md->mappings);
2469
2470         md->properties.id = mport->id;
2471         md->properties.sys_size = mport->sys_size;
2472         md->properties.hdid = mport->host_deviceid;
2473         md->properties.index = mport->index;
2474
2475         /* The transfer_mode property will be returned through the mport
2476          * query interface.
2477          */
2478 #ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
2479         md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
2480 #else
2481         md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
2482 #endif
2483         ret = rio_query_mport(mport, &attr);
2484         if (!ret) {
2485                 md->properties.flags = attr.flags;
2486                 md->properties.link_speed = attr.link_speed;
2487                 md->properties.link_width = attr.link_width;
2488                 md->properties.dma_max_sge = attr.dma_max_sge;
2489                 md->properties.dma_max_size = attr.dma_max_size;
2490                 md->properties.dma_align = attr.dma_align;
2491                 md->properties.cap_sys_size = 0;
2492                 md->properties.cap_transfer_mode = 0;
2493                 md->properties.cap_addr_size = 0;
2494         } else {
2495                 pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
2496                         mport->name, MAJOR(dev_number), mport->id);
             }
2497
2498         mutex_lock(&mport_devs_lock);
2499         list_add_tail(&md->node, &mport_devs);
2500         mutex_unlock(&mport_devs_lock);
2501
2502         pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
2503                 mport->name, MAJOR(dev_number), mport->id);
2504
2505         return md;
2506
2507 err_cdev:
2508         put_device(&md->dev);
2509         return NULL;
2510 }
2511
2512 /*
2513  * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
2514  *                              associated DMA channels.
2515  */
2516 static void mport_cdev_terminate_dma(struct mport_dev *md)
2517 {
2518 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
2519         struct mport_cdev_priv *client;
2520
2521         rmcd_debug(DMA, "%s", dev_name(&md->dev));
2522
2523         mutex_lock(&md->file_mutex);
2524         list_for_each_entry(client, &md->file_list, list) {
2525                 if (client->dmach) {
2526                         dmaengine_terminate_all(client->dmach);
2527                         rio_release_dma(client->dmach);
2528                 }
2529         }
2530         mutex_unlock(&md->file_mutex);
2531
2532         if (md->dma_chan) {
2533                 dmaengine_terminate_all(md->dma_chan);
2534                 rio_release_dma(md->dma_chan);
2535                 md->dma_chan = NULL;
2536         }
2537 #endif
2538 }
2539
2540
2541 /*
2542  * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2543  *                            mport_cdev files.
2544  */
2545 static int mport_cdev_kill_fasync(struct mport_dev *md)
2546 {
2547         unsigned int files = 0;
2548         struct mport_cdev_priv *client;
2549
2550         mutex_lock(&md->file_mutex);
2551         list_for_each_entry(client, &md->file_list, list) {
2552                 if (client->async_queue)
2553                         kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2554                 files++;
2555         }
2556         mutex_unlock(&md->file_mutex);
2557         return files;
2558 }
2559
2560 /*
2561  * mport_cdev_remove() - Remove mport character device
2562  * @dev:        Mport device to remove
2563  */
2564 static void mport_cdev_remove(struct mport_dev *md)
2565 {
2566         struct rio_mport_mapping *map, *_map;
2567
2568         rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
2569         atomic_set(&md->active, 0);
2570         mport_cdev_terminate_dma(md);
2571         rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
2572         cdev_device_del(&md->cdev, &md->dev);
2573         mport_cdev_kill_fasync(md);
2574
2575         flush_workqueue(dma_wq);
2576
2577         /* TODO: do we need to give clients some time to close file
2578          * descriptors? Simple wait for XX, or kref?
2579          */
2580
2581         /*
2582          * Release DMA buffers allocated for the mport device.
2583          * Disable the associated inbound RapidIO request mapping if applicable.
2584          */
2585         mutex_lock(&md->buf_mutex);
2586         list_for_each_entry_safe(map, _map, &md->mappings, node) {
2587                 kref_put(&map->ref, mport_release_mapping);
2588         }
2589         mutex_unlock(&md->buf_mutex);
2590
2591         if (!list_empty(&md->mappings))
2592                 rmcd_warn("%s has pending mappings on removal",
2593                           md->mport->name);
2594
2595         rio_release_inb_dbell(md->mport, 0, 0x0fff);
2596
2597         put_device(&md->dev);
2598 }
2599
2600 /*
2601  * RIO rio_mport_interface driver
2602  */
2603
2604 /*
2605  * mport_add_mport() - Add rio_mport from LDM device struct
2606  * @dev:                Linux device model struct
2607  * @class_intf: Linux class_interface
2608  */
2609 static int mport_add_mport(struct device *dev,
2610                 struct class_interface *class_intf)
2611 {
2612         struct rio_mport *mport = NULL;
2613         struct mport_dev *chdev = NULL;
2614
2615         mport = to_rio_mport(dev);
2616         if (!mport)
2617                 return -ENODEV;
2618
2619         chdev = mport_cdev_add(mport);
2620         if (!chdev)
2621                 return -ENODEV;
2622
2623         return 0;
2624 }
2625
2626 /*
2627  * mport_remove_mport() - Remove rio_mport from global list
2628  * TODO remove device from global mport_dev list
2629  */
2630 static void mport_remove_mport(struct device *dev,
2631                 struct class_interface *class_intf)
2632 {
2633         struct rio_mport *mport = NULL;
2634         struct mport_dev *chdev;
2635         int found = 0;
2636
2637         mport = to_rio_mport(dev);
2638         rmcd_debug(EXIT, "Remove %s", mport->name);
2639
2640         mutex_lock(&mport_devs_lock);
2641         list_for_each_entry(chdev, &mport_devs, node) {
2642                 if (chdev->mport->id == mport->id) {
2643                         atomic_set(&chdev->active, 0);
2644                         list_del(&chdev->node);
2645                         found = 1;
2646                         break;
2647                 }
2648         }
2649         mutex_unlock(&mport_devs_lock);
2650
2651         if (found)
2652                 mport_cdev_remove(chdev);
2653 }
2654
2655 /* the rio_mport_interface is used to handle local mport devices */
2656 static struct class_interface rio_mport_interface __refdata = {
2657         .class          = &rio_mport_class,
2658         .add_dev        = mport_add_mport,
2659         .remove_dev     = mport_remove_mport,
2660 };
2661
2662 /*
2663  * Linux kernel module
2664  */
2665
2666 /*
2667  * mport_init - Driver module loading
2668  */
2669 static int __init mport_init(void)
2670 {
2671         int ret;
2672
2673         /* Create device class needed by udev */
2674         dev_class = class_create(THIS_MODULE, DRV_NAME);
2675         if (IS_ERR(dev_class)) {
2676                 rmcd_error("Unable to create " DRV_NAME " class");
2677                 return PTR_ERR(dev_class);
2678         }
2679
2680         ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2681         if (ret < 0)
2682                 goto err_chr;
2683
2684         rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
2685
2686         /* Register to rio_mport_interface */
2687         ret = class_interface_register(&rio_mport_interface);
2688         if (ret) {
2689                 rmcd_error("class_interface_register() failed, err=%d", ret);
2690                 goto err_cli;
2691         }
2692
2693         dma_wq = create_singlethread_workqueue("dma_wq");
2694         if (!dma_wq) {
2695                 rmcd_error("failed to create DMA work queue");
2696                 ret = -ENOMEM;
2697                 goto err_wq;
2698         }
2699
2700         return 0;
2701
2702 err_wq:
2703         class_interface_unregister(&rio_mport_interface);
2704 err_cli:
2705         unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2706 err_chr:
2707         class_destroy(dev_class);
2708         return ret;
2709 }
2710
2711 /*
2712  * mport_exit - Driver module unloading
2713  */
2714 static void __exit mport_exit(void)
2715 {
2716         class_interface_unregister(&rio_mport_interface);
2717         class_destroy(dev_class);
2718         unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2719         destroy_workqueue(dma_wq);
2720 }
2721
2722 module_init(mport_init);
2723 module_exit(mport_exit);