// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RapidIO mport character device
 *
 * Copyright 2014-2015 Integrated Device Technology, Inc.
 *    Alexandre Bounine <alexandre.bounine@idt.com>
 * Copyright 2014-2015 Prodrive Technologies
 *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
 *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
 * Copyright (C) 2014 Texas Instruments Incorporated
 *    Aurelien Jacquiot <a-jacquiot@ti.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/kfifo.h>

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>

#include <linux/dma-mapping.h>
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
#include <linux/dmaengine.h>
#endif

#include <linux/rio.h>
#include <linux/rio_ids.h>
#include <linux/rio_drv.h>
#include <linux/rio_mport_cdev.h>
#define DRV_NAME	"rio_mport"
#define DRV_PREFIX	DRV_NAME ": "
#define DEV_NAME	"rio_mport"
#define DRV_VERSION	"1.0.0"
/* Debug output filtering masks */
enum {
	DBG_NONE	= 0,
	DBG_INIT	= BIT(0), /* driver init */
	DBG_EXIT	= BIT(1), /* driver exit */
	DBG_MPORT	= BIT(2), /* mport add/remove */
	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
	DBG_DMA		= BIT(4), /* DMA transfer messages */
	DBG_MMAP	= BIT(5), /* mapping messages */
	DBG_IBW		= BIT(6), /* inbound window */
	DBG_EVENT	= BIT(7), /* event handling messages */
	DBG_OBW		= BIT(8), /* outbound window messages */
	DBG_DBELL	= BIT(9), /* doorbell messages */
	DBG_ALL		= ~0,
};
#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif
#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
MODULE_DESCRIPTION("RapidIO mport character device driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static int dma_timeout = 3000; /* DMA transfer timeout in msec */
module_param(dma_timeout, int, S_IRUGO);
MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");

static u32 dbg_level = DBG_NONE;
module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
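
/*
 * Example (illustrative; the module name rio_mport_cdev and the exact mask
 * values are assumptions): the mask can be set at load time or, since the
 * parameter is group/user writable, changed at run time through sysfs.
 * rmcd_debug() only emits output when the driver is built with DEBUG set
 * (otherwise it compiles down to no_printk()).
 *
 *	modprobe rio_mport_cdev dbg_level=0x30   # DBG_DMA | DBG_MMAP
 *	echo 0x3ff > /sys/module/rio_mport_cdev/parameters/dbg_level
 */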
/*
 * An internal DMA coherent buffer
 */
struct mport_dma_buf {
	void		*ib_base;
	dma_addr_t	ib_phys;
	u32		ib_size;
	u64		ib_rio_base;
	bool		ib_map;
	struct file	*filp;
};
/*
 * Internal memory mapping structure
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};

struct rio_mport_mapping {
	struct list_head node;
	struct mport_dev *md;
	enum rio_mport_map_dir dir;
	u16 rioid;
	u64 rio_addr;
	dma_addr_t phys_addr; /* for mmap */
	void *virt_addr; /* kernel address, for dma_free_coherent */
	u64 size;
	struct kref ref; /* refcount of vmas sharing the mapping */
	struct file *filp;
};
struct rio_mport_dma_map {
	int valid;
	u64 length;
	void *vaddr;
	dma_addr_t paddr;
};

#define MPORT_MAX_DMA_BUFS	16
#define MPORT_EVENT_DEPTH	10
/*
 * struct mport_dev - driver-specific structure that represents mport device
 * @active:	mport device status flag
 * @node:	list node to maintain list of registered mports
 * @cdev:	character device
 * @dev:	associated device object
 * @mport:	associated subsystem's master port device object
 * @buf_mutex:	lock for buffer handling
 * @file_mutex:	lock for open files list
 * @file_list:	list of open files on given mport
 * @properties:	properties of this mport
 * @doorbells:	list of inbound doorbell filters
 * @db_lock:	lock for doorbell filter list
 * @portwrites:	queue of inbound portwrites
 * @pw_lock:	lock for port write queue
 * @mappings:	queue for memory mappings
 * @dma_chan:	default DMA channel associated with this device
 * @dma_ref:	refcount for the default DMA channel
 * @comp:	completion signaled when the default DMA channel is released
 */
struct mport_dev {
	atomic_t		active;
	struct list_head	node;
	struct cdev		cdev;
	struct device		dev;
	struct rio_mport	*mport;
	struct mutex		buf_mutex;
	struct mutex		file_mutex;
	struct list_head	file_list;
	struct rio_mport_properties	properties;
	struct list_head	doorbells;
	spinlock_t		db_lock;
	struct list_head	portwrites;
	spinlock_t		pw_lock;
	struct list_head	mappings;
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dma_chan;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * struct mport_cdev_priv - data structure specific to individual file object
 *                          associated with an open device
 * @md:		master port character device object
 * @async_queue: asynchronous notification queue
 * @list:	file objects tracking list
 * @db_filters:	inbound doorbell filters for this descriptor
 * @pw_filters:	portwrite filters for this descriptor
 * @event_fifo:	event fifo for this descriptor
 * @event_rx_wait: wait queue for this descriptor
 * @fifo_lock:	lock for event_fifo
 * @event_mask:	event mask for this descriptor
 * @dmach:	DMA engine channel allocated for specific file object
 * @async_list:	list of pending asynchronous DMA requests
 * @req_lock:	lock for async_list
 * @dma_lock:	lock serializing DMA channel setup
 * @dma_ref:	refcount for the DMA channel
 * @comp:	completion signaled when the DMA channel is released
 */
struct mport_cdev_priv {
	struct mport_dev	*md;
	struct fasync_struct	*async_queue;
	struct list_head	list;
	struct list_head	db_filters;
	struct list_head	pw_filters;
	struct kfifo		event_fifo;
	wait_queue_head_t	event_rx_wait;
	spinlock_t		fifo_lock;
	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct dma_chan		*dmach;
	struct list_head	async_list;
	spinlock_t		req_lock;
	struct mutex		dma_lock;
	struct kref		dma_ref;
	struct completion	comp;
#endif
};
/*
 * struct rio_mport_pw_filter - structure to describe a portwrite filter
 * @md_node:	node in mport device's list
 * @priv_node:	node in private file object's list
 * @priv:	reference to private data
 * @filter:	actual portwrite filter
 */
struct rio_mport_pw_filter {
	struct list_head md_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_pw_filter filter;
};
/*
 * struct rio_mport_db_filter - structure to describe a doorbell filter
 * @data_node:	node in mport device's doorbell list
 * @priv_node:	node in private file object's list
 * @priv:	reference to private data
 * @filter:	actual doorbell filter
 */
struct rio_mport_db_filter {
	struct list_head data_node;
	struct list_head priv_node;
	struct mport_cdev_priv *priv;
	struct rio_doorbell_filter filter;
};
static LIST_HEAD(mport_devs);
static DEFINE_MUTEX(mport_devs_lock);

#if (0) /* used by commented out portion of poll function : FIXME */
static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
#endif

static struct class *dev_class;
static dev_t dev_number;
static void mport_release_mapping(struct kref *ref);
static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length/sizeof(u32);
	offset = maint_io.offset;

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_read_config_32(mport,
				offset, &buffer[i]);
		else
			ret = rio_mport_read_config_32(mport, maint_io.rioid,
				maint_io.hopcount, offset, &buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
				   buffer, maint_io.length)))
		ret = -EFAULT;
out:
	vfree(buffer);
	return ret;
}
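
/*
 * User-space sketch (illustrative, not part of the driver; the register
 * offset and destination ID are assumptions): read one 32-bit maintenance
 * register from a remote device through this handler.
 *
 *	__u32 val;
 *	struct rio_mport_maint_io io = {
 *		.rioid = destid, .hopcount = hc,
 *		.offset = 0x00,			// e.g. device ID CAR
 *		.length = sizeof(val),
 *		.buffer = (__u64)(uintptr_t)&val,
 *	};
 *	if (ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io) < 0)
 *		perror("RIO_MPORT_MAINT_READ_REMOTE");
 */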
static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
			      int local)
{
	struct rio_mport *mport = priv->md->mport;
	struct rio_mport_maint_io maint_io;
	u32 *buffer;
	u32 offset;
	size_t length;
	int ret = -EINVAL, i;

	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
		return -EFAULT;

	if ((maint_io.offset % 4) ||
	    (maint_io.length == 0) || (maint_io.length % 4) ||
	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
		return -EINVAL;

	buffer = vmalloc(maint_io.length);
	if (buffer == NULL)
		return -ENOMEM;
	length = maint_io.length;

	if (unlikely(copy_from_user(buffer,
			(void __user *)(uintptr_t)maint_io.buffer, length))) {
		ret = -EFAULT;
		goto out;
	}

	offset = maint_io.offset;
	length /= sizeof(u32);

	for (i = 0; i < length; i++) {
		if (local)
			ret = __rio_local_write_config_32(mport,
							  offset, buffer[i]);
		else
			ret = rio_mport_write_config_32(mport, maint_io.rioid,
							maint_io.hopcount,
							offset, buffer[i]);
		if (ret)
			goto out;

		offset += 4;
	}

out:
	vfree(buffer);
	return ret;
}
/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
				  u16 rioid, u64 raddr, u32 size,
				  dma_addr_t *paddr)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
	if (ret < 0)
		goto err_map_outb;

	map->dir = MAP_OUTBOUND;
	map->rioid = rioid;
	map->rio_addr = raddr;
	map->size = size;
	map->phys_addr = *paddr;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	list_add_tail(&map->node, &md->mappings);
	return 0;
err_map_outb:
	kfree(map);
	return ret;
}
static int
rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
			       u16 rioid, u64 raddr, u32 size,
			       dma_addr_t *paddr)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_OUTBOUND)
			continue;
		if (rioid == map->rioid &&
		    raddr == map->rio_addr && size == map->size) {
			*paddr = map->phys_addr;
			err = 0;
			break;
		} else if (rioid == map->rioid &&
			   raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}

	/* If not found, create new */
	if (err == -ENOMEM)
		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
							size, paddr);
	mutex_unlock(&md->buf_mutex);
	return err;
}
static int rio_mport_obw_map(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *data = priv->md;
	struct rio_mmap map;
	dma_addr_t paddr;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
		   map.rioid, map.rio_addr, map.length);

	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
					     map.rio_addr, map.length, &paddr);
	if (ret < 0) {
		rmcd_error("Failed to set OBW err= %d", ret);
		return ret;
	}

	map.handle = paddr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
		return -EFAULT;

	return 0;
}
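
/*
 * User-space sketch (illustrative; the remote address and window size are
 * assumptions, error handling omitted): map an outbound window to a remote
 * device and access it through mmap(), using the returned handle as the
 * file offset.
 *
 *	struct rio_mmap map = {
 *		.rioid = destid,
 *		.rio_addr = 0x10000000,		// assumed remote address
 *		.length = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_OUTBOUND, &map);
 *	void *ptr = mmap(NULL, map.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.handle);
 *	...
 *	munmap(ptr, map.length);
 *	ioctl(fd, RIO_UNMAP_OUTBOUND, &map.handle);
 */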
/*
 * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
 * @filp: file pointer associated with the ioctl call
 * @arg: buffer handle returned by the mapping routine
 */
static int rio_mport_obw_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	if (!md->mport->ops->unmap_outb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	rmcd_debug(OBW, "h=0x%llx", handle);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
/*
 * maint_hdid_set() - Set the host Device ID
 * @priv: driver private data
 * @arg: Device ID
 */
static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u16 hdid;

	if (copy_from_user(&hdid, arg, sizeof(hdid)))
		return -EFAULT;

	md->mport->host_deviceid = hdid;
	md->properties.hdid = hdid;
	rio_local_set_device_id(md->mport, hdid);

	rmcd_debug(MPORT, "Set host device Id to %d", hdid);

	return 0;
}
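
/*
 * Usage sketch (illustrative): the argument is a bare 16-bit destination ID
 * passed by reference, e.g.
 *
 *	uint16_t hdid = 0;
 *	ioctl(fd, RIO_MPORT_MAINT_HDID_SET, &hdid);
 */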
/*
 * maint_comptag_set() - Set the host Component Tag
 * @priv: driver private data
 * @arg: Component Tag
 */
static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 comptag;

	if (copy_from_user(&comptag, arg, sizeof(comptag)))
		return -EFAULT;

	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);

	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);

	return 0;
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE

struct mport_dma_req {
	struct kref refcount;
	struct list_head node;
	struct file *filp;
	struct mport_cdev_priv *priv;
	enum rio_transfer_sync sync;
	struct sg_table sgt;
	struct page **page_list;
	unsigned int nr_pages;
	struct rio_mport_mapping *map;
	struct dma_chan *dmach;
	enum dma_data_direction dir;
	dma_cookie_t cookie;
	enum dma_status	status;
	struct completion req_comp;
};
static void mport_release_def_dma(struct kref *dma_ref)
{
	struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
	rio_release_dma(md->dma_chan);
	md->dma_chan = NULL;
}
static void mport_release_dma(struct kref *dma_ref)
{
	struct mport_cdev_priv *priv =
			container_of(dma_ref, struct mport_cdev_priv, dma_ref);

	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
	complete(&priv->comp);
}
static void dma_req_free(struct kref *ref)
{
	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
			refcount);
	struct mport_cdev_priv *priv = req->priv;
	unsigned int i;

	dma_unmap_sg(req->dmach->device->dev,
		     req->sgt.sgl, req->sgt.nents, req->dir);
	sg_free_table(&req->sgt);
	if (req->page_list) {
		for (i = 0; i < req->nr_pages; i++)
			put_page(req->page_list[i]);
		kfree(req->page_list);
	}

	if (req->map) {
		mutex_lock(&req->map->md->buf_mutex);
		kref_put(&req->map->ref, mport_release_mapping);
		mutex_unlock(&req->map->md->buf_mutex);
	}

	kref_put(&priv->dma_ref, mport_release_dma);

	kfree(req);
}
static void dma_xfer_callback(void *param)
{
	struct mport_dma_req *req = (struct mport_dma_req *)param;
	struct mport_cdev_priv *priv = req->priv;

	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
					       NULL, NULL);
	complete(&req->req_comp);
	kref_put(&req->refcount, dma_req_free);
}
/*
 * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
 *                   transfer object.
 *
 * Returns a pointer to the DMA transaction descriptor allocated by the DMA
 * driver on success, or an ERR_PTR()/NULL value on failure. The caller must
 * check any non-NULL return value with the IS_ERR() macro.
 */
static struct dma_async_tx_descriptor
*prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
	enum dma_ctrl_flags flags)
{
	struct rio_dma_data tx_data;

	tx_data.sg = sgt->sgl;
	tx_data.sg_len = nents;
	tx_data.rio_addr_u = 0;
	tx_data.rio_addr = transfer->rio_addr;
	if (dir == DMA_MEM_TO_DEV) {
		switch (transfer->method) {
		case RIO_EXCHANGE_NWRITE:
			tx_data.wr_type = RDW_ALL_NWRITE;
			break;
		case RIO_EXCHANGE_NWRITE_R_ALL:
			tx_data.wr_type = RDW_ALL_NWRITE_R;
			break;
		case RIO_EXCHANGE_NWRITE_R:
			tx_data.wr_type = RDW_LAST_NWRITE_R;
			break;
		case RIO_EXCHANGE_DEFAULT:
			tx_data.wr_type = RDW_DEFAULT;
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
}
/* Request DMA channel associated with this mport device.
 * Try to request DMA channel for every new process that opened given
 * mport. If a new DMA channel is not available use default channel
 * which is the first DMA channel opened on mport device.
 */
static int get_dma_channel(struct mport_cdev_priv *priv)
{
	mutex_lock(&priv->dma_lock);
	if (!priv->dmach) {
		priv->dmach = rio_request_mport_dma(priv->md->mport);
		if (!priv->dmach) {
			/* Use default DMA channel if available */
			if (priv->md->dma_chan) {
				priv->dmach = priv->md->dma_chan;
				kref_get(&priv->md->dma_ref);
			} else {
				rmcd_error("Failed to get DMA channel");
				mutex_unlock(&priv->dma_lock);
				return -ENODEV;
			}
		} else if (!priv->md->dma_chan) {
			/* Register default DMA channel if we do not have one */
			priv->md->dma_chan = priv->dmach;
			kref_init(&priv->md->dma_ref);
			rmcd_debug(DMA, "Register DMA_chan %d as default",
				   priv->dmach->chan_id);
		}

		kref_init(&priv->dma_ref);
		init_completion(&priv->comp);
	}

	kref_get(&priv->dma_ref);
	mutex_unlock(&priv->dma_lock);
	return 0;
}
static void put_dma_channel(struct mport_cdev_priv *priv)
{
	kref_put(&priv->dma_ref, mport_release_dma);
}
/*
 * DMA transfer functions
 */
static int do_dma_request(struct mport_dma_req *req,
			  struct rio_transfer_io *xfer,
			  enum rio_transfer_sync sync, int nents)
{
	struct mport_cdev_priv *priv;
	struct sg_table *sgt;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	enum dma_transfer_direction dir;
	long wret;
	int ret = 0;

	priv = req->priv;
	sgt = &req->sgt;

	chan = priv->dmach;
	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
		   current->comm, task_pid_nr(current),
		   dev_name(&chan->dev->device),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	/* Initialize DMA transaction request */
	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);

	if (!tx) {
		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
			(dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			xfer->rio_addr, xfer->length);
		ret = -EIO;
		goto err_out;
	} else if (IS_ERR(tx)) {
		ret = PTR_ERR(tx);
		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
			(dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			xfer->rio_addr, xfer->length);
		goto err_out;
	}

	tx->callback = dma_xfer_callback;
	tx->callback_param = req;

	req->status = DMA_IN_PROGRESS;
	kref_get(&req->refcount);

	cookie = dmaengine_submit(tx);
	req->cookie = cookie;

	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
		   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);

	if (dma_submit_error(cookie)) {
		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
			   cookie, xfer->rio_addr, xfer->length);
		kref_put(&req->refcount, dma_req_free);
		ret = -EIO;
		goto err_out;
	}

	dma_async_issue_pending(chan);

	if (sync == RIO_TRANSFER_ASYNC) {
		spin_lock(&priv->req_lock);
		list_add_tail(&req->node, &priv->async_list);
		spin_unlock(&priv->req_lock);
		return cookie;
	} else if (sync == RIO_TRANSFER_FAF)
		return 0;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -ETIMEDOUT;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be in progress
		 */
		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE", cookie);
		return -EINTR;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion was signaled with error */
		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
			   current->comm, task_pid_nr(current),
			   (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE",
			   cookie, req->status, ret);
		ret = -EIO;
	}

err_out:
	return ret;
}
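
/*
 * Note on the three completion models handled above: RIO_TRANSFER_SYNC
 * blocks until the DMA engine signals completion (or the wait times out or
 * is interrupted), RIO_TRANSFER_ASYNC queues the request on
 * priv->async_list and returns the cookie so user space can reap it later
 * via RIO_WAIT_FOR_ASYNC, and RIO_TRANSFER_FAF ("fire and forget") returns
 * immediately; in that case the request is released by dma_xfer_callback()
 * when the transfer completes.
 */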
/*
 * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
 *                      the remote RapidIO device
 * @filp: file pointer associated with the call
 * @transfer_mode: DMA transfer mode
 * @sync: synchronization mode
 * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
 *       DMA_DEV_TO_MEM = read)
 * @xfer: data transfer descriptor structure
 */
static int
rio_dma_transfer(struct file *filp, u32 transfer_mode,
		 enum rio_transfer_sync sync, enum dma_data_direction dir,
		 struct rio_transfer_io *xfer)
{
	struct mport_cdev_priv *priv = filp->private_data;
	unsigned long nr_pages = 0;
	struct page **page_list = NULL;
	struct mport_dma_req *req;
	struct mport_dev *md = priv->md;
	struct dma_chan *chan;
	int i;
	int ret;
	int nents;

	if (xfer->length == 0)
		return -EINVAL;
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = get_dma_channel(priv);
	if (ret) {
		kfree(req);
		return ret;
	}
	chan = priv->dmach;

	kref_init(&req->refcount);
	init_completion(&req->req_comp);
	req->dir = dir;
	req->filp = filp;
	req->priv = priv;
	req->dmach = chan;
	req->sync = sync;

	/*
	 * If parameter loc_addr != NULL, we are transferring data from/to
	 * data buffer allocated in user-space: lock in memory user-space
	 * buffer pages and build an SG table for DMA transfer request
	 *
	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
	 * used for DMA data transfers: build single entry SG table using
	 * offset within the internal buffer specified by handle parameter.
	 */
	if (xfer->loc_addr) {
		unsigned int offset;
		long pinned;

		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;

		page_list = kmalloc_array(nr_pages,
					  sizeof(*page_list), GFP_KERNEL);
		if (page_list == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		pinned = get_user_pages_fast(
				(unsigned long)xfer->loc_addr & PAGE_MASK,
				nr_pages,
				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
				page_list);

		if (pinned != nr_pages) {
			if (pinned < 0) {
				rmcd_error("get_user_pages_fast err=%ld",
					   pinned);
				nr_pages = 0;
			} else {
				rmcd_error("pinned %ld out of %ld pages",
					   pinned, nr_pages);
				/* release only the pinned pages on error */
				nr_pages = pinned;
			}
			ret = -EFAULT;
			goto err_pg;
		}

		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
				offset, xfer->length, GFP_KERNEL);
		if (ret) {
			rmcd_error("sg_alloc_table failed with err=%d", ret);
			goto err_pg;
		}

		req->page_list = page_list;
		req->nr_pages = nr_pages;
	} else {
		dma_addr_t baddr;
		struct rio_mport_mapping *map;

		baddr = (dma_addr_t)xfer->handle;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (baddr >= map->phys_addr &&
			    baddr < (map->phys_addr + map->size)) {
				kref_get(&map->ref);
				req->map = map;
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (req->map == NULL) {
			ret = -ENOMEM;
			goto err_req;
		}

		if (xfer->length + xfer->offset > map->size) {
			ret = -EINVAL;
			goto err_req;
		}

		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
		if (unlikely(ret)) {
			rmcd_error("sg_alloc_table failed for internal buf");
			goto err_req;
		}

		sg_set_buf(req->sgt.sgl,
			   map->virt_addr + (baddr - map->phys_addr) +
				xfer->offset, xfer->length);
	}

	nents = dma_map_sg(chan->device->dev,
			   req->sgt.sgl, req->sgt.nents, dir);
	if (nents == 0) {
		rmcd_error("Failed to map SG list");
		ret = -EFAULT;
		goto err_pg;
	}

	ret = do_dma_request(req, xfer, sync, nents);

	if (ret >= 0) {
		if (sync == RIO_TRANSFER_ASYNC)
			return ret; /* return ASYNC cookie */
	} else {
		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
	}

err_pg:
	if (!req->page_list) {
		for (i = 0; i < nr_pages; i++)
			put_page(page_list[i]);
		kfree(page_list);
	}
err_req:
	kref_put(&req->refcount, dma_req_free);
	return ret;
}
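
/*
 * User-space sketch (illustrative; the remote address is an assumption and
 * error handling is omitted): a single synchronous write from a user buffer
 * to a remote device, dispatched through the RIO_TRANSFER ioctl below.
 *
 *	struct rio_transfer_io xfer = {
 *		.rioid = destid,
 *		.rio_addr = 0x10000000,		// assumed remote address
 *		.loc_addr = (__u64)(uintptr_t)buf,
 *		.length = len,
 *		.method = RIO_EXCHANGE_NWRITE_R,
 *	};
 *	struct rio_transaction tx = {
 *		.block = (__u64)(uintptr_t)&xfer,
 *		.count = 1,
 *		.transfer_mode = RIO_TRANSFER_MODE_TRANSFER,
 *		.sync = RIO_TRANSFER_SYNC,
 *		.dir = RIO_TRANSFER_DIR_WRITE,
 *	};
 *	ioctl(fd, RIO_TRANSFER, &tx);
 */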
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_transaction transaction;
	struct rio_transfer_io *transfer;
	enum dma_data_direction dir;
	int i, ret = 0;
	size_t size;

	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
		return -EFAULT;

	if (transaction.count != 1) /* only single transfer for now */
		return -EINVAL;

	if ((transaction.transfer_mode &
	     priv->md->properties.transfer_mode) == 0)
		return -ENODEV;

	size = array_size(sizeof(*transfer), transaction.count);
	transfer = vmalloc(size);
	if (!transfer)
		return -ENOMEM;

	if (unlikely(copy_from_user(transfer,
				    (void __user *)(uintptr_t)transaction.block,
				    size))) {
		ret = -EFAULT;
		goto out_free;
	}

	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE;
	for (i = 0; i < transaction.count && ret == 0; i++)
		ret = rio_dma_transfer(filp, transaction.transfer_mode,
				       transaction.sync, dir, &transfer[i]);

	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
				  transfer, size)))
		ret = -EFAULT;

out_free:
	vfree(transfer);

	return ret;
}
static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv;
	struct rio_async_tx_wait w_param;
	struct mport_dma_req *req;
	dma_cookie_t cookie;
	unsigned long tmo;
	long wret;
	int found = 0;
	int ret;

	priv = (struct mport_cdev_priv *)filp->private_data;

	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
		return -EFAULT;

	cookie = w_param.token;
	if (w_param.timeout)
		tmo = msecs_to_jiffies(w_param.timeout);
	else /* Use default DMA timeout */
		tmo = msecs_to_jiffies(dma_timeout);

	spin_lock(&priv->req_lock);
	list_for_each_entry(req, &priv->async_list, node) {
		if (req->cookie == cookie) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	spin_unlock(&priv->req_lock);

	if (!found)
		return -EAGAIN;

	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);

	if (wret == 0) {
		/* Timeout on wait occurred */
		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -ETIMEDOUT;
		goto err_tmo;
	} else if (wret == -ERESTARTSYS) {
		/* Wait_for_completion was interrupted by a signal but DMA may
		 * be still in progress
		 */
		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE");
		ret = -EINTR;
		goto err_tmo;
	}

	if (req->status != DMA_COMPLETE) {
		/* DMA transaction completion signaled with transfer error */
		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
			   current->comm, task_pid_nr(current),
			   (req->dir == DMA_FROM_DEVICE) ? "READ" : "WRITE",
			   req->status);
		ret = -EIO;
	} else
		ret = 0;

	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
		kref_put(&req->refcount, dma_req_free);

	return ret;

err_tmo:
	/* Return request back into async queue */
	spin_lock(&priv->req_lock);
	list_add_tail(&req->node, &priv->async_list);
	spin_unlock(&priv->req_lock);
	return ret;
}
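
/*
 * User-space sketch (illustrative): reap an asynchronous transfer using the
 * cookie returned by RIO_TRANSFER with sync == RIO_TRANSFER_ASYNC. A zero
 * timeout selects the driver's default dma_timeout.
 *
 *	struct rio_async_tx_wait wait = { .token = cookie, .timeout = 0 };
 *	ioctl(fd, RIO_WAIT_FOR_ASYNC, &wait);
 */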
static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
			u64 size, struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		kfree(map);
		return -ENOMEM;
	}

	map->dir = MAP_DMA;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;

	return 0;
}
static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_dma_mem map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
	if (ret)
		return ret;

	map.dma_handle = mapping->phys_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		mutex_lock(&md->buf_mutex);
		kref_put(&mapping->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);
		return -EFAULT;
	}

	return 0;
}
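
/*
 * User-space sketch (illustrative, error handling omitted): allocate a
 * kernel DMA buffer and map it into the process. The returned dma_handle
 * doubles as the mmap() offset and as xfer.handle for RIO_TRANSFER requests
 * issued with loc_addr == 0.
 *
 *	struct rio_dma_mem dbuf = { .length = 0x10000 };
 *	ioctl(fd, RIO_ALLOC_DMA, &dbuf);
 *	void *ptr = mmap(NULL, dbuf.length, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, dbuf.dma_handle);
 *	...
 *	munmap(ptr, dbuf.length);
 *	ioctl(fd, RIO_FREE_DMA, &dbuf.dma_handle);
 */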
static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	int ret = -EFAULT;
	struct rio_mport_mapping *map, *_map;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;
	rmcd_debug(EXIT, "filp=%p", filp);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_DMA && map->phys_addr == handle &&
		    map->filp == filp) {
			kref_put(&map->ref, mport_release_mapping);
			ret = 0;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (ret == -EFAULT) {
		rmcd_debug(DMA, "ERR no matching mapping");
		return ret;
	}

	return 0;
}
#else
static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}

static int rio_mport_free_dma(struct file *filp, void __user *arg)
{
	return -ENODEV;
}
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
/*
 * Inbound/outbound memory mapping functions
 */

static int
rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
				 u64 raddr, u64 size,
				 struct rio_mport_mapping **mapping)
{
	struct rio_mport *mport = md->mport;
	struct rio_mport_mapping *map;
	int ret;

	/* rio_map_inb_region() accepts u32 size */
	if (size > 0xffffffff)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
					    &map->phys_addr, GFP_KERNEL);
	if (map->virt_addr == NULL) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	if (raddr == RIO_MAP_ANY_ADDR)
		raddr = map->phys_addr;
	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
	if (ret < 0)
		goto err_map_inb;

	map->dir = MAP_INBOUND;
	map->rio_addr = raddr;
	map->size = size;
	map->filp = filp;
	map->md = md;
	kref_init(&map->ref);
	mutex_lock(&md->buf_mutex);
	list_add_tail(&map->node, &md->mappings);
	mutex_unlock(&md->buf_mutex);
	*mapping = map;
	return 0;

err_map_inb:
	dma_free_coherent(mport->dev.parent, size,
			  map->virt_addr, map->phys_addr);
err_dma_alloc:
	kfree(map);
	return ret;
}
static int
rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping)
{
	struct rio_mport_mapping *map;
	int err = -ENOMEM;

	if (raddr == RIO_MAP_ANY_ADDR)
		goto get_new;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (map->dir != MAP_INBOUND)
			continue;
		if (raddr == map->rio_addr && size == map->size) {
			/* allow exact match only */
			*mapping = map;
			err = 0;
			break;
		} else if (raddr < (map->rio_addr + map->size - 1) &&
			   (raddr + size) > map->rio_addr) {
			err = -EBUSY;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (err != -ENOMEM)
		return err;
get_new:
	/* not found, create new */
	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
}
static int rio_mport_map_inbound(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	struct rio_mmap map;
	struct rio_mport_mapping *mapping = NULL;
	int ret;

	if (!md->mport->ops->map_inb)
		return -EPROTONOSUPPORT;
	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
		return -EFAULT;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
					    map.length, &mapping);
	if (ret)
		return ret;

	map.handle = mapping->phys_addr;
	map.rio_addr = mapping->rio_addr;

	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
		/* Delete mapping if it was created by this request */
		if (ret == 0 && mapping->filp == filp) {
			mutex_lock(&md->buf_mutex);
			kref_put(&mapping->ref, mport_release_mapping);
			mutex_unlock(&md->buf_mutex);
		}
		return -EFAULT;
	}

	return 0;
}
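
/*
 * User-space sketch (illustrative; the window size is an assumption): expose
 * a local buffer to incoming RapidIO requests. RIO_MAP_ANY_ADDR lets the
 * driver pick the RapidIO address.
 *
 *	struct rio_mmap ibw = {
 *		.rio_addr = RIO_MAP_ANY_ADDR,
 *		.length = 0x10000,
 *	};
 *	ioctl(fd, RIO_MAP_INBOUND, &ibw);
 *	// ibw.rio_addr now holds the address remote endpoints should target;
 *	// ibw.handle is the mmap() offset for local access.
 */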
/*
 * rio_mport_inbound_free() - unmap from RapidIO address space and free
 *                            previously allocated inbound DMA coherent buffer
 * @filp: file pointer associated with the ioctl call
 * @arg: buffer handle returned by the allocation routine
 */
static int rio_mport_inbound_free(struct file *filp, void __user *arg)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md = priv->md;
	u64 handle;
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);

	if (!md->mport->ops->unmap_inb)
		return -EPROTONOSUPPORT;

	if (copy_from_user(&handle, arg, sizeof(handle)))
		return -EFAULT;

	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
			if (map->filp == filp) {
				map->filp = NULL;
				kref_put(&map->ref, mport_release_mapping);
			}
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	return 0;
}
/*
 * maint_port_idx_get() - Get the port index of the mport instance
 * @priv: driver private data
 * @arg: port index
 */
static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
{
	struct mport_dev *md = priv->md;
	u32 port_idx = md->mport->index;

	rmcd_debug(MPORT, "port_index=%d", port_idx);

	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
		return -EFAULT;

	return 0;
}
static int rio_mport_add_event(struct mport_cdev_priv *priv,
			       struct rio_event *event)
{
	int overflow;

	if (!(priv->event_mask & event->header))
		return -EACCES;

	spin_lock(&priv->fifo_lock);
	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
			sizeof(*event)) != sizeof(*event);
	spin_unlock(&priv->fifo_lock);

	wake_up_interruptible(&priv->event_rx_wait);

	if (overflow) {
		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
		return -EBUSY;
	}

	return 0;
}
static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
				       u16 src, u16 dst, u16 info)
{
	struct mport_dev *data = dev_id;
	struct mport_cdev_priv *priv;
	struct rio_mport_db_filter *db_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_DOORBELL;
	event.u.doorbell.rioid = src;
	event.u.doorbell.payload = info;

	handled = 0;
	spin_lock(&data->db_lock);
	list_for_each_entry(db_filter, &data->doorbells, data_node) {
		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
		      db_filter->filter.rioid == src)) &&
		      info >= db_filter->filter.low &&
		      info <= db_filter->filter.high) {
			priv = db_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&data->db_lock);

	if (!handled)
		dev_warn(&data->dev,
			"%s: spurious DB received from 0x%x, info=0x%04x\n",
			__func__, src, info);
}
static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
				    rio_mport_doorbell_handler);
	if (ret) {
		rmcd_error("%s failed to register IBDB, err=%d",
			   dev_name(&md->dev), ret);
		return ret;
	}

	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
	if (db_filter == NULL) {
		rio_release_inb_dbell(md->mport, filter.low, filter.high);
		return -ENOMEM;
	}

	db_filter->filter = filter;
	db_filter->priv = priv;
	spin_lock_irqsave(&md->db_lock, flags);
	list_add_tail(&db_filter->priv_node, &priv->db_filters);
	list_add_tail(&db_filter->data_node, &md->doorbells);
	spin_unlock_irqrestore(&md->db_lock, flags);

	return 0;
}
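
/*
 * User-space sketch (illustrative): subscribe to a doorbell range, then
 * block on read() until one arrives. RIO_SET_EVENT_MASK takes the mask by
 * value and must include RIO_DOORBELL, or rio_mport_add_event() will drop
 * the event for this descriptor.
 *
 *	struct rio_doorbell_filter f = {
 *		.rioid = RIO_INVALID_DESTID,	// match any sender
 *		.low = 0, .high = 0xffff,
 *	};
 *	ioctl(fd, RIO_ENABLE_DOORBELL_RANGE, &f);
 *	ioctl(fd, RIO_SET_EVENT_MASK, RIO_DOORBELL);
 *	struct rio_event ev;
 *	read(fd, &ev, sizeof(ev));
 */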
static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
{
	list_del(&db_filter->data_node);
	list_del(&db_filter->priv_node);
	kfree(db_filter);
}
static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct rio_mport_db_filter *db_filter;
	struct rio_doorbell_filter filter;
	unsigned long flags;
	int ret = -EINVAL;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	if (filter.low > filter.high)
		return -EINVAL;

	spin_lock_irqsave(&priv->md->db_lock, flags);
	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
		if (db_filter->filter.rioid == filter.rioid &&
		    db_filter->filter.low == filter.low &&
		    db_filter->filter.high == filter.high) {
			rio_mport_delete_db_filter(db_filter);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&priv->md->db_lock, flags);

	if (!ret)
		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);

	return ret;
}
static int rio_mport_match_pw(union rio_pw_msg *msg,
			      struct rio_pw_filter *filter)
{
	if ((msg->em.comptag & filter->mask) < filter->low ||
	    (msg->em.comptag & filter->mask) > filter->high)
		return 0;
	return 1;
}
static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
				union rio_pw_msg *msg, int step)
{
	struct mport_dev *md = context;
	struct mport_cdev_priv *priv;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_event event;
	int handled;

	event.header = RIO_PORTWRITE;
	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);

	handled = 0;
	spin_lock(&md->pw_lock);
	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
			priv = pw_filter->priv;
			rio_mport_add_event(priv, &event);
			handled = 1;
		}
	}
	spin_unlock(&md->pw_lock);

	if (!handled) {
		printk_ratelimited(KERN_WARNING DRV_NAME
			": mport%d received spurious PW from 0x%08x\n",
			mport->id, msg->em.comptag);
	}

	return 0;
}
static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
				   void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int hadd = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
	if (pw_filter == NULL)
		return -ENOMEM;

	pw_filter->filter = filter;
	pw_filter->priv = priv;
	spin_lock_irqsave(&md->pw_lock, flags);
	if (list_empty(&md->portwrites))
		hadd = 1;
	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
	list_add_tail(&pw_filter->md_node, &md->portwrites);
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hadd) {
		int ret;

		ret = rio_add_mport_pw_handler(md->mport, md,
					       rio_mport_pw_handler);
		if (ret) {
			dev_err(&md->dev,
				"%s: failed to add IB_PW handler, err=%d\n",
				__func__, ret);
			return ret;
		}
		rio_pw_enable(md->mport, 1);
	}

	return 0;
}
static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
{
	list_del(&pw_filter->md_node);
	list_del(&pw_filter->priv_node);
	kfree(pw_filter);
}
static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
				     struct rio_pw_filter *b)
{
	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
		return 1;
	return 0;
}
static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
				      void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_mport_pw_filter *pw_filter;
	struct rio_pw_filter filter;
	unsigned long flags;
	int ret = -EINVAL;
	int hdel = 0;

	if (copy_from_user(&filter, arg, sizeof(filter)))
		return -EFAULT;

	spin_lock_irqsave(&md->pw_lock, flags);
	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
			rio_mport_delete_pw_filter(pw_filter);
			ret = 0;
			break;
		}
	}

	if (list_empty(&md->portwrites))
		hdel = 1;
	spin_unlock_irqrestore(&md->pw_lock, flags);

	if (hdel) {
		rio_del_mport_pw_handler(md->mport, priv->md,
					 rio_mport_pw_handler);
		rio_pw_enable(md->mport, 0);
	}

	return ret;
}
/*
 * rio_release_dev - release routine for kernel RIO device object
 * @dev: kernel device object associated with a RIO device structure
 *
 * Frees the RIO device struct associated with the given device object.
 */
static void rio_release_dev(struct device *dev)
{
	struct rio_dev *rdev;

	rdev = to_rio_dev(dev);
	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
	kfree(rdev);
}
static void rio_release_net(struct device *dev)
{
	struct rio_net *net;

	net = to_rio_net(dev);
	rmcd_debug(RDEV, "net_%d", net->id);
	kfree(net);
}
/*
 * rio_mport_add_riodev - creates a kernel RIO device object
 *
 * Allocates a RIO device data structure and initializes required fields based
 * on device's configuration space contents.
 * If the device has switch capabilities, then a switch specific portion is
 * allocated and configured.
 */
static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
				void __user *arg)
{
	struct mport_dev *md = priv->md;
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev;
	struct rio_switch *rswitch = NULL;
	struct rio_mport *mport;
	size_t size;
	u32 rval;
	u32 swpinfo = 0;
	u16 destid;
	u8 hopcount;
	int err;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
		return -EEXIST;
	}

	size = sizeof(*rdev);
	mport = md->mport;
	destid = dev_info.destid;
	hopcount = dev_info.hopcount;

	if (rio_mport_read_config_32(mport, destid, hopcount,
				     RIO_PEF_CAR, &rval))
		return -EIO;

	if (rval & RIO_PEF_SWITCH) {
		rio_mport_read_config_32(mport, destid, hopcount,
					 RIO_SWP_INFO_CAR, &swpinfo);
		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
	}

	rdev = kzalloc(size, GFP_KERNEL);
	if (rdev == NULL)
		return -ENOMEM;

	if (mport->net == NULL) {
		struct rio_net *net;

		net = rio_alloc_net(mport);
		if (!net) {
			err = -ENOMEM;
			rmcd_debug(RDEV, "failed to allocate net object");
			goto cleanup;
		}

		net->id = mport->id;
		net->hport = mport;
		dev_set_name(&net->dev, "rnet_%d", net->id);
		net->dev.parent = &mport->dev;
		net->dev.release = rio_release_net;
		err = rio_add_net(net);
		if (err) {
			rmcd_debug(RDEV, "failed to register net, err=%d", err);
			kfree(net);
			goto cleanup;
		}
	}

	rdev->net = mport->net;
	rdev->pef = rval;
	rdev->swpinfo = swpinfo;
	rio_mport_read_config_32(mport, destid, hopcount,
				 RIO_DEV_ID_CAR, &rval);
	rdev->did = rval >> 16;
	rdev->vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
				 &rdev->device_rev);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
				 &rval);
	rdev->asm_did = rval >> 16;
	rdev->asm_vid = rval & 0xffff;
	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
				 &rval);
	rdev->asm_rev = rval >> 16;

	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
		rdev->efptr = rval & 0xffff;
		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
						hopcount, &rdev->phys_rmap);

		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
						hopcount, RIO_EFB_ERR_MGMNT);
	}

	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
				 &rdev->src_ops);
	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
				 &rdev->dst_ops);

	rdev->comp_tag = dev_info.comptag;
	rdev->destid = destid;
	/* hopcount is stored as specified by a caller, regardless of EP or SW */
	rdev->hopcount = hopcount;

	if (rdev->pef & RIO_PEF_SWITCH) {
		rswitch = rdev->rswitch;
		rswitch->route_table = NULL;
	}

	if (strlen(dev_info.name))
		dev_set_name(&rdev->dev, "%s", dev_info.name);
	else if (rdev->pef & RIO_PEF_SWITCH)
		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);
	else
		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
			     rdev->comp_tag & RIO_CTAG_UDEVID);

	INIT_LIST_HEAD(&rdev->net_list);
	rdev->dev.parent = &mport->net->dev;
	rio_attach_device(rdev);
	rdev->dev.release = rio_release_dev;

	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
				   0, 0xffff);
	err = rio_add_device(rdev);
	if (err)
		goto cleanup;
	rio_dev_get(rdev);

	return 0;
cleanup:
	kfree(rdev);
	return err;
}
static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
{
	struct rio_rdev_info dev_info;
	struct rio_dev *rdev = NULL;
	struct device *dev;
	struct rio_mport *mport;
	struct rio_net *net;

	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
		return -EFAULT;
	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

	mport = priv->md->mport;

	/* If device name is specified, removal by name has priority */
	if (strlen(dev_info.name)) {
		dev = bus_find_device_by_name(&rio_bus_type, NULL,
					      dev_info.name);
		if (dev)
			rdev = to_rio_dev(dev);
	} else {
		do {
			rdev = rio_get_comptag(dev_info.comptag, rdev);
			if (rdev && rdev->dev.parent == &mport->net->dev &&
			    rdev->destid == dev_info.destid &&
			    rdev->hopcount == dev_info.hopcount)
				break;
		} while (rdev);
	}

	if (!rdev) {
		rmcd_debug(RDEV,
			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
			dev_info.name, dev_info.comptag, dev_info.destid,
			dev_info.hopcount);
		return -ENODEV;
	}

	net = rdev->net;
	rio_dev_put(rdev);
	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);

	if (list_empty(&net->devices)) {
		rio_free_net(net);
		mport->net = NULL;
	}

	return 0;
}
/*
 * Mport cdev management
 */

/*
 * mport_cdev_open() - Open character device (mport)
 */
static int mport_cdev_open(struct inode *inode, struct file *filp)
{
	int ret;
	int minor = iminor(inode);
	struct mport_dev *chdev;
	struct mport_cdev_priv *priv;

	/* Test for valid device */
	if (minor >= RIO_MAX_MPORTS) {
		rmcd_error("Invalid minor device number");
		return -EINVAL;
	}

	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);

	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);

	if (atomic_read(&chdev->active) == 0)
		return -ENODEV;

	get_device(&chdev->dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		put_device(&chdev->dev);
		return -ENOMEM;
	}

	priv->md = chdev;

	mutex_lock(&chdev->file_mutex);
	list_add_tail(&priv->list, &chdev->file_list);
	mutex_unlock(&chdev->file_mutex);

	INIT_LIST_HEAD(&priv->db_filters);
	INIT_LIST_HEAD(&priv->pw_filters);
	spin_lock_init(&priv->fifo_lock);
	init_waitqueue_head(&priv->event_rx_wait);
	ret = kfifo_alloc(&priv->event_fifo,
			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
			  GFP_KERNEL);
	if (ret < 0) {
		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
		ret = -ENOMEM;
		goto err_fifo;
	}

#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	INIT_LIST_HEAD(&priv->async_list);
	spin_lock_init(&priv->req_lock);
	mutex_init(&priv->dma_lock);
#endif

	filp->private_data = priv;
	goto out;
err_fifo:
	kfree(priv);
out:
	return ret;
}
static int mport_cdev_fasync(int fd, struct file *filp, int mode)
{
	struct mport_cdev_priv *priv = filp->private_data;

	return fasync_helper(fd, filp, mode, &priv->async_queue);
}
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
static void mport_cdev_release_dma(struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	struct mport_dma_req *req, *req_next;
	unsigned long tmo = msecs_to_jiffies(dma_timeout);
	long wret;
	LIST_HEAD(list);

	rmcd_debug(EXIT, "from filp=%p %s(%d)",
		   filp, current->comm, task_pid_nr(current));

	if (!priv->dmach) {
		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
		return;
	}

	md = priv->md;

	spin_lock(&priv->req_lock);
	if (!list_empty(&priv->async_list)) {
		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		list_splice_init(&priv->async_list, &list);
	}
	spin_unlock(&priv->req_lock);

	if (!list_empty(&list)) {
		rmcd_debug(EXIT, "temp list not empty");
		list_for_each_entry_safe(req, req_next, &list, node) {
			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
				   req->filp, req->cookie,
				   completion_done(&req->req_comp) ? "yes" : "no");
			list_del(&req->node);
			kref_put(&req->refcount, dma_req_free);
		}
	}

	put_dma_channel(priv);
	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);

	if (wret <= 0) {
		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
			   current->comm, task_pid_nr(current), wret);
	}

	if (priv->dmach != priv->md->dma_chan) {
		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
			   filp, current->comm, task_pid_nr(current));
		rio_release_dma(priv->dmach);
	} else {
		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
		kref_put(&md->dma_ref, mport_release_def_dma);
	}

	priv->dmach = NULL;
}
#else
#define mport_cdev_release_dma(priv) do {} while (0)
#endif
/*
 * mport_cdev_release() - Release character device
 */
static int mport_cdev_release(struct inode *inode, struct file *filp)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *chdev;
	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
	struct rio_mport_db_filter *db_filter, *db_filter_next;
	struct rio_mport_mapping *map, *_map;
	unsigned long flags;

	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);

	chdev = priv->md;
	mport_cdev_release_dma(filp);

	priv->event_mask = 0;

	spin_lock_irqsave(&chdev->pw_lock, flags);
	if (!list_empty(&priv->pw_filters)) {
		list_for_each_entry_safe(pw_filter, pw_filter_next,
					 &priv->pw_filters, priv_node)
			rio_mport_delete_pw_filter(pw_filter);
	}
	spin_unlock_irqrestore(&chdev->pw_lock, flags);

	spin_lock_irqsave(&chdev->db_lock, flags);
	list_for_each_entry_safe(db_filter, db_filter_next,
				 &priv->db_filters, priv_node) {
		rio_mport_delete_db_filter(db_filter);
	}
	spin_unlock_irqrestore(&chdev->db_lock, flags);

	kfifo_free(&priv->event_fifo);

	mutex_lock(&chdev->buf_mutex);
	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
		if (map->filp == filp) {
			rmcd_debug(EXIT, "release mapping %p filp=%p",
				   map->virt_addr, filp);
			kref_put(&map->ref, mport_release_mapping);
		}
	}
	mutex_unlock(&chdev->buf_mutex);

	mport_cdev_fasync(-1, filp, 0);
	filp->private_data = NULL;
	mutex_lock(&chdev->file_mutex);
	list_del(&priv->list);
	mutex_unlock(&chdev->file_mutex);
	put_device(&chdev->dev);
	kfree(priv);
	return 0;
}
/*
 * mport_cdev_ioctl() - IOCTLs for character device
 */
static long mport_cdev_ioctl(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int err = -EINVAL;
	struct mport_cdev_priv *data = filp->private_data;
	struct mport_dev *md = data->md;

	if (atomic_read(&md->active) == 0)
		return -ENODEV;

	switch (cmd) {
	case RIO_MPORT_MAINT_READ_LOCAL:
		return rio_mport_maint_rd(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_WRITE_LOCAL:
		return rio_mport_maint_wr(data, (void __user *)arg, 1);
	case RIO_MPORT_MAINT_READ_REMOTE:
		return rio_mport_maint_rd(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_WRITE_REMOTE:
		return rio_mport_maint_wr(data, (void __user *)arg, 0);
	case RIO_MPORT_MAINT_HDID_SET:
		return maint_hdid_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_COMPTAG_SET:
		return maint_comptag_set(data, (void __user *)arg);
	case RIO_MPORT_MAINT_PORT_IDX_GET:
		return maint_port_idx_get(data, (void __user *)arg);
	case RIO_MPORT_GET_PROPERTIES:
		md->properties.hdid = md->mport->host_deviceid;
		if (copy_to_user((void __user *)arg, &(md->properties),
				 sizeof(md->properties)))
			return -EFAULT;
		return 0;
	case RIO_ENABLE_DOORBELL_RANGE:
		return rio_mport_add_db_filter(data, (void __user *)arg);
	case RIO_DISABLE_DOORBELL_RANGE:
		return rio_mport_remove_db_filter(data, (void __user *)arg);
	case RIO_ENABLE_PORTWRITE_RANGE:
		return rio_mport_add_pw_filter(data, (void __user *)arg);
	case RIO_DISABLE_PORTWRITE_RANGE:
		return rio_mport_remove_pw_filter(data, (void __user *)arg);
	case RIO_SET_EVENT_MASK:
		data->event_mask = (u32)arg;
		return 0;
	case RIO_GET_EVENT_MASK:
		if (copy_to_user((void __user *)arg, &data->event_mask,
				 sizeof(u32)))
			return -EFAULT;
		return 0;
	case RIO_MAP_OUTBOUND:
		return rio_mport_obw_map(filp, (void __user *)arg);
	case RIO_MAP_INBOUND:
		return rio_mport_map_inbound(filp, (void __user *)arg);
	case RIO_UNMAP_OUTBOUND:
		return rio_mport_obw_free(filp, (void __user *)arg);
	case RIO_UNMAP_INBOUND:
		return rio_mport_inbound_free(filp, (void __user *)arg);
	case RIO_ALLOC_DMA:
		return rio_mport_alloc_dma(filp, (void __user *)arg);
	case RIO_FREE_DMA:
		return rio_mport_free_dma(filp, (void __user *)arg);
	case RIO_WAIT_FOR_ASYNC:
		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
	case RIO_TRANSFER:
		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
	case RIO_DEV_ADD:
		return rio_mport_add_riodev(data, (void __user *)arg);
	case RIO_DEV_DEL:
		return rio_mport_del_riodev(data, (void __user *)arg);
	default:
		break;
	}

	return err;
}
/*
 * mport_release_mapping - free mapping resources and info structure
 * @ref: a pointer to the kref within struct rio_mport_mapping
 *
 * NOTE: Shall be called while holding buf_mutex.
 */
static void mport_release_mapping(struct kref *ref)
{
	struct rio_mport_mapping *map =
			container_of(ref, struct rio_mport_mapping, ref);
	struct rio_mport *mport = map->md->mport;

	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
		   map->dir, map->virt_addr,
		   &map->phys_addr, mport->name);

	list_del(&map->node);

	switch (map->dir) {
	case MAP_INBOUND:
		rio_unmap_inb_region(mport, map->phys_addr);
		fallthrough;
	case MAP_DMA:
		dma_free_coherent(mport->dev.parent, map->size,
				  map->virt_addr, map->phys_addr);
		break;
	case MAP_OUTBOUND:
		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
		break;
	}
	kfree(map);
}
static void mport_mm_open(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	kref_get(&map->ref);
}

static void mport_mm_close(struct vm_area_struct *vma)
{
	struct rio_mport_mapping *map = vma->vm_private_data;

	rmcd_debug(MMAP, "%pad", &map->phys_addr);
	mutex_lock(&map->md->buf_mutex);
	kref_put(&map->ref, mport_release_mapping);
	mutex_unlock(&map->md->buf_mutex);
}

static const struct vm_operations_struct vm_ops = {
	.open =	mport_mm_open,
	.close = mport_mm_close,
};
static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct mport_dev *md;
	size_t size = vma->vm_end - vma->vm_start;
	dma_addr_t baddr;
	unsigned long offset;
	int found = 0, ret;
	struct rio_mport_mapping *map;

	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
		   (unsigned int)size, vma->vm_pgoff);

	md = priv->md;
	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);

	mutex_lock(&md->buf_mutex);
	list_for_each_entry(map, &md->mappings, node) {
		if (baddr >= map->phys_addr &&
		    baddr < (map->phys_addr + map->size)) {
			found = 1;
			break;
		}
	}
	mutex_unlock(&md->buf_mutex);

	if (!found)
		return -ENOMEM;

	offset = baddr - map->phys_addr;

	if (size + offset > map->size)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);

	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
				map->virt_addr, map->phys_addr, map->size);
	else if (map->dir == MAP_OUTBOUND) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
	} else {
		rmcd_error("Attempt to mmap unsupported mapping type");
		ret = -EIO;
	}

	if (!ret) {
		vma->vm_private_data = map;
		vma->vm_ops = &vm_ops;
		mport_mm_open(vma);
	} else {
		rmcd_error("MMAP exit with err=%d", ret);
	}

	return ret;
}
static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
{
	struct mport_cdev_priv *priv = filp->private_data;

	poll_wait(filp, &priv->event_rx_wait, wait);
	if (kfifo_len(&priv->event_fifo))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}
static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	int copied;
	ssize_t ret;

	if (!count)
		return 0;

	if (kfifo_is_empty(&priv->event_fifo) &&
	    (filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	if (count % sizeof(struct rio_event))
		return -EINVAL;

	ret = wait_event_interruptible(priv->event_rx_wait,
					kfifo_len(&priv->event_fifo) != 0);
	if (ret)
		return ret;

	while (ret < count) {
		if (kfifo_to_user(&priv->event_fifo, buf,
		      sizeof(struct rio_event), &copied))
			return -EFAULT;
		ret += copied;
		buf += copied;
	}

	return ret;
}
static ssize_t mport_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct mport_cdev_priv *priv = filp->private_data;
	struct rio_mport *mport = priv->md->mport;
	struct rio_event event;
	int len, ret;

	if (!count)
		return 0;

	if (count % sizeof(event))
		return -EINVAL;

	len = 0;
	while ((count - len) >= (int)sizeof(event)) {
		if (copy_from_user(&event, buf, sizeof(event)))
			return -EFAULT;

		if (event.header != RIO_DOORBELL)
			return -EINVAL;

		ret = rio_mport_send_doorbell(mport,
					      event.u.doorbell.rioid,
					      event.u.doorbell.payload);
		if (ret < 0)
			return ret;

		len += sizeof(event);
		buf += sizeof(event);
	}

	return len;
}
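
/*
 * User-space sketch (illustrative; destination ID and payload are
 * assumptions): sending a doorbell through write(). Only RIO_DOORBELL
 * events are accepted on the write path.
 *
 *	struct rio_event ev = { .header = RIO_DOORBELL };
 *	ev.u.doorbell.rioid = destid;
 *	ev.u.doorbell.payload = 0x5a5a;
 *	write(fd, &ev, sizeof(ev));
 */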
static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,
	.write		= mport_write,
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};
/*
 * Character device management
 */

static void mport_device_release(struct device *dev)
{
	struct mport_dev *md;

	rmcd_debug(EXIT, "%s", dev_name(dev));
	md = container_of(dev, struct mport_dev, dev);
	kfree(md);
}
/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
			   mport->id, ret);
		goto err_cdev;
	}

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	put_device(&md->dev);
	return NULL;
}
/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *client;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->dmach) {
			dmaengine_terminate_all(client->dmach);
			rio_release_dma(client->dmach);
		}
	}
	mutex_unlock(&md->file_mutex);

	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}
/*
 * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
 *                            mport file descriptors.
 */
static int mport_cdev_kill_fasync(struct mport_dev *md)
{
	unsigned int files = 0;
	struct mport_cdev_priv *client;

	mutex_lock(&md->file_mutex);
	list_for_each_entry(client, &md->file_list, list) {
		if (client->async_queue)
			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
		files++;
	}
	mutex_unlock(&md->file_mutex);
	return files;
}
/*
 * mport_cdev_remove() - Remove mport character device
 * @md:	Mport device to remove
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound Rapidio requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	put_device(&md->dev);
}
/*
 * RIO rio_mport_interface driver
 */

/*
 * mport_add_mport() - Add rio_mport from LDM device struct
 * @dev:	Linux device model struct
 * @class_intf:	Linux class_interface
 */
static int mport_add_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev = NULL;

	mport = to_rio_mport(dev);
	if (!mport)
		return -ENODEV;

	chdev = mport_cdev_add(mport);
	if (!chdev)
		return -ENODEV;

	return 0;
}
/*
 * mport_remove_mport() - Remove rio_mport from global list
 * TODO remove device from global mport_dev list
 */
static void mport_remove_mport(struct device *dev,
		struct class_interface *class_intf)
{
	struct rio_mport *mport = NULL;
	struct mport_dev *chdev;
	int found = 0;

	mport = to_rio_mport(dev);
	rmcd_debug(EXIT, "Remove %s", mport->name);

	mutex_lock(&mport_devs_lock);
	list_for_each_entry(chdev, &mport_devs, node) {
		if (chdev->mport->id == mport->id) {
			atomic_set(&chdev->active, 0);
			list_del(&chdev->node);
			found = 1;
			break;
		}
	}
	mutex_unlock(&mport_devs_lock);

	if (found)
		mport_cdev_remove(chdev);
}
/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,
	.remove_dev	= mport_remove_mport,
};
/*
 * Linux kernel module
 */

/*
 * mport_init - Driver module loading
 */
static int __init mport_init(void)
{
	int ret;

	/* Create device class needed by udev */
	dev_class = class_create(THIS_MODULE, DRV_NAME);
	if (IS_ERR(dev_class)) {
		rmcd_error("Unable to create " DRV_NAME " class");
		return PTR_ERR(dev_class);
	}

	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
	if (ret < 0)
		goto err_chr;

	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));

	/* Register to rio_mport_interface */
	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		rmcd_error("class_interface_register() failed, err=%d", ret);
		goto err_cli;
	}

	return 0;

err_cli:
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
err_chr:
	class_destroy(dev_class);
	return ret;
}
/*
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}

module_init(mport_init);
module_exit(mport_exit);