/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
        struct list_head head;
        struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        BUG_ON(dmabuf->vmapping_counter);

        /*
         * Any fences that a dma-buf poll can wait on should be signaled
         * before releasing dma-buf. This is the responsibility of each
         * driver that uses the reservation objects.
         *
         * If you hit this BUG() it means someone dropped their ref to the
         * dma-buf while still having pending operations on the buffer.
         */
        BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

        dmabuf->ops->release(dmabuf);

        mutex_lock(&db_list.lock);
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);

        if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
                reservation_object_fini(dmabuf->resv);

        module_put(dmabuf->owner);
        kfree(dmabuf);
        return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
        struct dma_buf *dmabuf;
        loff_t base;

        if (!is_dma_buf_file(file))
                return -EBADF;

        dmabuf = file->private_data;

        /*
         * Only support discovering the end of the buffer, but also allow
         * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_SET(0)
         * pattern.
         */
        if (whence == SEEK_END)
                base = dmabuf->size;
        else if (whence == SEEK_SET)
                base = 0;
        else
                return -EINVAL;

        if (offset != 0)
                return -EINVAL;

        return base + offset;
}
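
/*
 * Illustrative userspace sketch (not part of this file; dmabuf_fd is a
 * hypothetical fd obtained from an exporting driver): since only the
 * SEEK_END(0)/SEEK_SET(0) combination is accepted, buffer size discovery
 * looks like:
 *
 *        off_t size = lseek(dmabuf_fd, 0, SEEK_END);  // returns dmabuf->size
 *        lseek(dmabuf_fd, 0, SEEK_SET);               // rewind to offset 0
 */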

static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
        struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
        unsigned long flags;

        spin_lock_irqsave(&dcb->poll->lock, flags);
        wake_up_locked_poll(dcb->poll, dcb->active);
        dcb->active = 0;
        spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv;
        struct reservation_object_list *fobj;
        struct fence *fence_excl;
        unsigned long events;
        unsigned shared_count, seq;

        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
                return POLLERR;

        resv = dmabuf->resv;

        poll_wait(file, &dmabuf->poll, poll);

        events = poll_requested_events(poll) & (POLLIN | POLLOUT);
        if (!events)
                return 0;

retry:
        seq = read_seqcount_begin(&resv->seq);
        rcu_read_lock();

        fobj = rcu_dereference(resv->fence);
        if (fobj)
                shared_count = fobj->shared_count;
        else
                shared_count = 0;
        fence_excl = rcu_dereference(resv->fence_excl);
        if (read_seqcount_retry(&resv->seq, seq)) {
                rcu_read_unlock();
                goto retry;
        }

        if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                unsigned long pevents = POLLIN;

                if (shared_count == 0)
                        pevents |= POLLOUT;

                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
                        dcb->active |= pevents;
                        events &= ~pevents;
                } else
                        dcb->active = pevents;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (events & pevents) {
                        if (!fence_get_rcu(fence_excl)) {
                                /* force a recheck */
                                events &= ~pevents;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        } else if (!fence_add_callback(fence_excl, &dcb->cb,
                                                       dma_buf_poll_cb)) {
                                events &= ~pevents;
                                fence_put(fence_excl);
                        } else {
                                /*
                                 * No callback queued, wake up any additional
                                 * waiters.
                                 */
                                fence_put(fence_excl);
                                dma_buf_poll_cb(NULL, &dcb->cb);
                        }
                }
        }

        if ((events & POLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;

                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
                        events &= ~POLLOUT;
                else
                        dcb->active = POLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);

                if (!(events & POLLOUT))
                        goto out;

                for (i = 0; i < shared_count; ++i) {
                        struct fence *fence = rcu_dereference(fobj->shared[i]);

                        if (!fence_get_rcu(fence)) {
                                /*
                                 * fence refcount dropped to zero, this means
                                 * that fobj has been freed
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
                                events &= ~POLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!fence_add_callback(fence, &dcb->cb,
                                                dma_buf_poll_cb)) {
                                fence_put(fence);
                                events &= ~POLLOUT;
                                break;
                        }
                        fence_put(fence);
                }

                /* No callback queued, wake up any additional waiters. */
                if (i == shared_count)
                        dma_buf_poll_cb(NULL, &dcb->cb);
        }

out:
        rcu_read_unlock();
        return events;
}
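
/*
 * Illustrative userspace sketch (not part of this file; dmabuf_fd is
 * hypothetical): POLLIN waits for the exclusive fence only, while POLLOUT
 * also waits for all shared fences, i.e. until the buffer is idle enough
 * for the requested kind of access.
 *
 *        struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *        poll(&pfd, 1, -1);        // block until the buffer is readable
 */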

static long dma_buf_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct dma_buf *dmabuf;
        struct dma_buf_sync sync;
        enum dma_data_direction direction;
        int ret;

        dmabuf = file->private_data;

        switch (cmd) {
        case DMA_BUF_IOCTL_SYNC:
                if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
                        return -EFAULT;

                if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
                        return -EINVAL;

                switch (sync.flags & DMA_BUF_SYNC_RW) {
                case DMA_BUF_SYNC_READ:
                        direction = DMA_FROM_DEVICE;
                        break;
                case DMA_BUF_SYNC_WRITE:
                        direction = DMA_TO_DEVICE;
                        break;
                case DMA_BUF_SYNC_RW:
                        direction = DMA_BIDIRECTIONAL;
                        break;
                default:
                        return -EINVAL;
                }

                if (sync.flags & DMA_BUF_SYNC_END)
                        ret = dma_buf_end_cpu_access(dmabuf, direction);
                else
                        ret = dma_buf_begin_cpu_access(dmabuf, direction);

                return ret;
        default:
                return -ENOTTY;
        }
}
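
/*
 * Illustrative userspace sketch (not part of this file; dmabuf_fd, map and
 * len are hypothetical): CPU access through an mmap()ed dma-buf should be
 * bracketed with DMA_BUF_IOCTL_SYNC so the exporter can keep caches
 * coherent.
 *
 *        struct dma_buf_sync sync;
 *
 *        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *        ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *        memset(map, 0, len);                // CPU writes to the mapping
 *        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *        ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */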

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
        .unlocked_ioctl = dma_buf_ioctl,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf and associates an anon file with
 * the buffer so it can be exported. Also connects the allocator-specific
 * data and ops to the buffer and records the exporter's name string, which
 * is useful for debugging.
 *
 * @exp_info:   [in]    holds all the export related information provided
 *                      by the exporter. see struct dma_buf_export_info
 *                      for further details.
 *
 * On success, returns a newly created dma_buf object that wraps the
 * supplied private data and dma_buf_ops. On missing ops or a failure to
 * allocate the struct dma_buf, returns an ERR_PTR-encoded negative error.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
        struct dma_buf *dmabuf;
        struct reservation_object *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);

        if (!exp_info->resv)
                alloc_size += sizeof(struct reservation_object);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;

        if (WARN_ON(!exp_info->priv
                          || !exp_info->ops
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
                          || !exp_info->ops->kmap_atomic
                          || !exp_info->ops->kmap
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }

        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);

        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                module_put(exp_info->owner);
                return ERR_PTR(-ENOMEM);
        }

        dmabuf->priv = exp_info->priv;
        dmabuf->ops = exp_info->ops;
        dmabuf->size = exp_info->size;
        dmabuf->exp_name = exp_info->exp_name;
        dmabuf->owner = exp_info->owner;
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

        if (!resv) {
                resv = (struct reservation_object *)&dmabuf[1];
                reservation_object_init(resv);
        }
        dmabuf->resv = resv;

        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
                                        exp_info->flags);
        if (IS_ERR(file)) {
                /* drop the reference taken by try_module_get() above */
                module_put(exp_info->owner);
                kfree(dmabuf);
                return ERR_CAST(file);
        }

        file->f_mode |= FMODE_LSEEK;
        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);

        return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
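
/*
 * Minimal exporter sketch (illustrative only; my_ops and my_buf are
 * hypothetical exporter state, not part of this file):
 *
 *        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *        struct dma_buf *dmabuf;
 *
 *        exp_info.ops = &my_ops;        // must provide the mandatory hooks
 *        exp_info.size = my_buf->size;
 *        exp_info.flags = O_CLOEXEC;
 *        exp_info.priv = my_buf;
 *
 *        dmabuf = dma_buf_export(&exp_info);
 *        if (IS_ERR(dmabuf))
 *                return PTR_ERR(dmabuf);
 */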

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:     [in]    pointer to dma_buf for which fd is required.
 * @flags:      [in]    flags to give to fd
 *
 * On success, returns the associated fd. Otherwise, returns a negative error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        fd = get_unused_fd_flags(flags);
        if (fd < 0)
                return fd;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
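
/*
 * Illustrative sketch: handing a freshly exported buffer out to userspace.
 * O_CLOEXEC is the usual choice so the fd does not leak across exec().
 *
 *        int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *        if (fd < 0) {
 *                dma_buf_put(dmabuf);        // hypothetical error path
 *                return fd;
 *        }
 */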

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd: [in]    fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; the fd's
 * file refcount is increased via fget(). Returns ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:     [in]    buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
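
/*
 * Illustrative importer sketch (fd comes from userspace, everything else
 * hypothetical): translate the fd into a dma_buf reference and drop the
 * reference when done.
 *
 *        struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *        if (IS_ERR(dmabuf))
 *                return PTR_ERR(dmabuf);
 *        ...                          // attach, map and use the buffer
 *        dma_buf_put(dmabuf);         // balances the fget() in dma_buf_get()
 */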

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:     [in]    buffer to attach device to.
 * @dev:        [in]    device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:     [in]    buffer to detach from.
 * @attach:     [in]    attachment to be detached; it is freed after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:     [in]    attachment whose scatterlist is to be returned
 * @direction:  [in]    direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist for the buffer; returns
 * ERR_PTR on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table;

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
        if (!sg_table)
                sg_table = ERR_PTR(-ENOMEM);

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:     [in]    attachment to unmap buffer from
 * @sg_table:   [in]    scatterlist info of the buffer to unmap
 * @direction:  [in]    direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                                struct sg_table *sg_table,
                                enum dma_data_direction direction)
{
        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
                                                direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
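
/*
 * Illustrative device-access sketch (dev is the importing struct device,
 * the rest is hypothetical): the usual attach/map/unmap/detach cycle.
 *
 *        struct dma_buf_attachment *attach;
 *        struct sg_table *sgt;
 *
 *        attach = dma_buf_attach(dmabuf, dev);
 *        if (IS_ERR(attach))
 *                return PTR_ERR(attach);
 *
 *        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *        if (IS_ERR(sgt)) {
 *                dma_buf_detach(dmabuf, attach);
 *                return PTR_ERR(sgt);
 *        }
 *        ...                          // program the device using sgt
 *        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *        dma_buf_detach(dmabuf, attach);
 */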

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:     [in]    buffer to prepare cpu access for.
 * @direction:  [in]    direction of access (read and/or write).
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:     [in]    buffer to complete cpu access for.
 * @direction:  [in]    direction of access (read and/or write).
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                           enum dma_data_direction direction)
{
        int ret = 0;

        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
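
/*
 * Illustrative in-kernel CPU access sketch: any kmap/vmap access should be
 * bracketed by begin/end so the exporter can flush caches and wait for
 * pending device access.
 *
 *        int ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *        if (ret)
 *                return ret;
 *        ...                          // kmap pages and read the contents
 *        dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */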

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap and friends apply.
 * @dmabuf:     [in]    buffer to map page from.
 * @page_num:   [in]    page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:     [in]    buffer to unmap page from.
 * @page_num:   [in]    page in PAGE_SIZE units to unmap.
 * @vaddr:      [in]    kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
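
/*
 * Illustrative page-wise access sketch (out and page_num are hypothetical):
 * kmap returns a kernel mapping of a single PAGE_SIZE chunk, and the index
 * is in pages, not bytes.
 *
 *        void *vaddr = dma_buf_kmap(dmabuf, page_num);
 *
 *        memcpy(out, vaddr, PAGE_SIZE);        // copy one page out
 *        dma_buf_kunmap(dmabuf, page_num, vaddr);
 */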

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:     [in]    buffer that should back the vma
 * @vma:        [in]    vma for the mmap
 * @pgoff:      [in]    offset in pages where this mmap should start within the
 *                      dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the dma_buf's
 * file. It also adjusts the starting pgoff and does bounds checking on the
 * size of the vma. Then it calls the exporter's mmap function to set up the
 * mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        struct file *oldfile;
        int ret;

        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + vma_pages(vma) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + vma_pages(vma) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        get_file(dmabuf->file);
        oldfile = vma->vm_file;
        vma->vm_file = dmabuf->file;
        vma->vm_pgoff = pgoff;

        ret = dmabuf->ops->mmap(dmabuf, vma);
        if (ret) {
                /* restore old parameters on failure */
                vma->vm_file = oldfile;
                fput(dmabuf->file);
        } else {
                if (oldfile)
                        fput(oldfile);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
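
/*
 * Illustrative sketch: a driver exposing a dma-buf through its own char
 * device can simply forward its mmap handler (my_mmap and my_priv are
 * hypothetical):
 *
 *        static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                struct my_priv *p = file->private_data;
 *
 *                return dma_buf_mmap(p->dmabuf, vma, 0);
 *        }
 */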

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:     [in]    buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is to map objects
 * linearly into kernel address space when they are accessed often.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        void *ptr;

        if (WARN_ON(!dmabuf))
                return NULL;

        if (!dmabuf->ops->vmap)
                return NULL;

        mutex_lock(&dmabuf->lock);
        if (dmabuf->vmapping_counter) {
                dmabuf->vmapping_counter++;
                BUG_ON(!dmabuf->vmap_ptr);
                ptr = dmabuf->vmap_ptr;
                goto out_unlock;
        }

        BUG_ON(dmabuf->vmap_ptr);

        ptr = dmabuf->ops->vmap(dmabuf);
        if (WARN_ON_ONCE(IS_ERR(ptr)))
                ptr = NULL;
        if (!ptr)
                goto out_unlock;

        dmabuf->vmap_ptr = ptr;
        dmabuf->vmapping_counter = 1;

out_unlock:
        mutex_unlock(&dmabuf->lock);
        return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:     [in]    buffer to vunmap
 * @vaddr:      [in]    vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        BUG_ON(!dmabuf->vmap_ptr);
        BUG_ON(dmabuf->vmapping_counter == 0);
        BUG_ON(dmabuf->vmap_ptr != vaddr);

        mutex_lock(&dmabuf->lock);
        if (--dmabuf->vmapping_counter == 0) {
                if (dmabuf->ops->vunmap)
                        dmabuf->ops->vunmap(dmabuf, vaddr);
                dmabuf->vmap_ptr = NULL;
        }
        mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
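
/*
 * Illustrative vmap sketch: one contiguous kernel mapping of the whole
 * buffer. Calls nest, so every successful dma_buf_vmap() needs a matching
 * dma_buf_vunmap().
 *
 *        void *vaddr = dma_buf_vmap(dmabuf);
 *
 *        if (!vaddr)
 *                return -ENOMEM;        // vmap is optional and may fail
 *        ...                            // access dmabuf->size bytes at vaddr
 *        dma_buf_vunmap(dmabuf, vaddr);
 */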

#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
        int count = 0, attach_count;
        size_t size = 0;

        ret = mutex_lock_interruptible(&db_list.lock);

        if (ret)
                return ret;

        seq_puts(s, "\nDma-buf Objects:\n");
        seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

        list_for_each_entry(buf_obj, &db_list.head, list_node) {
                ret = mutex_lock_interruptible(&buf_obj->lock);

                if (ret) {
                        seq_puts(s,
                                 "\tERROR locking buffer object: skipping\n");
                        continue;
                }

                seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
                                buf_obj->size,
                                buf_obj->file->f_flags, buf_obj->file->f_mode,
                                file_count(buf_obj->file),
                                buf_obj->exp_name);

                seq_puts(s, "\tAttached Devices:\n");
                attach_count = 0;

                list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
                        seq_puts(s, "\t");

                        seq_printf(s, "%s\n", dev_name(attach_obj->dev));
                        attach_count++;
                }

                seq_printf(s, "Total %d devices attached\n\n",
                                attach_count);

                count++;
                size += buf_obj->size;
                mutex_unlock(&buf_obj->lock);
        }

        seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

        mutex_unlock(&db_list.lock);
        return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
        /* the seq_file's private data holds the describe callback */
        int (*func)(struct seq_file *) = s->private;

        return func(s);
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
        .open           = dma_buf_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
        int err = 0;

        dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);

        if (IS_ERR(dma_buf_debugfs_dir)) {
                err = PTR_ERR(dma_buf_debugfs_dir);
                dma_buf_debugfs_dir = NULL;
                return err;
        }

        err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);

        if (err)
                pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

        return err;
}

static void dma_buf_uninit_debugfs(void)
{
        if (dma_buf_debugfs_dir)
                debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
                                int (*write)(struct seq_file *))
{
        struct dentry *d;

        d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
                        write, &dma_buf_debug_fops);

        return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
        return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
        mutex_init(&db_list.lock);
        INIT_LIST_HEAD(&db_list.head);
        dma_buf_init_debugfs();
        return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
        dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);