/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device           *dev;
};

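/*
 * One contiguous buffer, however it was obtained. dma_addr and size always
 * describe the memory handed to the device; vaddr is the kernel mapping when
 * one exists. dma_sgt is the scatterlist backing a USERPTR buffer or an
 * imported DMABUF mapping, and vec pins the user pages of a USERPTR buffer.
 * The MMAP fields implement refcounted lifetime for allocated buffers
 * (sgt_base caches the scatterlist used when exporting them); db_attach is
 * set only on imported DMABUFs.
 */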
struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

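/*
 * Return the size of the initial physically contiguous run of a DMA-mapped
 * scatterlist. Example: for segments 0x1000+4k, 0x2000+4k, 0x8000+4k the
 * result is 8k; the walk stops at the 0x3000 -> 0x8000 discontinuity.
 */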
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

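/*
 * The "cookie" of a dma-contig buffer is a pointer to its dma_addr_t;
 * drivers normally retrieve it through vb2_dma_contig_plane_dma_addr().
 */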
static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

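/*
 * prepare()/finish() only need to act on USERPTR buffers: MMAP buffers are
 * coherent allocations (dma_sgt is NULL) and imported DMABUFs are synced by
 * their exporter, so both are skipped by the checks below.
 */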
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

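/*
 * MMAP buffers are refcounted: vb2_dc_alloc() takes the initial reference,
 * each mmap()ed VMA and each exported DMABUF takes another, and the backing
 * memory is released here once the last reference is dropped.
 */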
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size,
                          enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                                GFP_KERNEL | gfp_flags);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

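/*
 * Each attachment gets a private copy of the exporter's scatterlist so that
 * several importers can hold mappings of the same buffer at once; dma_dir
 * tracks whether (and how) that copy is currently DMA-mapped, with DMA_NONE
 * meaning "not mapped".
 */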
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the cached scatterlist mapping */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing the dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return the previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous mapping */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* map the scatterlist for the client with the new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

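/*
 * Build an sg table describing the coherent allocation. dma_get_sgtable()
 * may legitimately fail on platforms whose coherent memory is not backed by
 * struct pages, in which case the buffer simply cannot be exported.
 */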
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                DEFINE_DMA_ATTRS(attrs);

                dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, &attrs);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                for (i = 0; i < frame_vector_count(buf->vec); i++)
                        set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address, or as a last resort assume that dma address ==
 * physical address (as earlier versions of videobuf2-dma-contig did).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

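/*
 * Pin the userspace range with a frame vector, build an sg table from the
 * pinned pages, DMA-map it and verify that the mapping is contiguous. If the
 * pfns cannot be converted to struct pages (e.g. reserved memory), fall back
 * to a direct pfn -> dma address translation for a physically contiguous
 * pfn run.
 */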
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned long offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

        /* Only cache aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;

        offset = vaddr & ~PAGE_MASK;
        vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, &attrs);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, &attrs);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

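/*
 * Importer side: map_dmabuf() asks the exporter for the scatterlist and
 * checks that it covers buf->size contiguously in dma address space; a
 * dmabuf whose backing store is fragmented cannot be used by this allocator.
 */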
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* check that the dmabuf is big enough to store a contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return ERR_CAST(dba);
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
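
/*
 * Usage sketch (hypothetical driver; names such as my_dev and pdev are
 * illustrative, error handling trimmed):
 *
 *      my_dev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *      if (IS_ERR(my_dev->alloc_ctx))
 *              return PTR_ERR(my_dev->alloc_ctx);
 *
 *      q->mem_ops = &vb2_dma_contig_memops;
 *      ret = vb2_queue_init(q);
 *
 * The driver hands the context back from its queue_setup() op via
 * alloc_ctxs[0] = my_dev->alloc_ctx, reads a plane's bus address with
 * vb2_dma_contig_plane_dma_addr(vb, 0), and calls
 * vb2_dma_contig_cleanup_ctx(my_dev->alloc_ctx) on teardown.
 */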

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");