/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

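/**
 * v4l2_m2m_get_vq() - return the vb2_queue for the given buffer type
 * @m2m_ctx: m2m context
 * @type: OUTPUT types select the source queue, all others the destination
 *        queue
 */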
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

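/**
 * v4l2_m2m_next_buf() - return the next buffer from the list of ready buffers
 * @q_ctx: queue context
 *
 * Returns NULL if no buffer is ready. The buffer is not removed from the list.
 */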
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

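/**
 * v4l2_m2m_buf_remove() - take off and return the next ready buffer
 * @q_ctx: queue context
 *
 * Returns NULL if no buffer is ready.
 */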
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

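/**
 * v4l2_m2m_buf_remove_by_buf() - take a specific buffer off the list of
 * ready buffers
 * @q_ctx: queue context
 * @vbuf: buffer to be removed
 */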
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

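/**
 * v4l2_m2m_buf_remove_by_idx() - take the buffer with the given vb2 index off
 * the list of ready buffers
 * @q_ctx: queue context
 * @idx: vb2 buffer index
 *
 * Returns NULL if no ready buffer with this index is found.
 */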
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);

/*
 * Scheduling handlers
 */

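/**
 * v4l2_m2m_get_curr_priv() - return the driver private data of the currently
 * running instance, or NULL if no instance is running
 * @m2m_dev: per-device context
 */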
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

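/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be queued
 * and add it to the pending job queue if so
 * @m2m_ctx: m2m context
 *
 * An instance can be scheduled when both queues are streaming, at least one
 * buffer is ready on each queue (unless the queue is marked as buffered),
 * the context is neither aborting nor already queued, and the optional
 * job_ready() callback, if provided, reports the driver as ready.
 */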
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Aborted context\n");
                return;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, the job_abort() callback is called
 *    and we wait for the job to finish.
 * 2] If the context is queued, the context is removed from the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

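/**
 * v4l2_m2m_job_finish() - inform the framework that a job has finished and
 * have it clean up
 * @m2m_dev: per-device context
 * @m2m_ctx: m2m context of the finished job
 *
 * Called by the driver to yield back the device after it has finished with
 * it. Removes the job from the job queue, wakes up anyone waiting for the
 * context to finish and tries to schedule the next job.
 *
 * Must be called only after the driver's device_run() callback has been
 * called for this context.
 */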
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);

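/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 * @file: file structure for the associated device instance
 * @m2m_ctx: m2m context
 * @reqbufs: request buffers structure passed from userspace
 */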
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /* If count == 0, then the owner has released all buffers and is
         * no longer the owner of the queue. Otherwise we have an owner. */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

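/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 * @file: file structure for the associated device instance
 * @m2m_ctx: m2m context
 * @buf: buffer structure passed from userspace
 *
 * MMAP offsets on the CAPTURE queue are shifted by DST_QUEUE_OFF_BASE so that
 * source and destination buffers can be told apart in mmap().
 */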
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_prepare_buf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

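/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 * @file: file structure for the associated device instance
 * @m2m_ctx: m2m context
 * @type: type of the queue on which streaming should be stopped
 *
 * Cancels any pending or running job for this context and drops all ready
 * buffers from the affected queue.
 */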
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

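/**
 * v4l2_m2m_poll() - poll helper covering both the source and destination queues
 * @file: file structure for the associated device instance
 * @m2m_ctx: m2m context
 * @wait: poll table passed from the poll file operation
 *
 * Call from the driver's poll() file operation. Reports POLLOUT | POLLWRNORM
 * when a source buffer can be dequeued, POLLIN | POLLRDNORM when a destination
 * buffer can be dequeued and POLLPRI when a V4L2 event is pending.
 */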
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | POLLIN | POLLRDNORM;
                }

                poll_wait(file, &dst_q->done_wq, wait);
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

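/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 * @file: file structure for the associated device instance
 * @m2m_ctx: m2m context
 * @vma: vma passed from the mmap file operation
 *
 * Offsets below DST_QUEUE_OFF_BASE map buffers of the source queue; larger
 * offsets are rebased and handed to the destination queue.
 */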
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

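/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 * @m2m_ops: driver callbacks; device_run() and job_abort() are mandatory
 *
 * Usually called from the driver's probe() function. Returns an ERR_PTR()
 * on failure.
 */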
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

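/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 * @m2m_dev: per-device context returned by v4l2_m2m_init()
 * @drv_priv: driver's instance private data
 * @queue_init: driver callback that sets up the source and destination
 *              vb2 queues
 *
 * Usually called from the driver's open() function. Returns an ERR_PTR()
 * on failure.
 */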
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

        if (ret)
                goto err;
        /*
         * If both queues use the same mutex, assign it as the common buffer
         * queues lock of the m2m context. This lock is used by the
         * v4l2_m2m_ioctl_* helpers.
         */
        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
                m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);

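/**
 * v4l2_m2m_ctx_release() - release an m2m context
 * @m2m_ctx: m2m context to release
 *
 * Cancels any pending job, releases both vb2 queues and frees the context.
 * Usually called from the driver's release() function.
 */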
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

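/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 * @m2m_ctx: m2m context
 * @vbuf: buffer to be added to the ready list
 *
 * Call from the driver's buf_queue() videobuf2 callback.
 */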
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is used
 * for both the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        unsigned int ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);