/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - remove a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
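
/*
 * Example: a minimal sketch of a device_run() callback built on the helpers
 * above (via the v4l2_m2m_next_src_buf/v4l2_m2m_next_dst_buf wrappers from
 * v4l2-mem2mem.h). The my_drv_* names are hypothetical, not part of this
 * framework:
 *
 *      static void my_drv_device_run(void *priv)
 *      {
 *              struct my_drv_ctx *ctx = priv;
 *              struct vb2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *              my_drv_program_dma(ctx, src, dst);
 *              my_drv_start(ctx);
 *      }
 */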

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:    m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction, as sketched after this function.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Aborted context\n");
                return;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
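
/*
 * Example: a sketch of the custom job_ready() callback mentioned above, for
 * hypothetical hardware that consumes two source buffers per transaction
 * (the my_drv_* names are made up):
 *
 *      static int my_drv_job_ready(void *priv)
 *      {
 *              struct my_drv_ctx *ctx = priv;
 *
 *              return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
 *      }
 */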

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 *
 * In case of streamoff or release called on any context:
 * 1) If the context is currently running, then job_abort will be called
 * 2) If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
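
/*
 * Example: the usual way to call v4l2_m2m_job_finish() from a driver's
 * interrupt handler, a sketch only (my_drv_* names are hypothetical):
 *
 *      static irqreturn_t my_drv_irq(int irq, void *data)
 *      {
 *              struct my_drv_dev *dev = data;
 *              struct my_drv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *
 *              vb2_buffer_done(v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx),
 *                              VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx),
 *                              VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *              return IRQ_HANDLED;
 *      }
 */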

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_prepare_buf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop queue, since streamoff returns device to the same state as after
         * calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while a dequeueable
 * buffer on the destination queue indicates a non-blocking read.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
        else if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * signal userspace immediately. DQBUF will return -EPIPE.
                 * Do not return here: the queue lock released above still
                 * has to be reacquired before leaving this function.
                 */
                if (dst_q->last_buffer_dequeued)
                        rc |= POLLIN | POLLRDNORM;
                else
                        poll_wait(file, &dst_q->done_wq, wait);
        }

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
        else if (m2m_ctx->q_lock) {
                if (mutex_lock_interruptible(m2m_ctx->q_lock)) {
                        rc |= POLLERR;
                        goto end;
                }
        }

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
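
/*
 * Example: what this offset scheme looks like from userspace, as a sketch.
 * A CAPTURE buffer's offset returned by VIDIOC_QUERYBUF already includes
 * DST_QUEUE_OFF_BASE (see v4l2_m2m_querybuf()), so it is passed to mmap()
 * unchanged:
 *
 *      struct v4l2_buffer buf = {
 *              .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *              .index  = 0,
 *      };
 *      ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *      void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, buf.m.offset);
 */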

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
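
/*
 * Example: typical probe()-time setup, a sketch only (my_drv_* names are
 * hypothetical). device_run and job_abort are mandatory; job_ready is
 * optional:
 *
 *      static const struct v4l2_m2m_ops my_drv_m2m_ops = {
 *              .device_run     = my_drv_device_run,
 *              .job_ready      = my_drv_job_ready,
 *              .job_abort      = my_drv_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&my_drv_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */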

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev: a previously initialized m2m_dev struct
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 * to be used for initializing the vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

        if (ret)
                goto err;
        /*
         * If both queues use the same mutex, assign it as the common buffer
         * queue lock of the m2m context. This lock is used in the
         * v4l2_m2m_ioctl_* helpers.
         */
        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
                m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
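
/*
 * Example: a sketch of a queue_init callback and its use from open()
 * (my_drv_* names are hypothetical). Both queues are given the same mutex
 * here, so the m2m context picks it up as the common q_lock:
 *
 *      static int my_drv_queue_init(void *priv, struct vb2_queue *src_vq,
 *                                   struct vb2_queue *dst_vq)
 *      {
 *              struct my_drv_ctx *ctx = priv;
 *              int ret;
 *
 *              src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *              src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *              src_vq->drv_priv = ctx;
 *              src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *              src_vq->ops = &my_drv_qops;
 *              src_vq->mem_ops = &vb2_dma_contig_memops;
 *              src_vq->lock = &ctx->dev->mutex;
 *              ret = vb2_queue_init(src_vq);
 *              if (ret)
 *                      return ret;
 *
 *              dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *              dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *              dst_vq->drv_priv = ctx;
 *              dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *              dst_vq->ops = &my_drv_qops;
 *              dst_vq->mem_ops = &vb2_dma_contig_memops;
 *              dst_vq->lock = &ctx->dev->mutex;
 *              return vb2_queue_init(dst_vq);
 *      }
 *
 * and in open():
 *
 *      ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *                                          my_drv_queue_init);
 */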

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the buf_queue() callback in the driver's vb2_ops; see the
 * sketch after this function.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
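
/*
 * Example: the vb2 .buf_queue op mentioned above usually just hands the
 * buffer over to the framework, a sketch (my_drv_* names hypothetical):
 *
 *      static void my_drv_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct my_drv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb);
 *      }
 */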

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
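
/*
 * Example: wiring the helpers above into a driver's ioctl ops, a sketch
 * only (my_drv_ioctl_ops is a hypothetical name; format handling is
 * driver-specific and omitted):
 *
 *      static const struct v4l2_ioctl_ops my_drv_ioctl_ops = {
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_prepare_buf     = v4l2_m2m_ioctl_prepare_buf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */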

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        int ret;

        if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock))
                return -ERESTARTSYS;

        ret = v4l2_m2m_mmap(file, m2m_ctx, vma);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

unsigned int v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        unsigned int ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
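
/*
 * Example: the matching v4l2_file_operations, a sketch only (my_drv_open
 * and my_drv_release are hypothetical driver functions):
 *
 *      static const struct v4l2_file_operations my_drv_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_drv_open,
 *              .release        = my_drv_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 */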