/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)
/* Instance is currently aborting */
#define TRANS_ABORT             (1 << 2)


/*
 * Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the
 * same offsets but for different queues.
 */
#define DST_QUEUE_OFF_BASE      (1 << 30)
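
/*
 * For illustration, a minimal (hypothetical) userspace sketch: offsets
 * returned by VIDIOC_QUERYBUF for CAPTURE buffers already include
 * DST_QUEUE_OFF_BASE, so the same mmap() path serves both queues; the
 * file descriptor and buffer index below are made up.
 *
 *      struct v4l2_buffer buf = {
 *              .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *              .index  = 0,
 *      };
 *      ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *      void *p = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, buf.m.offset);
 */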

enum v4l2_m2m_entity_type {
        MEM2MEM_ENT_TYPE_SOURCE,
        MEM2MEM_ENT_TYPE_SINK,
        MEM2MEM_ENT_TYPE_PROC
};

static const char * const m2m_entity_name[] = {
        "source",
        "sink",
        "proc"
};

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:             &struct media_entity pointer with the source entity.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @source_pad:         &struct media_pad with the source pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink:               &struct media_entity with the sink entity.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @sink_pad:           &struct media_pad with the sink pad.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @proc:               &struct media_entity with the M2M device itself.
 * @proc_pads:          &struct media_pad with the @proc pads.
 *                      Used only when the M2M device is registered via
 *                      v4l2_m2m_register_media_controller().
 * @intf_devnode:       &struct media_intf devnode pointer with the interface
 *                      which controls the M2M device.
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @job_work:           worker to run queued jobs.
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;
#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_entity     *source;
        struct media_pad        source_pad;
        struct media_entity     sink;
        struct media_pad        sink_pad;
        struct media_entity     proc;
        struct media_pad        proc_pads[2];
        struct media_intf_devnode *intf_devnode;
#endif

        struct list_head        job_queue;
        spinlock_t              job_spinlock;
        struct work_struct      job_work;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);

void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
{
        struct v4l2_m2m_buffer *b, *tmp;
        struct vb2_v4l2_buffer *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
                if (b->vb.vb2_buf.index == idx) {
                        list_del(&b->list);
                        q_ctx->num_rdy--;
                        ret = &b->vb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
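
/*
 * For illustration, a hypothetical .device_run implementation typically
 * peeks at the next ready buffer on each queue before programming the
 * hardware, using the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf()
 * wrappers around the helpers above (struct my_ctx is made up):
 *
 *      static void my_device_run(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *              // program DMA addresses from src/dst and kick the hardware;
 *              // completion is signalled from the driver's IRQ handler
 *      }
 */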

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags_job, flags_out, flags_cap;

        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

        /* If the context is aborted then don't schedule it */
        if (m2m_ctx->job_flags & TRANS_ABORT) {
                dprintk("Aborted context\n");
                goto job_unlock;
        }

        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                dprintk("On job queue already\n");
                goto job_unlock;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
            && !m2m_ctx->out_q_ctx.buffered) {
                dprintk("No input buffers available\n");
                goto out_unlock;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
            && !m2m_ctx->cap_q_ctx.buffered) {
                dprintk("No output buffers available\n");
                goto cap_unlock;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                dprintk("Driver not ready\n");
                goto job_unlock;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
        return;

cap_unlock:
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
out_unlock:
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
job_unlock:
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;

        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{
        struct v4l2_m2m_dev *m2m_dev =
                container_of(work, struct v4l2_m2m_dev, job_work);

        v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context:
 * 1] If the context is currently running, then job_abort will be called
 *    and this function will wait for the job to complete.
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue.
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);

        m2m_ctx->job_flags |= TRANS_ABORT;
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->m2m_ops->job_abort)
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished,
                                !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }
}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /*
         * This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes.
         */
        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);

        /*
         * We might be running in atomic context,
         * but the job must be run in non-atomic context.
         */
        schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
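
/*
 * For illustration, a minimal sketch of a hypothetical driver's completion
 * path (my_irq_handler and struct my_ctx are made-up names): on the
 * hardware "done" interrupt, return both buffers to vb2 and then call
 * v4l2_m2m_job_finish() so the framework can schedule the next job.
 *
 *      static irqreturn_t my_irq_handler(int irq, void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *              vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
 *              return IRQ_HANDLED;
 *      }
 */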

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        ret = vb2_reqbufs(vq, reqbufs);
        /*
         * If count == 0, then the owner has released all buffers and is
         * no longer owner of the queue. Otherwise we have an owner.
         */
        if (ret == 0)
                vq->owner = reqbufs->count ? file->private_data : NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        if (!V4L2_TYPE_IS_OUTPUT(vq->type) &&
            (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
                /* dprintk() already prefixes the function name */
                dprintk("requests cannot be used with capture buffers\n");
                return -EPERM;
        }
        ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf);
        if (!ret && !(buf->flags & V4L2_BUF_FLAG_IN_REQUEST))
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf)
{
        struct video_device *vdev = video_devdata(file);
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                    struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        if (m2m_ctx->job_flags & TRANS_QUEUED)
                list_del(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /*
         * Drop queue, since streamoff returns device to the same state as
         * after calling reqbufs.
         */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        __poll_t req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        __poll_t rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = EPOLLPRI;
                else if (req_events & EPOLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= EPOLLERR;
                goto end;
        }

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (list_empty(&dst_q->done_list)) {
                /*
                 * If the last buffer was dequeued from the capture queue,
                 * return immediately. DQBUF will return -EPIPE.
                 */
                if (dst_q->last_buffer_dequeued) {
                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
                        return rc | EPOLLIN | EPOLLRDNORM;
                }

                poll_wait(file, &dst_q->done_wq, wait);
        }
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLOUT | EPOLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
        media_devnode_remove(m2m_dev->intf_devnode);

        media_entity_remove_links(m2m_dev->source);
        media_entity_remove_links(&m2m_dev->sink);
        media_entity_remove_links(&m2m_dev->proc);
        media_device_unregister_entity(m2m_dev->source);
        media_device_unregister_entity(&m2m_dev->sink);
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->source->name);
        kfree(m2m_dev->sink.name);
        kfree(m2m_dev->proc.name);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);

static int v4l2_m2m_register_entity(struct media_device *mdev,
        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
        struct video_device *vdev, int function)
{
        struct media_entity *entity;
        struct media_pad *pads;
        char *name;
        unsigned int len;
        int num_pads;
        int ret;

        switch (type) {
        case MEM2MEM_ENT_TYPE_SOURCE:
                entity = m2m_dev->source;
                pads = &m2m_dev->source_pad;
                pads[0].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_SINK:
                entity = &m2m_dev->sink;
                pads = &m2m_dev->sink_pad;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                num_pads = 1;
                break;
        case MEM2MEM_ENT_TYPE_PROC:
                entity = &m2m_dev->proc;
                pads = m2m_dev->proc_pads;
                pads[0].flags = MEDIA_PAD_FL_SINK;
                pads[1].flags = MEDIA_PAD_FL_SOURCE;
                num_pads = 2;
                break;
        default:
                return -EINVAL;
        }

        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
        if (type != MEM2MEM_ENT_TYPE_PROC) {
                entity->info.dev.major = VIDEO_MAJOR;
                entity->info.dev.minor = vdev->minor;
        }
        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
        name = kmalloc(len, GFP_KERNEL);
        if (!name)
                return -ENOMEM;
        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
        entity->name = name;
        entity->function = function;

        ret = media_entity_pads_init(entity, num_pads, pads);
        if (ret)
                return ret;
        ret = media_device_register_entity(mdev, entity);
        if (ret)
                return ret;

        return 0;
}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        struct media_device *mdev = vdev->v4l2_dev->mdev;
        struct media_link *link;
        int ret;

        if (!mdev)
                return 0;

        /*
         * A memory-to-memory device consists of two DMA engine entities
         * and one video processing entity. The DMA engine entities are
         * linked to a V4L interface.
         */

        /* Create the three entities with their pads */
        m2m_dev->source = &vdev->entity;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                return ret;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
        if (ret)
                goto err_rel_entity0;
        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
        if (ret)
                goto err_rel_entity1;

        /* Connect the three entities */
        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rel_entity2;

        ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (ret)
                goto err_rm_links0;

        /* Create video interface */
        m2m_dev->intf_devnode = media_devnode_create(mdev,
                        MEDIA_INTF_T_V4L_VIDEO, 0,
                        VIDEO_MAJOR, vdev->minor);
        if (!m2m_dev->intf_devnode) {
                ret = -ENOMEM;
                goto err_rm_links1;
        }

        /* Connect the two DMA engines to the interface */
        link = media_create_intf_link(m2m_dev->source,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_devnode;
        }

        link = media_create_intf_link(&m2m_dev->sink,
                        &m2m_dev->intf_devnode->intf,
                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
        if (!link) {
                ret = -ENOMEM;
                goto err_rm_intf_link;
        }
        return 0;

err_rm_intf_link:
        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
err_rm_devnode:
        media_devnode_remove(m2m_dev->intf_devnode);
err_rm_links1:
        media_entity_remove_links(&m2m_dev->sink);
err_rm_links0:
        media_entity_remove_links(&m2m_dev->proc);
        media_entity_remove_links(m2m_dev->source);
err_rel_entity2:
        media_device_unregister_entity(&m2m_dev->proc);
        kfree(m2m_dev->proc.name);
err_rel_entity1:
        media_device_unregister_entity(&m2m_dev->sink);
        kfree(m2m_dev->sink.name);
err_rel_entity0:
        media_device_unregister_entity(m2m_dev->source);
        kfree(m2m_dev->source->name);
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);
        INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
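
/*
 * For illustration, a minimal sketch of how a hypothetical driver provides
 * its ops at probe time (my_device_run and friends are made-up names; only
 * .device_run is mandatory, as checked above):
 *
 *      static const struct v4l2_m2m_ops my_m2m_ops = {
 *              .device_run     = my_device_run,
 *              .job_ready      = my_job_ready,
 *              .job_abort      = my_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */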

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;
        /*
         * Both queues should use the same mutex to lock the m2m context.
         * This lock is used in some v4l2_m2m_* helpers.
         */
        if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
                ret = -EINVAL;
                goto err;
        }
        m2m_ctx->q_lock = out_q_ctx->q.lock;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
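
/*
 * For illustration, a minimal sketch of a hypothetical driver's open() path
 * (my_queue_init and the ctx/dev layout are made up); note that both queues
 * must share one lock, as enforced above:
 *
 *      static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *                               struct vb2_queue *dst_vq)
 *      {
 *              int ret;
 *
 *              // set up type, ops, mem_ops, etc. on both queues here
 *              src_vq->lock = &my_dev->dev_mutex;
 *              dst_vq->lock = &my_dev->dev_mutex;
 *
 *              ret = vb2_queue_init(src_vq);
 *              if (ret)
 *                      return ret;
 *              return vb2_queue_init(dst_vq);
 *      }
 *
 *      ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *      if (IS_ERR(ctx->fh.m2m_ctx))
 *              return PTR_ERR(ctx->fh.m2m_ctx);
 */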

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        /* wait until the current context is dequeued from job_queue */
        v4l2_m2m_cancel_job(m2m_ctx);

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                struct vb2_v4l2_buffer *vbuf)
{
        struct v4l2_m2m_buffer *b = container_of(vbuf,
                                struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
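
/*
 * For illustration, the vb2 .buf_queue callback of a typical m2m driver is
 * a thin wrapper around the helper above (hypothetical sketch; struct
 * my_ctx is made up):
 *
 *      static void my_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *              struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *      }
 */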

void v4l2_m2m_request_queue(struct media_request *req)
{
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;

        /*
         * Queue all objects. Note that buffer objects are at the end of the
         * objects list, after all other object types. Once buffer objects
         * are queued, the driver might delete them immediately (if the driver
         * processes the buffer at once), so we have to use
         * list_for_each_entry_safe() to handle the case where the object we
         * queue is deleted.
         */
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
                struct v4l2_m2m_ctx *m2m_ctx_obj;
                struct vb2_buffer *vb;

                if (!obj->ops->queue)
                        continue;

                if (vb2_request_object_is_buffer(obj)) {
                        /* Sanity checks */
                        vb = container_of(obj, struct vb2_buffer, req_obj);
                        WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type));
                        m2m_ctx_obj = container_of(vb->vb2_queue,
                                                   struct v4l2_m2m_ctx,
                                                   out_q_ctx.q);
                        WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx);
                        m2m_ctx = m2m_ctx_obj;
                }

                /*
                 * The buffer we queue here can in theory be immediately
                 * unbound, hence the use of list_for_each_entry_safe()
                 * above and why we call the queue op last.
                 */
                obj->ops->queue(obj);
        }

        WARN_ON(!m2m_ctx);

        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
                                struct v4l2_create_buffers *create)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
                                struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
                               struct v4l2_buffer *buf)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
                                struct v4l2_exportbuffer *eb)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
                                enum v4l2_buf_type type)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
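
/*
 * For illustration, a driver whose file handle embeds a struct v4l2_fh with
 * a valid m2m_ctx can plug the helpers above directly into its ioctl ops
 * (hypothetical, abbreviated table):
 *
 *      static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_prepare_buf     = v4l2_m2m_ioctl_prepare_buf,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */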

/*
 * v4l2_file_operations helpers. It is assumed here that the same lock is
 * used for the output and the capture buffer queues.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct v4l2_fh *fh = file->private_data;

        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{
        struct v4l2_fh *fh = file->private_data;
        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
        __poll_t ret;

        if (m2m_ctx->q_lock)
                mutex_lock(m2m_ctx->q_lock);

        ret = v4l2_m2m_poll(file, m2m_ctx, wait);

        if (m2m_ctx->q_lock)
                mutex_unlock(m2m_ctx->q_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
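
/*
 * For illustration, the corresponding file_operations of a hypothetical
 * driver (my_open/my_release are made-up names; video_ioctl2 dispatches to
 * the driver's v4l2_ioctl_ops):
 *
 *      static const struct v4l2_file_operations my_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_open,
 *              .release        = my_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 */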