/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

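/*
 * Each subscription keeps its pending events in a fixed-size ring of
 * sev->elems entries.  sev_pos() converts a logical offset (0 being the
 * oldest queued event) into an index into sev->events[], wrapping at
 * the end of the ring.  Callers never pass an offset >= elems, so a
 * single wrap-around test is enough: e.g. with elems == 4 and
 * first == 3, sev_pos(sev, 2) yields (3 + 2) - 4 == 1.
 */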
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
        idx += sev->first;
        return idx >= sev->elems ? idx - sev->elems : idx;
}

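/*
 * Unlink the oldest available event from the file handle and copy it
 * out to the caller, all under the IRQ-safe fh_lock.  Returns -ENOENT
 * when nothing is queued.  The pending field reports how many more
 * events remain after this one, which lets userspace drain the queue
 * without polling.
 */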
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
        struct v4l2_kevent *kev;
        unsigned long flags;

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        if (list_empty(&fh->available)) {
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                return -ENOENT;
        }

        WARN_ON(fh->navailable == 0);

        kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
        list_del(&kev->list);
        fh->navailable--;

        kev->event.pending = fh->navailable;
        *event = kev->event;
        kev->sev->first = sev_pos(kev->sev, 1);
        kev->sev->in_use--;

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        return 0;
}

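/*
 * Dequeue one event, blocking until something arrives unless
 * nonblocking is set; the core uses this to implement VIDIOC_DQEVENT.
 * The video_device serialization lock, if the driver uses one, is
 * released while sleeping so other file operations can proceed.
 * Because of that another waiter may win the race for a freshly
 * queued event, hence the retry loop on -ENOENT.
 */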
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
                       int nonblocking)
{
        int ret;

        if (nonblocking)
                return __v4l2_event_dequeue(fh, event);

        /* Release the vdev lock while waiting */
        if (fh->vdev->lock)
                mutex_unlock(fh->vdev->lock);

        do {
                ret = wait_event_interruptible(fh->wait,
                                               fh->navailable != 0);
                if (ret < 0)
                        break;

                ret = __v4l2_event_dequeue(fh, event);
        } while (ret == -ENOENT);

        if (fh->vdev->lock)
                mutex_lock(fh->vdev->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
                struct v4l2_fh *fh, u32 type, u32 id)
{
        struct v4l2_subscribed_event *sev;

        assert_spin_locked(&fh->vdev->fh_lock);

        list_for_each_entry(sev, &fh->subscribed, list)
                if (sev->type == type && sev->id == id)
                        return sev;

        return NULL;
}

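/*
 * Deliver one event to a single file handle, provided it has a matching
 * subscription.  The per-subscription ring never grows: when it is full
 * the oldest queued event is dropped to make room for the new one.
 * Subscriptions can mitigate that loss via the optional ops:
 *
 *   - replace (ring of one entry only): fold the new payload into the
 *     slot being overwritten so nothing is lost;
 *   - merge: fold the dropped oldest payload into the second-oldest
 *     event, which becomes the new head of the ring.
 */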
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
                const struct timespec *ts)
{
        struct v4l2_subscribed_event *sev;
        struct v4l2_kevent *kev;
        bool copy_payload = true;

        /* Are we subscribed? */
        sev = v4l2_event_subscribed(fh, ev->type, ev->id);
        if (sev == NULL)
                return;

        /* Increase event sequence number on fh. */
        fh->sequence++;

        /* Do we have any free events? */
        if (sev->in_use == sev->elems) {
                /* no, remove the oldest one */
                kev = sev->events + sev_pos(sev, 0);
                list_del(&kev->list);
                sev->in_use--;
                sev->first = sev_pos(sev, 1);
                fh->navailable--;
                if (sev->elems == 1) {
                        if (sev->ops && sev->ops->replace) {
                                sev->ops->replace(&kev->event, ev);
                                copy_payload = false;
                        }
                } else if (sev->ops && sev->ops->merge) {
                        struct v4l2_kevent *second_oldest =
                                sev->events + sev_pos(sev, 0);
                        sev->ops->merge(&kev->event, &second_oldest->event);
                }
        }

        /* Take one and fill it. */
        kev = sev->events + sev_pos(sev, sev->in_use);
        kev->event.type = ev->type;
        if (copy_payload)
                kev->event.u = ev->u;
        kev->event.id = ev->id;
        kev->event.timestamp = *ts;
        kev->event.sequence = fh->sequence;
        sev->in_use++;
        list_add_tail(&kev->list, &fh->available);

        fh->navailable++;

        wake_up_all(&fh->wait);
}

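/*
 * Broadcast an event to every file handle open on the device; each
 * handle stores it only if subscribed.  All work is done under the
 * IRQ-safe fh_lock, so drivers may queue events from interrupt context.
 */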
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
        struct v4l2_fh *fh;
        unsigned long flags;
        struct timespec timestamp;

        if (vdev == NULL)
                return;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&vdev->fh_lock, flags);

        list_for_each_entry(fh, &vdev->fh_list, list)
                __v4l2_event_queue_fh(fh, ev, &timestamp);

        spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

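/*
 * Like v4l2_event_queue(), but delivers to one specific file handle
 * instead of every handle on the device.  This suits events that are
 * generated per subscriber, such as control change notifications.
 */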
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
        unsigned long flags;
        struct timespec timestamp;

        ktime_get_ts(&timestamp);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        __v4l2_event_queue_fh(fh, ev, &timestamp);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

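/*
 * Number of events ready for dequeueing on this file handle.  The V4L2
 * poll implementation uses this to flag exceptional (POLLPRI)
 * readiness.
 */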
int v4l2_event_pending(struct v4l2_fh *fh)
{
        return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

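/*
 * Subscribe the file handle to an event type.  elems sets the depth of
 * the per-subscription ring (clamped to at least 1), and ops, when
 * non-NULL, provides the add/del/replace/merge hooks.  Subscribing
 * twice to the same type/id pair is a no-op that still returns 0.
 * subscribe_lock serializes against unsubscribe, so ops->add() and
 * ops->del() never run concurrently for the same subscription.
 *
 * A driver's .subscribe_event handler typically just forwards here; a
 * minimal sketch (the handler name is hypothetical):
 *
 *	static int xxx_subscribe_event(struct v4l2_fh *fh,
 *			const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 2, NULL);
 *		case V4L2_EVENT_CTRL:
 *			return v4l2_ctrl_subscribe_event(fh, sub);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */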
int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
{
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
        int ret = 0;

        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;

        if (elems < 1)
                elems = 1;

        sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
        sev->id = sub->id;
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
        sev->elems = elems;

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (found_ev) {
                /* Already listening */
                kvfree(sev);
                goto out_unlock;
        }

        if (sev->ops && sev->ops->add) {
                ret = sev->ops->add(sev, elems);
                if (ret) {
                        kvfree(sev);
                        goto out_unlock;
                }
        }

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

out_unlock:
        mutex_unlock(&fh->subscribe_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

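/*
 * Drop every subscription on the file handle.  Subscriptions are peeled
 * off one at a time through v4l2_event_unsubscribe() so that fh_lock is
 * never held while an ops->del() hook runs, since those hooks are
 * allowed to sleep.
 */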
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
        struct v4l2_event_subscription sub;
        struct v4l2_subscribed_event *sev;
        unsigned long flags;

        do {
                sev = NULL;

                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                if (!list_empty(&fh->subscribed)) {
                        sev = list_first_entry(&fh->subscribed,
                                        struct v4l2_subscribed_event, list);
                        sub.type = sev->type;
                        sub.id = sev->id;
                }
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                if (sev)
                        v4l2_event_unsubscribe(fh, &sub);
        } while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

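/*
 * Drop a single subscription, or all of them for V4L2_EVENT_ALL.  Any
 * events still queued for the subscription are unlinked from the
 * available list before the subscription is freed, so a concurrent
 * dequeue can never return a stale event.
 */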
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                           const struct v4l2_event_subscription *sub)
{
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
        int i;

        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
                return 0;
        }

        mutex_lock(&fh->subscribe_lock);

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);

        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
        if (sev != NULL) {
                /* Remove any pending events for this subscription */
                for (i = 0; i < sev->in_use; i++) {
                        list_del(&sev->events[sev_pos(sev, i)].list);
                        fh->navailable--;
                }
                list_del(&sev->list);
        }

        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);

        mutex_unlock(&fh->subscribe_lock);

        kvfree(sev);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

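/*
 * Trivial wrapper allowing sub-device drivers to plug the
 * unsubscription path straight into
 * v4l2_subdev_core_ops.unsubscribe_event.
 */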
int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
                                  struct v4l2_event_subscription *sub)
{
        return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

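/*
 * replace/merge hooks for V4L2_EVENT_SOURCE_CHANGE.  The payload is
 * just a bitmask of changes, so coalescing two events is lossless: the
 * surviving event carries the union of both "changes" masks.
 */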
static void v4l2_event_src_replace(struct v4l2_event *old,
                                const struct v4l2_event *new)
{
        u32 old_changes = old->u.src_change.changes;

        old->u.src_change = new->u.src_change;
        old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
                                struct v4l2_event *new)
{
        new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
        .replace = v4l2_event_src_replace,
        .merge = v4l2_event_src_merge,
};

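/*
 * Convenience helper for drivers that only need the standard
 * source-change subscription: it validates the type and subscribes with
 * the coalescing ops above.  The elems argument of 0 is clamped to 1 by
 * v4l2_event_subscribe(), so at most one source-change event is ever
 * queued and later occurrences are folded in via ->replace().
 */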
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
                                const struct v4l2_event_subscription *sub)
{
        if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
                return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
                struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
        return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);