// SPDX-License-Identifier: GPL-2.0-only
/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	/*
	 * The per-subscription event array is a ring buffer: map a
	 * logical index (0 == oldest queued event) to an array slot.
	 */
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
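
/*
 * Dequeue the oldest available event into *event; returns -ENOENT when
 * no event is queued. Takes fh->vdev->fh_lock itself, so the caller
 * must not hold it.
 */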
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	event->timestamp.tv_sec = kev->ts / NSEC_PER_SEC;
	event->timestamp.tv_nsec = kev->ts % NSEC_PER_SEC;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
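
/*
 * Dequeue a single event for the file handle, blocking until one
 * arrives unless @nonblocking is set. This backs the VIDIOC_DQEVENT
 * ioctl; the vdev lock, if present, is released while sleeping so
 * other ioctls are not blocked.
 */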
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
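
/*
 * Queue an event on one file handle, provided it is subscribed to it.
 * Each subscription owns a fixed ring of kevents; when the ring is
 * full the oldest event is dropped and, if the subscription supplies
 * ops, the replace op (one-deep ring) or merge op folds the dropped
 * payload into what remains. Caller must hold fh->vdev->fh_lock.
 */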
static void __v4l2_event_queue_fh(struct v4l2_fh *fh,
				  const struct v4l2_event *ev, u64 ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->ts = ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
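
/*
 * Queue an event on a video device: it is delivered to every file
 * handle on vdev->fh_list that subscribed to it. fh_lock is taken
 * IRQ-safely and the timestamp is sampled once for all file handles.
 */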
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	u64 ts;

	if (vdev == NULL)
		return;

	ts = ktime_get_ns();

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, ts);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
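
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver that finished draining its queue might signal end-of-stream
 * roughly like this, using the UAPI v4l2_event layout:
 *
 *	static void mydrv_signal_eos(struct video_device *vdev)
 *	{
 *		static const struct v4l2_event ev = {
 *			.type = V4L2_EVENT_EOS,
 *		};
 *
 *		v4l2_event_queue(vdev, &ev);
 *	}
 */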
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	u64 ts = ktime_get_ns();

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, ts);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
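
/* Caller must hold fh->subscribe_lock and fh->vdev->fh_lock! */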
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}
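
/*
 * Subscribe the file handle to events of (type, id). @elems sizes the
 * per-subscription ring (at least one element is always allocated);
 * @ops may supply add/del/replace/merge hooks. V4L2_EVENT_ALL cannot
 * be subscribed to, and subscribing twice is a no-op that returns 0.
 */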
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
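
/*
 * Drop every subscription on the file handle, typically at file-handle
 * teardown (e.g. from v4l2_fh_exit()). Pops the first subscription
 * under the lock, then unsubscribes it outside it, until none remain.
 */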
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
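
/*
 * Unsubscribe from (type, id); V4L2_EVENT_ALL unsubscribes everything.
 * Pending events of the subscription are discarded, the ops->del hook
 * runs once the subscription is unlinked, and the memory is freed.
 */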
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);
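
/*
 * For V4L2_EVENT_SOURCE_CHANGE the payload is a bitmask of changes, so
 * nothing is lost when events are dropped as long as their change bits
 * are OR'ed into an event that survives: "replace" folds the old bits
 * into the new event on a one-deep ring, "merge" folds the oldest
 * event's bits into the next oldest.
 */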
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);
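
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * drivers typically dispatch from their VIDIOC_SUBSCRIBE_EVENT
 * handler, e.g.:
 *
 *	static int mydrv_subscribe_event(struct v4l2_fh *fh,
 *					 const struct v4l2_event_subscription *sub)
 *	{
 *		if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		return v4l2_ctrl_subscribe_event(fh, sub);
 *	}
 */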
int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);