/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

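/*
 * Each subscription owns a small ring buffer of 'elems' kevents:
 * 'first' indexes the oldest queued event and 'in_use' counts the
 * filled slots.  sev_pos() maps a logical offset onto a physical
 * slot, wrapping around the end of the array.  For example, with
 * elems == 4 and first == 3, sev_pos(sev, 1) yields slot 0.
 */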
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);

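/*
 * Usage sketch: the core's VIDIOC_DQEVENT handler calls this roughly
 * as follows (ioctl plumbing elided; details vary by kernel version):
 *
 *	struct v4l2_fh *fh = file->private_data;
 *	struct v4l2_event ev;
 *	int ret;
 *
 *	ret = v4l2_event_dequeue(fh, &ev, file->f_flags & O_NONBLOCK);
 *
 * With O_NONBLOCK set, an empty queue yields -ENOENT immediately;
 * otherwise the call sleeps interruptibly until an event arrives.
 */
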
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	if (vdev == NULL)
		return;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);

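/*
 * Usage sketch: a driver typically reports a detected format change
 * to every subscribed file handle like this (assuming 'vdev' is the
 * driver's registered video_device):
 *
 *	static const struct v4l2_event ev = {
 *		.type = V4L2_EVENT_SOURCE_CHANGE,
 *		.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */
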
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

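/*
 * Unlike v4l2_event_queue(), the function above delivers an event to
 * a single file handle only.  The control framework, for instance,
 * uses it to send V4L2_EVENT_CTRL to each subscriber of a control.
 */
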
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

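/*
 * Usage sketch: drivers commonly report pending events from their
 * poll() handler (the poll flag spelling varies by kernel version):
 *
 *	if (v4l2_event_pending(fh))
 *		mask |= EPOLLPRI;
 */
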
static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
{
	struct v4l2_fh *fh = sev->fh;
	unsigned int i;

	lockdep_assert_held(&fh->subscribe_lock);
	assert_spin_locked(&fh->vdev->fh_lock);

	/* Remove any pending events for this subscription */
	for (i = 0; i < sev->in_use; i++) {
		list_del(&sev->events[sev_pos(sev, i)].list);
		fh->navailable--;
	}
	list_del(&sev->list);
}

int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;
	int ret = 0;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;
	sev->elems = elems;

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		/* Already listening */
		kvfree(sev);
	} else if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);
		if (ret) {
			spin_lock_irqsave(&fh->vdev->fh_lock, flags);
			__v4l2_event_unsubscribe(sev);
			spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
			kvfree(sev);
		}
	}

	mutex_unlock(&fh->subscribe_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);

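/*
 * Usage sketch: a driver normally reaches this through its
 * v4l2_ioctl_ops->vidioc_subscribe_event callback.  The handler name
 * below is hypothetical; the helpers it calls are real ones from this
 * framework and from v4l2-ctrls:
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_CTRL:
 *			return v4l2_ctrl_subscribe_event(fh, sub);
 *		case V4L2_EVENT_SOURCE_CHANGE:
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		default:
 *			return v4l2_event_subscribe(fh, sub, 4, NULL);
 *		}
 *	}
 */
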
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					       struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	mutex_lock(&fh->subscribe_lock);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL)
		__v4l2_event_unsubscribe(sev);

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	mutex_unlock(&fh->subscribe_lock);

	kvfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};

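/*
 * With these ops an overflowing queue never loses track of *what*
 * changed: when a new source-change event displaces a pending one,
 * the displaced event's changes mask is ORed into the survivor.  Two
 * back-to-back events carrying V4L2_EVENT_SRC_CH_RESOLUTION thus
 * collapse into a single pending event with that same flag set.
 */
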
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
		struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);