/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>

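/*
 * Each subscription stores its pending events in sev->events, used as a
 * circular buffer of sev->elems slots with sev->first pointing at the
 * oldest entry. sev_pos() turns a logical index (0 == oldest) into the
 * physical slot, wrapping around the end of the buffer.
 */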
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}

static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}

int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
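
/*
 * Usage sketch (not part of this file): the V4L2 core normally calls
 * v4l2_event_dequeue() from its VIDIOC_DQEVENT ioctl handling, passing
 * nonblocking according to the file's O_NONBLOCK flag. A driver wiring
 * it up by hand would look roughly like this; "my_dqevent" is a
 * hypothetical helper used only for illustration:
 *
 *	static int my_dqevent(struct file *file, struct v4l2_fh *fh,
 *			      struct v4l2_event *ev)
 *	{
 *		return v4l2_event_dequeue(fh, ev,
 *					  file->f_flags & O_NONBLOCK);
 *	}
 */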

/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}

static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}

void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
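
/*
 * Usage sketch (illustrative only): a driver raising an end-of-stream
 * event for every file handle open on the device. The event type is an
 * example; any struct v4l2_event can be queued the same way, and
 * "my_signal_eos" is a hypothetical helper:
 *
 *	static void my_signal_eos(struct video_device *vdev)
 *	{
 *		static const struct v4l2_event ev = {
 *			.type = V4L2_EVENT_EOS,
 *		};
 *
 *		v4l2_event_queue(vdev, &ev);
 *	}
 */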
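
/*
 * Like v4l2_event_queue(), but deliver the event only to a single file
 * handle instead of to every handle open on the device.
 */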
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);

int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);

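/*
 * Subscribe a file handle to events of the given type and id. elems is
 * the depth of the per-subscription event queue (raised to at least 1);
 * once it is full, the oldest event is dropped or merged to make room.
 * Subscribing to an already subscribed event is a no-op returning 0.
 */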
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 const struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);
		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
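
/*
 * Usage sketch (illustrative only): a driver's VIDIOC_SUBSCRIBE_EVENT
 * handler typically dispatches on sub->type. The queue depth of 4 and
 * the event set below are assumptions made for the example, and
 * "my_subscribe_event" is a hypothetical helper:
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      const struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 4, NULL);
 *		case V4L2_EVENT_SOURCE_CHANGE:
 *			return v4l2_src_change_event_subscribe(fh, sub);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */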
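
/*
 * Drop every subscription of a file handle, typically as part of tearing
 * the file handle down.
 */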
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					       struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);

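/*
 * Drop a single type/id subscription, discarding any of its pending
 * events. V4L2_EVENT_ALL unsubscribes everything at once.
 */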
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   const struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);

int v4l2_event_subdev_unsubscribe(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_event_subdev_unsubscribe);

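/*
 * Event ops for V4L2_EVENT_SOURCE_CHANGE. When a subscription's queue
 * overflows, the changes bitmask of the dropped event is OR'ed into the
 * surviving event so that no change notification is lost: "replace"
 * covers single-slot queues, "merge" multi-slot ones.
 */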
static void v4l2_event_src_replace(struct v4l2_event *old,
				   const struct v4l2_event *new)
{
	u32 old_changes = old->u.src_change.changes;

	old->u.src_change = new->u.src_change;
	old->u.src_change.changes |= old_changes;
}

static void v4l2_event_src_merge(const struct v4l2_event *old,
				 struct v4l2_event *new)
{
	new->u.src_change.changes |= old->u.src_change.changes;
}

static const struct v4l2_subscribed_event_ops v4l2_event_src_ch_ops = {
	.replace = v4l2_event_src_replace,
	.merge = v4l2_event_src_merge,
};
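
/*
 * Note: elems is passed as 0 below, which v4l2_event_subscribe() raises
 * to 1, so source-change events use a single slot and rely on the
 * replace op above to coalesce bursts of changes.
 */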
int v4l2_src_change_event_subscribe(struct v4l2_fh *fh,
				    const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_event_subscribe(fh, sub, 0, &v4l2_event_src_ch_ops);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subscribe);

int v4l2_src_change_event_subdev_subscribe(struct v4l2_subdev *sd,
					   struct v4l2_fh *fh, struct v4l2_event_subscription *sub)
{
	return v4l2_src_change_event_subscribe(fh, sub);
}
EXPORT_SYMBOL_GPL(v4l2_src_change_event_subdev_subscribe);