Commit | Line | Data |
---|---|---|
c3b5b024 SA |
1 | /* |
2 | * v4l2-event.c | |
3 | * | |
4 | * V4L2 events. | |
5 | * | |
6 | * Copyright (C) 2009--2010 Nokia Corporation. | |
7 | * | |
8 | * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com> | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * version 2 as published by the Free Software Foundation. | |
13 | * | |
14 | * This program is distributed in the hope that it will be useful, but | |
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
17 | * General Public License for more details. | |
18 | * | |
19 | * You should have received a copy of the GNU General Public License | |
20 | * along with this program; if not, write to the Free Software | |
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | |
22 | * 02110-1301 USA | |
23 | */ | |
24 | ||
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
c3b5b024 | 32 | |
f1e393de | 33 | static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx) |
c3b5b024 | 34 | { |
f1e393de HV |
35 | idx += sev->first; |
36 | return idx >= sev->elems ? idx - sev->elems : idx; | |
c3b5b024 | 37 | } |
c3b5b024 SA |
38 | |
39 | static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event) | |
40 | { | |
c3b5b024 SA |
41 | struct v4l2_kevent *kev; |
42 | unsigned long flags; | |
43 | ||
44 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
45 | ||
523f46d6 | 46 | if (list_empty(&fh->available)) { |
c3b5b024 SA |
47 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
48 | return -ENOENT; | |
49 | } | |
50 | ||
523f46d6 | 51 | WARN_ON(fh->navailable == 0); |
c3b5b024 | 52 | |
523f46d6 | 53 | kev = list_first_entry(&fh->available, struct v4l2_kevent, list); |
f1e393de | 54 | list_del(&kev->list); |
523f46d6 | 55 | fh->navailable--; |
c3b5b024 | 56 | |
523f46d6 | 57 | kev->event.pending = fh->navailable; |
c3b5b024 | 58 | *event = kev->event; |
f1e393de HV |
59 | kev->sev->first = sev_pos(kev->sev, 1); |
60 | kev->sev->in_use--; | |
c3b5b024 SA |
61 | |
62 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
63 | ||
64 | return 0; | |
65 | } | |
66 | ||
67 | int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event, | |
68 | int nonblocking) | |
69 | { | |
c3b5b024 SA |
70 | int ret; |
71 | ||
72 | if (nonblocking) | |
73 | return __v4l2_event_dequeue(fh, event); | |
74 | ||
ee6869af HV |
75 | /* Release the vdev lock while waiting */ |
76 | if (fh->vdev->lock) | |
77 | mutex_unlock(fh->vdev->lock); | |
78 | ||
c3b5b024 | 79 | do { |
523f46d6 HV |
80 | ret = wait_event_interruptible(fh->wait, |
81 | fh->navailable != 0); | |
c3b5b024 | 82 | if (ret < 0) |
ee6869af | 83 | break; |
c3b5b024 SA |
84 | |
85 | ret = __v4l2_event_dequeue(fh, event); | |
86 | } while (ret == -ENOENT); | |
87 | ||
ee6869af HV |
88 | if (fh->vdev->lock) |
89 | mutex_lock(fh->vdev->lock); | |
90 | ||
c3b5b024 SA |
91 | return ret; |
92 | } | |
0a4f8d07 | 93 | EXPORT_SYMBOL_GPL(v4l2_event_dequeue); |
c3b5b024 | 94 | |
6e239399 | 95 | /* Caller must hold fh->vdev->fh_lock! */ |
c3b5b024 | 96 | static struct v4l2_subscribed_event *v4l2_event_subscribed( |
6e239399 | 97 | struct v4l2_fh *fh, u32 type, u32 id) |
c3b5b024 | 98 | { |
c3b5b024 SA |
99 | struct v4l2_subscribed_event *sev; |
100 | ||
f3cd385a | 101 | assert_spin_locked(&fh->vdev->fh_lock); |
c3b5b024 | 102 | |
3f66f0ed | 103 | list_for_each_entry(sev, &fh->subscribed, list) |
6e239399 | 104 | if (sev->type == type && sev->id == id) |
c3b5b024 | 105 | return sev; |
c3b5b024 SA |
106 | |
107 | return NULL; | |
108 | } | |
109 | ||
6e239399 HV |
110 | static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev, |
111 | const struct timespec *ts) | |
112 | { | |
6e239399 HV |
113 | struct v4l2_subscribed_event *sev; |
114 | struct v4l2_kevent *kev; | |
2151bdc8 | 115 | bool copy_payload = true; |
6e239399 HV |
116 | |
117 | /* Are we subscribed? */ | |
118 | sev = v4l2_event_subscribed(fh, ev->type, ev->id); | |
119 | if (sev == NULL) | |
120 | return; | |
121 | ||
c53c2549 HG |
122 | /* |
123 | * If the event has been added to the fh->subscribed list, but its | |
124 | * add op has not completed yet elems will be 0, treat this as | |
125 | * not being subscribed. | |
126 | */ | |
127 | if (!sev->elems) | |
128 | return; | |
129 | ||
6e239399 | 130 | /* Increase event sequence number on fh. */ |
523f46d6 | 131 | fh->sequence++; |
6e239399 HV |
132 | |
133 | /* Do we have any free events? */ | |
f1e393de HV |
134 | if (sev->in_use == sev->elems) { |
135 | /* no, remove the oldest one */ | |
136 | kev = sev->events + sev_pos(sev, 0); | |
137 | list_del(&kev->list); | |
138 | sev->in_use--; | |
139 | sev->first = sev_pos(sev, 1); | |
140 | fh->navailable--; | |
2151bdc8 | 141 | if (sev->elems == 1) { |
c53c2549 HG |
142 | if (sev->ops && sev->ops->replace) { |
143 | sev->ops->replace(&kev->event, ev); | |
2151bdc8 HV |
144 | copy_payload = false; |
145 | } | |
c53c2549 | 146 | } else if (sev->ops && sev->ops->merge) { |
2151bdc8 HV |
147 | struct v4l2_kevent *second_oldest = |
148 | sev->events + sev_pos(sev, 0); | |
c53c2549 | 149 | sev->ops->merge(&kev->event, &second_oldest->event); |
2151bdc8 | 150 | } |
f1e393de | 151 | } |
6e239399 HV |
152 | |
153 | /* Take one and fill it. */ | |
f1e393de | 154 | kev = sev->events + sev_pos(sev, sev->in_use); |
6e239399 | 155 | kev->event.type = ev->type; |
2151bdc8 HV |
156 | if (copy_payload) |
157 | kev->event.u = ev->u; | |
6e239399 HV |
158 | kev->event.id = ev->id; |
159 | kev->event.timestamp = *ts; | |
523f46d6 | 160 | kev->event.sequence = fh->sequence; |
f1e393de HV |
161 | sev->in_use++; |
162 | list_add_tail(&kev->list, &fh->available); | |
6e239399 | 163 | |
523f46d6 | 164 | fh->navailable++; |
6e239399 | 165 | |
523f46d6 | 166 | wake_up_all(&fh->wait); |
6e239399 HV |
167 | } |
168 | ||
c3b5b024 SA |
169 | void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev) |
170 | { | |
171 | struct v4l2_fh *fh; | |
172 | unsigned long flags; | |
173 | struct timespec timestamp; | |
174 | ||
175 | ktime_get_ts(×tamp); | |
176 | ||
177 | spin_lock_irqsave(&vdev->fh_lock, flags); | |
178 | ||
3f66f0ed | 179 | list_for_each_entry(fh, &vdev->fh_list, list) |
6e239399 | 180 | __v4l2_event_queue_fh(fh, ev, ×tamp); |
c3b5b024 SA |
181 | |
182 | spin_unlock_irqrestore(&vdev->fh_lock, flags); | |
183 | } | |
184 | EXPORT_SYMBOL_GPL(v4l2_event_queue); | |
185 | ||
6e239399 HV |
186 | void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev) |
187 | { | |
188 | unsigned long flags; | |
189 | struct timespec timestamp; | |
190 | ||
191 | ktime_get_ts(×tamp); | |
192 | ||
193 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
194 | __v4l2_event_queue_fh(fh, ev, ×tamp); | |
195 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
196 | } | |
197 | EXPORT_SYMBOL_GPL(v4l2_event_queue_fh); | |
198 | ||
c3b5b024 SA |
199 | int v4l2_event_pending(struct v4l2_fh *fh) |
200 | { | |
523f46d6 | 201 | return fh->navailable; |
c3b5b024 SA |
202 | } |
203 | EXPORT_SYMBOL_GPL(v4l2_event_pending); | |
204 | ||
205 | int v4l2_event_subscribe(struct v4l2_fh *fh, | |
c53c2549 HG |
206 | struct v4l2_event_subscription *sub, unsigned elems, |
207 | const struct v4l2_subscribed_event_ops *ops) | |
c3b5b024 | 208 | { |
6e239399 | 209 | struct v4l2_subscribed_event *sev, *found_ev; |
c3b5b024 | 210 | unsigned long flags; |
f1e393de | 211 | unsigned i; |
c3b5b024 | 212 | |
b36b5059 HG |
213 | if (sub->type == V4L2_EVENT_ALL) |
214 | return -EINVAL; | |
215 | ||
f1e393de HV |
216 | if (elems < 1) |
217 | elems = 1; | |
6e239399 | 218 | |
f1e393de | 219 | sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL); |
c3b5b024 SA |
220 | if (!sev) |
221 | return -ENOMEM; | |
f1e393de HV |
222 | for (i = 0; i < elems; i++) |
223 | sev->events[i].sev = sev; | |
224 | sev->type = sub->type; | |
225 | sev->id = sub->id; | |
226 | sev->flags = sub->flags; | |
227 | sev->fh = fh; | |
c53c2549 | 228 | sev->ops = ops; |
c3b5b024 SA |
229 | |
230 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
6e239399 | 231 | found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); |
f1e393de | 232 | if (!found_ev) |
523f46d6 | 233 | list_add(&sev->list, &fh->subscribed); |
c3b5b024 SA |
234 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
235 | ||
c53c2549 | 236 | if (found_ev) { |
77068d36 | 237 | kfree(sev); |
c53c2549 HG |
238 | return 0; /* Already listening */ |
239 | } | |
240 | ||
241 | if (sev->ops && sev->ops->add) { | |
6e6d76cd | 242 | int ret = sev->ops->add(sev, elems); |
c53c2549 HG |
243 | if (ret) { |
244 | sev->ops = NULL; | |
245 | v4l2_event_unsubscribe(fh, sub); | |
246 | return ret; | |
247 | } | |
248 | } | |
249 | ||
c53c2549 HG |
250 | /* Mark as ready for use */ |
251 | sev->elems = elems; | |
252 | ||
c3b5b024 SA |
253 | return 0; |
254 | } | |
255 | EXPORT_SYMBOL_GPL(v4l2_event_subscribe); | |
256 | ||
f1e393de | 257 | void v4l2_event_unsubscribe_all(struct v4l2_fh *fh) |
c3b5b024 | 258 | { |
6e239399 | 259 | struct v4l2_event_subscription sub; |
c3b5b024 SA |
260 | struct v4l2_subscribed_event *sev; |
261 | unsigned long flags; | |
262 | ||
263 | do { | |
264 | sev = NULL; | |
265 | ||
266 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
523f46d6 HV |
267 | if (!list_empty(&fh->subscribed)) { |
268 | sev = list_first_entry(&fh->subscribed, | |
6e239399 HV |
269 | struct v4l2_subscribed_event, list); |
270 | sub.type = sev->type; | |
271 | sub.id = sev->id; | |
c3b5b024 SA |
272 | } |
273 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
6e239399 HV |
274 | if (sev) |
275 | v4l2_event_unsubscribe(fh, &sub); | |
c3b5b024 SA |
276 | } while (sev); |
277 | } | |
f1e393de | 278 | EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all); |
c3b5b024 SA |
279 | |
280 | int v4l2_event_unsubscribe(struct v4l2_fh *fh, | |
281 | struct v4l2_event_subscription *sub) | |
282 | { | |
283 | struct v4l2_subscribed_event *sev; | |
284 | unsigned long flags; | |
78c87e86 | 285 | int i; |
c3b5b024 SA |
286 | |
287 | if (sub->type == V4L2_EVENT_ALL) { | |
288 | v4l2_event_unsubscribe_all(fh); | |
289 | return 0; | |
290 | } | |
291 | ||
292 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | |
293 | ||
6e239399 | 294 | sev = v4l2_event_subscribed(fh, sub->type, sub->id); |
77068d36 | 295 | if (sev != NULL) { |
78c87e86 HG |
296 | /* Remove any pending events for this subscription */ |
297 | for (i = 0; i < sev->in_use; i++) { | |
298 | list_del(&sev->events[sev_pos(sev, i)].list); | |
299 | fh->navailable--; | |
300 | } | |
c3b5b024 | 301 | list_del(&sev->list); |
77068d36 | 302 | } |
c3b5b024 SA |
303 | |
304 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | |
c53c2549 HG |
305 | |
306 | if (sev && sev->ops && sev->ops->del) | |
307 | sev->ops->del(sev); | |
308 | ||
c3b5b024 SA |
309 | kfree(sev); |
310 | ||
311 | return 0; | |
312 | } | |
313 | EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe); |