Commit | Line | Data |
---|---|---|
7026ea4b JC |
1 | /* The industrial I/O core |
2 | * | |
3 | * Copyright (c) 2008 Jonathan Cameron | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License version 2 as published by | |
7 | * the Free Software Foundation. | |
8 | * | |
14555b14 | 9 | * Handling of buffer allocation / resizing. |
7026ea4b JC |
10 | * |
11 | * | |
12 | * Things to look at here. | |
13 | * - Better memory allocation techniques? | |
14 | * - Alternative access techniques? | |
15 | */ | |
16 | #include <linux/kernel.h> | |
8e336a72 | 17 | #include <linux/export.h> |
7026ea4b | 18 | #include <linux/device.h> |
7026ea4b | 19 | #include <linux/fs.h> |
7026ea4b | 20 | #include <linux/cdev.h> |
5a0e3ad6 | 21 | #include <linux/slab.h> |
a7348347 | 22 | #include <linux/poll.h> |
7026ea4b | 23 | |
06458e27 | 24 | #include <linux/iio/iio.h> |
df9c1c42 | 25 | #include "iio_core.h" |
06458e27 JC |
26 | #include <linux/iio/sysfs.h> |
27 | #include <linux/iio/buffer.h> | |
7026ea4b | 28 | |
/* sysfs endianness prefix strings, indexed by enum iio_endian (IIO_BE/IIO_LE). */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
7026ea4b | 33 | |
705ee2c9 | 34 | static bool iio_buffer_is_active(struct iio_buffer *buf) |
84b36ce5 | 35 | { |
705ee2c9 | 36 | return !list_empty(&buf->buffer_list); |
84b36ce5 JC |
37 | } |
38 | ||
7026ea4b | 39 | /** |
14555b14 | 40 | * iio_buffer_read_first_n_outer() - chrdev read for buffer access |
7026ea4b | 41 | * |
14555b14 JC |
42 | * This function relies on all buffer implementations having an |
43 | * iio_buffer as their first element. | |
7026ea4b | 44 | **/ |
14555b14 JC |
45 | ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf, |
46 | size_t n, loff_t *f_ps) | |
7026ea4b | 47 | { |
1aa04278 | 48 | struct iio_dev *indio_dev = filp->private_data; |
14555b14 | 49 | struct iio_buffer *rb = indio_dev->buffer; |
d5857d65 | 50 | |
f18e7a06 LPC |
51 | if (!indio_dev->info) |
52 | return -ENODEV; | |
53 | ||
96e00f11 | 54 | if (!rb || !rb->access->read_first_n) |
7026ea4b | 55 | return -EINVAL; |
8d213f24 | 56 | return rb->access->read_first_n(rb, n, buf); |
7026ea4b JC |
57 | } |
58 | ||
a7348347 | 59 | /** |
14555b14 | 60 | * iio_buffer_poll() - poll the buffer to find out if it has data |
a7348347 | 61 | */ |
14555b14 JC |
62 | unsigned int iio_buffer_poll(struct file *filp, |
63 | struct poll_table_struct *wait) | |
a7348347 | 64 | { |
1aa04278 | 65 | struct iio_dev *indio_dev = filp->private_data; |
14555b14 | 66 | struct iio_buffer *rb = indio_dev->buffer; |
a7348347 | 67 | |
f18e7a06 LPC |
68 | if (!indio_dev->info) |
69 | return -ENODEV; | |
70 | ||
a7348347 JC |
71 | poll_wait(filp, &rb->pollq, wait); |
72 | if (rb->stufftoread) | |
73 | return POLLIN | POLLRDNORM; | |
74 | /* need a way of knowing if there may be enough data... */ | |
8d213f24 | 75 | return 0; |
a7348347 JC |
76 | } |
77 | ||
f79a9098 | 78 | void iio_buffer_init(struct iio_buffer *buffer) |
7026ea4b | 79 | { |
5ada4ea9 | 80 | INIT_LIST_HEAD(&buffer->demux_list); |
705ee2c9 | 81 | INIT_LIST_HEAD(&buffer->buffer_list); |
14555b14 | 82 | init_waitqueue_head(&buffer->pollq); |
9e69c935 | 83 | kref_init(&buffer->ref); |
7026ea4b | 84 | } |
14555b14 | 85 | EXPORT_SYMBOL(iio_buffer_init); |
7026ea4b | 86 | |
1d892719 | 87 | static ssize_t iio_show_scan_index(struct device *dev, |
8d213f24 JC |
88 | struct device_attribute *attr, |
89 | char *buf) | |
1d892719 | 90 | { |
8d213f24 | 91 | return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); |
1d892719 JC |
92 | } |
93 | ||
94 | static ssize_t iio_show_fixed_type(struct device *dev, | |
95 | struct device_attribute *attr, | |
96 | char *buf) | |
97 | { | |
98 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); | |
8310b86c JC |
99 | u8 type = this_attr->c->scan_type.endianness; |
100 | ||
101 | if (type == IIO_CPU) { | |
9d5d1153 JC |
102 | #ifdef __LITTLE_ENDIAN |
103 | type = IIO_LE; | |
104 | #else | |
105 | type = IIO_BE; | |
106 | #endif | |
8310b86c JC |
107 | } |
108 | return sprintf(buf, "%s:%c%d/%d>>%u\n", | |
109 | iio_endian_prefix[type], | |
1d892719 JC |
110 | this_attr->c->scan_type.sign, |
111 | this_attr->c->scan_type.realbits, | |
112 | this_attr->c->scan_type.storagebits, | |
113 | this_attr->c->scan_type.shift); | |
114 | } | |
115 | ||
8d213f24 JC |
116 | static ssize_t iio_scan_el_show(struct device *dev, |
117 | struct device_attribute *attr, | |
118 | char *buf) | |
119 | { | |
120 | int ret; | |
e53f5ac5 | 121 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
8d213f24 | 122 | |
5ada4ea9 JC |
123 | ret = test_bit(to_iio_dev_attr(attr)->address, |
124 | indio_dev->buffer->scan_mask); | |
125 | ||
8d213f24 JC |
126 | return sprintf(buf, "%d\n", ret); |
127 | } | |
128 | ||
14555b14 | 129 | static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) |
8d213f24 | 130 | { |
14555b14 | 131 | clear_bit(bit, buffer->scan_mask); |
8d213f24 JC |
132 | return 0; |
133 | } | |
134 | ||
135 | static ssize_t iio_scan_el_store(struct device *dev, | |
136 | struct device_attribute *attr, | |
137 | const char *buf, | |
138 | size_t len) | |
139 | { | |
a714af27 | 140 | int ret; |
8d213f24 | 141 | bool state; |
e53f5ac5 | 142 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
14555b14 | 143 | struct iio_buffer *buffer = indio_dev->buffer; |
8d213f24 JC |
144 | struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); |
145 | ||
a714af27 JC |
146 | ret = strtobool(buf, &state); |
147 | if (ret < 0) | |
148 | return ret; | |
8d213f24 | 149 | mutex_lock(&indio_dev->mlock); |
705ee2c9 | 150 | if (iio_buffer_is_active(indio_dev->buffer)) { |
8d213f24 JC |
151 | ret = -EBUSY; |
152 | goto error_ret; | |
153 | } | |
f79a9098 | 154 | ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); |
8d213f24 JC |
155 | if (ret < 0) |
156 | goto error_ret; | |
157 | if (!state && ret) { | |
14555b14 | 158 | ret = iio_scan_mask_clear(buffer, this_attr->address); |
8d213f24 JC |
159 | if (ret) |
160 | goto error_ret; | |
161 | } else if (state && !ret) { | |
f79a9098 | 162 | ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); |
8d213f24 JC |
163 | if (ret) |
164 | goto error_ret; | |
165 | } | |
166 | ||
167 | error_ret: | |
168 | mutex_unlock(&indio_dev->mlock); | |
169 | ||
5a2a6e11 | 170 | return ret < 0 ? ret : len; |
8d213f24 JC |
171 | |
172 | } | |
173 | ||
174 | static ssize_t iio_scan_el_ts_show(struct device *dev, | |
175 | struct device_attribute *attr, | |
176 | char *buf) | |
177 | { | |
e53f5ac5 | 178 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
f8c6f4e9 | 179 | return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp); |
8d213f24 JC |
180 | } |
181 | ||
182 | static ssize_t iio_scan_el_ts_store(struct device *dev, | |
183 | struct device_attribute *attr, | |
184 | const char *buf, | |
185 | size_t len) | |
186 | { | |
a714af27 | 187 | int ret; |
e53f5ac5 | 188 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
8d213f24 | 189 | bool state; |
1aa04278 | 190 | |
a714af27 JC |
191 | ret = strtobool(buf, &state); |
192 | if (ret < 0) | |
193 | return ret; | |
194 | ||
8d213f24 | 195 | mutex_lock(&indio_dev->mlock); |
705ee2c9 | 196 | if (iio_buffer_is_active(indio_dev->buffer)) { |
8d213f24 JC |
197 | ret = -EBUSY; |
198 | goto error_ret; | |
199 | } | |
14555b14 | 200 | indio_dev->buffer->scan_timestamp = state; |
8d213f24 JC |
201 | error_ret: |
202 | mutex_unlock(&indio_dev->mlock); | |
203 | ||
204 | return ret ? ret : len; | |
205 | } | |
206 | ||
/*
 * Create the per-channel scan_elements sysfs attributes ("index", "type"
 * and "en") for @chan, appending them to the buffer's attribute list.
 * Returns the number of attributes added, or a negative errno.
 */
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	/* The timestamp "channel" gets its own dedicated enable handlers. */
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
260 | ||
/*
 * Free one scan-element attribute previously created by
 * __iio_add_chan_devattr(); the attribute name string is a separate
 * allocation and must be freed first.
 */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
267 | ||
14555b14 | 268 | static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev) |
1d892719 JC |
269 | { |
270 | struct iio_dev_attr *p, *n; | |
14555b14 | 271 | struct iio_buffer *buffer = indio_dev->buffer; |
26d25ae3 | 272 | |
1d892719 | 273 | list_for_each_entry_safe(p, n, |
14555b14 JC |
274 | &buffer->scan_el_dev_attr_list, l) |
275 | iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p); | |
1d892719 JC |
276 | } |
277 | ||
/* sysfs directory name under which the scan element attributes live. */
static const char * const iio_scan_elements_group_name = "scan_elements";
279 | ||
14555b14 JC |
280 | int iio_buffer_register(struct iio_dev *indio_dev, |
281 | const struct iio_chan_spec *channels, | |
282 | int num_channels) | |
1d892719 | 283 | { |
26d25ae3 JC |
284 | struct iio_dev_attr *p; |
285 | struct attribute **attr; | |
14555b14 | 286 | struct iio_buffer *buffer = indio_dev->buffer; |
26d25ae3 JC |
287 | int ret, i, attrn, attrcount, attrcount_orig = 0; |
288 | ||
14555b14 JC |
289 | if (buffer->attrs) |
290 | indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs; | |
bf32963c | 291 | |
14555b14 JC |
292 | if (buffer->scan_el_attrs != NULL) { |
293 | attr = buffer->scan_el_attrs->attrs; | |
26d25ae3 JC |
294 | while (*attr++ != NULL) |
295 | attrcount_orig++; | |
296 | } | |
297 | attrcount = attrcount_orig; | |
14555b14 | 298 | INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list); |
1d892719 JC |
299 | if (channels) { |
300 | /* new magic */ | |
301 | for (i = 0; i < num_channels; i++) { | |
f5b81ddd LPC |
302 | if (channels[i].scan_index < 0) |
303 | continue; | |
304 | ||
32b5eeca JC |
305 | /* Establish necessary mask length */ |
306 | if (channels[i].scan_index > | |
307 | (int)indio_dev->masklength - 1) | |
308 | indio_dev->masklength | |
e1dc7bee | 309 | = channels[i].scan_index + 1; |
32b5eeca | 310 | |
14555b14 | 311 | ret = iio_buffer_add_channel_sysfs(indio_dev, |
1aa04278 | 312 | &channels[i]); |
1d892719 | 313 | if (ret < 0) |
26d25ae3 JC |
314 | goto error_cleanup_dynamic; |
315 | attrcount += ret; | |
beb80600 | 316 | if (channels[i].type == IIO_TIMESTAMP) |
f1264809 | 317 | indio_dev->scan_index_timestamp = |
beb80600 | 318 | channels[i].scan_index; |
1d892719 | 319 | } |
14555b14 | 320 | if (indio_dev->masklength && buffer->scan_mask == NULL) { |
d83fb184 TM |
321 | buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength), |
322 | sizeof(*buffer->scan_mask), | |
323 | GFP_KERNEL); | |
14555b14 | 324 | if (buffer->scan_mask == NULL) { |
32b5eeca | 325 | ret = -ENOMEM; |
26d25ae3 | 326 | goto error_cleanup_dynamic; |
32b5eeca JC |
327 | } |
328 | } | |
1d892719 JC |
329 | } |
330 | ||
14555b14 | 331 | buffer->scan_el_group.name = iio_scan_elements_group_name; |
26d25ae3 | 332 | |
d83fb184 TM |
333 | buffer->scan_el_group.attrs = kcalloc(attrcount + 1, |
334 | sizeof(buffer->scan_el_group.attrs[0]), | |
335 | GFP_KERNEL); | |
14555b14 | 336 | if (buffer->scan_el_group.attrs == NULL) { |
26d25ae3 JC |
337 | ret = -ENOMEM; |
338 | goto error_free_scan_mask; | |
339 | } | |
14555b14 JC |
340 | if (buffer->scan_el_attrs) |
341 | memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs, | |
342 | sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig); | |
26d25ae3 JC |
343 | attrn = attrcount_orig; |
344 | ||
14555b14 JC |
345 | list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l) |
346 | buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr; | |
347 | indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group; | |
26d25ae3 | 348 | |
1d892719 | 349 | return 0; |
26d25ae3 JC |
350 | |
351 | error_free_scan_mask: | |
14555b14 | 352 | kfree(buffer->scan_mask); |
1d892719 | 353 | error_cleanup_dynamic: |
14555b14 | 354 | __iio_buffer_attr_cleanup(indio_dev); |
26d25ae3 | 355 | |
7026ea4b JC |
356 | return ret; |
357 | } | |
14555b14 | 358 | EXPORT_SYMBOL(iio_buffer_register); |
1d892719 | 359 | |
14555b14 | 360 | void iio_buffer_unregister(struct iio_dev *indio_dev) |
7026ea4b | 361 | { |
14555b14 JC |
362 | kfree(indio_dev->buffer->scan_mask); |
363 | kfree(indio_dev->buffer->scan_el_group.attrs); | |
364 | __iio_buffer_attr_cleanup(indio_dev); | |
7026ea4b | 365 | } |
14555b14 | 366 | EXPORT_SYMBOL(iio_buffer_unregister); |
7026ea4b | 367 | |
14555b14 JC |
368 | ssize_t iio_buffer_read_length(struct device *dev, |
369 | struct device_attribute *attr, | |
370 | char *buf) | |
7026ea4b | 371 | { |
e53f5ac5 | 372 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
14555b14 | 373 | struct iio_buffer *buffer = indio_dev->buffer; |
7026ea4b | 374 | |
14555b14 | 375 | if (buffer->access->get_length) |
8d213f24 | 376 | return sprintf(buf, "%d\n", |
14555b14 | 377 | buffer->access->get_length(buffer)); |
7026ea4b | 378 | |
8d213f24 | 379 | return 0; |
7026ea4b | 380 | } |
14555b14 | 381 | EXPORT_SYMBOL(iio_buffer_read_length); |
7026ea4b | 382 | |
14555b14 JC |
383 | ssize_t iio_buffer_write_length(struct device *dev, |
384 | struct device_attribute *attr, | |
385 | const char *buf, | |
386 | size_t len) | |
7026ea4b | 387 | { |
e53f5ac5 | 388 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
14555b14 | 389 | struct iio_buffer *buffer = indio_dev->buffer; |
948ad205 LPC |
390 | unsigned int val; |
391 | int ret; | |
8d213f24 | 392 | |
948ad205 | 393 | ret = kstrtouint(buf, 10, &val); |
7026ea4b JC |
394 | if (ret) |
395 | return ret; | |
396 | ||
14555b14 JC |
397 | if (buffer->access->get_length) |
398 | if (val == buffer->access->get_length(buffer)) | |
7026ea4b JC |
399 | return len; |
400 | ||
e38c79e0 | 401 | mutex_lock(&indio_dev->mlock); |
705ee2c9 | 402 | if (iio_buffer_is_active(indio_dev->buffer)) { |
e38c79e0 LPC |
403 | ret = -EBUSY; |
404 | } else { | |
869871b5 | 405 | if (buffer->access->set_length) |
e38c79e0 | 406 | buffer->access->set_length(buffer, val); |
e38c79e0 | 407 | ret = 0; |
7026ea4b | 408 | } |
e38c79e0 | 409 | mutex_unlock(&indio_dev->mlock); |
7026ea4b | 410 | |
e38c79e0 | 411 | return ret ? ret : len; |
7026ea4b | 412 | } |
14555b14 | 413 | EXPORT_SYMBOL(iio_buffer_write_length); |
7026ea4b | 414 | |
14555b14 JC |
415 | ssize_t iio_buffer_show_enable(struct device *dev, |
416 | struct device_attribute *attr, | |
417 | char *buf) | |
7026ea4b | 418 | { |
e53f5ac5 | 419 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); |
705ee2c9 | 420 | return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer)); |
7026ea4b | 421 | } |
14555b14 | 422 | EXPORT_SYMBOL(iio_buffer_show_enable); |
7026ea4b | 423 | |
9572588c | 424 | /* Note NULL used as error indicator as it doesn't make sense. */ |
cd4361c7 | 425 | static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, |
32b5eeca | 426 | unsigned int masklength, |
cd4361c7 | 427 | const unsigned long *mask) |
32b5eeca JC |
428 | { |
429 | if (bitmap_empty(mask, masklength)) | |
430 | return NULL; | |
431 | while (*av_masks) { | |
432 | if (bitmap_subset(mask, av_masks, masklength)) | |
433 | return av_masks; | |
434 | av_masks += BITS_TO_LONGS(masklength); | |
435 | } | |
436 | return NULL; | |
437 | } | |
438 | ||
183f4173 PM |
439 | static int iio_compute_scan_bytes(struct iio_dev *indio_dev, |
440 | const unsigned long *mask, bool timestamp) | |
959d2952 | 441 | { |
959d2952 JC |
442 | const struct iio_chan_spec *ch; |
443 | unsigned bytes = 0; | |
444 | int length, i; | |
959d2952 JC |
445 | |
446 | /* How much space will the demuxed element take? */ | |
6b3b58ed | 447 | for_each_set_bit(i, mask, |
959d2952 JC |
448 | indio_dev->masklength) { |
449 | ch = iio_find_channel_from_si(indio_dev, i); | |
6b3b58ed | 450 | length = ch->scan_type.storagebits / 8; |
959d2952 JC |
451 | bytes = ALIGN(bytes, length); |
452 | bytes += length; | |
453 | } | |
6b3b58ed | 454 | if (timestamp) { |
959d2952 | 455 | ch = iio_find_channel_from_si(indio_dev, |
f1264809 | 456 | indio_dev->scan_index_timestamp); |
6b3b58ed | 457 | length = ch->scan_type.storagebits / 8; |
959d2952 JC |
458 | bytes = ALIGN(bytes, length); |
459 | bytes += length; | |
460 | } | |
6b3b58ed JC |
461 | return bytes; |
462 | } | |
463 | ||
9e69c935 LPC |
464 | static void iio_buffer_activate(struct iio_dev *indio_dev, |
465 | struct iio_buffer *buffer) | |
466 | { | |
467 | iio_buffer_get(buffer); | |
468 | list_add(&buffer->buffer_list, &indio_dev->buffer_list); | |
469 | } | |
470 | ||
471 | static void iio_buffer_deactivate(struct iio_buffer *buffer) | |
472 | { | |
473 | list_del_init(&buffer->buffer_list); | |
474 | iio_buffer_put(buffer); | |
475 | } | |
476 | ||
/*
 * Forcibly stop all buffers on @indio_dev (e.g. on device removal).
 * Runs the driver's predisable/postdisable callbacks around dropping
 * every active buffer and returns the device to direct mode.
 * NOTE(review): callback return values are deliberately ignored here -
 * this is a best-effort teardown path.
 */
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
			&indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}
495 | ||
/*
 * iio_update_buffers() - manage the list of active buffers
 * @indio_dev:		the device
 * @insert_buffer:	buffer to be enabled, may be NULL
 * @remove_buffer:	buffer to be disabled, may be NULL
 *
 * Winds the device down to direct mode, applies the requested list
 * change, recomputes the compound scan mask from all remaining buffers
 * and winds the device back up.  On failure it attempts to roll back
 * to the previous configuration.
 * NOTE(review): callers appear to serialise via indio_dev->mlock
 * (see iio_buffer_store_enable) - confirm locking rules before reuse.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		/* without available_scan_masks the old mask was kmalloc'd */
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	/* OR together the masks of every remaining active buffer. */
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			}
			else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	/* exactly one of compound_mask / old_mask is still owned by us */
	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
662 | ||
663 | ssize_t iio_buffer_store_enable(struct device *dev, | |
664 | struct device_attribute *attr, | |
665 | const char *buf, | |
666 | size_t len) | |
667 | { | |
668 | int ret; | |
669 | bool requested_state; | |
670 | struct iio_dev *indio_dev = dev_to_iio_dev(dev); | |
84b36ce5 JC |
671 | bool inlist; |
672 | ||
673 | ret = strtobool(buf, &requested_state); | |
674 | if (ret < 0) | |
675 | return ret; | |
676 | ||
677 | mutex_lock(&indio_dev->mlock); | |
678 | ||
679 | /* Find out if it is in the list */ | |
705ee2c9 | 680 | inlist = iio_buffer_is_active(indio_dev->buffer); |
84b36ce5 JC |
681 | /* Already in desired state */ |
682 | if (inlist == requested_state) | |
683 | goto done; | |
684 | ||
685 | if (requested_state) | |
686 | ret = iio_update_buffers(indio_dev, | |
687 | indio_dev->buffer, NULL); | |
688 | else | |
689 | ret = iio_update_buffers(indio_dev, | |
690 | NULL, indio_dev->buffer); | |
691 | ||
692 | if (ret < 0) | |
693 | goto done; | |
694 | done: | |
695 | mutex_unlock(&indio_dev->mlock); | |
696 | return (ret < 0) ? ret : len; | |
697 | } | |
698 | EXPORT_SYMBOL(iio_buffer_store_enable); | |
699 | ||
700 | int iio_sw_buffer_preenable(struct iio_dev *indio_dev) | |
701 | { | |
702 | struct iio_buffer *buffer; | |
703 | unsigned bytes; | |
704 | dev_dbg(&indio_dev->dev, "%s\n", __func__); | |
705 | ||
706 | list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) | |
707 | if (buffer->access->set_bytes_per_datum) { | |
708 | bytes = iio_compute_scan_bytes(indio_dev, | |
709 | buffer->scan_mask, | |
710 | buffer->scan_timestamp); | |
711 | ||
712 | buffer->access->set_bytes_per_datum(buffer, bytes); | |
713 | } | |
959d2952 JC |
714 | return 0; |
715 | } | |
716 | EXPORT_SYMBOL(iio_sw_buffer_preenable); | |
717 | ||
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
733 | ||
939546d1 LPC |
734 | static bool iio_validate_scan_mask(struct iio_dev *indio_dev, |
735 | const unsigned long *mask) | |
736 | { | |
737 | if (!indio_dev->setup_ops->validate_scan_mask) | |
738 | return true; | |
739 | ||
740 | return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); | |
741 | } | |
742 | ||
32b5eeca JC |
743 | /** |
744 | * iio_scan_mask_set() - set particular bit in the scan mask | |
9572588c | 745 | * @indio_dev: the iio device |
14555b14 | 746 | * @buffer: the buffer whose scan mask we are interested in |
32b5eeca | 747 | * @bit: the bit to be set. |
84b36ce5 JC |
748 | * |
749 | * Note that at this point we have no way of knowing what other | |
750 | * buffers might request, hence this code only verifies that the | |
751 | * individual buffers request is plausible. | |
752 | */ | |
f79a9098 JC |
753 | int iio_scan_mask_set(struct iio_dev *indio_dev, |
754 | struct iio_buffer *buffer, int bit) | |
32b5eeca | 755 | { |
cd4361c7 | 756 | const unsigned long *mask; |
32b5eeca JC |
757 | unsigned long *trialmask; |
758 | ||
759 | trialmask = kmalloc(sizeof(*trialmask)* | |
f8c6f4e9 | 760 | BITS_TO_LONGS(indio_dev->masklength), |
32b5eeca JC |
761 | GFP_KERNEL); |
762 | ||
763 | if (trialmask == NULL) | |
764 | return -ENOMEM; | |
f8c6f4e9 | 765 | if (!indio_dev->masklength) { |
9572588c | 766 | WARN_ON("Trying to set scanmask prior to registering buffer\n"); |
939546d1 | 767 | goto err_invalid_mask; |
32b5eeca | 768 | } |
f8c6f4e9 | 769 | bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); |
32b5eeca JC |
770 | set_bit(bit, trialmask); |
771 | ||
939546d1 LPC |
772 | if (!iio_validate_scan_mask(indio_dev, trialmask)) |
773 | goto err_invalid_mask; | |
774 | ||
f8c6f4e9 JC |
775 | if (indio_dev->available_scan_masks) { |
776 | mask = iio_scan_mask_match(indio_dev->available_scan_masks, | |
777 | indio_dev->masklength, | |
32b5eeca | 778 | trialmask); |
939546d1 LPC |
779 | if (!mask) |
780 | goto err_invalid_mask; | |
32b5eeca | 781 | } |
f8c6f4e9 | 782 | bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); |
32b5eeca JC |
783 | |
784 | kfree(trialmask); | |
785 | ||
786 | return 0; | |
939546d1 LPC |
787 | |
788 | err_invalid_mask: | |
789 | kfree(trialmask); | |
790 | return -EINVAL; | |
791 | } | |
32b5eeca JC |
792 | EXPORT_SYMBOL_GPL(iio_scan_mask_set); |
793 | ||
f79a9098 JC |
794 | int iio_scan_mask_query(struct iio_dev *indio_dev, |
795 | struct iio_buffer *buffer, int bit) | |
32b5eeca | 796 | { |
f8c6f4e9 | 797 | if (bit > indio_dev->masklength) |
32b5eeca JC |
798 | return -EINVAL; |
799 | ||
14555b14 | 800 | if (!buffer->scan_mask) |
32b5eeca | 801 | return 0; |
32b5eeca | 802 | |
5a2a6e11 | 803 | return test_bit(bit, buffer->scan_mask); |
32b5eeca JC |
804 | }; |
805 | EXPORT_SYMBOL_GPL(iio_scan_mask_query); | |
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
820 | ||
5d65d920 LPC |
821 | static const void *iio_demux(struct iio_buffer *buffer, |
822 | const void *datain) | |
5ada4ea9 JC |
823 | { |
824 | struct iio_demux_table *t; | |
825 | ||
826 | if (list_empty(&buffer->demux_list)) | |
827 | return datain; | |
828 | list_for_each_entry(t, &buffer->demux_list, l) | |
829 | memcpy(buffer->demux_bounce + t->to, | |
830 | datain + t->from, t->length); | |
831 | ||
832 | return buffer->demux_bounce; | |
833 | } | |
834 | ||
5d65d920 | 835 | static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) |
5ada4ea9 | 836 | { |
5d65d920 | 837 | const void *dataout = iio_demux(buffer, data); |
5ada4ea9 | 838 | |
ce56ade6 | 839 | return buffer->access->store_to(buffer, dataout); |
5ada4ea9 | 840 | } |
5ada4ea9 | 841 | |
842cd100 JC |
842 | static void iio_buffer_demux_free(struct iio_buffer *buffer) |
843 | { | |
844 | struct iio_demux_table *p, *q; | |
845 | list_for_each_entry_safe(p, q, &buffer->demux_list, l) { | |
846 | list_del(&p->l); | |
847 | kfree(p); | |
848 | } | |
849 | } | |
850 | ||
84b36ce5 | 851 | |
5d65d920 | 852 | int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data) |
84b36ce5 JC |
853 | { |
854 | int ret; | |
855 | struct iio_buffer *buf; | |
856 | ||
857 | list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) { | |
858 | ret = iio_push_to_buffer(buf, data); | |
859 | if (ret < 0) | |
860 | return ret; | |
861 | } | |
862 | ||
863 | return 0; | |
864 | } | |
865 | EXPORT_SYMBOL_GPL(iio_push_to_buffers); | |
866 | ||
867 | static int iio_buffer_update_demux(struct iio_dev *indio_dev, | |
868 | struct iio_buffer *buffer) | |
5ada4ea9 JC |
869 | { |
870 | const struct iio_chan_spec *ch; | |
5ada4ea9 JC |
871 | int ret, in_ind = -1, out_ind, length; |
872 | unsigned in_loc = 0, out_loc = 0; | |
842cd100 | 873 | struct iio_demux_table *p; |
5ada4ea9 JC |
874 | |
875 | /* Clear out any old demux */ | |
842cd100 | 876 | iio_buffer_demux_free(buffer); |
5ada4ea9 JC |
877 | kfree(buffer->demux_bounce); |
878 | buffer->demux_bounce = NULL; | |
879 | ||
880 | /* First work out which scan mode we will actually have */ | |
881 | if (bitmap_equal(indio_dev->active_scan_mask, | |
882 | buffer->scan_mask, | |
883 | indio_dev->masklength)) | |
884 | return 0; | |
885 | ||
886 | /* Now we have the two masks, work from least sig and build up sizes */ | |
887 | for_each_set_bit(out_ind, | |
888 | indio_dev->active_scan_mask, | |
889 | indio_dev->masklength) { | |
890 | in_ind = find_next_bit(indio_dev->active_scan_mask, | |
891 | indio_dev->masklength, | |
892 | in_ind + 1); | |
893 | while (in_ind != out_ind) { | |
894 | in_ind = find_next_bit(indio_dev->active_scan_mask, | |
895 | indio_dev->masklength, | |
896 | in_ind + 1); | |
897 | ch = iio_find_channel_from_si(indio_dev, in_ind); | |
898 | length = ch->scan_type.storagebits/8; | |
899 | /* Make sure we are aligned */ | |
900 | in_loc += length; | |
901 | if (in_loc % length) | |
902 | in_loc += length - in_loc % length; | |
903 | } | |
904 | p = kmalloc(sizeof(*p), GFP_KERNEL); | |
905 | if (p == NULL) { | |
906 | ret = -ENOMEM; | |
907 | goto error_clear_mux_table; | |
908 | } | |
909 | ch = iio_find_channel_from_si(indio_dev, in_ind); | |
910 | length = ch->scan_type.storagebits/8; | |
911 | if (out_loc % length) | |
912 | out_loc += length - out_loc % length; | |
913 | if (in_loc % length) | |
914 | in_loc += length - in_loc % length; | |
915 | p->from = in_loc; | |
916 | p->to = out_loc; | |
917 | p->length = length; | |
918 | list_add_tail(&p->l, &buffer->demux_list); | |
919 | out_loc += length; | |
920 | in_loc += length; | |
921 | } | |
922 | /* Relies on scan_timestamp being last */ | |
923 | if (buffer->scan_timestamp) { | |
924 | p = kmalloc(sizeof(*p), GFP_KERNEL); | |
925 | if (p == NULL) { | |
926 | ret = -ENOMEM; | |
927 | goto error_clear_mux_table; | |
928 | } | |
929 | ch = iio_find_channel_from_si(indio_dev, | |
f1264809 | 930 | indio_dev->scan_index_timestamp); |
5ada4ea9 JC |
931 | length = ch->scan_type.storagebits/8; |
932 | if (out_loc % length) | |
933 | out_loc += length - out_loc % length; | |
934 | if (in_loc % length) | |
935 | in_loc += length - in_loc % length; | |
936 | p->from = in_loc; | |
937 | p->to = out_loc; | |
938 | p->length = length; | |
939 | list_add_tail(&p->l, &buffer->demux_list); | |
940 | out_loc += length; | |
941 | in_loc += length; | |
942 | } | |
943 | buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); | |
944 | if (buffer->demux_bounce == NULL) { | |
945 | ret = -ENOMEM; | |
946 | goto error_clear_mux_table; | |
947 | } | |
948 | return 0; | |
949 | ||
950 | error_clear_mux_table: | |
842cd100 JC |
951 | iio_buffer_demux_free(buffer); |
952 | ||
5ada4ea9 JC |
953 | return ret; |
954 | } | |
84b36ce5 JC |
955 | |
956 | int iio_update_demux(struct iio_dev *indio_dev) | |
957 | { | |
958 | struct iio_buffer *buffer; | |
959 | int ret; | |
960 | ||
961 | list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) { | |
962 | ret = iio_buffer_update_demux(indio_dev, buffer); | |
963 | if (ret < 0) | |
964 | goto error_clear_mux_table; | |
965 | } | |
966 | return 0; | |
967 | ||
968 | error_clear_mux_table: | |
969 | list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) | |
970 | iio_buffer_demux_free(buffer); | |
971 | ||
972 | return ret; | |
973 | } | |
5ada4ea9 | 974 | EXPORT_SYMBOL_GPL(iio_update_demux); |
9e69c935 LPC |
975 | |
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	/* Delegate cleanup to the buffer implementation's release callback. */
	buffer->access->release(buffer);
}
991 | ||
992 | /** | |
993 | * iio_buffer_get() - Grab a reference to the buffer | |
994 | * @buffer: The buffer to grab a reference for, may be NULL | |
995 | * | |
996 | * Returns the pointer to the buffer that was passed into the function. | |
997 | */ | |
998 | struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer) | |
999 | { | |
1000 | if (buffer) | |
1001 | kref_get(&buffer->ref); | |
1002 | ||
1003 | return buffer; | |
1004 | } | |
1005 | EXPORT_SYMBOL_GPL(iio_buffer_get); | |
1006 | ||
1007 | /** | |
1008 | * iio_buffer_put() - Release the reference to the buffer | |
1009 | * @buffer: The buffer to release the reference for, may be NULL | |
1010 | */ | |
1011 | void iio_buffer_put(struct iio_buffer *buffer) | |
1012 | { | |
1013 | if (buffer) | |
1014 | kref_put(&buffer->ref, iio_buffer_release); | |
1015 | } | |
1016 | EXPORT_SYMBOL_GPL(iio_buffer_put); |