/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

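/*
 * The scan element "type" attribute below encodes endianness, sign,
 * significant bits, storage bits and shift, e.g. "le:s12/16>>0" for a
 * signed little-endian 12-bit value held in 16 bits of storage with no
 * shift (example values only, not tied to any particular driver).
 */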
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
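
/*
 * Each channel registered below appears in the scan_elements sysfs
 * group as an _index, _type and _en attribute, with the timestamp
 * channel wired to the ts-specific enable callbacks above.
 */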
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";
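
/*
 * Registration below assembles the scan_elements group: any fixed
 * attributes the buffer implementation supplies (scan_el_attrs) are
 * counted first, one index/type/en triplet is then added per channel,
 * and the combined array is hooked into indio_dev->groups.
 */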
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL is used as the error indicator, as it can never be a valid match. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
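
/*
 * Scan layout rule used below: each enabled channel is aligned to its
 * own storage size, with the timestamp (if enabled) last. For example
 * (illustrative values), one 16-bit channel plus an 8-byte timestamp
 * gives 2 bytes of data, padding to offset 8, then 8 bytes of
 * timestamp: 16 bytes per scan in total.
 */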
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		list_del_init(&buffer->buffer_list);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
}

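/*
 * iio_update_buffers() rebuilds the whole buffer setup: wind down the
 * running configuration (predisable/postdisable), edit the buffer list,
 * recompute the compound scan mask, then wind up again (preenable,
 * per-buffer request_update, update_scan_mode, mode selection,
 * postenable), rolling back to direct mode on any failure.
 */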
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = iio_update_buffers(indio_dev,
					 indio_dev->buffer, NULL);
	else
		ret = iio_update_buffers(indio_dev,
					 NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	unsigned bytes;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->set_bytes_per_datum) {
			bytes = iio_compute_scan_bytes(indio_dev,
						       buffer->scan_mask,
						       buffer->scan_timestamp);

			buffer->access->set_bytes_per_datum(buffer, bytes);
		}
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Returns true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
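
/*
 * A driver that can only sample one channel at a time would typically
 * hook the helper above into its setup ops, e.g. (illustrative only):
 *
 *	static const struct iio_buffer_setup_ops foo_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 */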

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);

	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

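/*
 * iio_push_to_buffers() is what a driver calls, typically from its
 * trigger handler, once a complete scan has been assembled, e.g.
 * (illustrative only):
 *
 *	iio_push_to_buffers(indio_dev, scan);
 *	iio_trigger_notify_done(indio_dev->trig);
 */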
5d65d920 832int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
84b36ce5
JC
833{
834 int ret;
835 struct iio_buffer *buf;
836
837 list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
838 ret = iio_push_to_buffer(buf, data);
839 if (ret < 0)
840 return ret;
841 }
842
843 return 0;
844}
845EXPORT_SYMBOL_GPL(iio_push_to_buffers);
846
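/*
 * Build the demux table for one buffer: walk the buffer's requested
 * mask against the device's active scan mask, emitting one memcpy op
 * per wanted channel and accounting (with alignment) for any active
 * channels this buffer did not request.
 */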
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_loc += length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);