iio: Fix tcs3472 dev-to-indio_dev conversion in suspend/resume
[deliverable/linux.git] / drivers / iio / industrialio-buffer.c
CommitLineData
7026ea4b
JC
1/* The industrial I/O core
2 *
3 * Copyright (c) 2008 Jonathan Cameron
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
14555b14 9 * Handling of buffer allocation / resizing.
7026ea4b
JC
10 *
11 *
12 * Things to look at here.
13 * - Better memory allocation techniques?
14 * - Alternative access techniques?
15 */
16#include <linux/kernel.h>
8e336a72 17#include <linux/export.h>
7026ea4b 18#include <linux/device.h>
7026ea4b 19#include <linux/fs.h>
7026ea4b 20#include <linux/cdev.h>
5a0e3ad6 21#include <linux/slab.h>
a7348347 22#include <linux/poll.h>
7026ea4b 23
06458e27 24#include <linux/iio/iio.h>
df9c1c42 25#include "iio_core.h"
06458e27
JC
26#include <linux/iio/sysfs.h>
27#include <linux/iio/buffer.h>
7026ea4b 28
/* sysfs strings used to report a scan element's endianness. */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
7026ea4b 33
705ee2c9 34static bool iio_buffer_is_active(struct iio_buffer *buf)
84b36ce5 35{
705ee2c9 36 return !list_empty(&buf->buffer_list);
84b36ce5
JC
37}
38
7026ea4b 39/**
14555b14 40 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
7026ea4b 41 *
14555b14
JC
42 * This function relies on all buffer implementations having an
43 * iio_buffer as their first element.
7026ea4b 44 **/
14555b14
JC
45ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
46 size_t n, loff_t *f_ps)
7026ea4b 47{
1aa04278 48 struct iio_dev *indio_dev = filp->private_data;
14555b14 49 struct iio_buffer *rb = indio_dev->buffer;
d5857d65 50
96e00f11 51 if (!rb || !rb->access->read_first_n)
7026ea4b 52 return -EINVAL;
8d213f24 53 return rb->access->read_first_n(rb, n, buf);
7026ea4b
JC
54}
55
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 *
 * Returns POLLIN | POLLRDNORM when the buffer reports data pending,
 * otherwise 0 (no way yet to know whether "enough" data is available).
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	/* NOTE(review): unlike iio_buffer_read_first_n_outer() there is no
	 * NULL check on rb here — confirm a buffer-less device can never
	 * reach poll() on this chrdev. */
	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
71
f79a9098 72void iio_buffer_init(struct iio_buffer *buffer)
7026ea4b 73{
5ada4ea9 74 INIT_LIST_HEAD(&buffer->demux_list);
705ee2c9 75 INIT_LIST_HEAD(&buffer->buffer_list);
14555b14 76 init_waitqueue_head(&buffer->pollq);
7026ea4b 77}
14555b14 78EXPORT_SYMBOL(iio_buffer_init);
7026ea4b 79
1d892719 80static ssize_t iio_show_scan_index(struct device *dev,
8d213f24
JC
81 struct device_attribute *attr,
82 char *buf)
1d892719 83{
8d213f24 84 return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
1d892719
JC
85}
86
87static ssize_t iio_show_fixed_type(struct device *dev,
88 struct device_attribute *attr,
89 char *buf)
90{
91 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
8310b86c
JC
92 u8 type = this_attr->c->scan_type.endianness;
93
94 if (type == IIO_CPU) {
9d5d1153
JC
95#ifdef __LITTLE_ENDIAN
96 type = IIO_LE;
97#else
98 type = IIO_BE;
99#endif
8310b86c
JC
100 }
101 return sprintf(buf, "%s:%c%d/%d>>%u\n",
102 iio_endian_prefix[type],
1d892719
JC
103 this_attr->c->scan_type.sign,
104 this_attr->c->scan_type.realbits,
105 this_attr->c->scan_type.storagebits,
106 this_attr->c->scan_type.shift);
107}
108
/* sysfs: show whether this scan element is enabled in the buffer's mask. */
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* attr->address holds the channel's scan index. */
	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
121
/* Clear one scan element from the buffer's requested mask. Always succeeds. */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
127
/*
 * sysfs: enable/disable one scan element.
 * Rejected with -EBUSY while the buffer is active; the whole
 * query/modify sequence runs under mlock to keep mask updates atomic.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	/* Current state of the bit; only act when it needs to change. */
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;

}
166
167static ssize_t iio_scan_el_ts_show(struct device *dev,
168 struct device_attribute *attr,
169 char *buf)
170{
e53f5ac5 171 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
f8c6f4e9 172 return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
8d213f24
JC
173}
174
/*
 * sysfs: enable/disable capture of the timestamp scan element.
 * Refused with -EBUSY while the buffer is active.
 */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
199
/*
 * Create the per-channel scan_elements attributes ("index", "type", "en")
 * for one channel and queue them on the buffer's attribute list.
 * Returns the number of attributes added, or a negative errno.
 */
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	/* The timestamp channel has its own enable handlers. */
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}
253
/* Free one dynamically allocated scan-element attribute (name + struct). */
static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}
260
/* Tear down every dynamically created scan-element attribute. */
static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	/* _safe variant: entries are freed while walking the list. */
	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}
270
static const char * const iio_scan_elements_group_name = "scan_elements";

/*
 * Register a device's buffer: build the scan_elements sysfs group from the
 * channel array, size the scan mask from the largest scan index, and hook
 * the groups into the device. Returns 0 or a negative errno.
 */
int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	/* Count any attributes the buffer implementation supplied itself. */
	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			/* Negative scan index: channel not scannable. */
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	/* +1 for the NULL terminator sysfs requires. */
	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	/* NOTE(review): the copy source here is the attribute_group pointer
	 * itself, not its ->attrs array — confirm this is intended. */
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	/* Append the dynamically created per-channel attributes. */
	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
1d892719 352
14555b14 353void iio_buffer_unregister(struct iio_dev *indio_dev)
7026ea4b 354{
14555b14
JC
355 kfree(indio_dev->buffer->scan_mask);
356 kfree(indio_dev->buffer->scan_el_group.attrs);
357 __iio_buffer_attr_cleanup(indio_dev);
7026ea4b 358}
14555b14 359EXPORT_SYMBOL(iio_buffer_unregister);
7026ea4b 360
14555b14
JC
361ssize_t iio_buffer_read_length(struct device *dev,
362 struct device_attribute *attr,
363 char *buf)
7026ea4b 364{
e53f5ac5 365 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
14555b14 366 struct iio_buffer *buffer = indio_dev->buffer;
7026ea4b 367
14555b14 368 if (buffer->access->get_length)
8d213f24 369 return sprintf(buf, "%d\n",
14555b14 370 buffer->access->get_length(buffer));
7026ea4b 371
8d213f24 372 return 0;
7026ea4b 373}
14555b14 374EXPORT_SYMBOL(iio_buffer_read_length);
7026ea4b 375
/*
 * sysfs: set the buffer length. A no-op when the value already matches;
 * refused with -EBUSY while the buffer is active.
 */
ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	/* Already at the requested length: nothing to do. */
	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
7026ea4b 407
14555b14
JC
408ssize_t iio_buffer_show_enable(struct device *dev,
409 struct device_attribute *attr,
410 char *buf)
7026ea4b 411{
e53f5ac5 412 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
705ee2c9 413 return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
7026ea4b 414}
14555b14 415EXPORT_SYMBOL(iio_buffer_show_enable);
7026ea4b 416
/* Note NULL used as error indicator as it doesn't make sense. */
/*
 * Find the first entry of av_masks that is a superset of mask.
 * av_masks is a flat array of bitmaps, BITS_TO_LONGS(masklength) words
 * each, terminated by an all-zero first word.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
431
6b3b58ed
JC
432static int iio_compute_scan_bytes(struct iio_dev *indio_dev, const long *mask,
433 bool timestamp)
959d2952 434{
959d2952
JC
435 const struct iio_chan_spec *ch;
436 unsigned bytes = 0;
437 int length, i;
959d2952
JC
438
439 /* How much space will the demuxed element take? */
6b3b58ed 440 for_each_set_bit(i, mask,
959d2952
JC
441 indio_dev->masklength) {
442 ch = iio_find_channel_from_si(indio_dev, i);
6b3b58ed 443 length = ch->scan_type.storagebits / 8;
959d2952
JC
444 bytes = ALIGN(bytes, length);
445 bytes += length;
446 }
6b3b58ed 447 if (timestamp) {
959d2952 448 ch = iio_find_channel_from_si(indio_dev,
f1264809 449 indio_dev->scan_index_timestamp);
6b3b58ed 450 length = ch->scan_type.storagebits / 8;
959d2952
JC
451 bytes = ALIGN(bytes, length);
452 bytes += length;
453 }
6b3b58ed
JC
454 return bytes;
455}
456
/*
 * Insert and/or remove a buffer from the device's active set.
 * Sequence: disable everything currently running, edit the buffer list,
 * recompute the compound scan mask over all remaining buffers, rebuild
 * the demux tables, then re-enable. On any failure the previous scan
 * mask is restored and the inserted buffer (if any) is backed out.
 * Caller must hold whatever locking the sysfs path provides (mlock).
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	/* With driver-supplied masks old_mask is static data, never freed. */
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		list_del_init(&remove_buffer->buffer_list);
	if (insert_buffer)
		list_add(&insert_buffer->buffer_list, &indio_dev->buffer_list);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	/* Union of every attached buffer's requested elements. */
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			list_del_init(&insert_buffer->buffer_list);
			indio_dev->active_scan_mask = old_mask;
			success = -EINVAL;
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	/* Exactly one of the two masks is still owned by us; free it. */
	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:

	if (insert_buffer)
		list_del_init(&insert_buffer->buffer_list);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
616
617ssize_t iio_buffer_store_enable(struct device *dev,
618 struct device_attribute *attr,
619 const char *buf,
620 size_t len)
621{
622 int ret;
623 bool requested_state;
624 struct iio_dev *indio_dev = dev_to_iio_dev(dev);
84b36ce5
JC
625 bool inlist;
626
627 ret = strtobool(buf, &requested_state);
628 if (ret < 0)
629 return ret;
630
631 mutex_lock(&indio_dev->mlock);
632
633 /* Find out if it is in the list */
705ee2c9 634 inlist = iio_buffer_is_active(indio_dev->buffer);
84b36ce5
JC
635 /* Already in desired state */
636 if (inlist == requested_state)
637 goto done;
638
639 if (requested_state)
640 ret = iio_update_buffers(indio_dev,
641 indio_dev->buffer, NULL);
642 else
643 ret = iio_update_buffers(indio_dev,
644 NULL, indio_dev->buffer);
645
646 if (ret < 0)
647 goto done;
648done:
649 mutex_unlock(&indio_dev->mlock);
650 return (ret < 0) ? ret : len;
651}
652EXPORT_SYMBOL(iio_buffer_store_enable);
653
654int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
655{
656 struct iio_buffer *buffer;
657 unsigned bytes;
658 dev_dbg(&indio_dev->dev, "%s\n", __func__);
659
660 list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
661 if (buffer->access->set_bytes_per_datum) {
662 bytes = iio_compute_scan_bytes(indio_dev,
663 buffer->scan_mask,
664 buffer->scan_timestamp);
665
666 buffer->access->set_bytes_per_datum(buffer, bytes);
667 }
959d2952
JC
668 return 0;
669}
670EXPORT_SYMBOL(iio_sw_buffer_preenable);
671
81636632
LPC
672/**
673 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
674 * @indio_dev: the iio device
675 * @mask: scan mask to be checked
676 *
677 * Return true if exactly one bit is set in the scan mask, false otherwise. It
678 * can be used for devices where only one channel can be active for sampling at
679 * a time.
680 */
681bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
682 const unsigned long *mask)
683{
684 return bitmap_weight(mask, indio_dev->masklength) == 1;
685}
686EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
687
939546d1
LPC
688static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
689 const unsigned long *mask)
690{
691 if (!indio_dev->setup_ops->validate_scan_mask)
692 return true;
693
694 return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
695}
696
32b5eeca
JC
697/**
698 * iio_scan_mask_set() - set particular bit in the scan mask
9572588c 699 * @indio_dev: the iio device
14555b14 700 * @buffer: the buffer whose scan mask we are interested in
32b5eeca 701 * @bit: the bit to be set.
84b36ce5
JC
702 *
703 * Note that at this point we have no way of knowing what other
704 * buffers might request, hence this code only verifies that the
705 * individual buffers request is plausible.
706 */
f79a9098
JC
707int iio_scan_mask_set(struct iio_dev *indio_dev,
708 struct iio_buffer *buffer, int bit)
32b5eeca 709{
cd4361c7 710 const unsigned long *mask;
32b5eeca
JC
711 unsigned long *trialmask;
712
713 trialmask = kmalloc(sizeof(*trialmask)*
f8c6f4e9 714 BITS_TO_LONGS(indio_dev->masklength),
32b5eeca
JC
715 GFP_KERNEL);
716
717 if (trialmask == NULL)
718 return -ENOMEM;
f8c6f4e9 719 if (!indio_dev->masklength) {
9572588c 720 WARN_ON("Trying to set scanmask prior to registering buffer\n");
939546d1 721 goto err_invalid_mask;
32b5eeca 722 }
f8c6f4e9 723 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
32b5eeca
JC
724 set_bit(bit, trialmask);
725
939546d1
LPC
726 if (!iio_validate_scan_mask(indio_dev, trialmask))
727 goto err_invalid_mask;
728
f8c6f4e9
JC
729 if (indio_dev->available_scan_masks) {
730 mask = iio_scan_mask_match(indio_dev->available_scan_masks,
731 indio_dev->masklength,
32b5eeca 732 trialmask);
939546d1
LPC
733 if (!mask)
734 goto err_invalid_mask;
32b5eeca 735 }
f8c6f4e9 736 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
32b5eeca
JC
737
738 kfree(trialmask);
739
740 return 0;
939546d1
LPC
741
742err_invalid_mask:
743 kfree(trialmask);
744 return -EINVAL;
745}
32b5eeca
JC
746EXPORT_SYMBOL_GPL(iio_scan_mask_set);
747
f79a9098
JC
748int iio_scan_mask_query(struct iio_dev *indio_dev,
749 struct iio_buffer *buffer, int bit)
32b5eeca 750{
f8c6f4e9 751 if (bit > indio_dev->masklength)
32b5eeca
JC
752 return -EINVAL;
753
14555b14 754 if (!buffer->scan_mask)
32b5eeca 755 return 0;
32b5eeca 756
5a2a6e11 757 return test_bit(bit, buffer->scan_mask);
32b5eeca
JC
758};
759EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};
774
5d65d920
LPC
775static const void *iio_demux(struct iio_buffer *buffer,
776 const void *datain)
5ada4ea9
JC
777{
778 struct iio_demux_table *t;
779
780 if (list_empty(&buffer->demux_list))
781 return datain;
782 list_for_each_entry(t, &buffer->demux_list, l)
783 memcpy(buffer->demux_bounce + t->to,
784 datain + t->from, t->length);
785
786 return buffer->demux_bounce;
787}
788
5d65d920 789static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
5ada4ea9 790{
5d65d920 791 const void *dataout = iio_demux(buffer, data);
5ada4ea9 792
ce56ade6 793 return buffer->access->store_to(buffer, dataout);
5ada4ea9 794}
5ada4ea9 795
/* Release every entry of the buffer's demux table. */
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;
	/* _safe variant: entries are freed while walking the list. */
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
804
84b36ce5 805
/*
 * Push one raw scan to every buffer attached to the device.
 * Stops at, and returns, the first backend error.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
820
821static int iio_buffer_update_demux(struct iio_dev *indio_dev,
822 struct iio_buffer *buffer)
5ada4ea9
JC
823{
824 const struct iio_chan_spec *ch;
5ada4ea9
JC
825 int ret, in_ind = -1, out_ind, length;
826 unsigned in_loc = 0, out_loc = 0;
842cd100 827 struct iio_demux_table *p;
5ada4ea9
JC
828
829 /* Clear out any old demux */
842cd100 830 iio_buffer_demux_free(buffer);
5ada4ea9
JC
831 kfree(buffer->demux_bounce);
832 buffer->demux_bounce = NULL;
833
834 /* First work out which scan mode we will actually have */
835 if (bitmap_equal(indio_dev->active_scan_mask,
836 buffer->scan_mask,
837 indio_dev->masklength))
838 return 0;
839
840 /* Now we have the two masks, work from least sig and build up sizes */
841 for_each_set_bit(out_ind,
842 indio_dev->active_scan_mask,
843 indio_dev->masklength) {
844 in_ind = find_next_bit(indio_dev->active_scan_mask,
845 indio_dev->masklength,
846 in_ind + 1);
847 while (in_ind != out_ind) {
848 in_ind = find_next_bit(indio_dev->active_scan_mask,
849 indio_dev->masklength,
850 in_ind + 1);
851 ch = iio_find_channel_from_si(indio_dev, in_ind);
852 length = ch->scan_type.storagebits/8;
853 /* Make sure we are aligned */
854 in_loc += length;
855 if (in_loc % length)
856 in_loc += length - in_loc % length;
857 }
858 p = kmalloc(sizeof(*p), GFP_KERNEL);
859 if (p == NULL) {
860 ret = -ENOMEM;
861 goto error_clear_mux_table;
862 }
863 ch = iio_find_channel_from_si(indio_dev, in_ind);
864 length = ch->scan_type.storagebits/8;
865 if (out_loc % length)
866 out_loc += length - out_loc % length;
867 if (in_loc % length)
868 in_loc += length - in_loc % length;
869 p->from = in_loc;
870 p->to = out_loc;
871 p->length = length;
872 list_add_tail(&p->l, &buffer->demux_list);
873 out_loc += length;
874 in_loc += length;
875 }
876 /* Relies on scan_timestamp being last */
877 if (buffer->scan_timestamp) {
878 p = kmalloc(sizeof(*p), GFP_KERNEL);
879 if (p == NULL) {
880 ret = -ENOMEM;
881 goto error_clear_mux_table;
882 }
883 ch = iio_find_channel_from_si(indio_dev,
f1264809 884 indio_dev->scan_index_timestamp);
5ada4ea9
JC
885 length = ch->scan_type.storagebits/8;
886 if (out_loc % length)
887 out_loc += length - out_loc % length;
888 if (in_loc % length)
889 in_loc += length - in_loc % length;
890 p->from = in_loc;
891 p->to = out_loc;
892 p->length = length;
893 list_add_tail(&p->l, &buffer->demux_list);
894 out_loc += length;
895 in_loc += length;
896 }
897 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
898 if (buffer->demux_bounce == NULL) {
899 ret = -ENOMEM;
900 goto error_clear_mux_table;
901 }
902 return 0;
903
904error_clear_mux_table:
842cd100
JC
905 iio_buffer_demux_free(buffer);
906
5ada4ea9
JC
907 return ret;
908}
84b36ce5
JC
909
910int iio_update_demux(struct iio_dev *indio_dev)
911{
912 struct iio_buffer *buffer;
913 int ret;
914
915 list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
916 ret = iio_buffer_update_demux(indio_dev, buffer);
917 if (ret < 0)
918 goto error_clear_mux_table;
919 }
920 return 0;
921
922error_clear_mux_table:
923 list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
924 iio_buffer_demux_free(buffer);
925
926 return ret;
927}
5ada4ea9 928EXPORT_SYMBOL_GPL(iio_update_demux);
This page took 0.434925 seconds and 5 git commands to generate.