staging:iio:buffer trivial use of strtobool to remove dodgy equivalent.
drivers/staging/iio/industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
#include "buffer.h"

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= indio_dev->channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				buffer->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	ulong val;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		} else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

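/*
 * Explanatory note (not part of the original source): enabling walks
 * preenable -> request_update -> mode selection -> postenable, and a
 * postenable failure restores the previous mode and calls postdisable;
 * disabling walks predisable -> INDIO_DIRECT_MODE -> postdisable.
 */
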
ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note: NULL is used as the error indicator, since a NULL mask makes no
 * sense as a match anyway. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

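/*
 * Explanatory note (not part of the original source): available_scan_masks
 * is assumed to be a concatenation of bitmaps, each BITS_TO_LONGS(masklength)
 * longs wide, terminated by a mask whose first long is zero; that terminator
 * is what the while (*av_masks) test above relies on.
 */
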
int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, buffer->scan_mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits/8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      buffer->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	buffer->access->set_bytes_per_datum(buffer, bytes);

	/* What scan mask do we actually have? */
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;
	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

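/*
 * Worked example (illustrative only, not from the original source): with two
 * enabled 16-bit channels (storagebits = 16) and scan_timestamp set, and
 * assuming a 64-bit timestamp channel, the loop above accumulates 2 + 2 = 4
 * bytes, ALIGNs to 8 for the timestamp and adds 8 more, so
 * set_bytes_per_datum() is called with 16.
 */
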
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device the buffer belongs to
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN_ON("trying to set scanmask prior to registering buffer\n");
		kfree(trialmask);
		return -EINVAL;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

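/*
 * Explanatory note (not part of the original source): each iio_demux_table
 * entry describes one memcpy of @length bytes from offset @from in the raw
 * scan captured by the device to offset @to in demux_bounce, letting
 * iio_demux() repack a capture that holds more channels than were requested.
 */
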
static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);

int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p, *q;

	/* Clear out any old demux */
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      buffer->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);