iio: kfifo_buf: Implement data_available() callback
drivers/iio/industrialio-buffer.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static bool iio_buffer_data_available(struct iio_buffer *buf)
{
	if (buf->access->data_available)
		return buf->access->data_available(buf);

	return buf->stufftoread;
}

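/*
 * Example: a kfifo-backed buffer can report data readiness directly via the
 * new data_available() callback instead of maintaining the legacy
 * stufftoread flag. A minimal sketch of such a callback (struct iio_kfifo,
 * iio_to_kfifo() and the user_lock field are assumptions taken from the
 * kfifo_buf implementation, not defined in this file):
 *
 *	static bool iio_kfifo_buf_data_available(struct iio_buffer *r)
 *	{
 *		struct iio_kfifo *kf = iio_to_kfifo(r);
 *		bool empty;
 *
 *		mutex_lock(&kf->user_lock);
 *		empty = kfifo_is_empty(&kf->kf);
 *		mutex_unlock(&kf->user_lock);
 *
 *		return !empty;
 *	}
 */
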
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return -ENODEV;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_data_available(rb))
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}

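/*
 * Example: the pollq wait queue is what makes blocking consumption of the
 * buffer chrdev work from userspace. A minimal consumer sketch (device path
 * and buffer size are illustrative):
 *
 *	char scan[64];
 *	struct pollfd pfd = {
 *		.fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK),
 *		.events = POLLIN,
 *	};
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(pfd.fd, scan, sizeof(scan));
 */
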
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
}
EXPORT_SYMBOL(iio_buffer_init);

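/*
 * Example: buffer implementations call iio_buffer_init() on the iio_buffer
 * embedded in their container struct when allocating. A sketch modelled on
 * a kfifo-backed implementation (struct iio_kfifo and kfifo_access_funcs
 * are assumptions, not defined here):
 *
 *	struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
 *	{
 *		struct iio_kfifo *kf = kzalloc(sizeof(*kf), GFP_KERNEL);
 *
 *		if (!kf)
 *			return NULL;
 *		iio_buffer_init(&kf->buffer);
 *		kf->buffer.access = &kfifo_access_funcs;
 *		return &kf->buffer;
 *	}
 */
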
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

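/*
 * Example: for a signed 12-bit little-endian channel stored in 16 bits with
 * no shift, the format string above produces "le:s12/16>>0" in the
 * corresponding scan_elements *_type attribute.
 */
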
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* new magic */
		for (i = 0; i < num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);

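/*
 * Example: for a device registering one voltage channel plus a timestamp,
 * the attribute group built above appears in sysfs roughly as (device
 * number illustrative):
 *
 *	iio:device0/scan_elements/in_voltage0_index
 *	iio:device0/scan_elements/in_voltage0_type
 *	iio:device0/scan_elements/in_voltage0_en
 *	iio:device0/scan_elements/in_timestamp_index
 *	iio:device0/scan_elements/in_timestamp_type
 *	iio:device0/scan_elements/in_timestamp_en
 */
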
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}

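/*
 * Example: a driver whose hardware can only sample channels 0-1 together or
 * all of channels 0-3 would publish a zero-terminated list of masks for the
 * matcher above to walk (a sketch; the masks are illustrative):
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */
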
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

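/*
 * Example: two enabled 16-bit channels followed by the 64-bit timestamp
 * pack as 2 + 2 = 4 bytes of samples, then ALIGN(4, 8) pads the timestamp
 * to offset 8, giving a total scan size of 16 bytes per element.
 */
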
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	iio_buffer_put(buffer);
}

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	if (list_empty(&indio_dev->buffer_list))
		return;

	if (indio_dev->setup_ops->predisable)
		indio_dev->setup_ops->predisable(indio_dev);

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);

	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);

	if (indio_dev->available_scan_masks == NULL)
		kfree(indio_dev->active_scan_mask);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	int ret;
	int success = 0;
	struct iio_buffer *buffer;
	unsigned long *compound_mask;
	const unsigned long *old_mask;

	/* Wind down existing buffers - iff there are any */
	if (!list_empty(&indio_dev->buffer_list)) {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
	/* Keep a copy of current setup to allow roll back */
	old_mask = indio_dev->active_scan_mask;
	if (!indio_dev->available_scan_masks)
		indio_dev->active_scan_mask = NULL;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list)) {
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return 0;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL) {
		if (indio_dev->available_scan_masks == NULL)
			kfree(old_mask);
		return -ENOMEM;
	}
	indio_dev->scan_timestamp = 0;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		indio_dev->scan_timestamp |= buffer->scan_timestamp;
	}
	if (indio_dev->available_scan_masks) {
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    compound_mask);
		if (indio_dev->active_scan_mask == NULL) {
			/*
			 * Roll back.
			 * Note can only occur when adding a buffer.
			 */
			iio_buffer_deactivate(insert_buffer);
			if (old_mask) {
				indio_dev->active_scan_mask = old_mask;
				success = -EINVAL;
			} else {
				kfree(compound_mask);
				ret = -EINVAL;
				goto error_ret;
			}
		}
	} else {
		indio_dev->active_scan_mask = compound_mask;
	}

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			printk(KERN_ERR
			       "Buffer not started: buffer preenable failed (%d)\n", ret);
			goto error_remove_inserted;
		}
	}
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev,
				       indio_dev->active_scan_mask,
				       indio_dev->scan_timestamp);
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		iio_buffer_update_bytes_per_datum(indio_dev, buffer);
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed (%d)\n", ret);
				goto error_run_postdisable;
			}
		}
	}
	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
			goto error_run_postdisable;
		}
	}
	/* Definitely possible for devices to support both of these. */
	if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
		if (!indio_dev->trig) {
			printk(KERN_INFO "Buffer not started: no trigger\n");
			ret = -EINVAL;
			/* Can only occur on first buffer */
			goto error_run_postdisable;
		}
		indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
	} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
		indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
	} else { /* Should never be reached */
		ret = -EINVAL;
		goto error_run_postdisable;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			printk(KERN_INFO
			       "Buffer not started: postenable failed (%d)\n", ret);
			indio_dev->currentmode = INDIO_DIRECT_MODE;
			if (indio_dev->setup_ops->postdisable)
				indio_dev->setup_ops->postdisable(indio_dev);
			goto error_disable_all_buffers;
		}
	}

	if (indio_dev->available_scan_masks)
		kfree(compound_mask);
	else
		kfree(old_mask);

	return success;

error_disable_all_buffers:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
	if (insert_buffer)
		iio_buffer_deactivate(insert_buffer);
	indio_dev->active_scan_mask = old_mask;
	kfree(compound_mask);
error_ret:
	return ret;
}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

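/*
 * Example: an in-kernel consumer can attach its own buffer alongside the
 * device's and detach it again when done (a sketch; cb_buffer is an assumed
 * caller-owned buffer, not defined here):
 *
 *	ret = iio_update_buffers(indio_dev, cb_buffer, NULL);
 *	...
 *	iio_update_buffers(indio_dev, NULL, cb_buffer);
 */
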
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

	if (ret < 0)
		goto done;
done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

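/*
 * Example: a driver that can sample only one channel at a time wires this
 * helper into its buffer setup ops (a sketch; my_preenable is assumed):
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.preenable = my_preenable,
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */
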
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout);
}

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);

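/*
 * Example: a triggered-capture driver typically calls iio_push_to_buffers()
 * from its pollfunc bottom half once a scan has been assembled (a sketch;
 * the data buffer size and my_read_scan() are driver-specific assumptions):
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u8 data[32];
 *
 *		my_read_scan(indio_dev, data);
 *		iio_push_to_buffers(indio_dev, data);
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */
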
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits/8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits/8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
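
/*
 * Example: code that stashes a buffer pointer beyond the scope of the
 * current call takes its own reference and drops it when finished (a
 * sketch; the state struct is an assumption):
 *
 *	state->buffer = iio_buffer_get(indio_dev->buffer);
 *	...
 *	iio_buffer_put(state->buffer);
 */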