/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
        [IIO_BE] = "be",
        [IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
        return !list_empty(&buf->buffer_list);
}
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
        return buf->access->data_available(buf);
}
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
                                   struct iio_buffer *buf, size_t required)
{
        if (!indio_dev->info->hwfifo_flush_to_buffer)
                return -ENODEV;

        return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
                             size_t to_wait, int to_flush)
{
        size_t avail;
        int flushed = 0;

        /* wakeup if the device was unregistered */
        if (!indio_dev->info)
                return true;

        /* drain the buffer if it was disabled */
        if (!iio_buffer_is_active(buf)) {
                to_wait = min_t(size_t, to_wait, 1);
                to_flush = 0;
        }

        avail = iio_buffer_data_available(buf);

        if (avail >= to_wait) {
                /* force a flush for non-blocking reads */
                if (!to_wait && !avail && to_flush)
                        iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
                return true;
        }

        if (to_flush)
                flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
                                                  to_wait - avail);
        if (flushed <= 0)
                return false;

        if (avail + flushed >= to_wait)
                return true;

        return false;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
                                      size_t n, loff_t *f_ps)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;
        size_t datum_size;
        size_t to_wait = 0;
        size_t to_read;
        int ret;

        if (!indio_dev->info)
                return -ENODEV;

        if (!rb || !rb->access->read_first_n)
                return -EINVAL;

        datum_size = rb->bytes_per_datum;

        /*
         * If datum_size is 0 there will never be anything to read from the
         * buffer, so signal end of file now.
         */
        if (!datum_size)
                return 0;

        to_read = min_t(size_t, n / datum_size, rb->watermark);

        if (!(filp->f_flags & O_NONBLOCK))
                to_wait = to_read;

        do {
                ret = wait_event_interruptible(rb->pollq,
                        iio_buffer_ready(indio_dev, rb, to_wait, to_read));
                if (ret)
                        return ret;

                if (!indio_dev->info)
                        return -ENODEV;

                ret = rb->access->read_first_n(rb, n, buf);
                if (ret == 0 && (filp->f_flags & O_NONBLOCK))
                        ret = -EAGAIN;
        } while (ret == 0);

        return ret;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
                             struct poll_table_struct *wait)
{
        struct iio_dev *indio_dev = filp->private_data;
        struct iio_buffer *rb = indio_dev->buffer;

        if (!indio_dev->info)
                return -ENODEV;

        poll_wait(filp, &rb->pollq, wait);
        if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
                return POLLIN | POLLRDNORM;
        return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
        INIT_LIST_HEAD(&buffer->demux_list);
        INIT_LIST_HEAD(&buffer->buffer_list);
        init_waitqueue_head(&buffer->pollq);
        kref_init(&buffer->ref);
        buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
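
/*
 * Illustrative sketch, not part of the original file: a buffer
 * implementation embeds struct iio_buffer as its first member and calls
 * iio_buffer_init() right after allocation. The wrapper struct and helper
 * below are hypothetical.
 */
#if 0   /* example only */
struct my_kfifo_like_buffer {
        struct iio_buffer buffer;       /* must be first, see read_first_n_outer */
        /* implementation-private state follows */
};

static struct iio_buffer *my_buffer_alloc(void)
{
        struct my_kfifo_like_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);

        if (!b)
                return NULL;
        iio_buffer_init(&b->buffer);    /* lists, pollq, kref, watermark = 1 */
        return &b->buffer;
}
#endif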
static ssize_t iio_show_scan_index(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
        u8 type = this_attr->c->scan_type.endianness;

        if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
                type = IIO_LE;
#else
                type = IIO_BE;
#endif
        }
        if (this_attr->c->scan_type.repeat > 1)
                return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.repeat,
                       this_attr->c->scan_type.shift);
        else
                return sprintf(buf, "%s:%c%d/%d>>%u\n",
                       iio_endian_prefix[type],
                       this_attr->c->scan_type.sign,
                       this_attr->c->scan_type.realbits,
                       this_attr->c->scan_type.storagebits,
                       this_attr->c->scan_type.shift);
}
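
/*
 * For reference, derived from the format strings above: a signed 12-bit
 * sample stored little-endian in 16 bits with a right shift of 4 shows up
 * in sysfs as "le:s12/16>>4"; the same layout with a repeat count of 2
 * would read "le:s12/16X2>>4".
 */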
static ssize_t iio_scan_el_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        /* Ensure ret is 0 or 1. */
        ret = !!test_bit(to_iio_dev_attr(attr)->address,
                         indio_dev->buffer->scan_mask);

        return sprintf(buf, "%d\n", ret);
}
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
                                                unsigned int masklength,
                                                const unsigned long *mask)
{
        if (bitmap_empty(mask, masklength))
                return NULL;
        while (*av_masks) {
                if (bitmap_subset(mask, av_masks, masklength))
                        return av_masks;
                av_masks += BITS_TO_LONGS(masklength);
        }
        return NULL;
}
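
/*
 * Illustrative sketch, not part of the original file: available_scan_masks
 * is a zero-terminated list of masks, each BITS_TO_LONGS(masklength) longs
 * wide, which is why the walk above steps by BITS_TO_LONGS() and stops at
 * an empty entry. A hypothetical driver with three channels that supports
 * either channel 0 alone or all three at once might declare:
 */
#if 0   /* example only */
static const unsigned long my_scan_masks[] = {
        BIT(0),                         /* channel 0 only */
        BIT(0) | BIT(1) | BIT(2),       /* all three channels */
        0,                              /* terminator */
};
/* ... indio_dev->available_scan_masks = my_scan_masks; ... */
#endif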
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        if (!indio_dev->setup_ops->validate_scan_mask)
                return true;

        return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
                             struct iio_buffer *buffer, int bit)
{
        const unsigned long *mask;
        unsigned long *trialmask;

        trialmask = kmalloc(sizeof(*trialmask) *
                            BITS_TO_LONGS(indio_dev->masklength),
                            GFP_KERNEL);
        if (trialmask == NULL)
                return -ENOMEM;
        if (!indio_dev->masklength) {
                WARN(1, "Trying to set scanmask prior to registering buffer\n");
                goto err_invalid_mask;
        }
        bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
        set_bit(bit, trialmask);

        if (!iio_validate_scan_mask(indio_dev, trialmask))
                goto err_invalid_mask;

        if (indio_dev->available_scan_masks) {
                mask = iio_scan_mask_match(indio_dev->available_scan_masks,
                                           indio_dev->masklength,
                                           trialmask);
                if (!mask)
                        goto err_invalid_mask;
        }
        bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

        kfree(trialmask);

        return 0;

err_invalid_mask:
        kfree(trialmask);
        return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
        clear_bit(bit, buffer->scan_mask);
        return 0;
}
static ssize_t iio_scan_el_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t len)
{
        int ret;
        bool state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;
        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
        if (ret < 0)
                goto error_ret;
        if (!state && ret) {
                ret = iio_scan_mask_clear(buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        } else if (state && !ret) {
                ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
                if (ret)
                        goto error_ret;
        }

error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf,
                                    size_t len)
{
        int ret;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool state;

        ret = strtobool(buf, &state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto error_ret;
        }
        indio_dev->buffer->scan_timestamp = state;
error_ret:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
                                        const struct iio_chan_spec *chan)
{
        int ret, attrcount = 0;
        struct iio_buffer *buffer = indio_dev->buffer;

        ret = __iio_add_chan_devattr("index",
                                     chan,
                                     &iio_show_scan_index,
                                     NULL,
                                     0,
                                     IIO_SEPARATE,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        ret = __iio_add_chan_devattr("type",
                                     chan,
                                     &iio_show_fixed_type,
                                     NULL,
                                     0,
                                     0,
                                     &indio_dev->dev,
                                     &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        if (chan->type != IIO_TIMESTAMP)
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_show,
                                             &iio_scan_el_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        else
                ret = __iio_add_chan_devattr("en",
                                             chan,
                                             &iio_scan_el_ts_show,
                                             &iio_scan_el_ts_store,
                                             chan->scan_index,
                                             0,
                                             &indio_dev->dev,
                                             &buffer->scan_el_dev_attr_list);
        if (ret)
                return ret;
        attrcount++;
        return attrcount;
}
static ssize_t iio_buffer_read_length(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%d\n", buffer->length);
}
static ssize_t iio_buffer_write_length(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;

        if (val == buffer->length)
                return len;

        mutex_lock(&indio_dev->mlock);
        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
        } else {
                buffer->access->set_length(buffer, val);
                ret = 0;
        }
        if (ret)
                goto out;
        if (buffer->length && buffer->length < buffer->watermark)
                buffer->watermark = buffer->length;
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
static ssize_t iio_buffer_show_enable(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);

        return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
                                  const unsigned long *mask, bool timestamp)
{
        const struct iio_chan_spec *ch;
        unsigned bytes = 0;
        int length, i;

        /* How much space will the demuxed element take? */
        for_each_set_bit(i, mask,
                         indio_dev->masklength) {
                ch = iio_find_channel_from_si(indio_dev, i);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        if (timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                                              indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                bytes = ALIGN(bytes, length);
                bytes += length;
        }
        return bytes;
}
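
/*
 * Worked example, derived from the code above: one enabled 16-bit channel
 * (storagebits = 16) plus a 64-bit timestamp gives bytes = 2 after the
 * channel, then ALIGN(2, 8) = 8 for the timestamp slot, so the scan is
 * 16 bytes: sample at offset 0, 6 bytes of padding, timestamp at offset 8.
 */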
static void iio_buffer_activate(struct iio_dev *indio_dev,
                                struct iio_buffer *buffer)
{
        iio_buffer_get(buffer);
        list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
        list_del_init(&buffer->buffer_list);
        wake_up_interruptible(&buffer->pollq);
        iio_buffer_put(buffer);
}
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer, *_buffer;

        if (list_empty(&indio_dev->buffer_list))
                return;

        if (indio_dev->setup_ops->predisable)
                indio_dev->setup_ops->predisable(indio_dev);

        list_for_each_entry_safe(buffer, _buffer,
                                 &indio_dev->buffer_list, buffer_list)
                iio_buffer_deactivate(buffer);

        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);

        if (indio_dev->available_scan_masks == NULL)
                kfree(indio_dev->active_scan_mask);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
                                              struct iio_buffer *buffer)
{
        unsigned int bytes;

        if (!buffer->access->set_bytes_per_datum)
                return;

        bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
                                       buffer->scan_timestamp);

        buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int __iio_update_buffers(struct iio_dev *indio_dev,
                                struct iio_buffer *insert_buffer,
                                struct iio_buffer *remove_buffer)
{
        int ret;
        int success = 0;
        struct iio_buffer *buffer;
        unsigned long *compound_mask;
        const unsigned long *old_mask;

        /* Wind down existing buffers - iff there are any */
        if (!list_empty(&indio_dev->buffer_list)) {
                if (indio_dev->setup_ops->predisable) {
                        ret = indio_dev->setup_ops->predisable(indio_dev);
                        if (ret)
                                return ret;
                }
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->setup_ops->postdisable) {
                        ret = indio_dev->setup_ops->postdisable(indio_dev);
                        if (ret)
                                return ret;
                }
        }
        /* Keep a copy of current setup to allow roll back */
        old_mask = indio_dev->active_scan_mask;
        if (!indio_dev->available_scan_masks)
                indio_dev->active_scan_mask = NULL;

        if (remove_buffer)
                iio_buffer_deactivate(remove_buffer);
        if (insert_buffer)
                iio_buffer_activate(indio_dev, insert_buffer);

        /* If no buffers in list, we are done */
        if (list_empty(&indio_dev->buffer_list)) {
                indio_dev->currentmode = INDIO_DIRECT_MODE;
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return 0;
        }

        /* What scan mask do we actually have? */
        compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                sizeof(long), GFP_KERNEL);
        if (compound_mask == NULL) {
                if (indio_dev->available_scan_masks == NULL)
                        kfree(old_mask);
                return -ENOMEM;
        }
        indio_dev->scan_timestamp = 0;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
                          indio_dev->masklength);
                indio_dev->scan_timestamp |= buffer->scan_timestamp;
        }
        if (indio_dev->available_scan_masks) {
                indio_dev->active_scan_mask =
                        iio_scan_mask_match(indio_dev->available_scan_masks,
                                            indio_dev->masklength,
                                            compound_mask);
                if (indio_dev->active_scan_mask == NULL) {
                        /*
                         * Roll back.
                         * Note can only occur when adding a buffer.
                         */
                        iio_buffer_deactivate(insert_buffer);
                        if (old_mask) {
                                indio_dev->active_scan_mask = old_mask;
                                success = -EINVAL;
                        } else {
                                kfree(compound_mask);
                                ret = -EINVAL;
                                return ret;
                        }
                }
        } else {
                indio_dev->active_scan_mask = compound_mask;
        }

        iio_update_demux(indio_dev);

        /* Wind up again */
        if (indio_dev->setup_ops->preenable) {
                ret = indio_dev->setup_ops->preenable(indio_dev);
                if (ret) {
                        printk(KERN_ERR
                               "Buffer not started: buffer preenable failed (%d)\n", ret);
                        goto error_remove_inserted;
                }
        }
        indio_dev->scan_bytes =
                iio_compute_scan_bytes(indio_dev,
                                       indio_dev->active_scan_mask,
                                       indio_dev->scan_timestamp);
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                iio_buffer_update_bytes_per_datum(indio_dev, buffer);
                if (buffer->access->request_update) {
                        ret = buffer->access->request_update(buffer);
                        if (ret) {
                                printk(KERN_INFO
                                       "Buffer not started: buffer parameter update failed (%d)\n", ret);
                                goto error_run_postdisable;
                        }
                }
        }
        if (indio_dev->info->update_scan_mode) {
                ret = indio_dev->info
                        ->update_scan_mode(indio_dev,
                                           indio_dev->active_scan_mask);
                if (ret < 0) {
                        printk(KERN_INFO "Buffer not started: update scan mode failed (%d)\n", ret);
                        goto error_run_postdisable;
                }
        }
        /* Definitely possible for devices to support both of these. */
        if ((indio_dev->modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
                indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
        } else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
                indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
        } else if (indio_dev->modes & INDIO_BUFFER_SOFTWARE) {
                indio_dev->currentmode = INDIO_BUFFER_SOFTWARE;
        } else { /* Should never be reached */
                /* Can only occur on first buffer */
                if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
                        pr_info("Buffer not started: no trigger\n");
                ret = -EINVAL;
                goto error_run_postdisable;
        }

        if (indio_dev->setup_ops->postenable) {
                ret = indio_dev->setup_ops->postenable(indio_dev);
                if (ret) {
                        printk(KERN_INFO
                               "Buffer not started: postenable failed (%d)\n", ret);
                        indio_dev->currentmode = INDIO_DIRECT_MODE;
                        if (indio_dev->setup_ops->postdisable)
                                indio_dev->setup_ops->postdisable(indio_dev);
                        goto error_disable_all_buffers;
                }
        }

        if (indio_dev->available_scan_masks)
                kfree(compound_mask);
        else
                kfree(old_mask);

        return success;

error_disable_all_buffers:
        indio_dev->currentmode = INDIO_DIRECT_MODE;
error_run_postdisable:
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
error_remove_inserted:
        if (insert_buffer)
                iio_buffer_deactivate(insert_buffer);
        indio_dev->active_scan_mask = old_mask;
        kfree(compound_mask);
        return ret;
}
int iio_update_buffers(struct iio_dev *indio_dev,
                       struct iio_buffer *insert_buffer,
                       struct iio_buffer *remove_buffer)
{
        int ret;

        if (insert_buffer == remove_buffer)
                return 0;

        mutex_lock(&indio_dev->info_exist_lock);
        mutex_lock(&indio_dev->mlock);

        if (insert_buffer && iio_buffer_is_active(insert_buffer))
                insert_buffer = NULL;

        if (remove_buffer && !iio_buffer_is_active(remove_buffer))
                remove_buffer = NULL;

        if (!insert_buffer && !remove_buffer) {
                ret = 0;
                goto out_unlock;
        }

        if (indio_dev->info == NULL) {
                ret = -ENODEV;
                goto out_unlock;
        }

        ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
        mutex_unlock(&indio_dev->mlock);
        mutex_unlock(&indio_dev->info_exist_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
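
/*
 * Illustrative sketch, not part of the original file: an in-kernel user
 * attaches its own buffer and later detaches it again; the function and
 * 'consumer_buffer' are hypothetical.
 */
#if 0   /* example only */
static int my_attach_consumer(struct iio_dev *indio_dev,
                              struct iio_buffer *consumer_buffer)
{
        int ret;

        ret = iio_update_buffers(indio_dev, consumer_buffer, NULL);
        if (ret)
                return ret;
        /* ... stream data, then detach again: */
        return iio_update_buffers(indio_dev, NULL, consumer_buffer);
}
#endif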
static ssize_t iio_buffer_store_enable(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
                                       size_t len)
{
        int ret;
        bool requested_state;
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        bool inlist;

        ret = strtobool(buf, &requested_state);
        if (ret < 0)
                return ret;

        mutex_lock(&indio_dev->mlock);

        /* Find out if it is in the list */
        inlist = iio_buffer_is_active(indio_dev->buffer);
        /* Already in desired state */
        if (inlist == requested_state)
                goto done;

        if (requested_state)
                ret = __iio_update_buffers(indio_dev,
                                           indio_dev->buffer, NULL);
        else
                ret = __iio_update_buffers(indio_dev,
                                           NULL, indio_dev->buffer);

done:
        mutex_unlock(&indio_dev->mlock);
        return (ret < 0) ? ret : len;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
static ssize_t iio_buffer_show_watermark(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;

        return sprintf(buf, "%u\n", buffer->watermark);
}
static ssize_t iio_buffer_store_watermark(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t len)
{
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct iio_buffer *buffer = indio_dev->buffer;
        unsigned int val;
        int ret;

        ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;
        if (!val)
                return -EINVAL;

        mutex_lock(&indio_dev->mlock);

        if (val > buffer->length) {
                ret = -EINVAL;
                goto out;
        }

        if (iio_buffer_is_active(indio_dev->buffer)) {
                ret = -EBUSY;
                goto out;
        }

        buffer->watermark = val;

        if (indio_dev->info->hwfifo_set_watermark)
                indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
        mutex_unlock(&indio_dev->mlock);

        return ret ? ret : len;
}
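
/*
 * Illustrative sketch, not part of the original file: a driver with a
 * hardware FIFO mirrors the user-visible watermark into the device by
 * implementing hwfifo_set_watermark in its iio_info; all names below are
 * hypothetical.
 */
#if 0   /* example only */
static int my_hwfifo_set_watermark(struct iio_dev *indio_dev, unsigned int val)
{
        struct my_state *st = iio_priv(indio_dev);

        /* ask the device to interrupt once 'val' samples are queued */
        return my_write_reg(st, MY_REG_FIFO_WATERMARK, val);
}

static const struct iio_info my_info = {
        .hwfifo_set_watermark = my_hwfifo_set_watermark,
        /* ... */
};
#endif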
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
                   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
        S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
                   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
                   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
        &dev_attr_length.attr,
        &dev_attr_enable.attr,
        &dev_attr_watermark.attr,
};
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
        struct iio_dev_attr *p;
        struct attribute **attr;
        struct iio_buffer *buffer = indio_dev->buffer;
        int ret, i, attrn, attrcount, attrcount_orig = 0;
        const struct iio_chan_spec *channels;

        if (!buffer)
                return 0;

        attrcount = 0;
        if (buffer->attrs) {
                while (buffer->attrs[attrcount] != NULL)
                        attrcount++;
        }

        attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
                       sizeof(struct attribute *), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
        if (!buffer->access->set_length)
                attr[0] = &dev_attr_length_ro.attr;

        if (buffer->attrs)
                memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
                       sizeof(struct attribute *) * attrcount);

        attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

        buffer->buffer_group.name = "buffer";
        buffer->buffer_group.attrs = attr;

        indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

        if (buffer->scan_el_attrs != NULL) {
                attr = buffer->scan_el_attrs->attrs;
                while (*attr++ != NULL)
                        attrcount_orig++;
        }
        attrcount = attrcount_orig;
        INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
        channels = indio_dev->channels;
        if (channels) {
                for (i = 0; i < indio_dev->num_channels; i++) {
                        if (channels[i].scan_index < 0)
                                continue;

                        /* Establish necessary mask length */
                        if (channels[i].scan_index >
                            (int)indio_dev->masklength - 1)
                                indio_dev->masklength
                                        = channels[i].scan_index + 1;

                        ret = iio_buffer_add_channel_sysfs(indio_dev,
                                                           &channels[i]);
                        if (ret < 0)
                                goto error_cleanup_dynamic;
                        attrcount += ret;
                        if (channels[i].type == IIO_TIMESTAMP)
                                indio_dev->scan_index_timestamp =
                                        channels[i].scan_index;
                }
                if (indio_dev->masklength && buffer->scan_mask == NULL) {
                        buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
                                                    sizeof(*buffer->scan_mask),
                                                    GFP_KERNEL);
                        if (buffer->scan_mask == NULL) {
                                ret = -ENOMEM;
                                goto error_cleanup_dynamic;
                        }
                }
        }

        buffer->scan_el_group.name = iio_scan_elements_group_name;

        buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
                                              sizeof(buffer->scan_el_group.attrs[0]),
                                              GFP_KERNEL);
        if (buffer->scan_el_group.attrs == NULL) {
                ret = -ENOMEM;
                goto error_free_scan_mask;
        }
        if (buffer->scan_el_attrs)
                memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
                       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
        attrn = attrcount_orig;

        list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
                buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
        indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

        return 0;

error_free_scan_mask:
        kfree(buffer->scan_mask);
error_cleanup_dynamic:
        iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
        kfree(indio_dev->buffer->buffer_group.attrs);

        return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
        if (!indio_dev->buffer)
                return;

        kfree(indio_dev->buffer->scan_mask);
        kfree(indio_dev->buffer->buffer_group.attrs);
        kfree(indio_dev->buffer->scan_el_group.attrs);
        iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
        const unsigned long *mask)
{
        return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
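
/*
 * Illustrative sketch, not part of the original file: a device that can
 * sample only one channel at a time plugs the helper above into its buffer
 * setup ops; the structure name is hypothetical.
 */
#if 0   /* example only */
static const struct iio_buffer_setup_ops my_setup_ops = {
        .validate_scan_mask = iio_validate_scan_mask_onehot,
        /* .preenable / .postenable / .predisable / .postdisable as needed */
};
#endif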
int iio_scan_mask_query(struct iio_dev *indio_dev,
                        struct iio_buffer *buffer, int bit)
{
        if (bit > indio_dev->masklength)
                return -EINVAL;

        if (!buffer->scan_mask)
                return 0;

        /* Ensure return value is 0 or 1. */
        return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
        unsigned from;
        unsigned to;
        unsigned length;
        struct list_head l;
};
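
/*
 * Worked example, not part of the original file: if the device streams
 * channels 0, 1 and 2 (2 bytes each) but a buffer enabled only channels
 * 0 and 2, the demux list built below ends up equivalent to
 * { .from = 0, .to = 0, .length = 2 } and { .from = 4, .to = 2,
 * .length = 2 }, copying around the unwanted middle sample.
 */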
static const void *iio_demux(struct iio_buffer *buffer,
                             const void *datain)
{
        struct iio_demux_table *t;

        if (list_empty(&buffer->demux_list))
                return datain;
        list_for_each_entry(t, &buffer->demux_list, l)
                memcpy(buffer->demux_bounce + t->to,
                       datain + t->from, t->length);

        return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
        const void *dataout = iio_demux(buffer, data);
        int ret;

        ret = buffer->access->store_to(buffer, dataout);
        if (ret)
                return ret;

        /*
         * We can't just test for watermark to decide if we wake the poll queue
         * because read may request fewer samples than the watermark.
         */
        wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
        return 0;
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
        struct iio_demux_table *p, *q;

        list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
                list_del(&p->l);
                kfree(p);
        }
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
        int ret;
        struct iio_buffer *buf;

        list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
                ret = iio_push_to_buffer(buf, data);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
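
/*
 * Illustrative sketch, not part of the original file: a triggered driver
 * typically pushes one complete scan per trigger from its pollfunc; the
 * state struct and scan buffer are hypothetical.
 */
#if 0   /* example only */
static irqreturn_t my_trigger_handler(int irq, void *p)
{
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct my_state *st = iio_priv(indio_dev);

        my_fill_scan(st, st->scan_buf); /* one scan_bytes-sized record */
        iio_push_to_buffers(indio_dev, st->scan_buf);

        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
}
#endif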
static int iio_buffer_add_demux(struct iio_buffer *buffer,
        struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
        unsigned int length)
{
        if (*p && (*p)->from + (*p)->length == in_loc &&
                (*p)->to + (*p)->length == out_loc) {
                (*p)->length += length;
        } else {
                *p = kmalloc(sizeof(**p), GFP_KERNEL);
                if (*p == NULL)
                        return -ENOMEM;
                (*p)->from = in_loc;
                (*p)->to = out_loc;
                (*p)->length = length;
                list_add_tail(&(*p)->l, &buffer->demux_list);
        }

        return 0;
}
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
                                   struct iio_buffer *buffer)
{
        const struct iio_chan_spec *ch;
        int ret, in_ind = -1, out_ind, length;
        unsigned in_loc = 0, out_loc = 0;
        struct iio_demux_table *p = NULL;

        /* Clear out any old demux */
        iio_buffer_demux_free(buffer);
        kfree(buffer->demux_bounce);
        buffer->demux_bounce = NULL;

        /* First work out which scan mode we will actually have */
        if (bitmap_equal(indio_dev->active_scan_mask,
                         buffer->scan_mask,
                         indio_dev->masklength))
                return 0;

        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
                         buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
                                       in_ind + 1);
                while (in_ind != out_ind) {
                        in_ind = find_next_bit(indio_dev->active_scan_mask,
                                               indio_dev->masklength,
                                               in_ind + 1);
                        ch = iio_find_channel_from_si(indio_dev, in_ind);
                        if (ch->scan_type.repeat > 1)
                                length = ch->scan_type.storagebits / 8 *
                                        ch->scan_type.repeat;
                        else
                                length = ch->scan_type.storagebits / 8;
                        /* Make sure we are aligned */
                        in_loc = roundup(in_loc, length) + length;
                }
                ch = iio_find_channel_from_si(indio_dev, in_ind);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        /* Relies on scan_timestamp being last */
        if (buffer->scan_timestamp) {
                ch = iio_find_channel_from_si(indio_dev,
                        indio_dev->scan_index_timestamp);
                if (ch->scan_type.repeat > 1)
                        length = ch->scan_type.storagebits / 8 *
                                ch->scan_type.repeat;
                else
                        length = ch->scan_type.storagebits / 8;
                out_loc = roundup(out_loc, length);
                in_loc = roundup(in_loc, length);
                ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
                if (ret)
                        goto error_clear_mux_table;
                out_loc += length;
                in_loc += length;
        }
        buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
        if (buffer->demux_bounce == NULL) {
                ret = -ENOMEM;
                goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        iio_buffer_demux_free(buffer);

        return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer;
        int ret;

        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
                ret = iio_buffer_update_demux(indio_dev, buffer);
                if (ret < 0)
                        goto error_clear_mux_table;
        }
        return 0;

error_clear_mux_table:
        list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
                iio_buffer_demux_free(buffer);

        return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
        struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

        buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
        if (buffer)
                kref_get(&buffer->ref);

        return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
        if (buffer)
                kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
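
/*
 * Illustrative note, not part of the original file: any code that keeps an
 * iio_buffer pointer beyond the current call must hold its own reference
 * and drop it when done; the function below is hypothetical.
 */
#if 0   /* example only */
static void my_use_buffer(struct iio_dev *indio_dev)
{
        struct iio_buffer *buffer = iio_buffer_get(indio_dev->buffer);

        /* ... buffer stays valid here while we hold the reference ... */
        iio_buffer_put(buffer);
}
#endif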