/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
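/*
 * Check whether at least @to_wait samples are available for reading,
 * flushing up to @to_flush samples out of a device's hardware FIFO into
 * the software buffer if needed to get there. Used as the wait condition
 * for blocking reads and for poll().
 */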
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && !avail && to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf, to_flush);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 */
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;
	size_t datum_size;
	size_t to_wait = 0;
	size_t to_read;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	to_read = min_t(size_t, n / datum_size, rb->watermark);

	if (!(filp->f_flags & O_NONBLOCK))
		to_wait = to_read;

	do {
		ret = wait_event_interruptible(rb->pollq,
			iio_buffer_ready(indio_dev, rb, to_wait, to_read));
		if (ret)
			return ret;

		if (!indio_dev->info)
			return -ENODEV;

		ret = rb->access->read_first_n(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);

	return ret;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!indio_dev->info)
		return 0;

	poll_wait(filp, &rb->pollq, wait);
	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
		return POLLIN | POLLRDNORM;
	return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	wake_up(&indio_dev->buffer->pollq);
}
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	if (this_attr->c->scan_type.repeat > 1)
		return sprintf(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.repeat,
		       this_attr->c->scan_type.shift);
	else
		return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}
static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	/* Ensure ret is 0 or 1. */
	ret = !!test_bit(to_iio_dev_attr(attr)->address,
			 indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}
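/*
 * Search a device's list of available scan masks for one that covers the
 * requested mask: an exact match when @strict is set, otherwise any
 * superset of the requested channels will do.
 */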
/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
					unsigned int masklength,
					const unsigned long *mask,
					bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	trialmask = kmalloc(sizeof(*trialmask)*
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		goto err_invalid_mask;
	}
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;

err_invalid_mask:
	kfree(trialmask);
	return -EINVAL;
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
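/*
 * Sysfs store callback for the per-channel scan element "en" attributes:
 * parses a boolean and sets or clears the corresponding bit in the
 * buffer's scan mask, refusing changes while the buffer is active.
 */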
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
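/*
 * Create the scan element attributes (index, type, en) for one channel
 * and add them to the buffer's attribute list. Returns the number of
 * attributes created on success.
 */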
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}
static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%d\n", buffer->length);
}
static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_is_active(indio_dev->buffer));
}
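/*
 * Compute the size in bytes of one demuxed scan: each enabled channel is
 * aligned to its own storage size, as is the optional timestamp, which
 * relies on being the last element in the scan.
 */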
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}
static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &indio_dev->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}
static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &indio_dev->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}
static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		kfree(mask);
}
struct iio_device_config {
	unsigned int mode;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};
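/*
 * Work out the configuration (mode, compound scan mask, scan size) that
 * would result from inserting and/or removing the given buffers, without
 * applying it. Fails if the remaining buffers cannot agree on a mode or
 * no available scan mask covers the union of their channels.
 */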
static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	memset(config, 0, sizeof(*config));

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&indio_dev->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
	}

	if (insert_buffer)
		modes &= insert_buffer->access->modes;

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&indio_dev->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
				sizeof(long), GFP_KERNEL);
	if (compound_mask == NULL)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		kfree(compound_mask);
		if (scan_mask == NULL)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	indio_dev->currentmode = config->mode;

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_run_postdisable;
		}
	}

	return 0;

err_run_postdisable:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->active_scan_mask = NULL;

	return ret;
}
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	indio_dev->currentmode = INDIO_DIRECT_MODE;

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;

	return ret;
}
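/*
 * Swap buffers in and out of the active set: tear down the current
 * configuration, update the buffer list and bring the new configuration
 * up again. Called with mlock held.
 */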
static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&indio_dev->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state. With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
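/*
 * Add and/or remove buffers from the list of active buffers, rebuilding
 * the device configuration as needed. Requests that are already satisfied
 * (inserting an active buffer or removing an inactive one) are ignored.
 */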
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	mutex_lock(&indio_dev->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (indio_dev->info == NULL) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&indio_dev->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}
static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(indio_dev->buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev,
					   indio_dev->buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev,
					   NULL, indio_dev->buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}
static const char * const iio_scan_elements_group_name = "scan_elements";
static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	return sprintf(buf, "%u\n", buffer->watermark);
}
static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(indio_dev->buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev, val);
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);

static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
};
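/*
 * Build the sysfs groups for a device's buffer: the "buffer" group
 * (length, enable, watermark plus any driver-supplied attributes) and the
 * "scan_elements" group with the per-channel index/type/en files, and
 * allocate the buffer's scan mask.
 */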
int iio_buffer_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;
	const struct iio_chan_spec *channels;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!buffer)
		return 0;

	attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[attrcount] != NULL)
			attrcount++;
	}

	attr = kcalloc(attrcount + ARRAY_SIZE(iio_buffer_attrs) + 1,
		       sizeof(struct attribute *), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * attrcount);

	attr[attrcount + ARRAY_SIZE(iio_buffer_attrs)] = NULL;

	buffer->buffer_group.name = "buffer";
	buffer->buffer_group.attrs = attr;

	indio_dev->groups[indio_dev->groupcounter++] = &buffer->buffer_group;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
						    sizeof(*buffer->scan_mask),
						    GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
					      sizeof(buffer->scan_el_group.attrs[0]),
					      GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs, buffer->scan_el_attrs,
		       sizeof(buffer->scan_el_group.attrs[0])*attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->scan_el_dev_attr_list);
	kfree(indio_dev->buffer->buffer_group.attrs);

	return ret;
}
void iio_buffer_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	if (!indio_dev->buffer)
		return;

	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->buffer_group.attrs);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	iio_free_chan_devattr_list(&indio_dev->buffer->scan_el_dev_attr_list);
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};
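/*
 * Rearrange one raw scan into the layout this buffer asked for, using the
 * precomputed demux table. If no demux is needed the input is passed
 * through untouched.
 */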
static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, POLLIN | POLLRDNORM);
	return 0;
}
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &indio_dev->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
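/*
 * Append a copy operation to the demux table under construction, merging
 * it into the previous entry when source and destination are contiguous
 * with it.
 */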
static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (*p == NULL)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
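/*
 * Rebuild a buffer's demux table by walking the device's active scan mask
 * and the buffer's scan mask in parallel, emitting one copy op per enabled
 * channel plus the optional trailing timestamp, and allocating a bounce
 * buffer large enough for the demuxed scan.
 */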
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	const struct iio_chan_spec *ch;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			if (ch->scan_type.repeat > 1)
				length = ch->scan_type.storagebits / 8 *
					ch->scan_type.repeat;
			else
				length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
			indio_dev->scan_index_timestamp);
		if (ch->scan_type.repeat > 1)
			length = ch->scan_type.storagebits / 8 *
				ch->scan_type.repeat;
		else
			length = ch->scan_type.storagebits / 8;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &indio_dev->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);