staging:iio:core cleanup: squash tiny wrappers and use dev_set_name to handle creatio...
[deliverable/linux.git] drivers/staging/iio/industrialio-ring.c
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of ring allocation / resizing.
 *
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include "iio.h"
#include "ring_generic.h"

/**
 * iio_ring_open() - chrdev file open for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_open(struct inode *inode, struct file *filp)
{
	struct iio_handler *hand
		= container_of(inode->i_cdev, struct iio_handler, chrdev);
	struct iio_ring_buffer *rb = hand->private;

	filp->private_data = hand->private;
	if (rb->access->mark_in_use)
		rb->access->mark_in_use(rb);

	return 0;
}

/**
 * iio_ring_release() - chrdev file close for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static int iio_ring_release(struct inode *inode, struct file *filp)
{
	struct cdev *cd = inode->i_cdev;
	struct iio_handler *hand = iio_cdev_to_handler(cd);
	struct iio_ring_buffer *rb = hand->private;

	clear_bit(IIO_BUSY_BIT_POS, &rb->access_handler.flags);
	if (rb->access->unmark_in_use)
		rb->access->unmark_in_use(rb);

	return 0;
}

/**
 * iio_ring_read_first_n_outer() - chrdev read for ring buffer access
 *
 * This function relies on all ring buffer implementations having an
 * iio_ring_buffer as their first element.
 **/
static ssize_t iio_ring_read_first_n_outer(struct file *filp, char __user *buf,
					   size_t n, loff_t *f_ps)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret;

	/* The ring implementation must provide a read_first_n callback. */
	if (!rb->access->read_first_n)
		return -EINVAL;
	ret = rb->access->read_first_n(rb, n, buf);

	return ret;
}

/**
 * iio_ring_poll() - poll the ring to find out if it has data
 */
static unsigned int iio_ring_poll(struct file *filp,
				  struct poll_table_struct *wait)
{
	struct iio_ring_buffer *rb = filp->private_data;
	int ret = 0;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return ret;
}

static const struct file_operations iio_ring_fileops = {
	.read = iio_ring_read_first_n_outer,
	.release = iio_ring_release,
	.open = iio_ring_open,
	.poll = iio_ring_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

void iio_ring_access_release(struct device *dev)
{
	struct iio_ring_buffer *buf
		= container_of(dev, struct iio_ring_buffer, dev);
	cdev_del(&buf->access_handler.chrdev);
	iio_device_free_chrdev_minor(MINOR(dev->devt));
}
EXPORT_SYMBOL(iio_ring_access_release);

static inline int
__iio_request_ring_buffer_chrdev(struct iio_ring_buffer *buf,
				 struct module *owner)
{
	int ret, minor;

	buf->access_handler.flags = 0;

	buf->dev.bus = &iio_bus_type;
	device_initialize(&buf->dev);

	minor = iio_device_get_chrdev_minor();
	if (minor < 0) {
		ret = minor;
		goto error_device_put;
	}
	buf->dev.devt = MKDEV(MAJOR(iio_devt), minor);
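	/*
	 * The chrdev is named after its parent IIO device, giving nodes of
	 * the form <parent>:buffer<id> (for example "device0:buffer0" when
	 * the parent device is named "device0").
	 */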
	dev_set_name(&buf->dev, "%s:buffer%d",
		     dev_name(buf->dev.parent),
		     buf->id);
	ret = device_add(&buf->dev);
	if (ret < 0) {
		printk(KERN_ERR "failed to add the ring dev\n");
		goto error_device_put;
	}
	cdev_init(&buf->access_handler.chrdev, &iio_ring_fileops);
	buf->access_handler.chrdev.owner = owner;
	ret = cdev_add(&buf->access_handler.chrdev, buf->dev.devt, 1);
	if (ret) {
		printk(KERN_ERR "failed to allocate ring chrdev\n");
		goto error_device_unregister;
	}
	return 0;

error_device_unregister:
	device_unregister(&buf->dev);
error_device_put:
	put_device(&buf->dev);

	return ret;
}

static void __iio_free_ring_buffer_chrdev(struct iio_ring_buffer *buf)
{
	device_unregister(&buf->dev);
}

void iio_ring_buffer_init(struct iio_ring_buffer *ring,
			  struct iio_dev *dev_info)
{
	ring->indio_dev = dev_info;
	ring->access_handler.private = ring;
	init_waitqueue_head(&ring->pollq);
}
EXPORT_SYMBOL(iio_ring_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	return sprintf(buf, "%u\n", this_attr->c->scan_index);
}

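/*
 * The "type" attribute below prints the scan element layout as
 * sign realbits/storagebits>>shift, e.g. "s12/16>>4" for a signed 12-bit
 * value stored in 16 bits and right-shifted by 4.
 */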
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	return sprintf(buf, "%c%d/%d>>%u\n",
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static int iio_ring_add_channel_sysfs(struct iio_ring_buffer *ring,
				      const struct iio_chan_spec *chan)
{
	int ret;

	ret = __iio_add_chan_devattr("index", "scan_elements",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;

	ret = __iio_add_chan_devattr("type", "scan_elements",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &ring->dev,
				     &ring->scan_el_dev_attr_list);

	if (ret)
		goto error_ret;

	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en", "scan_elements",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &ring->dev,
					     &ring->scan_el_dev_attr_list);
error_ret:
	return ret;
}

static void iio_ring_remove_and_free_scan_dev_attr(struct iio_ring_buffer *ring,
						   struct iio_dev_attr *p)
{
	sysfs_remove_file_from_group(&ring->dev.kobj,
				     &p->dev_attr.attr, "scan_elements");
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static struct attribute *iio_scan_el_dummy_attrs[] = {
	NULL
};

static struct attribute_group iio_scan_el_dummy_group = {
	.name = "scan_elements",
	.attrs = iio_scan_el_dummy_attrs
};

static void __iio_ring_attr_cleanup(struct iio_ring_buffer *ring)
{
	struct iio_dev_attr *p, *n;
	int anydynamic = !list_empty(&ring->scan_el_dev_attr_list);
	list_for_each_entry_safe(p, n,
				 &ring->scan_el_dev_attr_list, l)
		iio_ring_remove_and_free_scan_dev_attr(ring, p);

	if (ring->scan_el_attrs)
		sysfs_remove_group(&ring->dev.kobj,
				   ring->scan_el_attrs);
	else if (anydynamic)
		sysfs_remove_group(&ring->dev.kobj,
				   &iio_scan_el_dummy_group);
}

int iio_ring_buffer_register_ex(struct iio_ring_buffer *ring, int id,
				const struct iio_chan_spec *channels,
				int num_channels)
{
	int ret, i;

	ring->id = id;

	ret = __iio_request_ring_buffer_chrdev(ring, ring->owner);

	if (ret)
		goto error_ret;
	if (ring->scan_el_attrs) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 ring->scan_el_attrs);
		if (ret) {
			dev_err(&ring->dev,
				"Failed to add sysfs scan elements\n");
			goto error_free_ring_buffer_chrdev;
		}
	} else if (channels) {
		ret = sysfs_create_group(&ring->dev.kobj,
					 &iio_scan_el_dummy_group);
		if (ret)
			goto error_free_ring_buffer_chrdev;
	}

	INIT_LIST_HEAD(&ring->scan_el_dev_attr_list);
	if (channels) {
		/* Add index, type and enable attributes for each channel. */
		for (i = 0; i < num_channels; i++) {
			ret = iio_ring_add_channel_sysfs(ring, &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
		}
	}

	return 0;
error_cleanup_dynamic:
	__iio_ring_attr_cleanup(ring);
error_free_ring_buffer_chrdev:
	__iio_free_ring_buffer_chrdev(ring);
error_ret:
	return ret;
}
EXPORT_SYMBOL(iio_ring_buffer_register_ex);

int iio_ring_buffer_register(struct iio_ring_buffer *ring, int id)
{
	return iio_ring_buffer_register_ex(ring, id, NULL, 0);
}
EXPORT_SYMBOL(iio_ring_buffer_register);
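
/*
 * Illustrative driver usage (hypothetical; "my_channels" is not part of
 * this file): a driver initializes its ring and then registers it with
 * its channel table so the scan_elements attributes are created.
 *
 *	iio_ring_buffer_init(indio_dev->ring, indio_dev);
 *	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0, my_channels,
 *					  ARRAY_SIZE(my_channels));
 */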

void iio_ring_buffer_unregister(struct iio_ring_buffer *ring)
{
	__iio_ring_attr_cleanup(ring);
	__iio_free_ring_buffer_chrdev(ring);
}
EXPORT_SYMBOL(iio_ring_buffer_unregister);

ssize_t iio_read_ring_length(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_length)
		len = sprintf(buf, "%d\n",
			      ring->access->get_length(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_length);

ssize_t iio_write_ring_length(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	ulong val;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (ring->access->get_length)
		if (val == ring->access->get_length(ring))
			return len;

	if (ring->access->set_length) {
		ring->access->set_length(ring, val);
		if (ring->access->mark_param_change)
			ring->access->mark_param_change(ring);
	}

	return len;
}
EXPORT_SYMBOL(iio_write_ring_length);

ssize_t iio_read_ring_bytes_per_datum(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	int len = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);

	if (ring->access->get_bytes_per_datum)
		len = sprintf(buf, "%d\n",
			      ring->access->get_bytes_per_datum(ring));

	return len;
}
EXPORT_SYMBOL(iio_read_ring_bytes_per_datum);

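/*
 * Enabling the ring runs the driver callbacks in order: preenable,
 * request_update, mark_in_use, mode selection (triggered or hardware
 * buffer), then postenable.  Disabling runs predisable, unmark_in_use,
 * a switch back to INDIO_DIRECT_MODE, then postdisable.
 */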
ssize_t iio_store_ring_enable(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf,
			      size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *dev_info = ring->indio_dev;

	mutex_lock(&dev_info->mlock);
	previous_mode = dev_info->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = !!(previous_mode & INDIO_ALL_RING_MODES);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-ring, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (ring->setup_ops->preenable) {
			ret = ring->setup_ops->preenable(dev_info);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: "
				       "ring preenable failed\n");
				goto error_ret;
			}
		}
		if (ring->access->request_update) {
			ret = ring->access->request_update(ring);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "ring parameter update failed\n");
				goto error_ret;
			}
		}
		if (ring->access->mark_in_use)
			ring->access->mark_in_use(ring);
		/* Definitely possible for devices to support both of these. */
		if (dev_info->modes & INDIO_RING_TRIGGERED) {
			if (!dev_info->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				goto error_ret;
			}
			dev_info->currentmode = INDIO_RING_TRIGGERED;
		} else if (dev_info->modes & INDIO_RING_HARDWARE_BUFFER)
			dev_info->currentmode = INDIO_RING_HARDWARE_BUFFER;
		else { /* should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (ring->setup_ops->postenable) {
			ret = ring->setup_ops->postenable(dev_info);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: "
				       "postenable failed\n");
				if (ring->access->unmark_in_use)
					ring->access->unmark_in_use(ring);
				dev_info->currentmode = previous_mode;
				if (ring->setup_ops->postdisable)
					ring->setup_ops->postdisable(dev_info);
				goto error_ret;
			}
		}
	} else {
		if (ring->setup_ops->predisable) {
			ret = ring->setup_ops->predisable(dev_info);
			if (ret)
				goto error_ret;
		}
		if (ring->access->unmark_in_use)
			ring->access->unmark_in_use(ring);
		dev_info->currentmode = INDIO_DIRECT_MODE;
		if (ring->setup_ops->postdisable) {
			ret = ring->setup_ops->postdisable(dev_info);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&dev_info->mlock);
	return len;

error_ret:
	mutex_unlock(&dev_info->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_store_ring_enable);

ssize_t iio_show_ring_enable(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", !!(ring->indio_dev->currentmode
				       & INDIO_ALL_RING_MODES));
}
EXPORT_SYMBOL(iio_show_ring_enable);

ssize_t iio_scan_el_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	int ret;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", ret);
}
EXPORT_SYMBOL(iio_scan_el_show);

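/*
 * Writing "0" clears the channel from the scan mask, any other value sets
 * it.  Changes are refused with -EBUSY while the ring is running in
 * triggered mode.
 */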
ssize_t iio_scan_el_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf,
			  size_t len)
{
	int ret = 0;
	bool state;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(ring, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(ring, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(ring, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_store);

ssize_t iio_scan_el_ts_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	return sprintf(buf, "%d\n", ring->scan_timestamp);
}
EXPORT_SYMBOL(iio_scan_el_ts_show);

ssize_t iio_scan_el_ts_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf,
			     size_t len)
{
	int ret = 0;
	struct iio_ring_buffer *ring = dev_get_drvdata(dev);
	struct iio_dev *indio_dev = ring->indio_dev;
	bool state;
	state = !(buf[0] == '0');
	mutex_lock(&indio_dev->mlock);
	if (indio_dev->currentmode == INDIO_RING_TRIGGERED) {
		ret = -EBUSY;
		goto error_ret;
	}
	ring->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_scan_el_ts_store);

int iio_sw_ring_preenable(struct iio_dev *indio_dev)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	size_t size;
	dev_dbg(&indio_dev->dev, "%s\n", __func__);
	/* Check if there are any scan elements enabled; if not, fail. */
	if (!(ring->scan_count || ring->scan_timestamp))
		return -EINVAL;
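	/*
	 * Illustrative sizing: with 3 enabled channels of 2 bytes each the
	 * data takes 6 bytes, padded to 8 for s64 alignment, plus 8 bytes
	 * of timestamp, giving 16 bytes per datum.
	 */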
	if (ring->scan_timestamp)
		if (ring->scan_count)
			/* Timestamp (aligned to s64) and data */
			size = (((ring->scan_count * ring->bpe)
					+ sizeof(s64) - 1)
				& ~(sizeof(s64) - 1))
				+ sizeof(s64);
		else /* Timestamp only */
			size = sizeof(s64);
	else /* Data only */
		size = ring->scan_count * ring->bpe;
	ring->access->set_bytes_per_datum(ring, size);

	return 0;
}
EXPORT_SYMBOL(iio_sw_ring_preenable);