2 * VMEbus User access driver
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
8 * Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 #include <linux/cdev.h>
19 #include <linux/delay.h>
20 #include <linux/device.h>
21 #include <linux/dma-mapping.h>
22 #include <linux/errno.h>
23 #include <linux/init.h>
24 #include <linux/ioctl.h>
25 #include <linux/kernel.h>
27 #include <linux/module.h>
28 #include <linux/pagemap.h>
29 #include <linux/pci.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/syscalls.h>
34 #include <linux/types.h>
37 #include <linux/uaccess.h>
38 #include <linux/vme.h>
42 static DEFINE_MUTEX(vme_user_mutex
);
43 static const char driver_name
[] = "vme_user";
45 static int bus
[VME_USER_BUS_MAX
];
46 static unsigned int bus_num
;
48 /* Currently Documentation/devices.txt defines the following for VME:
51 * 0 = /dev/bus/vme/m0 First master image
52 * 1 = /dev/bus/vme/m1 Second master image
53 * 2 = /dev/bus/vme/m2 Third master image
54 * 3 = /dev/bus/vme/m3 Fourth master image
55 * 4 = /dev/bus/vme/s0 First slave image
56 * 5 = /dev/bus/vme/s1 Second slave image
57 * 6 = /dev/bus/vme/s2 Third slave image
58 * 7 = /dev/bus/vme/s3 Fourth slave image
59 * 8 = /dev/bus/vme/ctl Control
61 * It is expected that all VME bus drivers will use the
62 * same interface. For interface documentation see
63 * http://www.vmelinux.org/.
65 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
66 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
67 * We'll run with this for now as far as possible, however it probably makes
68 * sense to get rid of the old mappings and just do everything dynamically.
70 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
71 * defined above and try to support at least some of the interface from
72 * http://www.vmelinux.org/ as an alternative the driver can be written
73 * providing a saner interface later.
75 * The vmelinux.org driver never supported slave images, the devices reserved
76 * for slaves were repurposed to support all 8 master images on the UniverseII!
77 * We shall support 4 masters and 4 slaves with this driver.
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

/*
 * Minor number layout (matches the table in the header comment):
 * minors 0-3 are master windows, 4-7 are slave windows, 8 is control.
 * MASTER_MAX/SLAVE_MINOR/SLAVE_MAX were lost in the damaged copy but are
 * used by the type[] table and the probe/remove loops below.
 */
#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */
91 * Structure to handle image related parameters.
94 void *kern_buf
; /* Buffer address in kernel space */
95 dma_addr_t pci_buf
; /* Buffer address in PCI address space */
96 unsigned long long size_buf
; /* Buffer size */
97 struct mutex mutex
; /* Mutex for locking image */
98 struct device
*device
; /* Sysfs device */
99 struct vme_resource
*resource
; /* VME resource */
100 int users
; /* Number of current users */
102 static struct image_desc image
[VME_DEVS
];
/*
 * Aggregate driver activity counters, zeroed by reset_counters().
 * The reads and berrs members were lost in the damaged copy but are
 * referenced by reset_counters(), so they are restored here.
 */
struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long berrs;
	unsigned long dmaErrors;
	unsigned long timeouts;
	unsigned long external;
};

static struct driver_stats statistics;
static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */
121 static const int type
[VME_DEVS
] = { MASTER_MINOR
, MASTER_MINOR
,
122 MASTER_MINOR
, MASTER_MINOR
,
123 SLAVE_MINOR
, SLAVE_MINOR
,
124 SLAVE_MINOR
, SLAVE_MINOR
,
129 static int vme_user_open(struct inode
*, struct file
*);
130 static int vme_user_release(struct inode
*, struct file
*);
131 static ssize_t
vme_user_read(struct file
*, char __user
*, size_t, loff_t
*);
132 static ssize_t
vme_user_write(struct file
*, const char __user
*, size_t,
134 static loff_t
vme_user_llseek(struct file
*, loff_t
, int);
135 static long vme_user_unlocked_ioctl(struct file
*, unsigned int, unsigned long);
137 static int vme_user_match(struct vme_dev
*);
138 static int __devinit
vme_user_probe(struct vme_dev
*);
139 static int __devexit
vme_user_remove(struct vme_dev
*);
141 static const struct file_operations vme_user_fops
= {
142 .open
= vme_user_open
,
143 .release
= vme_user_release
,
144 .read
= vme_user_read
,
145 .write
= vme_user_write
,
146 .llseek
= vme_user_llseek
,
147 .unlocked_ioctl
= vme_user_unlocked_ioctl
,
152 * Reset all the statistic counters
154 static void reset_counters(void)
156 statistics
.reads
= 0;
157 statistics
.writes
= 0;
158 statistics
.ioctls
= 0;
160 statistics
.berrs
= 0;
161 statistics
.dmaErrors
= 0;
162 statistics
.timeouts
= 0;
165 static int vme_user_open(struct inode
*inode
, struct file
*file
)
168 unsigned int minor
= MINOR(inode
->i_rdev
);
170 mutex_lock(&image
[minor
].mutex
);
171 /* Allow device to be opened if a resource is needed and allocated. */
172 if (minor
< CONTROL_MINOR
&& image
[minor
].resource
== NULL
) {
173 printk(KERN_ERR
"No resources allocated for device\n");
178 /* Increment user count */
179 image
[minor
].users
++;
181 mutex_unlock(&image
[minor
].mutex
);
186 mutex_unlock(&image
[minor
].mutex
);
191 static int vme_user_release(struct inode
*inode
, struct file
*file
)
193 unsigned int minor
= MINOR(inode
->i_rdev
);
195 mutex_lock(&image
[minor
].mutex
);
197 /* Decrement user count */
198 image
[minor
].users
--;
200 mutex_unlock(&image
[minor
].mutex
);
206 * We are going ot alloc a page during init per window for small transfers.
207 * Small transfers will go VME -> buffer -> user space. Larger (more than a
208 * page) transfers will lock the user space buffer into memory and then
209 * transfer the data directly into the user space buffers.
211 static ssize_t
resource_to_user(int minor
, char __user
*buf
, size_t count
,
217 if (count
<= image
[minor
].size_buf
) {
218 /* We copy to kernel buffer */
219 copied
= vme_master_read(image
[minor
].resource
,
220 image
[minor
].kern_buf
, count
, *ppos
);
224 retval
= __copy_to_user(buf
, image
[minor
].kern_buf
,
225 (unsigned long)copied
);
227 copied
= (copied
- retval
);
228 printk(KERN_INFO
"User copy failed\n");
233 /* XXX Need to write this */
234 printk(KERN_INFO
"Currently don't support large transfers\n");
235 /* Map in pages from userspace */
237 /* Call vme_master_read to do the transfer */
245 * We are going to alloc a page during init per window for small transfers.
246 * Small transfers will go user space -> buffer -> VME. Larger (more than a
247 * page) transfers will lock the user space buffer into memory and then
248 * transfer the data directly from the user space buffers out to VME.
250 static ssize_t
resource_from_user(unsigned int minor
, const char __user
*buf
,
251 size_t count
, loff_t
*ppos
)
256 if (count
<= image
[minor
].size_buf
) {
257 retval
= __copy_from_user(image
[minor
].kern_buf
, buf
,
258 (unsigned long)count
);
260 copied
= (copied
- retval
);
264 copied
= vme_master_write(image
[minor
].resource
,
265 image
[minor
].kern_buf
, copied
, *ppos
);
267 /* XXX Need to write this */
268 printk(KERN_INFO
"Currently don't support large transfers\n");
269 /* Map in pages from userspace */
271 /* Call vme_master_write to do the transfer */
278 static ssize_t
buffer_to_user(unsigned int minor
, char __user
*buf
,
279 size_t count
, loff_t
*ppos
)
284 image_ptr
= image
[minor
].kern_buf
+ *ppos
;
286 retval
= __copy_to_user(buf
, image_ptr
, (unsigned long)count
);
288 retval
= (count
- retval
);
289 printk(KERN_WARNING
"Partial copy to userspace\n");
293 /* Return number of bytes successfully read */
297 static ssize_t
buffer_from_user(unsigned int minor
, const char __user
*buf
,
298 size_t count
, loff_t
*ppos
)
303 image_ptr
= image
[minor
].kern_buf
+ *ppos
;
305 retval
= __copy_from_user(image_ptr
, buf
, (unsigned long)count
);
307 retval
= (count
- retval
);
308 printk(KERN_WARNING
"Partial copy to userspace\n");
312 /* Return number of bytes successfully read */
316 static ssize_t
vme_user_read(struct file
*file
, char __user
*buf
, size_t count
,
319 unsigned int minor
= MINOR(file
->f_dentry
->d_inode
->i_rdev
);
324 if (minor
== CONTROL_MINOR
)
327 mutex_lock(&image
[minor
].mutex
);
329 /* XXX Do we *really* want this helper - we can use vme_*_get ? */
330 image_size
= vme_get_size(image
[minor
].resource
);
332 /* Ensure we are starting at a valid location */
333 if ((*ppos
< 0) || (*ppos
> (image_size
- 1))) {
334 mutex_unlock(&image
[minor
].mutex
);
338 /* Ensure not reading past end of the image */
339 if (*ppos
+ count
> image_size
)
340 okcount
= image_size
- *ppos
;
344 switch (type
[minor
]) {
346 retval
= resource_to_user(minor
, buf
, okcount
, ppos
);
349 retval
= buffer_to_user(minor
, buf
, okcount
, ppos
);
355 mutex_unlock(&image
[minor
].mutex
);
362 static ssize_t
vme_user_write(struct file
*file
, const char __user
*buf
,
363 size_t count
, loff_t
*ppos
)
365 unsigned int minor
= MINOR(file
->f_dentry
->d_inode
->i_rdev
);
370 if (minor
== CONTROL_MINOR
)
373 mutex_lock(&image
[minor
].mutex
);
375 image_size
= vme_get_size(image
[minor
].resource
);
377 /* Ensure we are starting at a valid location */
378 if ((*ppos
< 0) || (*ppos
> (image_size
- 1))) {
379 mutex_unlock(&image
[minor
].mutex
);
383 /* Ensure not reading past end of the image */
384 if (*ppos
+ count
> image_size
)
385 okcount
= image_size
- *ppos
;
389 switch (type
[minor
]) {
391 retval
= resource_from_user(minor
, buf
, okcount
, ppos
);
394 retval
= buffer_from_user(minor
, buf
, okcount
, ppos
);
400 mutex_unlock(&image
[minor
].mutex
);
408 static loff_t
vme_user_llseek(struct file
*file
, loff_t off
, int whence
)
410 loff_t absolute
= -1;
411 unsigned int minor
= MINOR(file
->f_dentry
->d_inode
->i_rdev
);
414 if (minor
== CONTROL_MINOR
)
417 mutex_lock(&image
[minor
].mutex
);
418 image_size
= vme_get_size(image
[minor
].resource
);
425 absolute
= file
->f_pos
+ off
;
428 absolute
= image_size
+ off
;
431 mutex_unlock(&image
[minor
].mutex
);
436 if ((absolute
< 0) || (absolute
>= image_size
)) {
437 mutex_unlock(&image
[minor
].mutex
);
441 file
->f_pos
= absolute
;
443 mutex_unlock(&image
[minor
].mutex
);
449 * The ioctls provided by the old VME access method (the one at vmelinux.org)
450 * are most certainly wrong as the effectively push the registers layout
451 * through to user space. Given that the VME core can handle multiple bridges,
452 * with different register layouts this is most certainly not the way to go.
454 * We aren't using the structures defined in the Motorola driver either - these
455 * are also quite low level, however we should use the definitions that have
456 * already been defined.
458 static int vme_user_ioctl(struct inode
*inode
, struct file
*file
,
459 unsigned int cmd
, unsigned long arg
)
461 struct vme_master master
;
462 struct vme_slave slave
;
463 struct vme_irq_id irq_req
;
464 unsigned long copied
;
465 unsigned int minor
= MINOR(inode
->i_rdev
);
468 void __user
*argp
= (void __user
*)arg
;
472 switch (type
[minor
]) {
476 copied
= copy_from_user(&irq_req
, argp
,
477 sizeof(struct vme_irq_id
));
479 printk(KERN_WARNING
"Partial copy from userspace\n");
483 retval
= vme_irq_generate(vme_user_bridge
,
493 memset(&master
, 0, sizeof(struct vme_master
));
495 /* XXX We do not want to push aspace, cycle and width
496 * to userspace as they are
498 retval
= vme_master_get(image
[minor
].resource
,
499 &master
.enable
, &master
.vme_addr
,
500 &master
.size
, &master
.aspace
,
501 &master
.cycle
, &master
.dwidth
);
503 copied
= copy_to_user(argp
, &master
,
504 sizeof(struct vme_master
));
506 printk(KERN_WARNING
"Partial copy to "
516 copied
= copy_from_user(&master
, argp
, sizeof(master
));
518 printk(KERN_WARNING
"Partial copy from "
523 /* XXX We do not want to push aspace, cycle and width
524 * to userspace as they are
526 return vme_master_set(image
[minor
].resource
,
527 master
.enable
, master
.vme_addr
, master
.size
,
528 master
.aspace
, master
.cycle
, master
.dwidth
);
536 memset(&slave
, 0, sizeof(struct vme_slave
));
538 /* XXX We do not want to push aspace, cycle and width
539 * to userspace as they are
541 retval
= vme_slave_get(image
[minor
].resource
,
542 &slave
.enable
, &slave
.vme_addr
,
543 &slave
.size
, &pci_addr
, &slave
.aspace
,
546 copied
= copy_to_user(argp
, &slave
,
547 sizeof(struct vme_slave
));
549 printk(KERN_WARNING
"Partial copy to "
559 copied
= copy_from_user(&slave
, argp
, sizeof(slave
));
561 printk(KERN_WARNING
"Partial copy from "
566 /* XXX We do not want to push aspace, cycle and width
567 * to userspace as they are
569 return vme_slave_set(image
[minor
].resource
,
570 slave
.enable
, slave
.vme_addr
, slave
.size
,
571 image
[minor
].pci_buf
, slave
.aspace
,
583 vme_user_unlocked_ioctl(struct file
*file
, unsigned int cmd
, unsigned long arg
)
587 mutex_lock(&vme_user_mutex
);
588 ret
= vme_user_ioctl(file
->f_path
.dentry
->d_inode
, file
, cmd
, arg
);
589 mutex_unlock(&vme_user_mutex
);
596 * Unallocate a previously allocated buffer
598 static void buf_unalloc(int num
)
600 if (image
[num
].kern_buf
) {
602 printk(KERN_DEBUG
"UniverseII:Releasing buffer at %p\n",
606 vme_free_consistent(image
[num
].resource
, image
[num
].size_buf
,
607 image
[num
].kern_buf
, image
[num
].pci_buf
);
609 image
[num
].kern_buf
= NULL
;
610 image
[num
].pci_buf
= 0;
611 image
[num
].size_buf
= 0;
615 printk(KERN_DEBUG
"UniverseII: Buffer not allocated\n");
620 static struct vme_driver vme_user_driver
= {
622 .match
= vme_user_match
,
623 .probe
= vme_user_probe
,
624 .remove
= __devexit_p(vme_user_remove
),
628 static int __init
vme_user_init(void)
632 printk(KERN_INFO
"VME User Space Access Driver\n");
635 printk(KERN_ERR
"%s: No cards, skipping registration\n",
641 /* Let's start by supporting one bus, we can support more than one
642 * in future revisions if that ever becomes necessary.
644 if (bus_num
> VME_USER_BUS_MAX
) {
645 printk(KERN_ERR
"%s: Driver only able to handle %d buses\n",
646 driver_name
, VME_USER_BUS_MAX
);
647 bus_num
= VME_USER_BUS_MAX
;
651 * Here we just register the maximum number of devices we can and
652 * leave vme_user_match() to allow only 1 to go through to probe().
653 * This way, if we later want to allow multiple user access devices,
654 * we just change the code in vme_user_match().
656 retval
= vme_register_driver(&vme_user_driver
, VME_MAX_SLOTS
);
667 static int vme_user_match(struct vme_dev
*vdev
)
669 if (vdev
->num
>= VME_USER_BUS_MAX
)
675 * In this simple access driver, the old behaviour is being preserved as much
676 * as practical. We will therefore reserve the buffers and request the images
677 * here so that we don't have to do it later.
679 static int __devinit
vme_user_probe(struct vme_dev
*vdev
)
684 /* Save pointer to the bridge device */
685 if (vme_user_bridge
!= NULL
) {
686 printk(KERN_ERR
"%s: Driver can only be loaded for 1 device\n",
691 vme_user_bridge
= vdev
;
693 /* Initialise descriptors */
694 for (i
= 0; i
< VME_DEVS
; i
++) {
695 image
[i
].kern_buf
= NULL
;
696 image
[i
].pci_buf
= 0;
697 mutex_init(&image
[i
].mutex
);
698 image
[i
].device
= NULL
;
699 image
[i
].resource
= NULL
;
703 /* Initialise statistics counters */
706 /* Assign major and minor numbers for the driver */
707 err
= register_chrdev_region(MKDEV(VME_MAJOR
, 0), VME_DEVS
,
710 printk(KERN_WARNING
"%s: Error getting Major Number %d for "
711 "driver.\n", driver_name
, VME_MAJOR
);
715 /* Register the driver as a char device */
716 vme_user_cdev
= cdev_alloc();
717 vme_user_cdev
->ops
= &vme_user_fops
;
718 vme_user_cdev
->owner
= THIS_MODULE
;
719 err
= cdev_add(vme_user_cdev
, MKDEV(VME_MAJOR
, 0), VME_DEVS
);
721 printk(KERN_WARNING
"%s: cdev_all failed\n", driver_name
);
725 /* Request slave resources and allocate buffers (128kB wide) */
726 for (i
= SLAVE_MINOR
; i
< (SLAVE_MAX
+ 1); i
++) {
727 /* XXX Need to properly request attributes */
728 /* For ca91cx42 bridge there are only two slave windows
729 * supporting A16 addressing, so we request A24 supported
732 image
[i
].resource
= vme_slave_request(vme_user_bridge
,
734 if (image
[i
].resource
== NULL
) {
735 printk(KERN_WARNING
"Unable to allocate slave "
739 image
[i
].size_buf
= PCI_BUF_SIZE
;
740 image
[i
].kern_buf
= vme_alloc_consistent(image
[i
].resource
,
741 image
[i
].size_buf
, &image
[i
].pci_buf
);
742 if (image
[i
].kern_buf
== NULL
) {
743 printk(KERN_WARNING
"Unable to allocate memory for "
745 image
[i
].pci_buf
= 0;
746 vme_slave_free(image
[i
].resource
);
753 * Request master resources allocate page sized buffers for small
756 for (i
= MASTER_MINOR
; i
< (MASTER_MAX
+ 1); i
++) {
757 /* XXX Need to properly request attributes */
758 image
[i
].resource
= vme_master_request(vme_user_bridge
,
759 VME_A32
, VME_SCT
, VME_D32
);
760 if (image
[i
].resource
== NULL
) {
761 printk(KERN_WARNING
"Unable to allocate master "
765 image
[i
].size_buf
= PCI_BUF_SIZE
;
766 image
[i
].kern_buf
= kmalloc(image
[i
].size_buf
, GFP_KERNEL
);
767 if (image
[i
].kern_buf
== NULL
) {
768 printk(KERN_WARNING
"Unable to allocate memory for "
769 "master window buffers\n");
775 /* Create sysfs entries - on udev systems this creates the dev files */
776 vme_user_sysfs_class
= class_create(THIS_MODULE
, driver_name
);
777 if (IS_ERR(vme_user_sysfs_class
)) {
778 printk(KERN_ERR
"Error creating vme_user class.\n");
779 err
= PTR_ERR(vme_user_sysfs_class
);
783 /* Add sysfs Entries */
784 for (i
= 0; i
< VME_DEVS
; i
++) {
788 sprintf(name
, "bus/vme/m%%d");
791 sprintf(name
, "bus/vme/ctl");
794 sprintf(name
, "bus/vme/s%%d");
802 num
= (type
[i
] == SLAVE_MINOR
) ? i
- (MASTER_MAX
+ 1) : i
;
803 image
[i
].device
= device_create(vme_user_sysfs_class
, NULL
,
804 MKDEV(VME_MAJOR
, i
), NULL
, name
, num
);
805 if (IS_ERR(image
[i
].device
)) {
806 printk(KERN_INFO
"%s: Error creating sysfs device\n",
808 err
= PTR_ERR(image
[i
].device
);
815 /* Ensure counter set correcty to destroy all sysfs devices */
820 device_destroy(vme_user_sysfs_class
, MKDEV(VME_MAJOR
, i
));
822 class_destroy(vme_user_sysfs_class
);
824 /* Ensure counter set correcty to unalloc all master windows */
827 for (i
= MASTER_MINOR
; i
< (MASTER_MAX
+ 1); i
++)
828 kfree(image
[i
].kern_buf
);
830 while (i
> MASTER_MINOR
) {
832 vme_master_free(image
[i
].resource
);
836 * Ensure counter set correcty to unalloc all slave windows and buffers
840 while (i
> SLAVE_MINOR
) {
843 vme_slave_free(image
[i
].resource
);
846 cdev_del(vme_user_cdev
);
848 unregister_chrdev_region(MKDEV(VME_MAJOR
, 0), VME_DEVS
);
854 static int __devexit
vme_user_remove(struct vme_dev
*dev
)
858 /* Remove sysfs Entries */
859 for (i
= 0; i
< VME_DEVS
; i
++) {
860 mutex_destroy(&image
[i
].mutex
);
861 device_destroy(vme_user_sysfs_class
, MKDEV(VME_MAJOR
, i
));
863 class_destroy(vme_user_sysfs_class
);
865 for (i
= MASTER_MINOR
; i
< (MASTER_MAX
+ 1); i
++) {
866 kfree(image
[i
].kern_buf
);
867 vme_master_free(image
[i
].resource
);
870 for (i
= SLAVE_MINOR
; i
< (SLAVE_MAX
+ 1); i
++) {
871 vme_slave_set(image
[i
].resource
, 0, 0, 0, 0, VME_A32
, 0);
873 vme_slave_free(image
[i
].resource
);
876 /* Unregister device driver */
877 cdev_del(vme_user_cdev
);
879 /* Unregiser the major and minor device numbers */
880 unregister_chrdev_region(MKDEV(VME_MAJOR
, 0), VME_DEVS
);
885 static void __exit
vme_user_exit(void)
887 vme_unregister_driver(&vme_user_driver
);
891 MODULE_PARM_DESC(bus
, "Enumeration of VMEbus to which the driver is connected");
892 module_param_array(bus
, int, &bus_num
, 0);
894 MODULE_DESCRIPTION("VME User Space Access Driver");
895 MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
896 MODULE_LICENSE("GPL");
898 module_init(vme_user_init
);
899 module_exit(vme_user_exit
);