Staging: vme: add VME userspace driver
[deliverable/linux.git] / drivers / staging / vme / devices / vme_user.c
1 /*
2 * VMEbus User access driver
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by:
8 * Tom Armistead and Ajit Prem
9 * Copyright 2004 Motorola Inc.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/version.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include "../vme.h"
#include "vme_user.h"
41
42 /* Currently Documentation/devices.txt defines the following for VME:
43 *
44 * 221 char VME bus
45 * 0 = /dev/bus/vme/m0 First master image
46 * 1 = /dev/bus/vme/m1 Second master image
47 * 2 = /dev/bus/vme/m2 Third master image
48 * 3 = /dev/bus/vme/m3 Fourth master image
49 * 4 = /dev/bus/vme/s0 First slave image
50 * 5 = /dev/bus/vme/s1 Second slave image
51 * 6 = /dev/bus/vme/s2 Third slave image
52 * 7 = /dev/bus/vme/s3 Fourth slave image
53 * 8 = /dev/bus/vme/ctl Control
54 *
55 * It is expected that all VME bus drivers will use the
56 * same interface. For interface documentation see
57 * http://www.vmelinux.org/.
58 *
59 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
60 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
61 * We'll run with this for now as far as possible, however it probably makes
62 * sense to get rid of the old mappings and just do everything dynamically.
63 *
64 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
65 * defined above and try to support at least some of the interface from
66 * http://www.vmelinux.org/ as an alternative driver can be written providing a
67 * saner interface later.
68 */
69 #define VME_MAJOR 221 /* VME Major Device Number */
70 #define VME_DEVS 9 /* Number of dev entries */
71
72 #define MASTER_MINOR 0
73 #define MASTER_MAX 3
74 #define SLAVE_MINOR 4
75 #define SLAVE_MAX 7
76 #define CONTROL_MINOR 8
77
78 #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
79
80 /*
81 * Structure to handle image related parameters.
82 */
83 typedef struct {
84 void __iomem *kern_buf; /* Buffer address in kernel space */
85 dma_addr_t pci_buf; /* Buffer address in PCI address space */
86 unsigned long long size_buf; /* Buffer size */
87 struct semaphore sem; /* Semaphore for locking image */
88 struct device *device; /* Sysfs device */
89 struct vme_resource *resource; /* VME resource */
90 int users; /* Number of current users */
91 } image_desc_t;
92 static image_desc_t image[VME_DEVS];
93
94 typedef struct {
95 unsigned long reads;
96 unsigned long writes;
97 unsigned long ioctls;
98 unsigned long irqs;
99 unsigned long berrs;
100 unsigned long dmaErrors;
101 unsigned long timeouts;
102 unsigned long external;
103 } driver_stats_t;
104 static driver_stats_t statistics;
105
106 struct cdev *vme_user_cdev; /* Character device */
107 struct class *vme_user_sysfs_class; /* Sysfs class */
108 struct device *vme_user_bridge; /* Pointer to the bridge device */
109
110 static char driver_name[] = "vme_user";
111
112 static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
113 MASTER_MINOR, MASTER_MINOR,
114 SLAVE_MINOR, SLAVE_MINOR,
115 SLAVE_MINOR, SLAVE_MINOR,
116 CONTROL_MINOR
117 };
118
119
120 static int vme_user_open(struct inode *, struct file *);
121 static int vme_user_release(struct inode *, struct file *);
122 static ssize_t vme_user_read(struct file *, char *, size_t, loff_t *);
123 static ssize_t vme_user_write(struct file *, const char *, size_t, loff_t *);
124 static loff_t vme_user_llseek(struct file *, loff_t, int);
125 static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
126 unsigned long);
127
128 static int __init vme_user_probe(struct device *dev);
129
130 static struct file_operations vme_user_fops = {
131 .open = vme_user_open,
132 .release = vme_user_release,
133 .read = vme_user_read,
134 .write = vme_user_write,
135 .llseek = vme_user_llseek,
136 .ioctl = vme_user_ioctl,
137 };
138
139
140 /*
141 * Reset all the statistic counters
142 */
143 static void reset_counters(void)
144 {
145 statistics.reads = 0;
146 statistics.writes = 0;
147 statistics.ioctls = 0;
148 statistics.irqs = 0;
149 statistics.berrs = 0;
150 statistics.dmaErrors = 0;
151 statistics.timeouts = 0;
152 }
153
154 void lmcall(int monitor)
155 {
156 printk("Caught Location Monitor %d access\n", monitor);
157 }
158
159 static void tests(void)
160 {
161 struct vme_resource *dma_res;
162 struct vme_dma_list *dma_list;
163 struct vme_dma_attr *pattern_attr, *vme_attr;
164
165 int retval;
166 unsigned int data;
167
168 printk("Running VME DMA test\n");
169 dma_res = vme_request_dma(vme_user_bridge);
170 dma_list = vme_new_dma_list(dma_res);
171 pattern_attr = vme_dma_pattern_attribute(0x0,
172 VME_DMA_PATTERN_WORD |
173 VME_DMA_PATTERN_INCREMENT);
174 vme_attr = vme_dma_vme_attribute(0x10000, VME_A32,
175 VME_SCT, VME_D32);
176 retval = vme_dma_list_add(dma_list, pattern_attr,
177 vme_attr, 0x10000);
178 #if 0
179 vme_dma_free_attribute(vme_attr);
180 vme_attr = vme_dma_vme_attribute(0x20000, VME_A32,
181 VME_SCT, VME_D32);
182 retval = vme_dma_list_add(dma_list, pattern_attr,
183 vme_attr, 0x10000);
184 #endif
185 retval = vme_dma_list_exec(dma_list);
186 vme_dma_free_attribute(pattern_attr);
187 vme_dma_free_attribute(vme_attr);
188 vme_dma_list_free(dma_list);
189 #if 0
190 printk("Generating a VME interrupt\n");
191 vme_generate_irq(dma_res, 0x3, 0xaa);
192 printk("Interrupt returned\n");
193 #endif
194 vme_dma_free(dma_res);
195
196 /* Attempt RMW */
197 data = vme_master_rmw(image[0].resource, 0x80000000, 0x00000000,
198 0x80000000, 0);
199 printk("RMW returned 0x%8.8x\n", data);
200
201
202 /* Location Monitor */
203 printk("vme_lm_set:%d\n", vme_lm_set(vme_user_bridge, 0x60000, VME_A32, VME_SCT | VME_USER | VME_DATA));
204 printk("vme_lm_attach:%d\n", vme_lm_attach(vme_user_bridge, 0, lmcall));
205
206 printk("Board in VME slot:%d\n", vme_slot_get(vme_user_bridge));
207 }
208
209 static int vme_user_open(struct inode *inode, struct file *file)
210 {
211 int err;
212 unsigned int minor = MINOR(inode->i_rdev);
213
214 down(&image[minor].sem);
215 /* Only allow device to be opened if a resource is allocated */
216 if (image[minor].resource == NULL) {
217 printk(KERN_ERR "No resources allocated for device\n");
218 err = -EINVAL;
219 goto err_res;
220 }
221
222 /* Increment user count */
223 image[minor].users++;
224
225 up(&image[minor].sem);
226
227 return 0;
228
229 err_res:
230 up(&image[minor].sem);
231
232 return err;
233 }
234
235 static int vme_user_release(struct inode *inode, struct file *file)
236 {
237 unsigned int minor = MINOR(inode->i_rdev);
238
239 down(&image[minor].sem);
240
241 /* Decrement user count */
242 image[minor].users--;
243
244 up(&image[minor].sem);
245
246 return 0;
247 }
248
249 /*
250 * We are going ot alloc a page during init per window for small transfers.
251 * Small transfers will go VME -> buffer -> user space. Larger (more than a
252 * page) transfers will lock the user space buffer into memory and then
253 * transfer the data directly into the user space buffers.
254 */
255 static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
256 loff_t *ppos)
257 {
258 ssize_t retval;
259 ssize_t copied = 0;
260
261 if (count <= image[minor].size_buf) {
262 /* We copy to kernel buffer */
263 copied = vme_master_read(image[minor].resource,
264 image[minor].kern_buf, count, *ppos);
265 if (copied < 0) {
266 return (int)copied;
267 }
268
269 retval = __copy_to_user(buf, image[minor].kern_buf,
270 (unsigned long)copied);
271 if (retval != 0) {
272 copied = (copied - retval);
273 printk("User copy failed\n");
274 return -EINVAL;
275 }
276
277 } else {
278 /* XXX Need to write this */
279 printk("Currently don't support large transfers\n");
280 /* Map in pages from userspace */
281
282 /* Call vme_master_read to do the transfer */
283 return -EINVAL;
284 }
285
286 return copied;
287 }
288
289 /*
290 * We are going ot alloc a page during init per window for small transfers.
291 * Small transfers will go user space -> buffer -> VME. Larger (more than a
292 * page) transfers will lock the user space buffer into memory and then
293 * transfer the data directly from the user space buffers out to VME.
294 */
295 static ssize_t resource_from_user(unsigned int minor, const char *buf,
296 size_t count, loff_t *ppos)
297 {
298 ssize_t retval;
299 ssize_t copied = 0;
300
301 if (count <= image[minor].size_buf) {
302 retval = __copy_from_user(image[minor].kern_buf, buf,
303 (unsigned long)count);
304 if (retval != 0)
305 copied = (copied - retval);
306 else
307 copied = count;
308
309 copied = vme_master_write(image[minor].resource,
310 image[minor].kern_buf, copied, *ppos);
311 } else {
312 /* XXX Need to write this */
313 printk("Currently don't support large transfers\n");
314 /* Map in pages from userspace */
315
316 /* Call vme_master_write to do the transfer */
317 return -EINVAL;
318 }
319
320 return copied;
321 }
322
323 static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
324 size_t count, loff_t *ppos)
325 {
326 void __iomem *image_ptr;
327 ssize_t retval;
328
329 image_ptr = image[minor].kern_buf + *ppos;
330
331 retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
332 if (retval != 0) {
333 retval = (count - retval);
334 printk(KERN_WARNING "Partial copy to userspace\n");
335 } else
336 retval = count;
337
338 /* Return number of bytes successfully read */
339 return retval;
340 }
341
342 static ssize_t buffer_from_user(unsigned int minor, const char *buf,
343 size_t count, loff_t *ppos)
344 {
345 void __iomem *image_ptr;
346 size_t retval;
347
348 image_ptr = image[minor].kern_buf + *ppos;
349
350 retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
351 if (retval != 0) {
352 retval = (count - retval);
353 printk(KERN_WARNING "Partial copy to userspace\n");
354 } else
355 retval = count;
356
357 /* Return number of bytes successfully read */
358 return retval;
359 }
360
361 static ssize_t vme_user_read(struct file *file, char *buf, size_t count,
362 loff_t * ppos)
363 {
364 unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
365 ssize_t retval;
366 size_t image_size;
367 size_t okcount;
368
369 down(&image[minor].sem);
370
371 /* XXX Do we *really* want this helper - we can use vme_*_get ? */
372 image_size = vme_get_size(image[minor].resource);
373
374 /* Ensure we are starting at a valid location */
375 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
376 up(&image[minor].sem);
377 return 0;
378 }
379
380 /* Ensure not reading past end of the image */
381 if (*ppos + count > image_size)
382 okcount = image_size - *ppos;
383 else
384 okcount = count;
385
386 switch (type[minor]){
387 case MASTER_MINOR:
388 retval = resource_to_user(minor, buf, okcount, ppos);
389 break;
390 case SLAVE_MINOR:
391 retval = buffer_to_user(minor, buf, okcount, ppos);
392 break;
393 default:
394 retval = -EINVAL;
395 }
396
397 up(&image[minor].sem);
398
399 if (retval > 0)
400 *ppos += retval;
401
402 return retval;
403 }
404
405 static ssize_t vme_user_write(struct file *file, const char *buf, size_t count,
406 loff_t *ppos)
407 {
408 unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
409 ssize_t retval;
410 size_t image_size;
411 size_t okcount;
412
413 down(&image[minor].sem);
414
415 image_size = vme_get_size(image[minor].resource);
416
417 /* Ensure we are starting at a valid location */
418 if ((*ppos < 0) || (*ppos > (image_size - 1))) {
419 up(&image[minor].sem);
420 return 0;
421 }
422
423 /* Ensure not reading past end of the image */
424 if (*ppos + count > image_size)
425 okcount = image_size - *ppos;
426 else
427 okcount = count;
428
429 switch (type[minor]){
430 case MASTER_MINOR:
431 retval = resource_from_user(minor, buf, okcount, ppos);
432 break;
433 case SLAVE_MINOR:
434 retval = buffer_from_user(minor, buf, okcount, ppos);
435 break;
436 default:
437 retval = -EINVAL;
438 }
439
440 up(&image[minor].sem);
441
442 if (retval > 0)
443 *ppos += retval;
444
445 return retval;
446 }
447
448 static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
449 {
450 printk(KERN_ERR "Llseek currently incomplete\n");
451 return -EINVAL;
452 }
453
454 static int vme_user_ioctl(struct inode *inode, struct file *file,
455 unsigned int cmd, unsigned long arg)
456 {
457 unsigned int minor = MINOR(inode->i_rdev);
458 #if 0
459 int ret_val;
460 #endif
461 unsigned long copyRet;
462 vme_slave_t slave;
463
464 statistics.ioctls++;
465 switch (type[minor]) {
466 case CONTROL_MINOR:
467 break;
468 case MASTER_MINOR:
469 break;
470 case SLAVE_MINOR:
471 switch (cmd) {
472 case VME_SET_SLAVE:
473
474 copyRet = copy_from_user(&slave, (char *)arg,
475 sizeof(slave));
476 if (copyRet != 0) {
477 printk(KERN_WARNING "Partial copy from "
478 "userspace\n");
479 return -EFAULT;
480 }
481
482 return vme_slave_set(image[minor].resource,
483 slave.enable, slave.vme_addr, slave.size,
484 image[minor].pci_buf, slave.aspace,
485 slave.cycle);
486
487 break;
488 #if 0
489 case VME_GET_SLAVE:
490 vme_slave_t slave;
491
492 ret_val = vme_slave_get(minor, &iRegs);
493
494 copyRet = copy_to_user((char *)arg, &slave,
495 sizeof(slave));
496 if (copyRet != 0) {
497 printk(KERN_WARNING "Partial copy to "
498 "userspace\n");
499 return -EFAULT;
500 }
501
502 return ret_val;
503 break;
504 #endif
505 }
506 break;
507 }
508
509 return -EINVAL;
510 }
511
512
513 /*
514 * Unallocate a previously allocated buffer
515 */
516 static void buf_unalloc (int num)
517 {
518 if (image[num].kern_buf) {
519 #ifdef VME_DEBUG
520 printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n",
521 image[num].pci_buf);
522 #endif
523
524 vme_free_consistent(image[num].resource, image[num].size_buf,
525 image[num].kern_buf, image[num].pci_buf);
526
527 image[num].kern_buf = NULL;
528 image[num].pci_buf = 0;
529 image[num].size_buf = 0;
530
531 #ifdef VME_DEBUG
532 } else {
533 printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
534 #endif
535 }
536 }
537
538 static struct vme_driver vme_user_driver = {
539 .name = driver_name,
540 .probe = vme_user_probe,
541 };
542
543
544 /*
545 * In this simple access driver, the old behaviour is being preserved as much
546 * as practical. We will therefore reserve the buffers and request the images
547 * here so that we don't have to do it later.
548 */
549 static int __init vme_bridge_init(void)
550 {
551 int retval;
552 printk(KERN_INFO "VME User Space Access Driver\n");
553 printk("vme_user_driver:%p\n", &vme_user_driver);
554 retval = vme_register_driver(&vme_user_driver);
555 printk("vme_register_driver returned %d\n", retval);
556 return retval;
557 }
558
559 /*
560 * This structure gets passed a device, this should be the device created at
561 * registration.
562 */
563 static int __init vme_user_probe(struct device *dev)
564 {
565 int i, err;
566 char name[8];
567
568 printk("Running vme_user_probe()\n");
569
570 /* Pointer to the bridge device */
571 vme_user_bridge = dev;
572
573 /* Initialise descriptors */
574 for (i = 0; i < VME_DEVS; i++) {
575 image[i].kern_buf = NULL;
576 image[i].pci_buf = 0;
577 init_MUTEX(&(image[i].sem));
578 image[i].device = NULL;
579 image[i].resource = NULL;
580 image[i].users = 0;
581 }
582
583 /* Initialise statistics counters */
584 reset_counters();
585
586 /* Assign major and minor numbers for the driver */
587 err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
588 driver_name);
589 if (err) {
590 printk(KERN_WARNING "%s: Error getting Major Number %d for "
591 "driver.\n", driver_name, VME_MAJOR);
592 goto err_region;
593 }
594
595 /* Register the driver as a char device */
596 vme_user_cdev = cdev_alloc();
597 vme_user_cdev->ops = &vme_user_fops;
598 vme_user_cdev->owner = THIS_MODULE;
599 err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
600 if (err) {
601 printk(KERN_WARNING "%s: cdev_all failed\n", driver_name);
602 goto err_char;
603 }
604
605 /* Request slave resources and allocate buffers (128kB wide) */
606 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
607 /* XXX Need to properly request attributes */
608 image[i].resource = vme_slave_request(vme_user_bridge,
609 VME_A16, VME_SCT);
610 if (image[i].resource == NULL) {
611 printk(KERN_WARNING "Unable to allocate slave "
612 "resource\n");
613 goto err_buf;
614 }
615 image[i].size_buf = PCI_BUF_SIZE;
616 image[i].kern_buf = vme_alloc_consistent(image[i].resource,
617 image[i].size_buf, &(image[i].pci_buf));
618 if (image[i].kern_buf == NULL) {
619 printk(KERN_WARNING "Unable to allocate memory for "
620 "buffer\n");
621 image[i].pci_buf = 0;
622 vme_slave_free(image[i].resource);
623 err = -ENOMEM;
624 goto err_buf;
625 }
626 }
627
628 /*
629 * Request master resources allocate page sized buffers for small
630 * reads and writes
631 */
632 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
633 /* XXX Need to properly request attributes */
634 image[i].resource = vme_master_request(vme_user_bridge,
635 VME_A32, VME_SCT, VME_D32);
636 if (image[i].resource == NULL) {
637 printk(KERN_WARNING "Unable to allocate master "
638 "resource\n");
639 goto err_buf;
640 }
641 image[i].size_buf = PAGE_SIZE;
642 image[i].kern_buf = vme_alloc_consistent(image[i].resource,
643 image[i].size_buf, &(image[i].pci_buf));
644 if (image[i].kern_buf == NULL) {
645 printk(KERN_WARNING "Unable to allocate memory for "
646 "buffer\n");
647 image[i].pci_buf = 0;
648 vme_master_free(image[i].resource);
649 err = -ENOMEM;
650 goto err_buf;
651 }
652 }
653
654 /* Setup some debug windows */
655 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
656 err = vme_slave_set(image[i].resource, 1, 0x4000*(i-4),
657 0x4000, image[i].pci_buf, VME_A16,
658 VME_SCT | VME_SUPER | VME_USER | VME_PROG | VME_DATA);
659 if (err != 0) {
660 printk(KERN_WARNING "Failed to configure window\n");
661 goto err_buf;
662 }
663 }
664 for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
665 err = vme_master_set(image[i].resource, 1,
666 (0x10000 + (0x10000*i)), 0x10000,
667 VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
668 if (err != 0) {
669 printk(KERN_WARNING "Failed to configure window\n");
670 goto err_buf;
671 }
672 }
673
674 /* Create sysfs entries - on udev systems this creates the dev files */
675 vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
676 if (IS_ERR(vme_user_sysfs_class)) {
677 printk(KERN_ERR "Error creating vme_user class.\n");
678 err = PTR_ERR(vme_user_sysfs_class);
679 goto err_class;
680 }
681
682 /* Add sysfs Entries */
683 for (i=0; i<VME_DEVS; i++) {
684 switch (type[i]) {
685 case MASTER_MINOR:
686 sprintf(name,"bus/vme/m%%d");
687 break;
688 case CONTROL_MINOR:
689 sprintf(name,"bus/vme/ctl");
690 break;
691 case SLAVE_MINOR:
692 sprintf(name,"bus/vme/s%%d");
693 break;
694 default:
695 err = -EINVAL;
696 goto err_sysfs;
697 break;
698 }
699
700 image[i].device =
701 device_create(vme_user_sysfs_class, NULL,
702 MKDEV(VME_MAJOR, i), NULL, name,
703 (type[i] == SLAVE_MINOR)? i - (MASTER_MAX + 1) : i);
704 if (IS_ERR(image[i].device)) {
705 printk("%s: Error creating sysfs device\n",
706 driver_name);
707 err = PTR_ERR(image[i].device);
708 goto err_sysfs;
709 }
710 }
711
712 /* XXX Run tests */
713 /*
714 tests();
715 */
716
717 return 0;
718
719 /* Ensure counter set correcty to destroy all sysfs devices */
720 i = VME_DEVS;
721 err_sysfs:
722 while (i > 0){
723 i--;
724 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
725 }
726 class_destroy(vme_user_sysfs_class);
727
728 /* Ensure counter set correcty to unalloc all slave buffers */
729 i = SLAVE_MAX + 1;
730 err_buf:
731 while (i > SLAVE_MINOR){
732 i--;
733 vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
734 vme_slave_free(image[i].resource);
735 buf_unalloc(i);
736 }
737 err_class:
738 cdev_del(vme_user_cdev);
739 err_char:
740 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
741 err_region:
742 return err;
743 }
744
745 static void __exit vme_bridge_exit(void)
746 {
747 int i;
748
749 /* Remove sysfs Entries */
750 for(i=0; i<VME_DEVS; i++) {
751 device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
752 }
753 class_destroy(vme_user_sysfs_class);
754
755 for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
756 buf_unalloc(i);
757 }
758
759 /* Unregister device driver */
760 cdev_del(vme_user_cdev);
761
762 /* Unregiser the major and minor device numbers */
763 unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
764 }
765
766 MODULE_DESCRIPTION("VME User Space Access Driver");
767 MODULE_AUTHOR("Martyn Welch <martyn.welch@gefanuc.com");
768 MODULE_LICENSE("GPL");
769
770 module_init(vme_bridge_init);
771 module_exit(vme_bridge_exit);
This page took 0.059067 seconds and 5 git commands to generate.