Staging VME: Fix remaining checkpatch.pl errors.
drivers/staging/vme/devices/vme_user.c
/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *	Tom Armistead and Ajit Prem
 *	Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>

#include "../vme.h"
#include "vme_user.h"

static DEFINE_MUTEX(vme_user_mutex);
static const char driver_name[] = "vme_user";

static int bus[USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char	VME bus
 *		  0 = /dev/bus/vme/m0		First master image
 *		  1 = /dev/bus/vme/m1		Second master image
 *		  2 = /dev/bus/vme/m2		Third master image
 *		  3 = /dev/bus/vme/m3		Fourth master image
 *		  4 = /dev/bus/vme/s0		First slave image
 *		  5 = /dev/bus/vme/s1		Second slave image
 *		  6 = /dev/bus/vme/s2		Third slave image
 *		  7 = /dev/bus/vme/s3		Fourth slave image
 *		  8 = /dev/bus/vme/ctl		Control
 *
 * It is expected that all VME bus drivers will use the
 * same interface. For interface documentation see
 * http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/ as an alternative driver can be written providing
 * a saner interface later.
 *
 * The vmelinux.org driver never supported slave images, the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
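
/*
 * Example (illustrative only, not part of the driver): a userspace program
 * is expected to reach these windows through the device nodes listed above.
 * The sketch below assumes the nodes exist under /dev/bus/vme/ as described
 * and that the master window m0 has already been configured (see the ioctl
 * example further down); error handling is kept minimal.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[256];
 *		ssize_t len;
 *		int fd = open("/dev/bus/vme/m0", O_RDONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		len = read(fd, buf, sizeof(buf));	// bounced via kern_buf
 *		if (len < 0)
 *			perror("read");
 *		else
 *			printf("read %zd bytes from VME\n", len);
 *		close(fd);
 *		return 0;
 *	}
 */
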
#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */

#define MASTER_MINOR	0
#define MASTER_MAX	3
#define SLAVE_MINOR	4
#define SLAVE_MAX	7
#define CONTROL_MINOR	8

#define PCI_BUF_SIZE	0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
	void *kern_buf;			/* Buffer address in kernel space */
	dma_addr_t pci_buf;		/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct semaphore sem;		/* Semaphore for locking image */
	struct device *device;		/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int users;			/* Number of current users */
};
static struct image_desc image[VME_DEVS];

struct driver_stats {
	unsigned long reads;
	unsigned long writes;
	unsigned long ioctls;
	unsigned long irqs;
	unsigned long berrs;
	unsigned long dmaErrors;
	unsigned long timeouts;
	unsigned long external;
};
static struct driver_stats statistics;

static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct device *vme_user_bridge;		/* Pointer to bridge device */


static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


static int vme_user_open(struct inode *, struct file *);
static int vme_user_release(struct inode *, struct file *);
static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
static ssize_t vme_user_write(struct file *, const char __user *, size_t,
	loff_t *);
static loff_t vme_user_llseek(struct file *, loff_t, int);
static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);

static int __devinit vme_user_probe(struct device *, int, int);
static int __devexit vme_user_remove(struct device *, int, int);

static const struct file_operations vme_user_fops = {
	.open = vme_user_open,
	.release = vme_user_release,
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
};


/*
 * Reset all the statistic counters
 */
static void reset_counters(void)
{
	statistics.reads = 0;
	statistics.writes = 0;
	statistics.ioctls = 0;
	statistics.irqs = 0;
	statistics.berrs = 0;
	statistics.dmaErrors = 0;
	statistics.timeouts = 0;
}

static int vme_user_open(struct inode *inode, struct file *file)
{
	int err;
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);
	/* Only allow device to be opened if a resource is allocated */
	if (image[minor].resource == NULL) {
		printk(KERN_ERR "No resources allocated for device\n");
		err = -EINVAL;
		goto err_res;
	}

	/* Increment user count */
	image[minor].users++;

	up(&image[minor].sem);

	return 0;

err_res:
	up(&image[minor].sem);

	return err;
}

static int vme_user_release(struct inode *inode, struct file *file)
{
	unsigned int minor = MINOR(inode->i_rdev);

	down(&image[minor].sem);

	/* Decrement user count */
	image[minor].users--;

	up(&image[minor].sem);

	return 0;
}

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go VME -> buffer -> user space. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly into the user space buffers.
 */
static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
	loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		/* We copy to kernel buffer */
		copied = vme_master_read(image[minor].resource,
			image[minor].kern_buf, count, *ppos);
		if (copied < 0)
			return (int)copied;

		retval = __copy_to_user(buf, image[minor].kern_buf,
			(unsigned long)copied);
		if (retval != 0) {
			copied = (copied - retval);
			printk(KERN_INFO "User copy failed\n");
			return -EINVAL;
		}

	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_read to do the transfer */
		return -EINVAL;
	}

	return copied;
}
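
/*
 * Illustrative workaround (not part of the driver): since transfers larger
 * than the per-window bounce buffer currently return -EINVAL, a userspace
 * caller can split a large read into smaller pieces. read_chunked() is a
 * hypothetical helper and the 4096-byte chunk size is an assumption (one
 * page, matching the "page per window" comment above).
 *
 *	#include <unistd.h>
 *
 *	static ssize_t read_chunked(int fd, char *buf, size_t count)
 *	{
 *		size_t done = 0;
 *
 *		while (done < count) {
 *			size_t chunk = count - done;
 *			ssize_t ret;
 *
 *			if (chunk > 4096)
 *				chunk = 4096;
 *			ret = read(fd, buf + done, chunk);
 *			if (ret <= 0)
 *				return done ? (ssize_t)done : ret;
 *			done += ret;
 *		}
 *		return done;
 *	}
 */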

/*
 * We are going to alloc a page during init per window for small transfers.
 * Small transfers will go user space -> buffer -> VME. Larger (more than a
 * page) transfers will lock the user space buffer into memory and then
 * transfer the data directly from the user space buffers out to VME.
 */
static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	ssize_t retval;
	ssize_t copied = 0;

	if (count <= image[minor].size_buf) {
		retval = __copy_from_user(image[minor].kern_buf, buf,
			(unsigned long)count);
		if (retval != 0)
			copied = (copied - retval);
		else
			copied = count;

		copied = vme_master_write(image[minor].resource,
			image[minor].kern_buf, copied, *ppos);
	} else {
		/* XXX Need to write this */
		printk(KERN_INFO "Currently don't support large transfers\n");
		/* Map in pages from userspace */

		/* Call vme_master_write to do the transfer */
		return -EINVAL;
	}

	return copied;
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	ssize_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy to userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully read */
	return retval;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
	size_t count, loff_t *ppos)
{
	void *image_ptr;
	size_t retval;

	image_ptr = image[minor].kern_buf + *ppos;

	retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
	if (retval != 0) {
		retval = (count - retval);
		printk(KERN_WARNING "Partial copy from userspace\n");
	} else
		retval = count;

	/* Return number of bytes successfully written */
	return retval;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	ssize_t retval;
	size_t image_size;
	size_t okcount;

	down(&image[minor].sem);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		up(&image[minor].sem);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		okcount = image_size - *ppos;
	else
		okcount = count;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, okcount, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, okcount, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	up(&image[minor].sem);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	loff_t absolute = -1;
	unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
	size_t image_size;

	down(&image[minor].sem);
	image_size = vme_get_size(image[minor].resource);

	switch (whence) {
	case SEEK_SET:
		absolute = off;
		break;
	case SEEK_CUR:
		absolute = file->f_pos + off;
		break;
	case SEEK_END:
		absolute = image_size + off;
		break;
	default:
		up(&image[minor].sem);
		return -EINVAL;
		break;
	}

	if ((absolute < 0) || (absolute >= image_size)) {
		up(&image[minor].sem);
		return -EINVAL;
	}

	file->f_pos = absolute;

	up(&image[minor].sem);

	return absolute;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that have
 * already been defined.
 */
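
/*
 * Example (illustrative only): configuring a master window from userspace
 * with the VME_SET_MASTER ioctl before reading or writing through it.
 * struct vme_master and the ioctl numbers come from vme_user.h; the address,
 * size and VME_* attribute values below are arbitrary and assume those
 * constants are visible to the application.
 *
 *	#include <sys/ioctl.h>
 *	#include "vme_user.h"
 *
 *	static int configure_window(int fd)
 *	{
 *		struct vme_master master = {
 *			.enable = 1,
 *			.vme_addr = 0x10000,	// VME base address
 *			.size = 0x10000,	// 64kB window
 *			.aspace = VME_A32,
 *			.cycle = VME_SCT,
 *			.dwidth = VME_D32,
 *		};
 *
 *		return ioctl(fd, VME_SET_MASTER, &master);
 *	}
 */
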
static int vme_user_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	statistics.ioctls++;

	switch (type[minor]) {
	case CONTROL_MINOR:
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(struct vme_master));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
				&master.enable, &master.vme_addr,
				&master.size, &master.aspace,
				&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master,
				sizeof(struct vme_master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_MASTER:

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
				master.enable, master.vme_addr, master.size,
				master.aspace, master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(struct vme_slave));

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
				&slave.enable, &slave.vme_addr,
				&slave.size, &pci_addr, &slave.aspace,
				&slave.cycle);

			copied = copy_to_user(argp, &slave,
				sizeof(struct vme_slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy to "
					"userspace\n");
				return -EFAULT;
			}

			return retval;
			break;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied != 0) {
				printk(KERN_WARNING "Partial copy from "
					"userspace\n");
				return -EFAULT;
			}

			/* XXX	We do not want to push aspace, cycle and width
			 *	to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
				slave.enable, slave.vme_addr, slave.size,
				image[minor].pci_buf, slave.aspace,
				slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&vme_user_mutex);
	ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
	mutex_unlock(&vme_user_mutex);

	return ret;
}


/*
 * Free a previously allocated buffer
 */
static void buf_unalloc(int num)
{
	if (image[num].kern_buf) {
#ifdef VME_DEBUG
		printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n",
			image[num].pci_buf);
#endif

		vme_free_consistent(image[num].resource, image[num].size_buf,
			image[num].kern_buf, image[num].pci_buf);

		image[num].kern_buf = NULL;
		image[num].pci_buf = 0;
		image[num].size_buf = 0;

#ifdef VME_DEBUG
	} else {
		printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
#endif
	}
}

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.probe = vme_user_probe,
	.remove = __devexit_p(vme_user_remove),
};


static int __init vme_user_init(void)
{
	int retval = 0;
	int i;
	struct vme_device_id *ids;

	printk(KERN_INFO "VME User Space Access Driver\n");

	if (bus_num == 0) {
		printk(KERN_ERR "%s: No cards, skipping registration\n",
			driver_name);
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > USER_BUS_MAX) {
		printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
			driver_name, USER_BUS_MAX);
		bus_num = USER_BUS_MAX;
	}


	/* Dynamically create the bind table based on module parameters */
	ids = kmalloc(sizeof(struct vme_device_id) * (bus_num + 1), GFP_KERNEL);
	if (ids == NULL) {
		printk(KERN_ERR "%s: Unable to allocate ID table\n",
			driver_name);
		retval = -ENOMEM;
		goto err_id;
	}

	memset(ids, 0, (sizeof(struct vme_device_id) * (bus_num + 1)));

	for (i = 0; i < bus_num; i++) {
		ids[i].bus = bus[i];
		/*
		 * We register the driver against the slot occupied by *this*
		 * card, since it's really a low level way of controlling
		 * the VME bridge
		 */
		ids[i].slot = VME_SLOT_CURRENT;
	}

	vme_user_driver.bind_table = ids;

	retval = vme_register_driver(&vme_user_driver);
	if (retval != 0)
		goto err_reg;

	return retval;

err_reg:
	kfree(ids);
err_id:
err_nocard:
	return retval;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int __devinit vme_user_probe(struct device *dev, int cur_bus,
	int cur_slot)
{
	int i, err;
	char name[12];

	/* Save pointer to the bridge device */
	if (vme_user_bridge != NULL) {
		printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n",
			driver_name);
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = dev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		sema_init(&image[i].sem, 1);
		image[i].device = NULL;
		image[i].resource = NULL;
		image[i].users = 0;
	}

	/* Initialise statistics counters */
	reset_counters();

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
		driver_name);
	if (err) {
		printk(KERN_WARNING "%s: Error getting Major Number %d for "
			"driver.\n", driver_name, VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err) {
		printk(KERN_WARNING "%s: cdev_add failed\n", driver_name);
		goto err_char;
	}

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
			VME_A24, VME_SCT);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate slave "
				"resource\n");
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
			image[i].size_buf, &image[i].pci_buf);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources and allocate page sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
			VME_A32, VME_SCT, VME_D32);
		if (image[i].resource == NULL) {
			printk(KERN_WARNING "Unable to allocate master "
				"resource\n");
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (image[i].kern_buf == NULL) {
			printk(KERN_WARNING "Unable to allocate memory for "
				"master window buffers\n");
			err = -ENOMEM;
			goto err_master_buf;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		printk(KERN_ERR "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;
		switch (type[i]) {
		case MASTER_MINOR:
			sprintf(name, "bus/vme/m%%d");
			break;
		case CONTROL_MINOR:
			sprintf(name, "bus/vme/ctl");
			break;
		case SLAVE_MINOR:
			sprintf(name, "bus/vme/s%%d");
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
			break;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
					MKDEV(VME_MAJOR, i), NULL, name, num);
		if (IS_ERR(image[i].device)) {
			printk(KERN_INFO "%s: Error creating sysfs device\n",
				driver_name);
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

	/* Ensure counter set correctly to destroy all sysfs devices */
	i = VME_DEVS;
err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master_buf:
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
		kfree(image[i].kern_buf);
err_master:
	while (i > MASTER_MINOR) {
		i--;
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

static int __devexit vme_user_remove(struct device *dev, int cur_bus,
	int cur_slot)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++)
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		buf_unalloc(i);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);

	kfree(vme_user_driver.bind_table);
}


MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);