/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *   Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"

static const char driver_name[] = "vme_user";

static int bus[VME_USER_BUS_MAX];
static unsigned int bus_num;

/* Currently Documentation/devices.txt defines the following for VME:
 *
 * 221 char     VME bus
 *                0 = /dev/bus/vme/m0          First master image
 *                1 = /dev/bus/vme/m1          Second master image
 *                2 = /dev/bus/vme/m2          Third master image
 *                3 = /dev/bus/vme/m3          Fourth master image
 *                4 = /dev/bus/vme/s0          First slave image
 *                5 = /dev/bus/vme/s1          Second slave image
 *                6 = /dev/bus/vme/s2          Third slave image
 *                7 = /dev/bus/vme/s3          Fourth slave image
 *                8 = /dev/bus/vme/ctl         Control
 *
 *              It is expected that all VME bus drivers will use the
 *              same interface. For interface documentation see
 *              http://www.vmelinux.org/.
 *
 * However, the VME driver at http://www.vmelinux.org/ is rather old and
 * doesn't even support the tsi148 chipset (which has 8 master and 8 slave
 * windows). We'll run with this for now as far as possible; however, it
 * probably makes sense to get rid of the old mappings and just do everything
 * dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/. As an alternative, the driver can be rewritten
 * later to provide a saner interface.
 *
 * The vmelinux.org driver never supported slave images; the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
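
/*
 * Illustrative user-space usage (a minimal sketch, not part of this driver):
 * configure the first master window with the VME_SET_MASTER ioctl from
 * vme_user.h and read through it. The field names follow the
 * vme_master_get()/vme_master_set() parameters used below; check the
 * vme_user.h header for the authoritative structure layout.
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include "vme_user.h"
 *
 *      int fd = open("/dev/bus/vme/m0", O_RDWR);
 *      struct vme_master ma = {
 *              .enable   = 1,
 *              .vme_addr = 0x0,        // example VME base address
 *              .size     = 0x10000,    // example window size
 *              .aspace   = VME_A32,
 *              .cycle    = VME_SCT,
 *              .dwidth   = VME_D32,
 *      };
 *      char buf[256];
 *
 *      ioctl(fd, VME_SET_MASTER, &ma); // configure the window
 *      lseek(fd, 0x100, SEEK_SET);     // offset within the window
 *      read(fd, buf, sizeof(buf));     // VME read through the window
 */
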
#define VME_MAJOR       221     /* VME Major Device Number */
#define VME_DEVS        9       /* Number of dev entries */

#define MASTER_MINOR    0
#define MASTER_MAX      3
#define SLAVE_MINOR     4
#define SLAVE_MAX       7
#define CONTROL_MINOR   8

#define PCI_BUF_SIZE    0x20000 /* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */
struct image_desc {
        void *kern_buf;                 /* Buffer address in kernel space */
        dma_addr_t pci_buf;             /* Buffer address in PCI address space */
        unsigned long long size_buf;    /* Buffer size */
        struct mutex mutex;             /* Mutex for locking image */
        struct device *device;          /* Sysfs device */
        struct vme_resource *resource;  /* VME resource */
        int mmap_count;                 /* Number of current mmap's */
};

static struct image_desc image[VME_DEVS];

static struct cdev *vme_user_cdev;              /* Character device */
static struct class *vme_user_sysfs_class;      /* Sysfs class */
static struct vme_dev *vme_user_bridge;         /* Pointer to user device */

static const int type[VME_DEVS] = {     MASTER_MINOR,   MASTER_MINOR,
                                        MASTER_MINOR,   MASTER_MINOR,
                                        SLAVE_MINOR,    SLAVE_MINOR,
                                        SLAVE_MINOR,    SLAVE_MINOR,
                                        CONTROL_MINOR
                                  };

struct vme_user_vma_priv {
        unsigned int minor;
        atomic_t refcnt;
};

static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
                                loff_t *ppos)
{
        ssize_t copied = 0;

        if (count > image[minor].size_buf)
                count = image[minor].size_buf;

        copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
                                 count, *ppos);
        if (copied < 0)
                return (int)copied;

        if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
                return -EFAULT;

        return copied;
}

static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        if (count > image[minor].size_buf)
                count = image[minor].size_buf;

        if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
                return -EFAULT;

        return vme_master_write(image[minor].resource, image[minor].kern_buf,
                                count, *ppos);
}

static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
                              size_t count, loff_t *ppos)
{
        void *image_ptr;

        image_ptr = image[minor].kern_buf + *ppos;
        if (__copy_to_user(buf, image_ptr, (unsigned long)count))
                return -EFAULT;

        return count;
}

static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        void *image_ptr;

        image_ptr = image[minor].kern_buf + *ppos;
        if (__copy_from_user(image_ptr, buf, (unsigned long)count))
                return -EFAULT;

        return count;
}

static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
                             loff_t *ppos)
{
        unsigned int minor = MINOR(file_inode(file)->i_rdev);
        ssize_t retval;
        size_t image_size;

        if (minor == CONTROL_MINOR)
                return 0;

        mutex_lock(&image[minor].mutex);

        /* XXX Do we *really* want this helper - we can use vme_*_get ? */
        image_size = vme_get_size(image[minor].resource);

        /* Ensure we are starting at a valid location */
        if ((*ppos < 0) || (*ppos > (image_size - 1))) {
                mutex_unlock(&image[minor].mutex);
                return 0;
        }

        /* Ensure not reading past end of the image */
        if (*ppos + count > image_size)
                count = image_size - *ppos;

        switch (type[minor]) {
        case MASTER_MINOR:
                retval = resource_to_user(minor, buf, count, ppos);
                break;
        case SLAVE_MINOR:
                retval = buffer_to_user(minor, buf, count, ppos);
                break;
        default:
                retval = -EINVAL;
        }

        mutex_unlock(&image[minor].mutex);
        if (retval > 0)
                *ppos += retval;

        return retval;
}

static ssize_t vme_user_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        unsigned int minor = MINOR(file_inode(file)->i_rdev);
        ssize_t retval;
        size_t image_size;

        if (minor == CONTROL_MINOR)
                return 0;

        mutex_lock(&image[minor].mutex);

        image_size = vme_get_size(image[minor].resource);

        /* Ensure we are starting at a valid location */
        if ((*ppos < 0) || (*ppos > (image_size - 1))) {
                mutex_unlock(&image[minor].mutex);
                return 0;
        }

        /* Ensure not writing past the end of the image */
        if (*ppos + count > image_size)
                count = image_size - *ppos;

        switch (type[minor]) {
        case MASTER_MINOR:
                retval = resource_from_user(minor, buf, count, ppos);
                break;
        case SLAVE_MINOR:
                retval = buffer_from_user(minor, buf, count, ppos);
                break;
        default:
                retval = -EINVAL;
        }

        mutex_unlock(&image[minor].mutex);

        if (retval > 0)
                *ppos += retval;

        return retval;
}

static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
        unsigned int minor = MINOR(file_inode(file)->i_rdev);
        size_t image_size;
        loff_t res;

        switch (type[minor]) {
        case MASTER_MINOR:
        case SLAVE_MINOR:
                mutex_lock(&image[minor].mutex);
                image_size = vme_get_size(image[minor].resource);
                res = fixed_size_llseek(file, off, whence, image_size);
                mutex_unlock(&image[minor].mutex);
                return res;
        }

        return -EINVAL;
}

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges,
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level. However, we should use the definitions that have
 * already been defined.
 */
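
/*
 * Illustrative use of the control node (a sketch, not part of this driver):
 * generating a VME interrupt from user space with the VME_IRQ_GEN ioctl.
 * The vme_irq_id fields match the irq_req usage below; the level and
 * status/ID values are example values only - consult vme_user.h for the
 * authoritative definition.
 *
 *      int ctl = open("/dev/bus/vme/ctl", O_RDWR);
 *      struct vme_irq_id irq = {
 *              .level  = 3,    // example VME interrupt level
 *              .statid = 0x20, // example status/ID vector
 *      };
 *
 *      if (ioctl(ctl, VME_IRQ_GEN, &irq))
 *              perror("VME_IRQ_GEN");
 */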
static int vme_user_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct vme_master master;
        struct vme_slave slave;
        struct vme_irq_id irq_req;
        unsigned long copied;
        unsigned int minor = MINOR(inode->i_rdev);
        int retval;
        dma_addr_t pci_addr;
        void __user *argp = (void __user *)arg;

        switch (type[minor]) {
        case CONTROL_MINOR:
                switch (cmd) {
                case VME_IRQ_GEN:
                        copied = copy_from_user(&irq_req, argp,
                                                sizeof(struct vme_irq_id));
                        if (copied != 0) {
                                pr_warn("Partial copy from userspace\n");
                                return -EFAULT;
                        }

                        return vme_irq_generate(vme_user_bridge,
                                                irq_req.level,
                                                irq_req.statid);
                }
                break;
        case MASTER_MINOR:
                switch (cmd) {
                case VME_GET_MASTER:
                        memset(&master, 0, sizeof(struct vme_master));

                        /* XXX We do not want to push aspace, cycle and width
                         * to userspace as they are
                         */
                        retval = vme_master_get(image[minor].resource,
                                                &master.enable,
                                                &master.vme_addr,
                                                &master.size, &master.aspace,
                                                &master.cycle, &master.dwidth);

                        copied = copy_to_user(argp, &master,
                                              sizeof(struct vme_master));
                        if (copied != 0) {
                                pr_warn("Partial copy to userspace\n");
                                return -EFAULT;
                        }

                        return retval;

                case VME_SET_MASTER:

                        if (image[minor].mmap_count != 0) {
                                pr_warn("Can't adjust mapped window\n");
                                return -EPERM;
                        }

                        copied = copy_from_user(&master, argp, sizeof(master));
                        if (copied != 0) {
                                pr_warn("Partial copy from userspace\n");
                                return -EFAULT;
                        }

                        /* XXX We do not want to push aspace, cycle and width
                         * to userspace as they are
                         */
                        return vme_master_set(image[minor].resource,
                                master.enable, master.vme_addr, master.size,
                                master.aspace, master.cycle, master.dwidth);

                        break;
                }
                break;
        case SLAVE_MINOR:
                switch (cmd) {
                case VME_GET_SLAVE:
                        memset(&slave, 0, sizeof(struct vme_slave));

                        /* XXX We do not want to push aspace, cycle and width
                         * to userspace as they are
                         */
                        retval = vme_slave_get(image[minor].resource,
                                               &slave.enable, &slave.vme_addr,
                                               &slave.size, &pci_addr,
                                               &slave.aspace, &slave.cycle);

                        copied = copy_to_user(argp, &slave,
                                              sizeof(struct vme_slave));
                        if (copied != 0) {
                                pr_warn("Partial copy to userspace\n");
                                return -EFAULT;
                        }

                        return retval;

                case VME_SET_SLAVE:

                        copied = copy_from_user(&slave, argp, sizeof(slave));
                        if (copied != 0) {
                                pr_warn("Partial copy from userspace\n");
                                return -EFAULT;
                        }

                        /* XXX We do not want to push aspace, cycle and width
                         * to userspace as they are
                         */
                        return vme_slave_set(image[minor].resource,
                                slave.enable, slave.vme_addr, slave.size,
                                image[minor].pci_buf, slave.aspace,
                                slave.cycle);

                        break;
                }
                break;
        }

        return -EINVAL;
}

static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        int ret;
        struct inode *inode = file_inode(file);
        unsigned int minor = MINOR(inode->i_rdev);

        mutex_lock(&image[minor].mutex);
        ret = vme_user_ioctl(inode, file, cmd, arg);
        mutex_unlock(&image[minor].mutex);

        return ret;
}

static void vme_user_vm_open(struct vm_area_struct *vma)
{
        struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

        atomic_inc(&vma_priv->refcnt);
}

static void vme_user_vm_close(struct vm_area_struct *vma)
{
        struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
        unsigned int minor = vma_priv->minor;

        if (!atomic_dec_and_test(&vma_priv->refcnt))
                return;

        mutex_lock(&image[minor].mutex);
        image[minor].mmap_count--;
        mutex_unlock(&image[minor].mutex);

        kfree(vma_priv);
}

static const struct vm_operations_struct vme_user_vm_ops = {
        .open = vme_user_vm_open,
        .close = vme_user_vm_close,
};

static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
        int err;
        struct vme_user_vma_priv *vma_priv;

        mutex_lock(&image[minor].mutex);

        err = vme_master_mmap(image[minor].resource, vma);
        if (err) {
                mutex_unlock(&image[minor].mutex);
                return err;
        }

        vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
        if (!vma_priv) {
                mutex_unlock(&image[minor].mutex);
                return -ENOMEM;
        }

        vma_priv->minor = minor;
        atomic_set(&vma_priv->refcnt, 1);
        vma->vm_ops = &vme_user_vm_ops;
        vma->vm_private_data = vma_priv;

        image[minor].mmap_count++;

        mutex_unlock(&image[minor].mutex);

        return 0;
}

static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned int minor = MINOR(file_inode(file)->i_rdev);

        if (type[minor] == MASTER_MINOR)
                return vme_user_master_mmap(minor, vma);

        return -ENODEV;
}

static const struct file_operations vme_user_fops = {
        .read = vme_user_read,
        .write = vme_user_write,
        .llseek = vme_user_llseek,
        .unlocked_ioctl = vme_user_unlocked_ioctl,
        .compat_ioctl = vme_user_unlocked_ioctl,
        .mmap = vme_user_mmap,
};

static int vme_user_match(struct vme_dev *vdev)
{
        int i;

        int cur_bus = vme_bus_num(vdev);
        int cur_slot = vme_slot_num(vdev);

        for (i = 0; i < bus_num; i++)
                if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
                        return 1;

        return 0;
}

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
        int i, err;
        char *name;

        /* Save pointer to the bridge device */
        if (vme_user_bridge) {
                dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
                err = -EINVAL;
                goto err_dev;
        }
        vme_user_bridge = vdev;

        /* Initialise descriptors */
        for (i = 0; i < VME_DEVS; i++) {
                image[i].kern_buf = NULL;
                image[i].pci_buf = 0;
                mutex_init(&image[i].mutex);
                image[i].device = NULL;
                image[i].resource = NULL;
        }

        /* Assign major and minor numbers for the driver */
        err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
                                     driver_name);
        if (err) {
                dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
                         VME_MAJOR);
                goto err_region;
        }

        /* Register the driver as a char device */
        vme_user_cdev = cdev_alloc();
        if (!vme_user_cdev) {
                err = -ENOMEM;
                goto err_char;
        }
        vme_user_cdev->ops = &vme_user_fops;
        vme_user_cdev->owner = THIS_MODULE;
        err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
        if (err)
                goto err_char;

        /* Request slave resources and allocate buffers (128kB wide) */
        for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
                /* XXX Need to properly request attributes */
                /* For ca91cx42 bridge there are only two slave windows
                 * supporting A16 addressing, so we request A24 supported
                 * by all windows.
                 */
                image[i].resource = vme_slave_request(vme_user_bridge,
                                                      VME_A24, VME_SCT);
                if (!image[i].resource) {
                        dev_warn(&vdev->dev,
                                 "Unable to allocate slave resource\n");
                        err = -ENOMEM;
                        goto err_slave;
                }
                image[i].size_buf = PCI_BUF_SIZE;
                image[i].kern_buf = vme_alloc_consistent(image[i].resource,
                                image[i].size_buf, &image[i].pci_buf);
                if (!image[i].kern_buf) {
                        dev_warn(&vdev->dev,
                                 "Unable to allocate memory for buffer\n");
                        image[i].pci_buf = 0;
                        vme_slave_free(image[i].resource);
                        err = -ENOMEM;
                        goto err_slave;
                }
        }

        /*
         * Request master resources and allocate page sized buffers for small
         * reads and writes
         */
        for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
                /* XXX Need to properly request attributes */
                image[i].resource = vme_master_request(vme_user_bridge,
                                VME_A32, VME_SCT, VME_D32);
                if (!image[i].resource) {
                        dev_warn(&vdev->dev,
                                 "Unable to allocate master resource\n");
                        err = -ENOMEM;
                        goto err_master;
                }
                image[i].size_buf = PCI_BUF_SIZE;
                image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
                if (!image[i].kern_buf) {
                        err = -ENOMEM;
                        vme_master_free(image[i].resource);
                        goto err_master;
                }
        }

        /* Create sysfs entries - on udev systems this creates the dev files */
        vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
        if (IS_ERR(vme_user_sysfs_class)) {
                dev_err(&vdev->dev, "Error creating vme_user class.\n");
                err = PTR_ERR(vme_user_sysfs_class);
                goto err_class;
        }

        /* Add sysfs Entries */
        for (i = 0; i < VME_DEVS; i++) {
                int num;

                switch (type[i]) {
                case MASTER_MINOR:
                        name = "bus/vme/m%d";
                        break;
                case CONTROL_MINOR:
                        name = "bus/vme/ctl";
                        break;
                case SLAVE_MINOR:
                        name = "bus/vme/s%d";
                        break;
                default:
                        err = -EINVAL;
                        goto err_sysfs;
                }

                num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
                image[i].device = device_create(vme_user_sysfs_class, NULL,
                                MKDEV(VME_MAJOR, i), NULL, name, num);
                if (IS_ERR(image[i].device)) {
                        dev_info(&vdev->dev, "Error creating sysfs device\n");
                        err = PTR_ERR(image[i].device);
                        goto err_sysfs;
                }
        }

        return 0;

err_sysfs:
        while (i > 0) {
                i--;
                device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
        }
        class_destroy(vme_user_sysfs_class);

        /* Ensure counter is set correctly to free all master windows */
        i = MASTER_MAX + 1;
err_master:
        while (i > MASTER_MINOR) {
                i--;
                kfree(image[i].kern_buf);
                vme_master_free(image[i].resource);
        }

        /*
         * Ensure counter is set correctly to free all slave windows and buffers
         */
        i = SLAVE_MAX + 1;
err_slave:
        while (i > SLAVE_MINOR) {
                i--;
                vme_free_consistent(image[i].resource, image[i].size_buf,
                                    image[i].kern_buf, image[i].pci_buf);
                vme_slave_free(image[i].resource);
        }
err_class:
        cdev_del(vme_user_cdev);
err_char:
        unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
        return err;
}

static int vme_user_remove(struct vme_dev *dev)
{
        int i;

        /* Remove sysfs Entries */
        for (i = 0; i < VME_DEVS; i++) {
                mutex_destroy(&image[i].mutex);
                device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
        }
        class_destroy(vme_user_sysfs_class);

        for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
                kfree(image[i].kern_buf);
                vme_master_free(image[i].resource);
        }

        for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
                vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
                vme_free_consistent(image[i].resource, image[i].size_buf,
                                    image[i].kern_buf, image[i].pci_buf);
                vme_slave_free(image[i].resource);
        }

        /* Unregister device driver */
        cdev_del(vme_user_cdev);

        /* Unregister the major and minor device numbers */
        unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

        return 0;
}

static struct vme_driver vme_user_driver = {
        .name = driver_name,
        .match = vme_user_match,
        .probe = vme_user_probe,
        .remove = vme_user_remove,
};

static int __init vme_user_init(void)
{
        int retval = 0;

        pr_info("VME User Space Access Driver\n");

        if (bus_num == 0) {
                pr_err("No cards, skipping registration\n");
                retval = -ENODEV;
                goto err_nocard;
        }

        /* Let's start by supporting one bus; we can support more than one
         * in future revisions if that ever becomes necessary.
         */
        if (bus_num > VME_USER_BUS_MAX) {
                pr_err("Driver only able to handle %d buses\n",
                       VME_USER_BUS_MAX);
                bus_num = VME_USER_BUS_MAX;
        }

        /*
         * Here we just register the maximum number of devices we can and
         * leave vme_user_match() to allow only 1 to go through to probe().
         * This way, if we later want to allow multiple user access devices,
         * we just change the code in vme_user_match().
         */
        retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
        if (retval != 0)
                goto err_reg;

        return retval;

err_reg:
err_nocard:
        return retval;
}

static void __exit vme_user_exit(void)
{
        vme_unregister_driver(&vme_user_driver);
}

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0);
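
/*
 * Example (a sketch; assumes the module built from this file is named
 * vme_user): bind the driver to VME bus 0 with
 *
 *      modprobe vme_user bus=0
 *
 * Several bus indices can be given (bus=0,1), capped at VME_USER_BUS_MAX;
 * without a bus= argument, vme_user_init() returns -ENODEV and skips
 * registration.
 */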

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);