Merge branch 'master' into upstream
[deliverable/linux.git] / fs / char_dev.c
1 /*
2 * linux/fs/char_dev.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/init.h>
8 #include <linux/fs.h>
9 #include <linux/slab.h>
10 #include <linux/string.h>
11
12 #include <linux/major.h>
13 #include <linux/errno.h>
14 #include <linux/module.h>
15 #include <linux/smp_lock.h>
16 #include <linux/seq_file.h>
17
18 #include <linux/kobject.h>
19 #include <linux/kobj_map.h>
20 #include <linux/cdev.h>
21 #include <linux/mutex.h>
22 #include <linux/backing-dev.h>
23
24 #ifdef CONFIG_KMOD
25 #include <linux/kmod.h>
26 #endif
27
/*
 * capabilities for /dev/mem, /dev/kmem and similar directly mappable character
 * devices
 * - permits shared-mmap for read, write and/or exec
 * - does not permit private mmap in NOMMU mode (can't do COW)
 * - no readahead or I/O queue unplugging required
 */
struct backing_dev_info directly_mappable_cdev_bdi = {
	.capabilities	= (
#ifdef CONFIG_MMU
		/* permit private copies of the data to be taken */
		BDI_CAP_MAP_COPY |
#endif
		/* permit direct mmap, for read, write or exec */
		BDI_CAP_MAP_DIRECT |
		BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP),
};
45
/* Maps dev_t ranges to cdev objects; populated by cdev_add(). */
static struct kobj_map *cdev_map;

/* Protects the chrdevs[] hash table below. */
static DEFINE_MUTEX(chrdevs_lock);

/*
 * One entry per registered (major, minor-range) reservation.  Entries are
 * hashed by major into chrdevs[] and each chain is kept sorted by
 * (major, baseminor) — see the insertion loop in __register_chrdev_region().
 */
static struct char_device_struct {
	struct char_device_struct *next;	/* next entry in this hash chain */
	unsigned int major;
	unsigned int baseminor;			/* first minor of the range */
	int minorct;				/* number of minors reserved */
	char name[64];				/* owner name, for /proc/devices */
	struct file_operations *fops;
	struct cdev *cdev;		/* will die */
} *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
59
/* Hash a major number to its slot index in the chrdevs[] table above. */
static inline int major_to_index(int major)
{
	return major % CHRDEV_MAJOR_HASH_SIZE;
}
65
66 #ifdef CONFIG_PROC_FS
67
68 void chrdev_show(struct seq_file *f, off_t offset)
69 {
70 struct char_device_struct *cd;
71
72 if (offset < CHRDEV_MAJOR_HASH_SIZE) {
73 mutex_lock(&chrdevs_lock);
74 for (cd = chrdevs[offset]; cd; cd = cd->next)
75 seq_printf(f, "%3d %s\n", cd->major, cd->name);
76 mutex_unlock(&chrdevs_lock);
77 }
78 }
79
80 #endif /* CONFIG_PROC_FS */
81
82 /*
83 * Register a single major with a specified minor range.
84 *
85 * If major == 0 this functions will dynamically allocate a major and return
86 * its number.
87 *
88 * If major > 0 this function will attempt to reserve the passed range of
89 * minors and will return zero on success.
90 *
91 * Returns a -ve errno on failure.
92 */
93 static struct char_device_struct *
94 __register_chrdev_region(unsigned int major, unsigned int baseminor,
95 int minorct, const char *name)
96 {
97 struct char_device_struct *cd, **cp;
98 int ret = 0;
99 int i;
100
101 cd = kzalloc(sizeof(struct char_device_struct), GFP_KERNEL);
102 if (cd == NULL)
103 return ERR_PTR(-ENOMEM);
104
105 mutex_lock(&chrdevs_lock);
106
107 /* temporary */
108 if (major == 0) {
109 for (i = ARRAY_SIZE(chrdevs)-1; i > 0; i--) {
110 if (chrdevs[i] == NULL)
111 break;
112 }
113
114 if (i == 0) {
115 ret = -EBUSY;
116 goto out;
117 }
118 major = i;
119 ret = major;
120 }
121
122 cd->major = major;
123 cd->baseminor = baseminor;
124 cd->minorct = minorct;
125 strncpy(cd->name,name, 64);
126
127 i = major_to_index(major);
128
129 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
130 if ((*cp)->major > major ||
131 ((*cp)->major == major && (*cp)->baseminor >= baseminor))
132 break;
133 if (*cp && (*cp)->major == major &&
134 (*cp)->baseminor < baseminor + minorct) {
135 ret = -EBUSY;
136 goto out;
137 }
138 cd->next = *cp;
139 *cp = cd;
140 mutex_unlock(&chrdevs_lock);
141 return cd;
142 out:
143 mutex_unlock(&chrdevs_lock);
144 kfree(cd);
145 return ERR_PTR(ret);
146 }
147
148 static struct char_device_struct *
149 __unregister_chrdev_region(unsigned major, unsigned baseminor, int minorct)
150 {
151 struct char_device_struct *cd = NULL, **cp;
152 int i = major_to_index(major);
153
154 mutex_lock(&chrdevs_lock);
155 for (cp = &chrdevs[i]; *cp; cp = &(*cp)->next)
156 if ((*cp)->major == major &&
157 (*cp)->baseminor == baseminor &&
158 (*cp)->minorct == minorct)
159 break;
160 if (*cp) {
161 cd = *cp;
162 *cp = cd->next;
163 }
164 mutex_unlock(&chrdevs_lock);
165 return cd;
166 }
167
/*
 * register_chrdev_region() - register a range of device numbers
 * @from: first device number (major + minor) of the requested range
 * @count: number of consecutive device numbers required
 * @name: name of the device or driver (shows up in /proc/devices)
 *
 * The range may span several majors; it is sliced on major boundaries
 * and each slice is registered separately.  On failure every slice
 * already taken is rolled back.  Returns 0 on success, -ve errno on
 * failure.
 */
int register_chrdev_region(dev_t from, unsigned count, const char *name)
{
	struct char_device_struct *cd;
	dev_t to = from + count;
	dev_t n, next;

	for (n = from; n < to; n = next) {
		/* start of the next major, clamped to the end of the range */
		next = MKDEV(MAJOR(n)+1, 0);
		if (next > to)
			next = to;
		cd = __register_chrdev_region(MAJOR(n), MINOR(n),
			       next - n, name);
		if (IS_ERR(cd))
			goto fail;
	}
	return 0;
fail:
	/* unwind every slice registered before the failure point 'n' */
	to = n;
	for (n = from; n < to; n = next) {
		next = MKDEV(MAJOR(n)+1, 0);
		kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
	}
	return PTR_ERR(cd);
}
192
193 int alloc_chrdev_region(dev_t *dev, unsigned baseminor, unsigned count,
194 const char *name)
195 {
196 struct char_device_struct *cd;
197 cd = __register_chrdev_region(0, baseminor, count, name);
198 if (IS_ERR(cd))
199 return PTR_ERR(cd);
200 *dev = MKDEV(cd->major, cd->baseminor);
201 return 0;
202 }
203
204 /**
205 * register_chrdev() - Register a major number for character devices.
206 * @major: major device number or 0 for dynamic allocation
207 * @name: name of this range of devices
208 * @fops: file operations associated with this devices
209 *
210 * If @major == 0 this functions will dynamically allocate a major and return
211 * its number.
212 *
213 * If @major > 0 this function will attempt to reserve a device with the given
214 * major number and will return zero on success.
215 *
216 * Returns a -ve errno on failure.
217 *
218 * The name of this device has nothing to do with the name of the device in
219 * /dev. It only helps to keep track of the different owners of devices. If
220 * your module name has only one type of devices it's ok to use e.g. the name
221 * of the module here.
222 *
223 * This function registers a range of 256 minor numbers. The first minor number
224 * is 0.
225 */
int register_chrdev(unsigned int major, const char *name,
		    const struct file_operations *fops)
{
	struct char_device_struct *cd;
	struct cdev *cdev;
	char *s;
	int err = -ENOMEM;

	/* reserve minors 0..255 under this major (0 = allocate dynamically) */
	cd = __register_chrdev_region(major, 0, 256, name);
	if (IS_ERR(cd))
		return PTR_ERR(cd);

	cdev = cdev_alloc();
	if (!cdev)
		goto out2;

	cdev->owner = fops->owner;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, "%s", name);
	/* '/' is not a valid character in a kobject name; map it to '!' */
	for (s = strchr(kobject_name(&cdev->kobj),'/'); s; s = strchr(s, '/'))
		*s = '!';

	err = cdev_add(cdev, MKDEV(cd->major, 0), 256);
	if (err)
		goto out;

	cd->cdev = cdev;

	/* dynamic allocation reports the major that was picked */
	return major ? 0 : cd->major;
out:
	/* dropping the last ref frees the dynamically allocated cdev */
	kobject_put(&cdev->kobj);
out2:
	kfree(__unregister_chrdev_region(cd->major, 0, 256));
	return err;
}
261
262 void unregister_chrdev_region(dev_t from, unsigned count)
263 {
264 dev_t to = from + count;
265 dev_t n, next;
266
267 for (n = from; n < to; n = next) {
268 next = MKDEV(MAJOR(n)+1, 0);
269 if (next > to)
270 next = to;
271 kfree(__unregister_chrdev_region(MAJOR(n), MINOR(n), next - n));
272 }
273 }
274
275 int unregister_chrdev(unsigned int major, const char *name)
276 {
277 struct char_device_struct *cd;
278 cd = __unregister_chrdev_region(major, 0, 256);
279 if (cd && cd->cdev)
280 cdev_del(cd->cdev);
281 kfree(cd);
282 return 0;
283 }
284
285 static DEFINE_SPINLOCK(cdev_lock);
286
287 static struct kobject *cdev_get(struct cdev *p)
288 {
289 struct module *owner = p->owner;
290 struct kobject *kobj;
291
292 if (owner && !try_module_get(owner))
293 return NULL;
294 kobj = kobject_get(&p->kobj);
295 if (!kobj)
296 module_put(owner);
297 return kobj;
298 }
299
300 void cdev_put(struct cdev *p)
301 {
302 if (p) {
303 struct module *owner = p->owner;
304 kobject_put(&p->kobj);
305 module_put(owner);
306 }
307 }
308
/*
 * Called every time a character special file is opened.
 *
 * Resolves inode->i_rdev to a struct cdev through cdev_map (caching the
 * result in inode->i_cdev for later opens), switches filp->f_op to the
 * driver's file_operations and invokes its open() method, if any.
 * Returns 0 on success or a -ve errno (-ENXIO when no driver is bound).
 */
int chrdev_open(struct inode * inode, struct file * filp)
{
	struct cdev *p;
	struct cdev *new = NULL;
	int ret = 0;

	spin_lock(&cdev_lock);
	p = inode->i_cdev;
	if (!p) {
		struct kobject *kobj;
		int idx;
		/*
		 * Drop the spinlock for the map lookup: it may end up in
		 * base_probe() -> request_module(), which can block.
		 */
		spin_unlock(&cdev_lock);
		kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
		if (!kobj)
			return -ENXIO;
		new = container_of(kobj, struct cdev, kobj);
		spin_lock(&cdev_lock);
		/* re-check: another opener may have bound the inode meanwhile */
		p = inode->i_cdev;
		if (!p) {
			inode->i_cdev = p = new;
			inode->i_cindex = idx;
			list_add(&inode->i_devices, &p->list);
			new = NULL;	/* lookup ref now owned via the inode */
		} else if (!cdev_get(p))
			ret = -ENXIO;
	} else if (!cdev_get(p))
		ret = -ENXIO;
	spin_unlock(&cdev_lock);
	cdev_put(new);	/* drop lookup ref if we lost the race (NULL-safe) */
	if (ret)
		return ret;
	filp->f_op = fops_get(p->ops);
	if (!filp->f_op) {
		cdev_put(p);
		return -ENXIO;
	}
	if (filp->f_op->open) {
		/* legacy drivers still expect the BKL around open() */
		lock_kernel();
		ret = filp->f_op->open(inode,filp);
		unlock_kernel();
	}
	if (ret)
		cdev_put(p);
	return ret;
}
357
/*
 * Detach @inode from the cdev it is bound to (both the list linkage and
 * the i_cdev back-pointer), under cdev_lock so it cannot race with
 * chrdev_open() or cdev_purge().  Does not drop any cdev reference.
 */
void cd_forget(struct inode *inode)
{
	spin_lock(&cdev_lock);
	list_del_init(&inode->i_devices);
	inode->i_cdev = NULL;
	spin_unlock(&cdev_lock);
}
365
/*
 * Unbind every inode still pointing at @cdev.  Called from the kobject
 * release handlers just before the cdev goes away, so no stale i_cdev
 * pointers survive it.
 */
static void cdev_purge(struct cdev *cdev)
{
	spin_lock(&cdev_lock);
	while (!list_empty(&cdev->list)) {
		struct inode *inode;
		inode = container_of(cdev->list.next, struct inode, i_devices);
		list_del_init(&inode->i_devices);
		inode->i_cdev = NULL;
	}
	spin_unlock(&cdev_lock);
}
377
/*
 * Dummy default file-operations: the only thing this does
 * is contain the open that then fills in the correct operations
 * depending on the special file...
 */
const struct file_operations def_chr_fops = {
	.open = chrdev_open,
};
386
387 static struct kobject *exact_match(dev_t dev, int *part, void *data)
388 {
389 struct cdev *p = data;
390 return &p->kobj;
391 }
392
393 static int exact_lock(dev_t dev, void *data)
394 {
395 struct cdev *p = data;
396 return cdev_get(p) ? 0 : -1;
397 }
398
/*
 * cdev_add() - make @p live, servicing the @count device numbers starting
 * at @dev.  Once this succeeds the device can be looked up (and opened)
 * through cdev_map immediately, so @p must be fully set up beforehand.
 * Returns 0 on success, -ve errno on failure.
 */
int cdev_add(struct cdev *p, dev_t dev, unsigned count)
{
	p->dev = dev;
	p->count = count;
	return kobj_map(cdev_map, dev, count, NULL, exact_match, exact_lock, p);
}
405
/* Remove the [dev, dev+count) mapping installed by cdev_add(). */
static void cdev_unmap(dev_t dev, unsigned count)
{
	kobj_unmap(cdev_map, dev, count);
}
410
/*
 * cdev_del() - remove @p from the system.  The mapping is torn down
 * first so no new opens can find it, then the initial kobject reference
 * is dropped; the structure is released once all other refs are gone.
 */
void cdev_del(struct cdev *p)
{
	cdev_unmap(p->dev, p->count);
	kobject_put(&p->kobj);
}
416
417
/* kobject release for cdevs embedded in other structures (cdev_init): no kfree. */
static void cdev_default_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
}
423
/* kobject release for cdevs from cdev_alloc(): purge bound inodes, then free. */
static void cdev_dynamic_release(struct kobject *kobj)
{
	struct cdev *p = container_of(kobj, struct cdev, kobj);
	cdev_purge(p);
	kfree(p);
}
430
/* ktype for statically embedded cdevs (set by cdev_init). */
static struct kobj_type ktype_cdev_default = {
	.release	= cdev_default_release,
};

/* ktype for dynamically allocated cdevs (set by cdev_alloc). */
static struct kobj_type ktype_cdev_dynamic = {
	.release	= cdev_dynamic_release,
};
438
439 struct cdev *cdev_alloc(void)
440 {
441 struct cdev *p = kzalloc(sizeof(struct cdev), GFP_KERNEL);
442 if (p) {
443 p->kobj.ktype = &ktype_cdev_dynamic;
444 INIT_LIST_HEAD(&p->list);
445 kobject_init(&p->kobj);
446 }
447 return p;
448 }
449
450 void cdev_init(struct cdev *cdev, const struct file_operations *fops)
451 {
452 memset(cdev, 0, sizeof *cdev);
453 INIT_LIST_HEAD(&cdev->list);
454 cdev->kobj.ktype = &ktype_cdev_default;
455 kobject_init(&cdev->kobj);
456 cdev->ops = fops;
457 }
458
459 static struct kobject *base_probe(dev_t dev, int *part, void *data)
460 {
461 if (request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
462 /* Make old-style 2.4 aliases work */
463 request_module("char-major-%d", MAJOR(dev));
464 return NULL;
465 }
466
/* Boot-time setup: create the device-number -> cdev map with base_probe as fallback. */
void __init chrdev_init(void)
{
	cdev_map = kobj_map_init(base_probe, &chrdevs_lock);
}
471
472
/* Let modules do char dev stuff */
EXPORT_SYMBOL(register_chrdev_region);
EXPORT_SYMBOL(unregister_chrdev_region);
EXPORT_SYMBOL(alloc_chrdev_region);
EXPORT_SYMBOL(cdev_init);
EXPORT_SYMBOL(cdev_alloc);
EXPORT_SYMBOL(cdev_del);
EXPORT_SYMBOL(cdev_add);
EXPORT_SYMBOL(register_chrdev);
EXPORT_SYMBOL(unregister_chrdev);
EXPORT_SYMBOL(directly_mappable_cdev_bdi);
This page took 0.041416 seconds and 6 git commands to generate.