Merge branch 'next/deletion' of git://git.linaro.org/people/arnd/arm-soc
[deliverable/linux.git] / block / genhd.c
1 /*
2 * gendisk handling
3 */
4
5 #include <linux/module.h>
6 #include <linux/fs.h>
7 #include <linux/genhd.h>
8 #include <linux/kdev_t.h>
9 #include <linux/kernel.h>
10 #include <linux/blkdev.h>
11 #include <linux/init.h>
12 #include <linux/spinlock.h>
13 #include <linux/proc_fs.h>
14 #include <linux/seq_file.h>
15 #include <linux/slab.h>
16 #include <linux/kmod.h>
17 #include <linux/kobj_map.h>
18 #include <linux/buffer_head.h>
19 #include <linux/mutex.h>
20 #include <linux/idr.h>
21 #include <linux/log2.h>
22 #include <linux/ctype.h>
23
24 #include "blk.h"
25
26 static DEFINE_MUTEX(block_class_lock);
27 struct kobject *block_depr;
28
29 /* for extended dynamic devt allocation, currently only one major is used */
30 #define MAX_EXT_DEVT (1 << MINORBITS)
31
32 /* For extended devt allocation. ext_devt_mutex prevents look up
33 * results from going away underneath its user.
34 */
35 static DEFINE_MUTEX(ext_devt_mutex);
36 static DEFINE_IDR(ext_devt_idr);
37
38 static struct device_type disk_type;
39
40 static void disk_add_events(struct gendisk *disk);
41 static void disk_del_events(struct gendisk *disk);
42 static void disk_release_events(struct gendisk *disk);
43
44 /**
45 * disk_get_part - get partition
46 * @disk: disk to look partition from
47 * @partno: partition number
48 *
49 * Look for partition @partno from @disk. If found, increment
50 * reference count and return it.
51 *
52 * CONTEXT:
53 * Don't care.
54 *
55 * RETURNS:
56 * Pointer to the found partition on success, NULL if not found.
57 */
58 struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
59 {
60 struct hd_struct *part = NULL;
61 struct disk_part_tbl *ptbl;
62
63 if (unlikely(partno < 0))
64 return NULL;
65
66 rcu_read_lock();
67
68 ptbl = rcu_dereference(disk->part_tbl);
69 if (likely(partno < ptbl->len)) {
70 part = rcu_dereference(ptbl->part[partno]);
71 if (part)
/* take a device reference while still inside the RCU section */
72 get_device(part_to_dev(part));
73 }
74
75 rcu_read_unlock();
76
77 return part;
78 }
79 EXPORT_SYMBOL_GPL(disk_get_part);
80
81 /**
82 * disk_part_iter_init - initialize partition iterator
83 * @piter: iterator to initialize
84 * @disk: disk to iterate over
85 * @flags: DISK_PITER_* flags
86 *
87 * Initialize @piter so that it iterates over partitions of @disk.
88 *
89 * CONTEXT:
90 * Don't care.
91 */
92 void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk,
93 unsigned int flags)
94 {
95 struct disk_part_tbl *ptbl;
96
97 rcu_read_lock();
98 ptbl = rcu_dereference(disk->part_tbl);
99
100 piter->disk = disk;
101 piter->part = NULL;
102
/* pick the starting index: reverse starts at the end, part0 flags at 0 */
103 if (flags & DISK_PITER_REVERSE)
104 piter->idx = ptbl->len - 1;
105 else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0))
106 piter->idx = 0;
107 else
108 piter->idx = 1;
109
110 piter->flags = flags;
111
112 rcu_read_unlock();
113 }
114 EXPORT_SYMBOL_GPL(disk_part_iter_init);
115
116 /**
117 * disk_part_iter_next - proceed iterator to the next partition and return it
118 * @piter: iterator of interest
119 *
120 * Proceed @piter to the next partition and return it.
121 *
122 * CONTEXT:
123 * Don't care.
124 */
125 struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
126 {
127 struct disk_part_tbl *ptbl;
128 int inc, end;
129
130 /* put the last partition */
131 disk_put_part(piter->part);
132 piter->part = NULL;
133
134 /* get part_tbl */
135 rcu_read_lock();
136 ptbl = rcu_dereference(piter->disk->part_tbl);
137
138 /* determine iteration parameters */
139 if (piter->flags & DISK_PITER_REVERSE) {
140 inc = -1;
141 if (piter->flags & (DISK_PITER_INCL_PART0 |
142 DISK_PITER_INCL_EMPTY_PART0))
143 end = -1;
144 else
145 end = 0;
146 } else {
147 inc = 1;
148 end = ptbl->len;
149 }
150
151 /* iterate to the next partition */
152 for (; piter->idx != end; piter->idx += inc) {
153 struct hd_struct *part;
154
155 part = rcu_dereference(ptbl->part[piter->idx]);
156 if (!part)
157 continue;
/* skip empty partitions unless explicitly included by the flags */
158 if (!part->nr_sects &&
159 !(piter->flags & DISK_PITER_INCL_EMPTY) &&
160 !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
161 piter->idx == 0))
162 continue;
163
/* pin the partition so it survives past rcu_read_unlock() */
164 get_device(part_to_dev(part));
165 piter->part = part;
166 piter->idx += inc;
167 break;
168 }
169
170 rcu_read_unlock();
171
172 return piter->part;
173 }
174 EXPORT_SYMBOL_GPL(disk_part_iter_next);
175
176 /**
177 * disk_part_iter_exit - finish up partition iteration
178 * @piter: iter of interest
179 *
180 * Called when iteration is over. Cleans up @piter.
181 *
182 * CONTEXT:
183 * Don't care.
184 */
185 void disk_part_iter_exit(struct disk_part_iter *piter)
186 {
187 disk_put_part(piter->part);
188 piter->part = NULL;
189 }
190 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
191
192 static inline int sector_in_part(struct hd_struct *part, sector_t sector)
193 {
194 return part->start_sect <= sector &&
195 sector < part->start_sect + part->nr_sects;
196 }
197
198 /**
199 * disk_map_sector_rcu - map sector to partition
200 * @disk: gendisk of interest
201 * @sector: sector to map
202 *
203 * Find out which partition @sector maps to on @disk. This is
204 * primarily used for stats accounting.
205 *
206 * CONTEXT:
207 * RCU read locked. The returned partition pointer is valid only
208 * while preemption is disabled.
209 *
210 * RETURNS:
211 * Found partition on success, part0 is returned if no partition matches
212 */
213 struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
214 {
215 struct disk_part_tbl *ptbl;
216 struct hd_struct *part;
217 int i;
218
219 ptbl = rcu_dereference(disk->part_tbl);
220
/* fast path: most requests hit the same partition as the previous one */
221 part = rcu_dereference(ptbl->last_lookup);
222 if (part && sector_in_part(part, sector))
223 return part;
224
225 for (i = 1; i < ptbl->len; i++) {
226 part = rcu_dereference(ptbl->part[i]);
227
228 if (part && sector_in_part(part, sector)) {
/* remember the hit to speed up the next lookup */
229 rcu_assign_pointer(ptbl->last_lookup, part);
230 return part;
231 }
232 }
233 return &disk->part0;
234 }
235 EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
236
237 /*
238 * Can be deleted altogether. Later.
239 *
240 */
/* hash table of registered block majors, chained per bucket */
241 static struct blk_major_name {
242 struct blk_major_name *next;
243 int major;
244 char name[16];
245 } *major_names[BLKDEV_MAJOR_HASH_SIZE];
246
247 /* index in the above - for now: assume no multimajor ranges */
248 static inline int major_to_index(unsigned major)
249 {
250 return major % BLKDEV_MAJOR_HASH_SIZE;
251 }
252
#ifdef CONFIG_PROC_FS
/* Print every registered block major that hashes to slot @offset. */
void blkdev_show(struct seq_file *seqf, off_t offset)
{
	struct blk_major_name *dp;

	if (offset >= BLKDEV_MAJOR_HASH_SIZE)
		return;

	mutex_lock(&block_class_lock);
	for (dp = major_names[offset]; dp; dp = dp->next)
		seq_printf(seqf, "%3d %s\n", dp->major, dp->name);
	mutex_unlock(&block_class_lock);
}
#endif /* CONFIG_PROC_FS */
266
267 /**
268 * register_blkdev - register a new block device
269 *
270 * @major: the requested major device number [1..255]. If @major=0, try to
271 * allocate any unused major number.
272 * @name: the name of the new block device as a zero terminated string
273 *
274 * The @name must be unique within the system.
275 *
276 * The return value depends on the @major input parameter.
277 * - if a major device number was requested in range [1..255] then the
278 * function returns zero on success, or a negative error code
279 * - if any unused major number was requested with @major=0 parameter
280 * then the return value is the allocated major number in range
281 * [1..255] or a negative error code otherwise
282 */
283 int register_blkdev(unsigned int major, const char *name)
284 {
285 struct blk_major_name **n, *p;
286 int index, ret = 0;
287
288 mutex_lock(&block_class_lock);
289
290 /* temporary */
291 if (major == 0) {
/* dynamic major: pick the highest unused hash slot (never slot 0) */
292 for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
293 if (major_names[index] == NULL)
294 break;
295 }
296
297 if (index == 0) {
298 printk("register_blkdev: failed to get major for %s\n",
299 name);
300 ret = -EBUSY;
301 goto out;
302 }
303 major = index;
304 ret = major;
305 }
306
307 p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
308 if (p == NULL) {
309 ret = -ENOMEM;
310 goto out;
311 }
312
313 p->major = major;
314 strlcpy(p->name, name, sizeof(p->name));
315 p->next = NULL;
316 index = major_to_index(major);
317
/* walk the hash chain; stop early if the major is already taken */
318 for (n = &major_names[index]; *n; n = &(*n)->next) {
319 if ((*n)->major == major)
320 break;
321 }
322 if (!*n)
323 *n = p;
324 else
325 ret = -EBUSY;
326
327 if (ret < 0) {
328 printk("register_blkdev: cannot get major %d for %s\n",
329 major, name);
330 kfree(p);
331 }
332 out:
333 mutex_unlock(&block_class_lock);
334 return ret;
335 }
336
337 EXPORT_SYMBOL(register_blkdev);
338
339 void unregister_blkdev(unsigned int major, const char *name)
340 {
341 struct blk_major_name **n;
342 struct blk_major_name *p = NULL;
343 int index = major_to_index(major);
344
345 mutex_lock(&block_class_lock);
346 for (n = &major_names[index]; *n; n = &(*n)->next)
347 if ((*n)->major == major)
348 break;
/* warn if the major isn't registered or the name doesn't match */
349 if (!*n || strcmp((*n)->name, name)) {
350 WARN_ON(1);
351 } else {
352 p = *n;
353 *n = p->next;
354 }
355 mutex_unlock(&block_class_lock);
/* kfree(NULL) is a no-op, so this is safe on the WARN path too */
356 kfree(p);
357 }
358
359 EXPORT_SYMBOL(unregister_blkdev);
360
361 static struct kobj_map *bdev_map;
362
/**
 * blk_mangle_minor - scatter minor numbers apart
 * @minor: minor number to mangle
 *
 * Scatter consecutively allocated @minor number apart if MANGLE_DEVT
 * is enabled.  The transform mirrors the low MINORBITS bits (bit i
 * swaps with bit MINORBITS-1-i), so mangling twice gives the original
 * value.
 *
 * RETURNS:
 * Mangled value.
 *
 * CONTEXT:
 * Don't care.
 */
static int blk_mangle_minor(int minor)
{
#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT
	int i;

	for (i = 0; i < MINORBITS / 2; i++) {
		int lo_bit = minor & (1 << i);
		int hi_bit = minor & (1 << (MINORBITS - 1 - i));
		int shift = MINORBITS - 1 - 2 * i;

		/* clear the pair, then write each bit into the other's slot */
		minor &= ~(lo_bit | hi_bit);
		minor |= (lo_bit << shift) | (hi_bit >> shift);
	}
#endif
	return minor;
}
394
395 /**
396 * blk_alloc_devt - allocate a dev_t for a partition
397 * @part: partition to allocate dev_t for
398 * @devt: out parameter for resulting dev_t
399 *
400 * Allocate a dev_t for block device.
401 *
402 * RETURNS:
403 * 0 on success, allocated dev_t is returned in *@devt. -errno on
404 * failure.
405 *
406 * CONTEXT:
407 * Might sleep.
408 */
409 int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
410 {
411 struct gendisk *disk = part_to_disk(part);
412 int idx, rc;
413
414 /* in consecutive minor range? */
415 if (part->partno < disk->minors) {
416 *devt = MKDEV(disk->major, disk->first_minor + part->partno);
417 return 0;
418 }
419
420 /* allocate ext devt */
421 do {
422 if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
423 return -ENOMEM;
424 rc = idr_get_new(&ext_devt_idr, part, &idx);
425 } while (rc == -EAGAIN);
426
427 if (rc)
428 return rc;
429
430 if (idx > MAX_EXT_DEVT) {
431 idr_remove(&ext_devt_idr, idx);
432 return -EBUSY;
433 }
434
435 *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
436 return 0;
437 }
438
439 /**
440 * blk_free_devt - free a dev_t
441 * @devt: dev_t to free
442 *
443 * Free @devt which was allocated using blk_alloc_devt().
444 *
445 * CONTEXT:
446 * Might sleep.
447 */
448 void blk_free_devt(dev_t devt)
449 {
450 might_sleep();
451
452 if (devt == MKDEV(0, 0))
453 return;
454
/* only extended devts live in the idr; classic minors need no freeing */
455 if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
456 mutex_lock(&ext_devt_mutex);
457 idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
458 mutex_unlock(&ext_devt_mutex);
459 }
460 }
461
462 static char *bdevt_str(dev_t devt, char *buf)
463 {
464 if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
465 char tbuf[BDEVT_SIZE];
466 snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt));
467 snprintf(buf, BDEVT_SIZE, "%-9s", tbuf);
468 } else
469 snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt));
470
471 return buf;
472 }
473
474 /*
475 * Register device numbers dev..(dev+range-1)
476 * range must be nonzero
477 * The hash chain is sorted on range, so that subranges can override.
478 */
479 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
480 struct kobject *(*probe)(dev_t, int *, void *),
481 int (*lock)(dev_t, void *), void *data)
482 {
/* delegate to the generic kobj_map device-number map */
483 kobj_map(bdev_map, devt, range, module, probe, lock, data);
484 }
485
486 EXPORT_SYMBOL(blk_register_region);
487
488 void blk_unregister_region(dev_t devt, unsigned long range)
489 {
/* drop the probe mapping installed by blk_register_region() */
490 kobj_unmap(bdev_map, devt, range);
491 }
492
493 EXPORT_SYMBOL(blk_unregister_region);
494
/* kobj_map probe callback: the devt maps 1:1 onto this gendisk's device */
495 static struct kobject *exact_match(dev_t devt, int *partno, void *data)
496 {
497 struct gendisk *p = data;
498
499 return &disk_to_dev(p)->kobj;
500 }
501
/* kobj_map lock callback: take a disk ref, return -1 if it's going away */
502 static int exact_lock(dev_t devt, void *data)
503 {
504 struct gendisk *p = data;
505
506 if (!get_disk(p))
507 return -1;
508 return 0;
509 }
510
511 void register_disk(struct gendisk *disk)
512 {
513 struct device *ddev = disk_to_dev(disk);
514 struct block_device *bdev;
515 struct disk_part_iter piter;
516 struct hd_struct *part;
517 int err;
518
519 ddev->parent = disk->driverfs_dev;
520
521 dev_set_name(ddev, disk->disk_name);
522
523 /* delay uevents, until we scanned partition table */
524 dev_set_uevent_suppress(ddev, 1);
525
526 if (device_add(ddev))
527 return;
528 if (!sysfs_deprecated) {
529 err = sysfs_create_link(block_depr, &ddev->kobj,
530 kobject_name(&ddev->kobj));
531 if (err) {
532 device_del(ddev);
533 return;
534 }
535 }
536 disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
537 disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
538
539 /* No minors to use for partitions */
540 if (!disk_partitionable(disk))
541 goto exit;
542
543 /* No such device (e.g., media were just removed) */
544 if (!get_capacity(disk))
545 goto exit;
546
547 bdev = bdget_disk(disk, 0);
548 if (!bdev)
549 goto exit;
550
551 bdev->bd_invalidated = 1;
552 err = blkdev_get(bdev, FMODE_READ, NULL);
553 if (err < 0)
554 goto exit;
555 blkdev_put(bdev, FMODE_READ);
556
557 exit:
558 /* announce disk after possible partitions are created */
559 dev_set_uevent_suppress(ddev, 0);
560 kobject_uevent(&ddev->kobj, KOBJ_ADD);
561
562 /* announce possible partitions */
563 disk_part_iter_init(&piter, disk, 0);
564 while ((part = disk_part_iter_next(&piter)))
565 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
566 disk_part_iter_exit(&piter);
567 }
568
569 /**
570 * add_disk - add partitioning information to kernel list
571 * @disk: per-device partitioning information
572 *
573 * This function registers the partitioning information in @disk
574 * with the kernel.
575 *
576 * FIXME: error handling
577 */
578 void add_disk(struct gendisk *disk)
579 {
580 struct backing_dev_info *bdi;
581 dev_t devt;
582 int retval;
583
584 /* minors == 0 indicates to use ext devt from part0 and should
585 * be accompanied with EXT_DEVT flag. Make sure all
586 * parameters make sense.
587 */
588 WARN_ON(disk->minors && !(disk->major || disk->first_minor));
589 WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
590
591 disk->flags |= GENHD_FL_UP;
592
593 retval = blk_alloc_devt(&disk->part0, &devt);
594 if (retval) {
595 WARN_ON(1);
596 return;
597 }
598 disk_to_dev(disk)->devt = devt;
599
600 /* ->major and ->first_minor aren't supposed to be
601 * dereferenced from here on, but set them just in case.
602 */
603 disk->major = MAJOR(devt);
604 disk->first_minor = MINOR(devt);
605
606 /* Register BDI before referencing it from bdev */
607 bdi = &disk->queue->backing_dev_info;
608 bdi_register_dev(bdi, disk_devt(disk));
609
610 blk_register_region(disk_devt(disk), disk->minors, NULL,
611 exact_match, exact_lock, disk);
612 register_disk(disk);
613 blk_register_queue(disk);
614
/* NOTE(review): failure here is only warned about, see FIXME above */
615 retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
616 "bdi");
617 WARN_ON(retval);
618
619 disk_add_events(disk);
620 }
621 EXPORT_SYMBOL(add_disk);
622
623 void del_gendisk(struct gendisk *disk)
624 {
625 struct disk_part_iter piter;
626 struct hd_struct *part;
627
/* stop event polling before tearing anything down */
628 disk_del_events(disk);
629
630 /* invalidate stuff */
631 disk_part_iter_init(&piter, disk,
632 DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
633 while ((part = disk_part_iter_next(&piter))) {
634 invalidate_partition(disk, part->partno);
635 delete_partition(disk, part->partno);
636 }
637 disk_part_iter_exit(&piter);
638
639 invalidate_partition(disk, 0);
640 blk_free_devt(disk_to_dev(disk)->devt);
641 set_capacity(disk, 0);
642 disk->flags &= ~GENHD_FL_UP;
643
644 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
645 bdi_unregister(&disk->queue->backing_dev_info);
646 blk_unregister_queue(disk);
647 blk_unregister_region(disk_devt(disk), disk->minors);
648
/* reset part0 statistics before the device goes away */
649 part_stat_set_all(&disk->part0, 0);
650 disk->part0.stamp = 0;
651
652 kobject_put(disk->part0.holder_dir);
653 kobject_put(disk->slave_dir);
654 disk->driverfs_dev = NULL;
655 if (!sysfs_deprecated)
656 sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
657 device_del(disk_to_dev(disk));
658 }
659 EXPORT_SYMBOL(del_gendisk);
660
661 /**
662 * get_gendisk - get partitioning information for a given device
663 * @devt: device to get partitioning information for
664 * @partno: returned partition index
665 *
666 * This function gets the structure containing partitioning
667 * information for the given device @devt.
668 */
669 struct gendisk *get_gendisk(dev_t devt, int *partno)
670 {
671 struct gendisk *disk = NULL;
672
673 if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
674 struct kobject *kobj;
675
/* classic devt: resolve through the bdev_map probe table */
676 kobj = kobj_lookup(bdev_map, devt, partno);
677 if (kobj)
678 disk = dev_to_disk(kobj_to_dev(kobj));
679 } else {
680 struct hd_struct *part;
681
/* extended devt: look the partition up in the idr under the mutex */
682 mutex_lock(&ext_devt_mutex);
683 part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
684 if (part && get_disk(part_to_disk(part))) {
685 *partno = part->partno;
686 disk = part_to_disk(part);
687 }
688 mutex_unlock(&ext_devt_mutex);
689 }
690
691 return disk;
692 }
693 EXPORT_SYMBOL(get_gendisk);
694
695 /**
696 * bdget_disk - do bdget() by gendisk and partition number
697 * @disk: gendisk of interest
698 * @partno: partition number
699 *
700 * Find partition @partno from @disk, do bdget() on it.
701 *
702 * CONTEXT:
703 * Don't care.
704 *
705 * RETURNS:
706 * Resulting block_device on success, NULL on failure.
707 */
708 struct block_device *bdget_disk(struct gendisk *disk, int partno)
709 {
710 struct hd_struct *part;
711 struct block_device *bdev = NULL;
712
713 part = disk_get_part(disk, partno);
714 if (part)
715 bdev = bdget(part_devt(part));
716 disk_put_part(part);
717
718 return bdev;
719 }
720 EXPORT_SYMBOL(bdget_disk);
721
722 /*
723 * print a full list of all partitions - intended for places where the root
724 * filesystem can't be mounted and thus to give the victim some idea of what
725 * went wrong
726 */
727 void __init printk_all_partitions(void)
728 {
729 struct class_dev_iter iter;
730 struct device *dev;
731
732 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
733 while ((dev = class_dev_iter_next(&iter))) {
734 struct gendisk *disk = dev_to_disk(dev);
735 struct disk_part_iter piter;
736 struct hd_struct *part;
737 char name_buf[BDEVNAME_SIZE];
738 char devt_buf[BDEVT_SIZE];
739 u8 uuid[PARTITION_META_INFO_UUIDLTH * 2 + 1];
740
741 /*
742 * Don't show empty devices or things that have been
743 * suppressed
744 */
745 if (get_capacity(disk) == 0 ||
746 (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
747 continue;
748
749 /*
750 * Note, unlike /proc/partitions, I am showing the
751 * numbers in hex - the same format as the root=
752 * option takes.
753 */
754 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
755 while ((part = disk_part_iter_next(&piter))) {
756 bool is_part0 = part == &disk->part0;
757
758 uuid[0] = 0;
759 if (part->info)
760 part_unpack_uuid(part->info->uuid, uuid);
761
/* sizes are printed in 1 KiB units, hence the >> 1 on sectors */
762 printk("%s%s %10llu %s %s", is_part0 ? "" : " ",
763 bdevt_str(part_devt(part), devt_buf),
764 (unsigned long long)part->nr_sects >> 1,
765 disk_name(disk, part->partno, name_buf), uuid);
766 if (is_part0) {
767 if (disk->driverfs_dev != NULL &&
768 disk->driverfs_dev->driver != NULL)
769 printk(" driver: %s\n",
770 disk->driverfs_dev->driver->name);
771 else
772 printk(" (driver?)\n");
773 } else
774 printk("\n");
775 }
776 disk_part_iter_exit(&piter);
777 }
778 class_dev_iter_exit(&iter);
779 }
780
781 #ifdef CONFIG_PROC_FS
782 /* iterator */
/* seq_file start: allocate a class-dev iterator and skip forward to *pos */
783 static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos)
784 {
785 loff_t skip = *pos;
786 struct class_dev_iter *iter;
787 struct device *dev;
788
789 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
790 if (!iter)
791 return ERR_PTR(-ENOMEM);
792
/* stash the iterator; disk_seqf_stop() exits and frees it */
793 seqf->private = iter;
794 class_dev_iter_init(iter, &block_class, NULL, &disk_type);
795 do {
796 dev = class_dev_iter_next(iter);
797 if (!dev)
798 return NULL;
799 } while (skip--);
800
801 return dev_to_disk(dev);
802 }
803
804 static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos)
805 {
806 struct device *dev;
807
808 (*pos)++;
809 dev = class_dev_iter_next(seqf->private);
810 if (dev)
811 return dev_to_disk(dev);
812
813 return NULL;
814 }
815
/* seq_file stop: tear down the iterator allocated by disk_seqf_start() */
816 static void disk_seqf_stop(struct seq_file *seqf, void *v)
817 {
818 struct class_dev_iter *iter = seqf->private;
819
820 /* stop is called even after start failed :-( */
821 if (iter) {
822 class_dev_iter_exit(iter);
823 kfree(iter);
824 }
825 }
826
827 static void *show_partition_start(struct seq_file *seqf, loff_t *pos)
828 {
829 static void *p;
830
831 p = disk_seqf_start(seqf, pos);
832 if (!IS_ERR_OR_NULL(p) && !*pos)
833 seq_puts(seqf, "major minor #blocks name\n\n");
834 return p;
835 }
836
837 static int show_partition(struct seq_file *seqf, void *v)
838 {
839 struct gendisk *sgp = v;
840 struct disk_part_iter piter;
841 struct hd_struct *part;
842 char buf[BDEVNAME_SIZE];
843
844 /* Don't show non-partitionable removeable devices or empty devices */
845 if (!get_capacity(sgp) || (!disk_partitionable(sgp) &&
846 (sgp->flags & GENHD_FL_REMOVABLE)))
847 return 0;
848 if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
849 return 0;
850
851 /* show the full disk and all non-0 size partitions of it */
852 disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0);
853 while ((part = disk_part_iter_next(&piter)))
/* sizes are reported in 1 KiB blocks, hence the >> 1 on sectors */
854 seq_printf(seqf, "%4d %7d %10llu %s\n",
855 MAJOR(part_devt(part)), MINOR(part_devt(part)),
856 (unsigned long long)part->nr_sects >> 1,
857 disk_name(sgp, part->partno, buf));
858 disk_part_iter_exit(&piter);
859
860 return 0;
861 }
862
/* /proc/partitions plumbing */
863 static const struct seq_operations partitions_op = {
864 .start = show_partition_start,
865 .next = disk_seqf_next,
866 .stop = disk_seqf_stop,
867 .show = show_partition
868 };
869
870 static int partitions_open(struct inode *inode, struct file *file)
871 {
872 return seq_open(file, &partitions_op);
873 }
874
875 static const struct file_operations proc_partitions_operations = {
876 .open = partitions_open,
877 .read = seq_read,
878 .llseek = seq_lseek,
879 .release = seq_release,
880 };
881 #endif
882
883
/* kobj_map fallback probe: try to load a module providing this major */
884 static struct kobject *base_probe(dev_t devt, int *partno, void *data)
885 {
886 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
887 /* Make old-style 2.4 aliases work */
888 request_module("block-major-%d", MAJOR(devt));
889 return NULL;
890 }
891
892 static int __init genhd_device_init(void)
893 {
894 int error;
895
896 block_class.dev_kobj = sysfs_dev_block_kobj;
897 error = class_register(&block_class);
898 if (unlikely(error))
899 return error;
900 bdev_map = kobj_map_init(base_probe, &block_class_lock);
901 blk_dev_init();
902
/* reserve the extended-devt major; return value deliberately ignored */
903 register_blkdev(BLOCK_EXT_MAJOR, "blkext");
904
905 /* create top-level block dir */
906 if (!sysfs_deprecated)
907 block_depr = kobject_create_and_add("block", NULL);
908 return 0;
909 }
910
911 subsys_initcall(genhd_device_init);
912
913 static ssize_t alias_show(struct device *dev,
914 struct device_attribute *attr, char *buf)
915 {
916 struct gendisk *disk = dev_to_disk(dev);
917 ssize_t ret = 0;
918
919 if (disk->alias)
920 ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
921 return ret;
922 }
923
924 static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
925 const char *buf, size_t count)
926 {
927 struct gendisk *disk = dev_to_disk(dev);
928 char *alias;
929 char *envp[] = { NULL, NULL };
930 unsigned char c;
931 int i;
932 ssize_t ret = count;
933
934 if (!count)
935 return -EINVAL;
936
937 if (count >= ALIAS_LEN) {
938 printk(KERN_ERR "alias: alias is too long\n");
939 return -EINVAL;
940 }
941
942 /* Validation check */
/* accept only [A-Za-z0-9_-]; a single trailing newline is tolerated */
943 for (i = 0; i < count; i++) {
944 c = buf[i];
945 if (i == count - 1 && c == '\n')
946 break;
947 if (!isalnum(c) && c != '_' && c != '-') {
948 printk(KERN_ERR "alias: invalid alias\n");
949 return -EINVAL;
950 }
951 }
952
/* NOTE(review): unlocked check-then-set; two concurrent writers race */
953 if (disk->alias) {
954 printk(KERN_INFO "alias: %s is already assigned (%s)\n",
955 disk->disk_name, disk->alias);
956 return -EINVAL;
957 }
958
959 alias = kasprintf(GFP_KERNEL, "%s", buf);
960 if (!alias)
961 return -ENOMEM;
962
/* strip the trailing newline users typically echo in */
963 if (alias[count - 1] == '\n')
964 alias[count - 1] = '\0';
965
966 envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
967 if (!envp[0]) {
968 kfree(alias);
969 return -ENOMEM;
970 }
971
972 disk->alias = alias;
973 printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
974
975 kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
976
977 kfree(envp[0]);
978 return ret;
979 }
980
/* sysfs show helpers for the per-disk attributes under /sys/block/<disk>/ */
981 static ssize_t disk_range_show(struct device *dev,
982 struct device_attribute *attr, char *buf)
983 {
984 struct gendisk *disk = dev_to_disk(dev);
985
986 return sprintf(buf, "%d\n", disk->minors);
987 }
988
989 static ssize_t disk_ext_range_show(struct device *dev,
990 struct device_attribute *attr, char *buf)
991 {
992 struct gendisk *disk = dev_to_disk(dev);
993
994 return sprintf(buf, "%d\n", disk_max_parts(disk));
995 }
996
997 static ssize_t disk_removable_show(struct device *dev,
998 struct device_attribute *attr, char *buf)
999 {
1000 struct gendisk *disk = dev_to_disk(dev);
1001
1002 return sprintf(buf, "%d\n",
1003 (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
1004 }
1005
1006 static ssize_t disk_ro_show(struct device *dev,
1007 struct device_attribute *attr, char *buf)
1008 {
1009 struct gendisk *disk = dev_to_disk(dev);
1010
1011 return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0);
1012 }
1013
1014 static ssize_t disk_capability_show(struct device *dev,
1015 struct device_attribute *attr, char *buf)
1016 {
1017 struct gendisk *disk = dev_to_disk(dev);
1018
1019 return sprintf(buf, "%x\n", disk->flags);
1020 }
1021
1022 static ssize_t disk_alignment_offset_show(struct device *dev,
1023 struct device_attribute *attr,
1024 char *buf)
1025 {
1026 struct gendisk *disk = dev_to_disk(dev);
1027
1028 return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
1029 }
1030
1031 static ssize_t disk_discard_alignment_show(struct device *dev,
1032 struct device_attribute *attr,
1033 char *buf)
1034 {
1035 struct gendisk *disk = dev_to_disk(dev);
1036
1037 return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
1038 }
1039
/* attribute declarations; part_* show/store helpers live in partition code */
1040 static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
1041 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
1042 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
1043 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
1044 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
1045 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
1046 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
1047 static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show,
1048 NULL);
1049 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
1050 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
1051 static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
1052 #ifdef CONFIG_FAIL_MAKE_REQUEST
1053 static struct device_attribute dev_attr_fail =
1054 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
1055 #endif
1056 #ifdef CONFIG_FAIL_IO_TIMEOUT
1057 static struct device_attribute dev_attr_fail_timeout =
1058 __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show,
1059 part_timeout_store);
1060 #endif
1061
/* attribute table wired into disk_type below */
1062 static struct attribute *disk_attrs[] = {
1063 &dev_attr_alias.attr,
1064 &dev_attr_range.attr,
1065 &dev_attr_ext_range.attr,
1066 &dev_attr_removable.attr,
1067 &dev_attr_ro.attr,
1068 &dev_attr_size.attr,
1069 &dev_attr_alignment_offset.attr,
1070 &dev_attr_discard_alignment.attr,
1071 &dev_attr_capability.attr,
1072 &dev_attr_stat.attr,
1073 &dev_attr_inflight.attr,
1074 #ifdef CONFIG_FAIL_MAKE_REQUEST
1075 &dev_attr_fail.attr,
1076 #endif
1077 #ifdef CONFIG_FAIL_IO_TIMEOUT
1078 &dev_attr_fail_timeout.attr,
1079 #endif
1080 NULL
1081 };
1082
1083 static struct attribute_group disk_attr_group = {
1084 .attrs = disk_attrs,
1085 };
1086
1087 static const struct attribute_group *disk_attr_groups[] = {
1088 &disk_attr_group,
1089 NULL
1090 };
1091
1092 /**
1093 * disk_replace_part_tbl - replace disk->part_tbl in RCU-safe way
1094 * @disk: disk to replace part_tbl for
1095 * @new_ptbl: new part_tbl to install
1096 *
1097 * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The
1098 * original ptbl is freed using RCU callback.
1099 *
1100 * LOCKING:
1101 * Matching bd_mutex locked.
1102 */
1103 static void disk_replace_part_tbl(struct gendisk *disk,
1104 struct disk_part_tbl *new_ptbl)
1105 {
1106 struct disk_part_tbl *old_ptbl = disk->part_tbl;
1107
1108 rcu_assign_pointer(disk->part_tbl, new_ptbl);
1109
1110 if (old_ptbl) {
/* clear the lookup cache so no one holds a soon-to-be-freed part */
1111 rcu_assign_pointer(old_ptbl->last_lookup, NULL);
1112 kfree_rcu(old_ptbl, rcu_head);
1113 }
1114 }
1115
1116 /**
1117 * disk_expand_part_tbl - expand disk->part_tbl
1118 * @disk: disk to expand part_tbl for
1119 * @partno: expand such that this partno can fit in
1120 *
1121 * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl
1122 * uses RCU to allow unlocked dereferencing for stats and other stuff.
1123 *
1124 * LOCKING:
1125 * Matching bd_mutex locked, might sleep.
1126 *
1127 * RETURNS:
1128 * 0 on success, -errno on failure.
1129 */
1130 int disk_expand_part_tbl(struct gendisk *disk, int partno)
1131 {
1132 struct disk_part_tbl *old_ptbl = disk->part_tbl;
1133 struct disk_part_tbl *new_ptbl;
1134 int len = old_ptbl ? old_ptbl->len : 0;
1135 int target = partno + 1;
1136 size_t size;
1137 int i;
1138
1139 /* disk_max_parts() is zero during initialization, ignore if so */
1140 if (disk_max_parts(disk) && target > disk_max_parts(disk))
1141 return -EINVAL;
1142
1143 if (target <= len)
1144 return 0;
1145
1146 size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]);
1147 new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id);
1148 if (!new_ptbl)
1149 return -ENOMEM;
1150
1151 new_ptbl->len = target;
1152
1153 for (i = 0; i < len; i++)
1154 rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]);
1155
1156 disk_replace_part_tbl(disk, new_ptbl);
1157 return 0;
1158 }
1159
1160 static void disk_release(struct device *dev)
1161 {
1162 struct gendisk *disk = dev_to_disk(dev);
1163
/* final put of the disk device: free everything hanging off it */
1164 disk_release_events(disk);
1165 kfree(disk->random);
1166 disk_replace_part_tbl(disk, NULL);
1167 free_part_stats(&disk->part0);
1168 free_part_info(&disk->part0);
1169 kfree(disk);
1170 }
1171 struct class block_class = {
1172 .name = "block",
1173 };
1174
1175 static char *block_devnode(struct device *dev, mode_t *mode)
1176 {
1177 struct gendisk *disk = dev_to_disk(dev);
1178
1179 if (disk->devnode)
1180 return disk->devnode(disk, mode);
1181 return NULL;
1182 }
1183
/* device_type shared by all gendisk devices */
1184 static struct device_type disk_type = {
1185 .name = "disk",
1186 .groups = disk_attr_groups,
1187 .release = disk_release,
1188 .devnode = block_devnode,
1189 };
1190
1191 #ifdef CONFIG_PROC_FS
1192 /*
1193 * aggregate disk stat collector. Uses the same stats that the sysfs
1194 * entries do, above, but makes them available through one seq_file.
1195 *
1196 * The output looks suspiciously like /proc/partitions with a bunch of
1197 * extra fields.
1198 */
/*
 * seq_file ->show for /proc/diskstats: one line per disk and per
 * partition -- major, minor, name, then the accounting fields
 * (ios/merges/sectors/ticks for reads and writes, requests in flight,
 * io_ticks and time_in_queue, tick values converted to msecs).
 */
static int diskstats_show(struct seq_file *seqf, void *v)
{
	struct gendisk *gp = v;
	struct disk_part_iter piter;
	struct hd_struct *hd;
	char buf[BDEVNAME_SIZE];
	int cpu;

	/*
	if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
		seq_puts(seqf,	"major minor name"
				" rio rmerge rsect ruse wio wmerge "
				"wsect wuse running use aveq"
				"\n\n");
	*/

	/* include empty part0 so the whole disk always gets a line */
	disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
	while ((hd = disk_part_iter_next(&piter))) {
		/* fold in-flight time into the counters before reading */
		cpu = part_stat_lock();
		part_round_stats(cpu, hd);
		part_stat_unlock();
		seq_printf(seqf, "%4d %7d %s %lu %lu %lu "
			   "%u %lu %lu %lu %u %u %u %u\n",
			   MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
			   disk_name(gp, hd->partno, buf),
			   part_stat_read(hd, ios[READ]),
			   part_stat_read(hd, merges[READ]),
			   part_stat_read(hd, sectors[READ]),
			   jiffies_to_msecs(part_stat_read(hd, ticks[READ])),
			   part_stat_read(hd, ios[WRITE]),
			   part_stat_read(hd, merges[WRITE]),
			   part_stat_read(hd, sectors[WRITE]),
			   jiffies_to_msecs(part_stat_read(hd, ticks[WRITE])),
			   part_in_flight(hd),
			   jiffies_to_msecs(part_stat_read(hd, io_ticks)),
			   jiffies_to_msecs(part_stat_read(hd, time_in_queue))
			);
	}
	disk_part_iter_exit(&piter);

	return 0;
}
1241
/* seq_file ops: reuse the generic disk iterators, custom ->show */
static const struct seq_operations diskstats_op = {
	.start	= disk_seqf_start,
	.next	= disk_seqf_next,
	.stop	= disk_seqf_stop,
	.show	= diskstats_show
};

/* ->open for /proc/diskstats: plain seq_file, no private state */
static int diskstats_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &diskstats_op);
}

static const struct file_operations proc_diskstats_operations = {
	.open		= diskstats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
1260
/* register /proc/diskstats and /proc/partitions at boot */
static int __init proc_genhd_init(void)
{
	proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
	proc_create("partitions", 0, NULL, &proc_partitions_operations);
	return 0;
}
module_init(proc_genhd_init);
#endif /* CONFIG_PROC_FS */
1269
1270 dev_t blk_lookup_devt(const char *name, int partno)
1271 {
1272 dev_t devt = MKDEV(0, 0);
1273 struct class_dev_iter iter;
1274 struct device *dev;
1275
1276 class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
1277 while ((dev = class_dev_iter_next(&iter))) {
1278 struct gendisk *disk = dev_to_disk(dev);
1279 struct hd_struct *part;
1280
1281 if (strcmp(dev_name(dev), name))
1282 continue;
1283
1284 if (partno < disk->minors) {
1285 /* We need to return the right devno, even
1286 * if the partition doesn't exist yet.
1287 */
1288 devt = MKDEV(MAJOR(dev->devt),
1289 MINOR(dev->devt) + partno);
1290 break;
1291 }
1292 part = disk_get_part(disk, partno);
1293 if (part) {
1294 devt = part_devt(part);
1295 disk_put_part(part);
1296 break;
1297 }
1298 disk_put_part(part);
1299 }
1300 class_dev_iter_exit(&iter);
1301 return devt;
1302 }
1303 EXPORT_SYMBOL(blk_lookup_devt);
1304
/**
 * alloc_disk - allocate a gendisk with @minors minors
 * @minors: number of minors (whole device + partitions) the disk uses
 *
 * NUMA-agnostic wrapper around alloc_disk_node().
 */
struct gendisk *alloc_disk(int minors)
{
	return alloc_disk_node(minors, -1);
}
EXPORT_SYMBOL(alloc_disk);
1310
1311 struct gendisk *alloc_disk_node(int minors, int node_id)
1312 {
1313 struct gendisk *disk;
1314
1315 disk = kmalloc_node(sizeof(struct gendisk),
1316 GFP_KERNEL | __GFP_ZERO, node_id);
1317 if (disk) {
1318 if (!init_part_stats(&disk->part0)) {
1319 kfree(disk);
1320 return NULL;
1321 }
1322 disk->node_id = node_id;
1323 if (disk_expand_part_tbl(disk, 0)) {
1324 free_part_stats(&disk->part0);
1325 kfree(disk);
1326 return NULL;
1327 }
1328 disk->part_tbl->part[0] = &disk->part0;
1329
1330 hd_ref_init(&disk->part0);
1331
1332 disk->minors = minors;
1333 rand_initialize_disk(disk);
1334 disk_to_dev(disk)->class = &block_class;
1335 disk_to_dev(disk)->type = &disk_type;
1336 device_initialize(disk_to_dev(disk));
1337 }
1338 return disk;
1339 }
1340 EXPORT_SYMBOL(alloc_disk_node);
1341
1342 struct kobject *get_disk(struct gendisk *disk)
1343 {
1344 struct module *owner;
1345 struct kobject *kobj;
1346
1347 if (!disk->fops)
1348 return NULL;
1349 owner = disk->fops->owner;
1350 if (owner && !try_module_get(owner))
1351 return NULL;
1352 kobj = kobject_get(&disk_to_dev(disk)->kobj);
1353 if (kobj == NULL) {
1354 module_put(owner);
1355 return NULL;
1356 }
1357 return kobj;
1358
1359 }
1360
1361 EXPORT_SYMBOL(get_disk);
1362
1363 void put_disk(struct gendisk *disk)
1364 {
1365 if (disk)
1366 kobject_put(&disk_to_dev(disk)->kobj);
1367 }
1368
1369 EXPORT_SYMBOL(put_disk);
1370
1371 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
1372 {
1373 char event[] = "DISK_RO=1";
1374 char *envp[] = { event, NULL };
1375
1376 if (!ro)
1377 event[8] = '0';
1378 kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp);
1379 }
1380
/* Set the read-only policy of the single partition backing @bdev. */
void set_device_ro(struct block_device *bdev, int flag)
{
	bdev->bd_part->policy = flag;
}

EXPORT_SYMBOL(set_device_ro);
1387
1388 void set_disk_ro(struct gendisk *disk, int flag)
1389 {
1390 struct disk_part_iter piter;
1391 struct hd_struct *part;
1392
1393 if (disk->part0.policy != flag) {
1394 set_disk_ro_uevent(disk, flag);
1395 disk->part0.policy = flag;
1396 }
1397
1398 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
1399 while ((part = disk_part_iter_next(&piter)))
1400 part->policy = flag;
1401 disk_part_iter_exit(&piter);
1402 }
1403
1404 EXPORT_SYMBOL(set_disk_ro);
1405
1406 int bdev_read_only(struct block_device *bdev)
1407 {
1408 if (!bdev)
1409 return 0;
1410 return bdev->bd_part->policy;
1411 }
1412
1413 EXPORT_SYMBOL(bdev_read_only);
1414
1415 int invalidate_partition(struct gendisk *disk, int partno)
1416 {
1417 int res = 0;
1418 struct block_device *bdev = bdget_disk(disk, partno);
1419 if (bdev) {
1420 fsync_bdev(bdev);
1421 res = __invalidate_device(bdev, true);
1422 bdput(bdev);
1423 }
1424 return res;
1425 }
1426
1427 EXPORT_SYMBOL(invalidate_partition);
1428
/*
 * Disk events - monitor disk events like media change and eject request.
 */
struct disk_events {
	struct list_head	node;		/* all disk_event's */
	struct gendisk		*disk;		/* the associated disk */
	spinlock_t		lock;		/* protects block/pending/clearing */

	struct mutex		block_mutex;	/* protects blocking */
	int			block;		/* event blocking depth */
	unsigned int		pending;	/* events already sent out */
	unsigned int		clearing;	/* events being cleared */

	long			poll_msecs;	/* interval, -1 for default */
	struct delayed_work	dwork;		/* the actual polling work */
};

/* sysfs names, indexed by event bit number */
static const char *disk_events_strs[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "media_change",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "eject_request",
};

/* uevent environment strings, indexed by event bit number */
static char *disk_uevents[] = {
	[ilog2(DISK_EVENT_MEDIA_CHANGE)]	= "DISK_MEDIA_CHANGE=1",
	[ilog2(DISK_EVENT_EJECT_REQUEST)]	= "DISK_EJECT_REQUEST=1",
};

/* list of all disk_events */
static DEFINE_MUTEX(disk_events_mutex);
static LIST_HEAD(disk_events);

/* disable in-kernel polling by default */
static unsigned long disk_events_dfl_poll_msecs = 0;
1462
1463 static unsigned long disk_events_poll_jiffies(struct gendisk *disk)
1464 {
1465 struct disk_events *ev = disk->ev;
1466 long intv_msecs = 0;
1467
1468 /*
1469 * If device-specific poll interval is set, always use it. If
1470 * the default is being used, poll iff there are events which
1471 * can't be monitored asynchronously.
1472 */
1473 if (ev->poll_msecs >= 0)
1474 intv_msecs = ev->poll_msecs;
1475 else if (disk->events & ~disk->async_events)
1476 intv_msecs = disk_events_dfl_poll_msecs;
1477
1478 return msecs_to_jiffies(intv_msecs);
1479 }
1480
1481 /**
1482 * disk_block_events - block and flush disk event checking
1483 * @disk: disk to block events for
1484 *
1485 * On return from this function, it is guaranteed that event checking
1486 * isn't in progress and won't happen until unblocked by
1487 * disk_unblock_events(). Events blocking is counted and the actual
1488 * unblocking happens after the matching number of unblocks are done.
1489 *
1490 * Note that this intentionally does not block event checking from
1491 * disk_clear_events().
1492 *
1493 * CONTEXT:
1494 * Might sleep.
1495 */
1496 void disk_block_events(struct gendisk *disk)
1497 {
1498 struct disk_events *ev = disk->ev;
1499 unsigned long flags;
1500 bool cancel;
1501
1502 if (!ev)
1503 return;
1504
1505 /*
1506 * Outer mutex ensures that the first blocker completes canceling
1507 * the event work before further blockers are allowed to finish.
1508 */
1509 mutex_lock(&ev->block_mutex);
1510
1511 spin_lock_irqsave(&ev->lock, flags);
1512 cancel = !ev->block++;
1513 spin_unlock_irqrestore(&ev->lock, flags);
1514
1515 if (cancel)
1516 cancel_delayed_work_sync(&disk->ev->dwork);
1517
1518 mutex_unlock(&ev->block_mutex);
1519 }
1520
/*
 * Decrement the event block count; on the final unblock restart event
 * checking -- immediately when @check_now, otherwise after the poll
 * interval (if polling is enabled).  Called with ev->lock NOT held;
 * safe from irq context.
 */
static void __disk_unblock_events(struct gendisk *disk, bool check_now)
{
	struct disk_events *ev = disk->ev;
	unsigned long intv;
	unsigned long flags;

	spin_lock_irqsave(&ev->lock, flags);

	/* an unblock without a matching block is a caller bug */
	if (WARN_ON_ONCE(ev->block <= 0))
		goto out_unlock;

	if (--ev->block)
		goto out_unlock;

	/*
	 * Not exactly a latency critical operation, set poll timer
	 * slack to 25% and kick event check.
	 */
	intv = disk_events_poll_jiffies(disk);
	set_timer_slack(&ev->dwork.timer, intv / 4);
	if (check_now)
		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
	else if (intv)
		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
out_unlock:
	spin_unlock_irqrestore(&ev->lock, flags);
}
1548
1549 /**
1550 * disk_unblock_events - unblock disk event checking
1551 * @disk: disk to unblock events for
1552 *
1553 * Undo disk_block_events(). When the block count reaches zero, it
1554 * starts events polling if configured.
1555 *
1556 * CONTEXT:
1557 * Don't care. Safe to call from irq context.
1558 */
1559 void disk_unblock_events(struct gendisk *disk)
1560 {
1561 if (disk->ev)
1562 __disk_unblock_events(disk, false);
1563 }
1564
/**
 * disk_flush_events - schedule immediate event checking and flushing
 * @disk: disk to check and flush events for
 * @mask: events to flush
 *
 * Schedule immediate event checking on @disk if not blocked.  Events in
 * @mask are scheduled to be cleared from the driver.  Note that this
 * doesn't clear the events from @disk->ev.
 *
 * CONTEXT:
 * If @mask is non-zero must be called with bdev->bd_mutex held.
 */
void disk_flush_events(struct gendisk *disk, unsigned int mask)
{
	struct disk_events *ev = disk->ev;

	/* nothing to do for drivers without event support */
	if (!ev)
		return;

	spin_lock_irq(&ev->lock);
	ev->clearing |= mask;
	if (!ev->block) {
		/* requeue the work to run right away */
		cancel_delayed_work(&ev->dwork);
		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
	}
	spin_unlock_irq(&ev->lock);
}
1592
/**
 * disk_clear_events - synchronously check, clear and return pending events
 * @disk: disk to fetch and clear events from
 * @mask: mask of events to be fetched and cleared
 *
 * Disk events are synchronously checked and pending events in @mask
 * are cleared and returned.  This ignores the block count.
 *
 * CONTEXT:
 * Might sleep.
 */
unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
{
	const struct block_device_operations *bdops = disk->fops;
	struct disk_events *ev = disk->ev;
	unsigned int pending;

	if (!ev) {
		/* for drivers still using the old ->media_changed method */
		if ((mask & DISK_EVENT_MEDIA_CHANGE) &&
		    bdops->media_changed && bdops->media_changed(disk))
			return DISK_EVENT_MEDIA_CHANGE;
		return 0;
	}

	/* tell the workfn about the events being cleared */
	spin_lock_irq(&ev->lock);
	ev->clearing |= mask;
	spin_unlock_irq(&ev->lock);

	/* unconditionally schedule event check and wait for it to finish */
	disk_block_events(disk);
	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
	flush_delayed_work(&ev->dwork);
	__disk_unblock_events(disk, false);

	/* then, fetch and clear pending events */
	spin_lock_irq(&ev->lock);
	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
	pending = ev->pending & mask;
	ev->pending &= ~mask;
	spin_unlock_irq(&ev->lock);

	return pending;
}
1638
/*
 * Delayed work function doing the actual event check: asks the driver
 * via ->check_events(), accumulates new events in ev->pending,
 * reschedules the next poll, and finally sends uevents for newly seen
 * events that are listed in disk->events.
 */
static void disk_events_workfn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
	struct gendisk *disk = ev->disk;
	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
	unsigned int clearing = ev->clearing;	/* snapshot before checking */
	unsigned int events;
	unsigned long intv;
	int nr_events = 0, i;

	/* check events */
	events = disk->fops->check_events(disk, clearing);

	/* accumulate pending events and schedule next poll if necessary */
	spin_lock_irq(&ev->lock);

	events &= ~ev->pending;		/* only report each event once */
	ev->pending |= events;
	ev->clearing &= ~clearing;	/* only drop the bits we snapshotted */

	intv = disk_events_poll_jiffies(disk);
	if (!ev->block && intv)
		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);

	spin_unlock_irq(&ev->lock);

	/*
	 * Tell userland about new events.  Only the events listed in
	 * @disk->events are reported.  Unlisted events are processed the
	 * same internally but never get reported to userland.
	 */
	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
		if (events & disk->events & (1 << i))
			envp[nr_events++] = disk_uevents[i];

	if (nr_events)
		kobject_uevent_env(&disk_to_dev(disk)->kobj, KOBJ_CHANGE, envp);
}
1678
/*
 * A disk events enabled device has the following sysfs nodes under
 * its /sys/block/X/ directory.
 *
 * events		: list of all supported events
 * events_async		: list of events which can be detected w/o polling
 * events_poll_msecs	: polling interval, 0: disable, -1: system default
 */

/* format @events as a space-separated, newline-terminated name list */
static ssize_t __disk_events_show(unsigned int events, char *buf)
{
	const char *delim = "";
	ssize_t pos = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(disk_events_strs); i++)
		if (events & (1 << i)) {
			pos += sprintf(buf + pos, "%s%s",
				       delim, disk_events_strs[i]);
			delim = " ";	/* separator from the second name on */
		}
	if (pos)
		pos += sprintf(buf + pos, "\n");
	return pos;
}
1703
1704 static ssize_t disk_events_show(struct device *dev,
1705 struct device_attribute *attr, char *buf)
1706 {
1707 struct gendisk *disk = dev_to_disk(dev);
1708
1709 return __disk_events_show(disk->events, buf);
1710 }
1711
1712 static ssize_t disk_events_async_show(struct device *dev,
1713 struct device_attribute *attr, char *buf)
1714 {
1715 struct gendisk *disk = dev_to_disk(dev);
1716
1717 return __disk_events_show(disk->async_events, buf);
1718 }
1719
1720 static ssize_t disk_events_poll_msecs_show(struct device *dev,
1721 struct device_attribute *attr,
1722 char *buf)
1723 {
1724 struct gendisk *disk = dev_to_disk(dev);
1725
1726 return sprintf(buf, "%ld\n", disk->ev->poll_msecs);
1727 }
1728
/*
 * sysfs "events_poll_msecs" store: -1 selects the system default,
 * 0 disables polling, >0 sets a device-specific interval in msecs.
 */
static ssize_t disk_events_poll_msecs_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	long intv;

	if (!count || !sscanf(buf, "%ld", &intv))
		return -EINVAL;

	/* -1 is the only valid negative value */
	if (intv < 0 && intv != -1)
		return -EINVAL;

	/* block/unblock so the new interval takes effect immediately */
	disk_block_events(disk);
	disk->ev->poll_msecs = intv;
	__disk_unblock_events(disk, true);

	return count;
}
1748
static const DEVICE_ATTR(events, S_IRUGO, disk_events_show, NULL);
static const DEVICE_ATTR(events_async, S_IRUGO, disk_events_async_show, NULL);
static const DEVICE_ATTR(events_poll_msecs, S_IRUGO|S_IWUSR,
			 disk_events_poll_msecs_show,
			 disk_events_poll_msecs_store);

/* attribute set added to each event-capable disk by disk_add_events() */
static const struct attribute *disk_events_attrs[] = {
	&dev_attr_events.attr,
	&dev_attr_events_async.attr,
	&dev_attr_events_poll_msecs.attr,
	NULL,
};
1761
/*
 * The default polling interval can be specified by the kernel
 * parameter block.events_dfl_poll_msecs which defaults to 0
 * (disable).  This can also be modified runtime by writing to
 * /sys/module/block/events_dfl_poll_msecs.
 */
static int disk_events_set_dfl_poll_msecs(const char *val,
					  const struct kernel_param *kp)
{
	struct disk_events *ev;
	int ret;

	ret = param_set_ulong(val, kp);
	if (ret < 0)
		return ret;

	mutex_lock(&disk_events_mutex);

	/* reschedule every disk's poll work so the new default applies now */
	list_for_each_entry(ev, &disk_events, node)
		disk_flush_events(ev->disk, 0);

	mutex_unlock(&disk_events_mutex);

	return 0;
}
1787
static const struct kernel_param_ops disk_events_dfl_poll_msecs_param_ops = {
	.set	= disk_events_set_dfl_poll_msecs,
	.get	= param_get_ulong,
};

/* expose the parameter as "block.events_dfl_poll_msecs" */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX	"block."

module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
		&disk_events_dfl_poll_msecs, 0644);
1798
1799 /*
1800 * disk_{add|del|release}_events - initialize and destroy disk_events.
1801 */
1802 static void disk_add_events(struct gendisk *disk)
1803 {
1804 struct disk_events *ev;
1805
1806 if (!disk->fops->check_events)
1807 return;
1808
1809 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
1810 if (!ev) {
1811 pr_warn("%s: failed to initialize events\n", disk->disk_name);
1812 return;
1813 }
1814
1815 if (sysfs_create_files(&disk_to_dev(disk)->kobj,
1816 disk_events_attrs) < 0) {
1817 pr_warn("%s: failed to create sysfs files for events\n",
1818 disk->disk_name);
1819 kfree(ev);
1820 return;
1821 }
1822
1823 disk->ev = ev;
1824
1825 INIT_LIST_HEAD(&ev->node);
1826 ev->disk = disk;
1827 spin_lock_init(&ev->lock);
1828 mutex_init(&ev->block_mutex);
1829 ev->block = 1;
1830 ev->poll_msecs = -1;
1831 INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
1832
1833 mutex_lock(&disk_events_mutex);
1834 list_add_tail(&ev->node, &disk_events);
1835 mutex_unlock(&disk_events_mutex);
1836
1837 /*
1838 * Block count is initialized to 1 and the following initial
1839 * unblock kicks it into action.
1840 */
1841 __disk_unblock_events(disk, true);
1842 }
1843
/*
 * Stop event checking for @disk and take it off the global list.  The
 * disk_events struct itself stays allocated (with block count 1) until
 * disk_release_events() frees it from the device release path.
 */
static void disk_del_events(struct gendisk *disk)
{
	if (!disk->ev)
		return;

	/* leaves the block count at >= 1, so checking never restarts */
	disk_block_events(disk);

	mutex_lock(&disk_events_mutex);
	list_del_init(&disk->ev->node);
	mutex_unlock(&disk_events_mutex);

	sysfs_remove_files(&disk_to_dev(disk)->kobj, disk_events_attrs);
}
1857
/* Final teardown from disk_release(); disk_del_events() ran earlier. */
static void disk_release_events(struct gendisk *disk)
{
	/* the block count should be 1 from disk_del_events() */
	WARN_ON_ONCE(disk->ev && disk->ev->block != 1);
	kfree(disk->ev);
}
This page took 0.116674 seconds and 5 git commands to generate.