#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);
static struct class *bdi_class;
/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
        struct inode *inode;

        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&inode_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_list)
                nr_more_io++;
        spin_unlock(&inode_lock);

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:     %8lu kB\n"
                   "BdiReclaimable:   %8lu kB\n"
                   "BdiDirtyThresh:   %8lu kB\n"
                   "DirtyThresh:      %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n"
                   "b_dirty:          %8lu\n"
                   "b_io:             %8lu\n"
                   "b_more_io:        %8lu\n"
                   "bdi_list:         %8u\n"
                   "state:            %8lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh), K(dirty_thresh),
                   K(background_thresh), nr_dirty, nr_io, nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}
static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
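/*
 * Illustrative sketch (not part of the original file): with debugfs
 * mounted at /sys/kernel/debug, the per-bdi file created above is read
 * from userspace ("8:0" is a hypothetical device name); the output is
 * produced by the format string in bdi_debug_stats_show():
 *
 *      # cat /sys/kernel/debug/bdi/8:0/stats
 */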
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)  \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);           \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
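/*
 * For reference, the BDI_SHOW() invocation above expands to roughly:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                      struct device_attribute *attr, char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 */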
static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};
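/*
 * Illustrative sketch (not part of the original file): once a bdi is
 * registered, these attributes appear under /sys/class/bdi/<name>/ and
 * can be tuned from userspace ("8:0" is a hypothetical device name):
 *
 *      # echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *      # echo 10 > /sys/class/bdi/8:0/max_ratio
 */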
static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        /* setup_timer() fully initializes the timer; no separate init_timer() needed */
        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        bdi_arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(default_bdi_init);
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
}
static int bdi_start_fn(void *ptr)
{
        struct bdi_writeback *wb = ptr;
        struct backing_dev_info *bdi = wb->bdi;
        int ret;

        /*
         * Add us to the active bdi_list
         */
        spin_lock_bh(&bdi_lock);
        list_add_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        current->flags |= PF_FLUSHER | PF_SWAPWRITE;

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        /*
         * Clear pending bit and wakeup anybody waiting to tear us down
         */
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);

        ret = bdi_writeback_task(wb);

        /*
         * Flush any work that raced with us exiting. No new work
         * will be added, since this bdi isn't discoverable anymore.
         */
        if (!list_empty(&bdi->work_list))
                wb_do_writeback(wb, 1);

        wb->task = NULL;
        return ret;
}
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}
static void bdi_flush_io(struct backing_dev_info *bdi)
{
        struct writeback_control wbc = {
                .sync_mode              = WB_SYNC_NONE,
                .older_than_this        = NULL,
                .range_cyclic           = 1,
                .nr_to_write            = 1024,
        };

        writeback_inodes_wb(&bdi->wb, &wbc);
}
/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}
void bdi_arm_supers_timer(void)
{
        unsigned long next;

        if (!dirty_writeback_interval)
                return;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}
static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        bdi_arm_supers_timer();
}
static int bdi_forker_task(void *ptr)
{
        struct bdi_writeback *me = ptr;

        current->flags |= PF_FLUSHER | PF_SWAPWRITE;

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(current, 0);

        for (;;) {
                struct backing_dev_info *bdi, *tmp;
                struct bdi_writeback *wb;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
                        wb_do_writeback(me, 0);

                spin_lock_bh(&bdi_lock);

                /*
                 * Check if any existing bdi's have dirty data without
                 * a thread registered. If so, set that up.
                 */
                list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
                        if (bdi->wb.task)
                                continue;
                        if (list_empty(&bdi->work_list) &&
                            !bdi_has_dirty_io(bdi))
                                continue;

                        bdi_add_default_flusher_task(bdi);
                }

                set_current_state(TASK_INTERRUPTIBLE);

                if (list_empty(&bdi_pending_list)) {
                        unsigned long wait;

                        spin_unlock_bh(&bdi_lock);
                        wait = msecs_to_jiffies(dirty_writeback_interval * 10);
                        if (wait)
                                schedule_timeout(wait);
                        else
                                schedule();
                        try_to_freeze();
                        continue;
                }

                __set_current_state(TASK_RUNNING);

                /*
                 * This is our real job - check for pending entries in
                 * bdi_pending_list, and create the tasks that got added
                 */
                bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
                                 bdi_list);
                list_del_init(&bdi->bdi_list);
                spin_unlock_bh(&bdi_lock);

                wb = &bdi->wb;
                wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
                                       dev_name(bdi->dev));
                /*
                 * If task creation fails, then readd the bdi to
                 * the pending list and force writeout of the bdi
                 * from this forker thread. That will free some memory
                 * and we can try again.
                 */
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;

                        /*
                         * Add this 'bdi' to the back, so we get
                         * a chance to flush other bdi's to free
                         * memory.
                         */
                        spin_lock_bh(&bdi_lock);
                        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
                        spin_unlock_bh(&bdi_lock);

                        bdi_flush_io(bdi);
                }
        }

        return 0;
}
static void bdi_add_to_pending(struct rcu_head *head)
{
        struct backing_dev_info *bdi;

        bdi = container_of(head, struct backing_dev_info, rcu_head);
        INIT_LIST_HEAD(&bdi->bdi_list);

        spin_lock(&bdi_lock);
        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
        spin_unlock(&bdi_lock);

        /*
         * We are now on the pending list, wake up bdi_forker_task()
         * to finish the job and add us back to the active bdi_list
         */
        wake_up_process(default_backing_dev_info.wb.task);
}
/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
                printk(KERN_ERR "bdi %p/%s is not registered!\n",
                                                        bdi, bdi->name);
                return;
        }

        /*
         * Check with the helper whether to proceed adding a task. Will only
         * abort if two or more simultaneous calls to
         * bdi_add_default_flusher_task() occurred; further additions will
         * block waiting for previous additions to finish.
         */
        if (!test_and_set_bit(BDI_pending, &bdi->state)) {
                list_del_rcu(&bdi->bdi_list);

                /*
                 * We must wait for the current RCU period to end before
                 * moving to the pending list. So schedule that operation
                 * from an RCU callback.
                 */
                call_rcu(&bdi->rcu_head, bdi_add_to_pending);
        }
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu();
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        int ret = 0;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                goto exit;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto exit;
        }

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;
                        ret = -ENOMEM;

                        bdi_remove_from_list(bdi);
                        goto exit;
                }
        }

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);
exit:
        return ret;
}
EXPORT_SYMBOL(bdi_register);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                        TASK_UNINTERRUPTIBLE);

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * Finally, kill the kernel thread. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility. Force
         * unfreeze of the thread before calling kthread_stop(), otherwise
         * it would never exit if it is currently stuck in the refrigerator.
         */
        thaw_process(bdi->wb.task);
        kthread_stop(bdi->wb.task);
}
/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
        struct super_block *sb;

        spin_lock(&sb_lock);
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (sb->s_bdi == bdi)
                        sb->s_bdi = NULL;
        }
        spin_unlock(&sb_lock);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                bdi_prune_sb(bdi);

                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_RCU_HEAD(&bdi->rcu_head);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        /*
         * Splice our entries to the default_backing_dev_info, if this
         * bdi disappears
         */
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;

                spin_lock(&inode_lock);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&inode_lock);
        }

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
                           unsigned int cap)
{
        char tmp[32];
        int err;

        bdi->name = name;
        bdi->capabilities = cap;
        err = bdi_init(bdi);
        if (err)
                return err;

        sprintf(tmp, "%.28s%s", name, "-%d");
        err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
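/*
 * Illustrative sketch (not part of the original file): a filesystem
 * would typically call this from its fill_super path and point the
 * superblock at the new bdi. "examplefs", EXAMPLE_SB() and the
 * surrounding function are hypothetical:
 *
 *      static int example_fill_super(struct super_block *sb, void *data,
 *                                    int silent)
 *      {
 *              struct backing_dev_info *bdi = &EXAMPLE_SB(sb)->bdi;
 *              int err;
 *
 *              err = bdi_setup_and_register(bdi, "examplefs",
 *                                           BDI_CAP_MAP_COPY);
 *              if (err)
 *                      return err;
 *              sb->s_bdi = bdi;
 *              return 0;
 *      }
 *
 * The matching teardown calls bdi_destroy() once the superblock is gone.
 */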
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
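/*
 * Illustrative sketch (not part of the original file): callers such as
 * page reclaim typically throttle themselves on a congested device with
 * a short bounded wait:
 *
 *      if (bdi_write_congested(bdi))
 *              congestion_wait(BLK_RW_ASYNC, HZ / 50);
 *
 * BLK_RW_ASYNC selects the async waitqueue above, and HZ/50 caps the
 * sleep at 20ms even if no clear_bdi_congested() wakeup ever arrives.
 */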