#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;
static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;
static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
        struct inode *inode;

        /*
         * inode lock is enough here, the bdi->wb_list is protected by
         * RCU on the reader side
         */
        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&inode_lock);
        list_for_each_entry(wb, &bdi->wb_list, list) {
                nr_wb++;
                list_for_each_entry(inode, &wb->b_dirty, i_list)
                        nr_dirty++;
                list_for_each_entry(inode, &wb->b_io, i_list)
                        nr_io++;
                list_for_each_entry(inode, &wb->b_more_io, i_list)
                        nr_more_io++;
        }
        spin_unlock(&inode_lock);

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:     %8lu kB\n"
                   "BdiReclaimable:   %8lu kB\n"
                   "BdiDirtyThresh:   %8lu kB\n"
                   "DirtyThresh:      %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n"
                   "WriteBack threads:%8lu\n"
                   "b_dirty:          %8lu\n"
                   "b_io:             %8lu\n"
                   "b_more_io:        %8lu\n"
                   "bdi_list:         %8u\n"
                   "state:            %8lx\n"
                   "wb_mask:          %8lx\n"
                   "wb_list:          %8u\n"
                   "wb_cnt:           %8u\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh), K(dirty_thresh),
                   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
                   !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K

        return 0;
}
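/*
 * Unit conversion used in bdi_debug_stats_show() above: with the usual
 * 4 KiB pages (PAGE_SHIFT == 12), K(x) == (x << 2) == x * 4, so for
 * example a count of 100 reclaimable pages is reported as 400 kB.
 */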
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}
static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}
static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}
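/*
 * Worked example for the store above: writing "512" to the sysfs file
 * parses read_ahead_kb == 512; with 4 KiB pages (PAGE_SHIFT == 12) that
 * becomes bdi->ra_pages = 512 >> 2 = 128 pages. The end[0]/end[1] checks
 * accept only a bare number with an optional trailing newline, so input
 * like "512k" is rejected with -EINVAL.
 */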
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)  \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
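/*
 * For reference, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) above expands
 * (modulo whitespace) to:
 *
 *      static ssize_t read_ahead_kb_show(struct device *dev,
 *                          struct device_attribute *attr, char *page)
 *      {
 *              struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *              return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *                              (long long)K(bdi->ra_pages));
 *      }
 *
 * which pairs with read_ahead_kb_store() to form a sysfs attribute.
 */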
static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};
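/*
 * Each __ATTR_RW(read_ahead_kb) entry above expands to
 * __ATTR(read_ahead_kb, 0644, read_ahead_kb_show, read_ahead_kb_store),
 * wiring a mode-0644 sysfs file to the matching _show/_store pair. Once
 * bdi_class_init() below installs this array as bdi_class->dev_attrs,
 * every registered bdi exposes
 * /sys/class/bdi/<name>/{read_ahead_kb,min_ratio,max_ratio}.
 */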
static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
        int err;

        sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
        BUG_ON(IS_ERR(sync_supers_tsk));

        init_timer(&sync_supers_timer);
        setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
        arm_supers_timer();

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(default_bdi_init);
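/*
 * Ordering note: postcore_initcall() runs before subsys_initcall(), so
 * bdi_class is guaranteed to exist by the time default_bdi_init() calls
 * bdi_register() for the "default" bdi.
 */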
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
}
static void bdi_task_init(struct backing_dev_info *bdi,
                          struct bdi_writeback *wb)
{
        struct task_struct *tsk = current;

        spin_lock(&bdi->wb_lock);
        list_add_tail_rcu(&wb->list, &bdi->wb_list);
        spin_unlock(&bdi->wb_lock);

        tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
        set_freezable();

        /*
         * Our parent may run at a different priority, just set us to normal
         */
        set_user_nice(tsk, 0);
}
static int bdi_start_fn(void *ptr)
{
        struct bdi_writeback *wb = ptr;
        struct backing_dev_info *bdi = wb->bdi;
        int ret;

        /*
         * Add us to the active bdi_list
         */
        spin_lock(&bdi_lock);
        list_add(&bdi->bdi_list, &bdi_list);
        spin_unlock(&bdi_lock);

        bdi_task_init(bdi, wb);

        /*
         * Clear pending bit and wake up anybody waiting to tear us down
         */
        clear_bit(BDI_pending, &bdi->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&bdi->state, BDI_pending);

        ret = bdi_writeback_task(wb);

        /*
         * Remove us from the list
         */
        spin_lock(&bdi->wb_lock);
        list_del_rcu(&wb->list);
        spin_unlock(&bdi->wb_lock);

        /*
         * Flush any work that raced with us exiting. No new work
         * will be added, since this bdi isn't discoverable anymore.
         */
        if (!list_empty(&bdi->work_list))
                wb_do_writeback(wb, 1);

        wb->task = NULL;
        return ret;
}
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}
static void bdi_flush_io(struct backing_dev_info *bdi)
{
        struct writeback_control wbc = {
                .bdi                    = bdi,
                .sync_mode              = WB_SYNC_NONE,
                .older_than_this        = NULL,
                .range_cyclic           = 1,
                .nr_to_write            = 1024,
        };

        writeback_inodes_wbc(&wbc);
}
/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
        set_user_nice(current, 0);

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();

                /*
                 * Do this periodically, like kupdated() did before.
                 */
                sync_supers();
        }

        return 0;
}
static void arm_supers_timer(void)
{
        unsigned long next;

        next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
        mod_timer(&sync_supers_timer, round_jiffies_up(next));
}
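/*
 * dirty_writeback_interval is kept in centiseconds (default 500, i.e.
 * five seconds), hence the "* 10" above to convert to milliseconds
 * before msecs_to_jiffies(). bdi_forker_task() below sleeps on the same
 * expression.
 */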
static void sync_supers_timer_fn(unsigned long unused)
{
        wake_up_process(sync_supers_tsk);
        arm_supers_timer();
}
static int bdi_forker_task(void *ptr)
{
        struct bdi_writeback *me = ptr;

        bdi_task_init(me->bdi, me);

        for (;;) {
                struct backing_dev_info *bdi, *tmp;
                struct bdi_writeback *wb;

                /*
                 * Temporary measure, we want to make sure we don't see
                 * dirty data on the default backing_dev_info
                 */
                if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
                        wb_do_writeback(me, 0);

                spin_lock(&bdi_lock);

                /*
                 * Check if any existing bdi's have dirty data without
                 * a thread registered. If so, set that up.
                 */
                list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
                        if (bdi->wb.task)
                                continue;
                        if (list_empty(&bdi->work_list) &&
                            !bdi_has_dirty_io(bdi))
                                continue;

                        bdi_add_default_flusher_task(bdi);
                }

                set_current_state(TASK_INTERRUPTIBLE);

                if (list_empty(&bdi_pending_list)) {
                        unsigned long wait;

                        spin_unlock(&bdi_lock);
                        wait = msecs_to_jiffies(dirty_writeback_interval * 10);
                        schedule_timeout(wait);
                        try_to_freeze();
                        continue;
                }

                __set_current_state(TASK_RUNNING);

                /*
                 * This is our real job - check for pending entries in
                 * bdi_pending_list, and create the tasks that got added
                 */
                bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
                                 bdi_list);
                list_del_init(&bdi->bdi_list);
                spin_unlock(&bdi_lock);

                wb = &bdi->wb;
                wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
                                       dev_name(bdi->dev));
                /*
                 * If task creation fails, then re-add the bdi to
                 * the pending list and force writeout of the bdi
                 * from this forker thread. That will free some memory
                 * and we can try again.
                 */
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;

                        /*
                         * Add this 'bdi' to the back, so we get
                         * a chance to flush other bdi's to free
                         * memory.
                         */
                        spin_lock(&bdi_lock);
                        list_add_tail(&bdi->bdi_list, &bdi_pending_list);
                        spin_unlock(&bdi_lock);

                        bdi_flush_io(bdi);
                }
        }

        return 0;
}
/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * Check with the helper whether to proceed adding a task. Will only
         * abort if two or more simultaneous calls to
         * bdi_add_default_flusher_task() occurred; further additions will
         * block waiting for previous additions to finish.
         */
        if (!test_and_set_bit(BDI_pending, &bdi->state)) {
                list_move_tail(&bdi->bdi_list, &bdi_pending_list);

                /*
                 * We are now on the pending list, wake up bdi_forker_task()
                 * to finish the job and add us back to the active bdi_list
                 */
                wake_up_process(default_backing_dev_info.wb.task);
        }
}
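/*
 * The BDI_pending bit set above is the handshake around flusher task
 * creation: bdi_start_fn() clears it once the new thread is up (clear_bit
 * followed by smp_mb__after_clear_bit() and wake_up_bit()), and
 * bdi_wb_shutdown() sleeps on the same bit via wait_on_bit(), so teardown
 * cannot race with a thread that is still being forked.
 */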
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                 const char *fmt, ...)
{
        va_list args;
        int ret = 0;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                goto exit;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto exit;
        }

        spin_lock(&bdi_lock);
        list_add_tail(&bdi->bdi_list, &bdi_list);
        spin_unlock(&bdi_lock);

        bdi->dev = dev;

        /*
         * Just start the forker thread for our default backing_dev_info,
         * and add other bdi's to the list. They will get a thread created
         * on-demand when they need it.
         */
        if (bdi_cap_flush_forker(bdi)) {
                struct bdi_writeback *wb = &bdi->wb;

                wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
                                       dev_name(dev));
                if (IS_ERR(wb->task)) {
                        wb->task = NULL;
                        ret = -ENOMEM;

                        spin_lock(&bdi_lock);
                        list_del(&bdi->bdi_list);
                        spin_unlock(&bdi_lock);
                        goto exit;
                }
        }

        bdi_debug_register(bdi, dev_name(dev));
exit:
        return ret;
}
EXPORT_SYMBOL(bdi_register);
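/*
 * Typical usage of bdi_register() (sketch only; struct foo_device and
 * its fields are hypothetical, not part of this file): a driver embeds a
 * backing_dev_info in its per-device structure, initializes it with
 * bdi_init(), then names and registers it:
 *
 *      static int foo_setup_bdi(struct foo_device *foo,
 *                               struct device *parent)
 *      {
 *              int err;
 *
 *              err = bdi_init(&foo->bdi);
 *              if (err)
 *                      return err;
 *
 *              err = bdi_register(&foo->bdi, parent, "foo-%d", foo->id);
 *              if (err)
 *                      bdi_destroy(&foo->bdi);
 *              return err;
 *      }
 *
 * Teardown is the mirror image; note that bdi_destroy() below already
 * calls bdi_unregister() itself.
 */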
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
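/*
 * bdi_register_dev() names the bdi after its device number, so a bdi
 * registered for dev_t MKDEV(8, 0) appears as "8:0" and its on-demand
 * flusher thread shows up as "flush-8:0" (see the "flush-%s"
 * kthread_run() in bdi_forker_task()).
 */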
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        struct bdi_writeback *wb;

        if (!bdi_cap_writeback_dirty(bdi))
                return;

        /*
         * If setup is pending, wait for that to complete first
         */
        wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        spin_lock(&bdi_lock);
        list_del(&bdi->bdi_list);
        spin_unlock(&bdi_lock);

        /*
         * Finally, kill the kernel threads. We don't need to be RCU
         * safe anymore, since the bdi is gone from visibility.
         */
        list_for_each_entry(wb, &bdi->wb_list, list)
                kthread_stop(wb->task);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                if (!bdi_cap_flush_forker(bdi))
                        bdi_wb_shutdown(bdi);
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->wb_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        /*
         * Just one thread support for now, hard code mask and count
         */
        bdi->wb_mask = 1;
        bdi->wb_cnt = 1;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        WARN_ON(bdi_has_dirty_io(bdi));

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
static wait_queue_head_t congestion_wqh[2] = {
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);
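/*
 * Usage sketch (illustrative; foo_queue_full()/foo_queue_drained() are
 * hypothetical helpers, not part of this file): a driver that manages
 * its own bdi can flag congestion when it stops accepting writes and
 * clear it once it drains, waking sleepers in congestion_wait() below:
 *
 *      if (foo_queue_full(foo))
 *              set_bdi_congested(&foo->bdi, BLK_RW_ASYNC);
 *      ...
 *      if (foo_queue_drained(foo))
 *              clear_bdi_congested(&foo->bdi, BLK_RW_ASYNC);
 */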
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
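/*
 * Example caller (sketch): memory reclaim backs off like this when it
 * keeps hitting congested devices, sleeping for up to 20ms or until some
 * congested bdi signals completion:
 *
 *      congestion_wait(BLK_RW_ASYNC, HZ/50);
 */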