#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
        .name           = "default",
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
        .name           = "noop",
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        struct bdi_writeback *wb = &bdi->wb;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;
        unsigned long nr_dirty, nr_io, nr_more_io;
        struct inode *inode;

        nr_dirty = nr_io = nr_more_io = 0;
        spin_lock(&wb->list_lock);
        list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                nr_dirty++;
        list_for_each_entry(inode, &wb->b_io, i_wb_list)
                nr_io++;
        list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                nr_more_io++;
        spin_unlock(&wb->list_lock);

        global_dirty_limits(&background_thresh, &dirty_thresh);
        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:       %10lu kB\n"
                   "BdiReclaimable:     %10lu kB\n"
                   "BdiDirtyThresh:     %10lu kB\n"
                   "DirtyThresh:        %10lu kB\n"
                   "BackgroundThresh:   %10lu kB\n"
                   "BdiDirtied:         %10lu kB\n"
                   "BdiWritten:         %10lu kB\n"
                   "BdiWriteBandwidth:  %10lu kBps\n"
                   "b_dirty:            %10lu\n"
                   "b_io:               %10lu\n"
                   "b_more_io:          %10lu\n"
                   "bdi_list:           %10u\n"
                   "state:              %10lx\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh),
                   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
                   (unsigned long) K(bdi->write_bandwidth),
                   nr_dirty,
                   nr_io,
                   nr_more_io,
                   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned long read_ahead_kb;
        ssize_t ret;

        ret = kstrtoul(buf, 10, &read_ahead_kb);
        if (ret < 0)
                return ret;

        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

        return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))
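/*
 * K() above converts a page count to kilobytes: PAGE_SHIFT - 10 is the
 * log2 ratio between the page size and 1 KiB. For example, with 4 KiB
 * pages (PAGE_SHIFT == 12), K(3) == 3 << 2 == 12 kB.
 */
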
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}                                                                       \
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_min_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        unsigned int ratio;
        ssize_t ret;

        ret = kstrtouint(buf, 10, &ratio);
        if (ret < 0)
                return ret;

        ret = bdi_set_max_ratio(bdi, ratio);
        if (!ret)
                ret = count;

        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
                                          struct device_attribute *attr,
                                          char *page)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);

        return snprintf(page, PAGE_SIZE-1, "%d\n",
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
        &dev_attr_read_ahead_kb.attr,
        &dev_attr_min_ratio.attr,
        &dev_attr_max_ratio.attr,
        &dev_attr_stable_pages_required.attr,
        NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        if (IS_ERR(bdi_class))
                return PTR_ERR(bdi_class);

        bdi_class->dev_groups = bdi_dev_groups;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
                                              WQ_UNBOUND | WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");
        err = bdi_init(&noop_backing_dev_info);

        return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
        return wb_has_dirty_io(&bdi->wb);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
        unsigned long timeout;

        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
        spin_lock_bh(&bdi->wb_lock);
        if (test_bit(BDI_registered, &bdi->state))
                queue_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
        spin_unlock_bh(&bdi->wb_lock);
}

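/*
 * Why queue_delayed_work() and not mod_delayed_work(): if the work is
 * already pending with an earlier expiry, queue_delayed_work() is a no-op
 * and leaves the earlier timer alone, whereas mod_delayed_work() would
 * push the wake-up further out. An illustrative sketch of the difference
 * (wq and dwork are placeholders, not part of this file):
 *
 *      queue_delayed_work(wq, &dwork, HZ);       // pending, fires in 1s
 *      queue_delayed_work(wq, &dwork, 10 * HZ);  // no-op, still fires in 1s
 *      mod_delayed_work(wq, &dwork, 10 * HZ);    // timer moved, fires in 10s
 */
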
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
        spin_lock_bh(&bdi_lock);
        list_del_rcu(&bdi->bdi_list);
        spin_unlock_bh(&bdi_lock);

        synchronize_rcu_expedited();
}

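/*
 * bdi_list is walked on the reader side under RCU, which is why the
 * removal above must wait out a grace period before the caller may tear
 * the bdi down. A minimal sketch of such a reader (do_something() is
 * hypothetical, for illustration only):
 *
 *      struct backing_dev_info *bdi;
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *              do_something(bdi);      // must not sleep
 *      rcu_read_unlock();
 */
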
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                return 0;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        bdi->dev = dev;

        bdi_debug_register(bdi, dev_name(dev));
        set_bit(BDI_registered, &bdi->state);

        spin_lock_bh(&bdi_lock);
        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
        spin_unlock_bh(&bdi_lock);

        trace_writeback_bdi_register(bdi);
        return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
        /* Make sure nobody queues further work */
        spin_lock_bh(&bdi->wb_lock);
        if (!test_and_clear_bit(BDI_registered, &bdi->state)) {
                spin_unlock_bh(&bdi->wb_lock);
                return;
        }
        spin_unlock_bh(&bdi->wb_lock);

        /*
         * Make sure nobody finds us on the bdi_list anymore
         */
        bdi_remove_from_list(bdi);

        /*
         * Drain work list and shutdown the delayed_work. At this point,
         * @bdi->bdi_list is empty telling bdi_writeback_workfn() that @bdi
         * is dying and its work_list needs to be drained no matter what.
         */
        mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
        flush_delayed_work(&bdi->wb.dwork);
}

/*
 * Called when the device behind @bdi has been removed or ejected.
 *
 * We can't really do much here except for reducing the dirty ratio at
 * the moment. In the future we should be able to set a flag so that
 * the filesystem can handle errors at mark_inode_dirty time instead
 * of only at writeback time.
 */
void bdi_unregister(struct backing_dev_info *bdi)
{
        if (WARN_ON_ONCE(!bdi->dev))
                return;

        bdi_set_min_ratio(bdi, 0);
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
        memset(wb, 0, sizeof(*wb));

        wb->bdi = bdi;
        wb->last_old_flush = jiffies;
        INIT_LIST_HEAD(&wb->b_dirty);
        INIT_LIST_HEAD(&wb->b_io);
        INIT_LIST_HEAD(&wb->b_more_io);
        spin_lock_init(&wb->list_lock);
        INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW         (100 << (20 - PAGE_SHIFT))
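/*
 * INIT_BW above is expressed in pages per second: 100 MB/s is
 * 100 << 20 bytes/s, and dividing by the page size (>> PAGE_SHIFT) gives
 * 100 << (20 - PAGE_SHIFT) pages/s. With 4 KiB pages (PAGE_SHIFT == 12)
 * that is 100 << 8 == 25600 pages per second.
 */
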
int bdi_init(struct backing_dev_info *bdi)
{
        int i, err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = FPROP_FRAC_BASE;
        spin_lock_init(&bdi->wb_lock);
        INIT_LIST_HEAD(&bdi->bdi_list);
        INIT_LIST_HEAD(&bdi->work_list);

        bdi_wb_init(&bdi->wb, bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;

        bdi->bw_time_stamp = jiffies;
        bdi->written_stamp = 0;

        bdi->balanced_dirty_ratelimit = INIT_BW;
        bdi->dirty_ratelimit = INIT_BW;
        bdi->write_bandwidth = INIT_BW;
        bdi->avg_write_bandwidth = INIT_BW;

        err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        bdi_wb_shutdown(bdi);

        WARN_ON(!list_empty(&bdi->work_list));
        WARN_ON(delayed_work_pending(&bdi->wb.dwork));

        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);
        fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
        int err;

        bdi->name = name;
        bdi->capabilities = 0;
        err = bdi_init(bdi);
        if (err)
                return err;

        err = bdi_register(bdi, NULL, "%.28s-%ld", name,
                           atomic_long_inc_return(&bdi_seq));
        if (err) {
                bdi_destroy(bdi);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

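/*
 * Typical filesystem usage (a hedged sketch, not code from this file):
 * a filesystem embeds a backing_dev_info in its per-superblock data,
 * sets it up at mount time, and destroys it at unmount. The names
 * foo_fill_super and foo_sb_info are hypothetical.
 *
 *      static int foo_fill_super(struct super_block *sb, void *data, int silent)
 *      {
 *              struct foo_sb_info *sbi = ...;
 *              int err;
 *
 *              err = bdi_setup_and_register(&sbi->bdi, "foo");
 *              if (err)
 *                      return err;
 *              sb->s_bdi = &sbi->bdi;
 *              ...
 *      }
 *
 *      // and on teardown:  bdi_destroy(&sbi->bdi);
 */
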
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (test_and_clear_bit(bit, &bdi->state))
                atomic_dec(&nr_bdi_congested[sync]);
        smp_mb__after_atomic();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        if (!test_and_set_bit(bit, &bdi->state))
                atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
                                        jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(congestion_wait);

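/*
 * Example caller pattern (an illustrative sketch of the common
 * reclaim-side idiom, not code from this file): back off briefly when
 * writeback cannot keep up.
 *
 *      // Throttle: sleep up to 100ms, or until a write completes.
 *      congestion_wait(BLK_RW_ASYNC, HZ/10);
 */
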
/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * In the event of a congested backing_dev (any backing_dev) and the given
 * @zone has experienced recent congestion, this waits for up to @timeout
 * jiffies for either a BDI to exit congestion of the given @sync queue
 * or a write to complete.
 *
 * In the absence of zone congestion, cond_resched() is called to yield
 * the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
        long ret;
        unsigned long start = jiffies;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        /*
         * If there is no congestion, or heavy congestion is not being
         * encountered in the current zone, yield if necessary instead
         * of sleeping on the congestion queue
         */
        if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
            !test_bit(ZONE_CONGESTED, &zone->flags)) {
                cond_resched();

                /* In case we scheduled, work out time remaining */
                ret = timeout - (jiffies - start);
                if (ret < 0)
                        ret = 0;

                goto out;
        }

        /* Sleep until uncongested or a write happens */
        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);

out:
        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
                                           jiffies_to_usecs(jiffies - start));

        return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

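/*
 * Example caller pattern (illustrative, not from this file): direct
 * reclaim can use this instead of congestion_wait() so that it only
 * sleeps when the zone being reclaimed is actually congested.
 *
 *      // May return almost immediately (after cond_resched()) if the
 *      // zone is not marked ZONE_CONGESTED.
 *      wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 */
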
int pdflush_proc_obsolete(struct ctl_table *table, int write,
                        void __user *buffer, size_t *lenp, loff_t *ppos)
{
        char kbuf[] = "0\n";

        if (*ppos || *lenp < sizeof(kbuf)) {
                *lenp = 0;
                return 0;
        }

        if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
                return -EFAULT;
        printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
                        table->procname);

        *lenp = 2;
        *ppos += *lenp;
        return 2;
}