fs: inode split IO and LRU lists
1 /*
2 * fs/fs-writeback.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 *
6 * Contains all the functions related to writing back and waiting
7 * upon dirty inodes against superblocks, and writing back dirty
8 * pages against inodes. ie: data writeback. Writeout of the
9 * inode itself is not handled here.
10 *
11 * 10Apr2002 Andrew Morton
12 * Split out of fs/inode.c
13 * Additions for address_space-based writeback
14 */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/spinlock.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/fs.h>
22 #include <linux/mm.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/writeback.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/buffer_head.h>
29 #include <linux/tracepoint.h>
30 #include "internal.h"
31
32 /*
33 * Passed into wb_writeback(), essentially a subset of writeback_control
34 */
35 struct wb_writeback_work {
36 long nr_pages;
37 struct super_block *sb;
38 enum writeback_sync_modes sync_mode;
39 unsigned int for_kupdate:1;
40 unsigned int range_cyclic:1;
41 unsigned int for_background:1;
42
43 struct list_head list; /* pending work list */
44 struct completion *done; /* set if the caller waits */
45 };
46
47 /*
48 * Include the creation of the trace points after defining the
49 * wb_writeback_work structure so that the definition remains local to this
50 * file.
51 */
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/writeback.h>
54
55 /*
56 * We don't actually have pdflush, but this one is exported through /proc...
57 */
58 int nr_pdflush_threads;
59
60 /**
61 * writeback_in_progress - determine whether there is writeback in progress
62 * @bdi: the device's backing_dev_info structure.
63 *
64 * Determine whether there is writeback waiting to be handled against a
65 * backing device.
66 */
67 int writeback_in_progress(struct backing_dev_info *bdi)
68 {
69 return test_bit(BDI_writeback_running, &bdi->state);
70 }
71
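/*
 * Pick the backing_dev_info to use for writeback of this inode.  Inodes on
 * the "bdev" pseudo superblock belong to many different block devices, so
 * sb->s_bdi cannot identify the right queue; use the per-device bdi hanging
 * off the inode's mapping instead.  All other inodes use their superblock's
 * bdi.
 */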
72 static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
73 {
74 struct super_block *sb = inode->i_sb;
75
76 if (strcmp(sb->s_type->name, "bdev") == 0)
77 return inode->i_mapping->backing_dev_info;
78
79 return sb->s_bdi;
80 }
81
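/*
 * The writeback lists (b_dirty, b_io, b_more_io) are threaded through
 * inode->i_wb_list; map a list position back to its inode.
 */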
82 static inline struct inode *wb_inode(struct list_head *head)
83 {
84 return list_entry(head, struct inode, i_wb_list);
85 }
86
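/*
 * Queue a work item on the bdi's work list and kick the flusher thread.
 * If the thread has been killed for inactivity, wake the forker thread
 * (default_backing_dev_info's task) so it can recreate the flusher first.
 */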
87 static void bdi_queue_work(struct backing_dev_info *bdi,
88 struct wb_writeback_work *work)
89 {
90 trace_writeback_queue(bdi, work);
91
92 spin_lock_bh(&bdi->wb_lock);
93 list_add_tail(&work->list, &bdi->work_list);
94 if (bdi->wb.task) {
95 wake_up_process(bdi->wb.task);
96 } else {
97 /*
98 * The bdi thread isn't there, wake up the forker thread which
99 * will create and run it.
100 */
101 trace_writeback_nothread(bdi, work);
102 wake_up_process(default_backing_dev_info.wb.task);
103 }
104 spin_unlock_bh(&bdi->wb_lock);
105 }
106
107 static void
108 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
109 bool range_cyclic, bool for_background)
110 {
111 struct wb_writeback_work *work;
112
113 /*
114 * This is WB_SYNC_NONE writeback, so if allocation fails just
115 * wakeup the thread for old dirty data writeback
116 */
117 work = kzalloc(sizeof(*work), GFP_ATOMIC);
118 if (!work) {
119 if (bdi->wb.task) {
120 trace_writeback_nowork(bdi);
121 wake_up_process(bdi->wb.task);
122 }
123 return;
124 }
125
126 work->sync_mode = WB_SYNC_NONE;
127 work->nr_pages = nr_pages;
128 work->range_cyclic = range_cyclic;
129 work->for_background = for_background;
130
131 bdi_queue_work(bdi, work);
132 }
133
134 /**
135 * bdi_start_writeback - start writeback
136 * @bdi: the backing device to write from
137 * @nr_pages: the number of pages to write
138 *
139 * Description:
140 * This does WB_SYNC_NONE opportunistic writeback. The IO is merely
141 * started here; we make no guarantees about completion. The caller
142 * need not hold the sb s_umount semaphore.
143 *
144 */
145 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
146 {
147 __bdi_start_writeback(bdi, nr_pages, true, false);
148 }
149
150 /**
151 * bdi_start_background_writeback - start background writeback
152 * @bdi: the backing device to write from
153 *
154 * Description:
155 * This does WB_SYNC_NONE background writeback. The IO is merely
156 * started here; we make no guarantees about completion. The caller
157 * need not hold the sb s_umount semaphore.
158 */
159 void bdi_start_background_writeback(struct backing_dev_info *bdi)
160 {
161 __bdi_start_writeback(bdi, LONG_MAX, true, true);
162 }
163
164 /*
165 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
166 * furthest end of its superblock's dirty-inode list.
167 *
168 * Before stamping the inode's ->dirtied_when, we check to see whether it is
169 * already the most-recently-dirtied inode on the b_dirty list. If that is
170 * the case then the inode must have been redirtied while it was being written
171 * out and we don't reset its dirtied_when.
172 */
173 static void redirty_tail(struct inode *inode)
174 {
175 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
176
177 if (!list_empty(&wb->b_dirty)) {
178 struct inode *tail;
179
180 tail = wb_inode(wb->b_dirty.next);
181 if (time_before(inode->dirtied_when, tail->dirtied_when))
182 inode->dirtied_when = jiffies;
183 }
184 list_move(&inode->i_wb_list, &wb->b_dirty);
185 }
186
187 /*
188 * requeue inode for re-scanning after bdi->b_io list is exhausted.
189 */
190 static void requeue_io(struct inode *inode)
191 {
192 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
193
194 list_move(&inode->i_wb_list, &wb->b_more_io);
195 }
196
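/*
 * Wake up anyone in inode_wait_for_writeback() waiting for I_SYNC to clear.
 */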
197 static void inode_sync_complete(struct inode *inode)
198 {
199 /*
200 * Prevent speculative execution through spin_unlock(&inode_lock);
201 */
202 smp_mb();
203 wake_up_bit(&inode->i_state, __I_SYNC);
204 }
205
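/*
 * Was the inode dirtied after time stamp @t?  Has to cope with jiffies
 * wraparound on 32-bit systems, see below.
 */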
206 static bool inode_dirtied_after(struct inode *inode, unsigned long t)
207 {
208 bool ret = time_after(inode->dirtied_when, t);
209 #ifndef CONFIG_64BIT
210 /*
211 * For inodes being constantly redirtied, dirtied_when can get stuck.
212 * It _appears_ to be in the future, but it is actually in the distant past.
213 * This test is necessary to prevent such wrapped-around relative times
214 * from permanently stopping the whole bdi writeback.
215 */
216 ret = ret && time_before_eq(inode->dirtied_when, jiffies);
217 #endif
218 return ret;
219 }
220
221 /*
222 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
223 */
224 static void move_expired_inodes(struct list_head *delaying_queue,
225 struct list_head *dispatch_queue,
226 unsigned long *older_than_this)
227 {
228 LIST_HEAD(tmp);
229 struct list_head *pos, *node;
230 struct super_block *sb = NULL;
231 struct inode *inode;
232 int do_sb_sort = 0;
233
234 while (!list_empty(delaying_queue)) {
235 inode = wb_inode(delaying_queue->prev);
236 if (older_than_this &&
237 inode_dirtied_after(inode, *older_than_this))
238 break;
239 if (sb && sb != inode->i_sb)
240 do_sb_sort = 1;
241 sb = inode->i_sb;
242 list_move(&inode->i_wb_list, &tmp);
243 }
244
245 /* just one sb in list, splice to dispatch_queue and we're done */
246 if (!do_sb_sort) {
247 list_splice(&tmp, dispatch_queue);
248 return;
249 }
250
251 /* Move inodes from one superblock together */
252 while (!list_empty(&tmp)) {
253 sb = wb_inode(tmp.prev)->i_sb;
254 list_for_each_prev_safe(pos, node, &tmp) {
255 inode = wb_inode(pos);
256 if (inode->i_sb == sb)
257 list_move(&inode->i_wb_list, dispatch_queue);
258 }
259 }
260 }
261
262 /*
263 * Queue all expired dirty inodes for io, eldest first.
264 * Before
265 *         newly dirtied     b_dirty    b_io    b_more_io
266 *         =============>    gf         edc     BA
267 * After
268 *         newly dirtied     b_dirty    b_io    b_more_io
269 *         =============>    g          fBAedc
270 *                                           |
271 *                                           +--> dequeue for IO
272 */
273 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
274 {
275 list_splice_init(&wb->b_more_io, &wb->b_io);
276 move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
277 }
278
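/*
 * Write the inode itself (not its data pages) via the filesystem's
 * ->write_inode() method, if it has one and the inode isn't bad.
 */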
279 static int write_inode(struct inode *inode, struct writeback_control *wbc)
280 {
281 if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
282 return inode->i_sb->s_op->write_inode(inode, wbc);
283 return 0;
284 }
285
286 /*
287 * Wait for writeback on an inode to complete.
288 */
289 static void inode_wait_for_writeback(struct inode *inode)
290 {
291 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
292 wait_queue_head_t *wqh;
293
294 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
295 while (inode->i_state & I_SYNC) {
296 spin_unlock(&inode_lock);
297 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
298 spin_lock(&inode_lock);
299 }
300 }
301
302 /*
303 * Write out an inode's dirty pages. Called under inode_lock. Either the
304 * caller has ref on the inode (either via __iget or via syscall against an fd)
305 * or the inode has I_WILL_FREE set (via generic_forget_inode)
306 *
307 * If `wait' is set, wait on the writeout.
308 *
309 * The whole writeout design is quite complex and fragile. We want to avoid
310 * starvation of particular inodes when others are being redirtied, prevent
311 * livelocks, etc.
312 *
313 * Called under inode_lock.
314 */
315 static int
316 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
317 {
318 struct address_space *mapping = inode->i_mapping;
319 unsigned dirty;
320 int ret;
321
322 if (!atomic_read(&inode->i_count))
323 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
324 else
325 WARN_ON(inode->i_state & I_WILL_FREE);
326
327 if (inode->i_state & I_SYNC) {
328 /*
329 * If this inode is locked for writeback and we are not doing
330 * writeback-for-data-integrity, move it to b_more_io so that
331 * writeback can proceed with the other inodes on s_io.
332 *
333 * We'll have another go at writing back this inode when we
334 * completed a full scan of b_io.
335 */
336 if (wbc->sync_mode != WB_SYNC_ALL) {
337 requeue_io(inode);
338 return 0;
339 }
340
341 /*
342 * It's a data-integrity sync. We must wait.
343 */
344 inode_wait_for_writeback(inode);
345 }
346
347 BUG_ON(inode->i_state & I_SYNC);
348
349 /* Set I_SYNC, reset I_DIRTY_PAGES */
350 inode->i_state |= I_SYNC;
351 inode->i_state &= ~I_DIRTY_PAGES;
352 spin_unlock(&inode_lock);
353
354 ret = do_writepages(mapping, wbc);
355
356 /*
357 * Make sure to wait on the data before writing out the metadata.
358 * This is important for filesystems that modify metadata on data
359 * I/O completion.
360 */
361 if (wbc->sync_mode == WB_SYNC_ALL) {
362 int err = filemap_fdatawait(mapping);
363 if (ret == 0)
364 ret = err;
365 }
366
367 /*
368 * Some filesystems may redirty the inode during the writeback
369 * due to delalloc, clear dirty metadata flags right before
370 * write_inode()
371 */
372 spin_lock(&inode_lock);
373 dirty = inode->i_state & I_DIRTY;
374 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
375 spin_unlock(&inode_lock);
376 /* Don't write the inode if only I_DIRTY_PAGES was set */
377 if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
378 int err = write_inode(inode, wbc);
379 if (ret == 0)
380 ret = err;
381 }
382
383 spin_lock(&inode_lock);
384 inode->i_state &= ~I_SYNC;
385 if (!(inode->i_state & I_FREEING)) {
386 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
387 /*
388 * We didn't write back all the pages. nfs_writepages()
389 * sometimes bales out without doing anything.
390 */
391 inode->i_state |= I_DIRTY_PAGES;
392 if (wbc->nr_to_write <= 0) {
393 /*
394 * slice used up: queue for next turn
395 */
396 requeue_io(inode);
397 } else {
398 /*
399 * Writeback blocked by something other than
400 * congestion. Delay the inode for some time to
401 * avoid spinning on the CPU (100% iowait)
402 * retrying writeback of the dirty page/inode
403 * that cannot be performed immediately.
404 */
405 redirty_tail(inode);
406 }
407 } else if (inode->i_state & I_DIRTY) {
408 /*
409 * Filesystems can dirty the inode during writeback
410 * operations, such as delayed allocation during
411 * submission or metadata updates after data IO
412 * completion.
413 */
414 redirty_tail(inode);
415 } else {
416 /*
417 * The inode is clean. At this point we either have
418 * a reference to the inode or it's on its way out.
419 * No need to add it back to the LRU.
420 */
421 list_del_init(&inode->i_wb_list);
422 }
423 }
424 inode_sync_complete(inode);
425 return ret;
426 }
427
428 /*
429 * For background writeback the caller does not have the sb pinned
430 * before calling writeback. So make sure that we do pin it, so it doesn't
431 * go away while we are writing inodes from it.
432 */
433 static bool pin_sb_for_writeback(struct super_block *sb)
434 {
435 spin_lock(&sb_lock);
436 if (list_empty(&sb->s_instances)) {
437 spin_unlock(&sb_lock);
438 return false;
439 }
440
441 sb->s_count++;
442 spin_unlock(&sb_lock);
443
444 if (down_read_trylock(&sb->s_umount)) {
445 if (sb->s_root)
446 return true;
447 up_read(&sb->s_umount);
448 }
449
450 put_super(sb);
451 return false;
452 }
453
454 /*
455 * Write a portion of b_io inodes which belong to @sb.
456 *
457 * If @only_this_sb is true, then find and write all such
458 * inodes. Otherwise write only ones which go sequentially
459 * in reverse order.
460 *
461 * Return 1, if the caller writeback routine should be
462 * interrupted. Otherwise return 0.
463 */
464 static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
465 struct writeback_control *wbc, bool only_this_sb)
466 {
467 while (!list_empty(&wb->b_io)) {
468 long pages_skipped;
469 struct inode *inode = wb_inode(wb->b_io.prev);
470
471 if (inode->i_sb != sb) {
472 if (only_this_sb) {
473 /*
474 * We only want to write back data for this
475 * superblock, move all inodes not belonging
476 * to it back onto the dirty list.
477 */
478 redirty_tail(inode);
479 continue;
480 }
481
482 /*
483 * The inode belongs to a different superblock.
484 * Bounce back to the caller to unpin this and
485 * pin the next superblock.
486 */
487 return 0;
488 }
489
490 if (inode->i_state & (I_NEW | I_WILL_FREE)) {
491 requeue_io(inode);
492 continue;
493 }
494 /*
495 * Was this inode dirtied after sync_sb_inodes was called?
496 * This keeps sync from doing extra work and prevents livelock.
497 */
498 if (inode_dirtied_after(inode, wbc->wb_start))
499 return 1;
500
501 BUG_ON(inode->i_state & I_FREEING);
502 __iget(inode);
503 pages_skipped = wbc->pages_skipped;
504 writeback_single_inode(inode, wbc);
505 if (wbc->pages_skipped != pages_skipped) {
506 /*
507 * writeback is not making progress due to locked
508 * buffers. Skip this inode for now.
509 */
510 redirty_tail(inode);
511 }
512 spin_unlock(&inode_lock);
513 iput(inode);
514 cond_resched();
515 spin_lock(&inode_lock);
516 if (wbc->nr_to_write <= 0) {
517 wbc->more_io = 1;
518 return 1;
519 }
520 if (!list_empty(&wb->b_more_io))
521 wbc->more_io = 1;
522 }
523 /* b_io is empty */
524 return 1;
525 }
526
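/*
 * Write back expired dirty inodes queued on wb->b_io, regardless of which
 * superblock they belong to.  Each superblock is pinned while its inodes are
 * written; inodes that cannot be processed are left on b_io or moved back to
 * the dirty lists for a later pass.
 */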
527 void writeback_inodes_wb(struct bdi_writeback *wb,
528 struct writeback_control *wbc)
529 {
530 int ret = 0;
531
532 if (!wbc->wb_start)
533 wbc->wb_start = jiffies; /* livelock avoidance */
534 spin_lock(&inode_lock);
535 if (!wbc->for_kupdate || list_empty(&wb->b_io))
536 queue_io(wb, wbc->older_than_this);
537
538 while (!list_empty(&wb->b_io)) {
539 struct inode *inode = wb_inode(wb->b_io.prev);
540 struct super_block *sb = inode->i_sb;
541
542 if (!pin_sb_for_writeback(sb)) {
543 requeue_io(inode);
544 continue;
545 }
546 ret = writeback_sb_inodes(sb, wb, wbc, false);
547 drop_super(sb);
548
549 if (ret)
550 break;
551 }
552 spin_unlock(&inode_lock);
553 /* Leave any unwritten inodes on b_io */
554 }
555
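/*
 * Like writeback_inodes_wb(), but restricted to one superblock which the
 * caller has already pinned (s_umount held), so no pinning is done here.
 */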
556 static void __writeback_inodes_sb(struct super_block *sb,
557 struct bdi_writeback *wb, struct writeback_control *wbc)
558 {
559 WARN_ON(!rwsem_is_locked(&sb->s_umount));
560
561 spin_lock(&inode_lock);
562 if (!wbc->for_kupdate || list_empty(&wb->b_io))
563 queue_io(wb, wbc->older_than_this);
564 writeback_sb_inodes(sb, wb, wbc, true);
565 spin_unlock(&inode_lock);
566 }
567
568 /*
569 * The maximum number of pages to writeout in a single bdi flush/kupdate
570 * operation. We do this so we don't hold I_SYNC against an inode for
571 * enormous amounts of time, which would block a userspace task which has
572 * been forced to throttle against that inode. Also, the code reevaluates
573 * the dirty thresholds each time it has written this many pages.
574 */
575 #define MAX_WRITEBACK_PAGES 1024
576
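/*
 * Are we still at or above the background dirty threshold?  Used to decide
 * whether background writeback should keep going.
 */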
577 static inline bool over_bground_thresh(void)
578 {
579 unsigned long background_thresh, dirty_thresh;
580
581 global_dirty_limits(&background_thresh, &dirty_thresh);
582
583 return (global_page_state(NR_FILE_DIRTY) +
584 global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
585 }
586
587 /*
588 * Explicit flushing or periodic writeback of "old" data.
589 *
590 * Define "old": the first time one of an inode's pages is dirtied, we mark the
591 * dirtying-time in the inode's address_space. So this periodic writeback code
592 * just walks the superblock inode list, writing back any inodes which are
593 * older than a specific point in time.
594 *
595 * Try to run once per dirty_writeback_interval. But if a writeback event
596 * takes longer than one dirty_writeback_interval, then leave a
597 * one-second gap.
598 *
599 * older_than_this takes precedence over nr_to_write. So we'll only write back
600 * all dirty pages if they are all attached to "old" mappings.
601 */
602 static long wb_writeback(struct bdi_writeback *wb,
603 struct wb_writeback_work *work)
604 {
605 struct writeback_control wbc = {
606 .sync_mode = work->sync_mode,
607 .older_than_this = NULL,
608 .for_kupdate = work->for_kupdate,
609 .for_background = work->for_background,
610 .range_cyclic = work->range_cyclic,
611 };
612 unsigned long oldest_jif;
613 long wrote = 0;
614 struct inode *inode;
615
616 if (wbc.for_kupdate) {
617 wbc.older_than_this = &oldest_jif;
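/* dirty_expire_interval is in centiseconds; multiplying by 10 gives msecs */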
618 oldest_jif = jiffies -
619 msecs_to_jiffies(dirty_expire_interval * 10);
620 }
621 if (!wbc.range_cyclic) {
622 wbc.range_start = 0;
623 wbc.range_end = LLONG_MAX;
624 }
625
626 wbc.wb_start = jiffies; /* livelock avoidance */
627 for (;;) {
628 /*
629 * Stop writeback when nr_pages has been consumed
630 */
631 if (work->nr_pages <= 0)
632 break;
633
634 /*
635 * For background writeout, stop when we are below the
636 * background dirty threshold
637 */
638 if (work->for_background && !over_bground_thresh())
639 break;
640
641 wbc.more_io = 0;
642 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
643 wbc.pages_skipped = 0;
644
645 trace_wbc_writeback_start(&wbc, wb->bdi);
646 if (work->sb)
647 __writeback_inodes_sb(work->sb, wb, &wbc);
648 else
649 writeback_inodes_wb(wb, &wbc);
650 trace_wbc_writeback_written(&wbc, wb->bdi);
651
652 work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
653 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
654
655 /*
656 * If we consumed everything, see if we have more
657 */
658 if (wbc.nr_to_write <= 0)
659 continue;
660 /*
661 * Didn't write everything and we don't have more IO, bail
662 */
663 if (!wbc.more_io)
664 break;
665 /*
666 * Did we write something? Try for more
667 */
668 if (wbc.nr_to_write < MAX_WRITEBACK_PAGES)
669 continue;
670 /*
671 * Nothing written. Wait for some inode to
672 * become available for writeback. Otherwise
673 * we'll just busyloop.
674 */
675 spin_lock(&inode_lock);
676 if (!list_empty(&wb->b_more_io)) {
677 inode = wb_inode(wb->b_more_io.prev);
678 trace_wbc_writeback_wait(&wbc, wb->bdi);
679 inode_wait_for_writeback(inode);
680 }
681 spin_unlock(&inode_lock);
682 }
683
684 return wrote;
685 }
686
687 /*
688 * Return the next wb_writeback_work struct that hasn't been processed yet.
689 */
690 static struct wb_writeback_work *
691 get_next_work_item(struct backing_dev_info *bdi)
692 {
693 struct wb_writeback_work *work = NULL;
694
695 spin_lock_bh(&bdi->wb_lock);
696 if (!list_empty(&bdi->work_list)) {
697 work = list_entry(bdi->work_list.next,
698 struct wb_writeback_work, list);
699 list_del_init(&work->list);
700 }
701 spin_unlock_bh(&bdi->wb_lock);
702 return work;
703 }
704
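/*
 * kupdate-style periodic writeback: at most once per dirty_writeback_interval,
 * start a WB_SYNC_NONE pass over data that has been dirty for longer than
 * dirty_expire_interval (handled via for_kupdate in wb_writeback()).
 */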
705 static long wb_check_old_data_flush(struct bdi_writeback *wb)
706 {
707 unsigned long expired;
708 long nr_pages;
709
710 /*
711 * When set to zero, disable periodic writeback
712 */
713 if (!dirty_writeback_interval)
714 return 0;
715
716 expired = wb->last_old_flush +
717 msecs_to_jiffies(dirty_writeback_interval * 10);
718 if (time_before(jiffies, expired))
719 return 0;
720
721 wb->last_old_flush = jiffies;
722 nr_pages = global_page_state(NR_FILE_DIRTY) +
723 global_page_state(NR_UNSTABLE_NFS) +
724 get_nr_dirty_inodes();
725
726 if (nr_pages) {
727 struct wb_writeback_work work = {
728 .nr_pages = nr_pages,
729 .sync_mode = WB_SYNC_NONE,
730 .for_kupdate = 1,
731 .range_cyclic = 1,
732 };
733
734 return wb_writeback(wb, &work);
735 }
736
737 return 0;
738 }
739
740 /*
741 * Retrieve work items and do the writeback they describe
742 */
743 long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
744 {
745 struct backing_dev_info *bdi = wb->bdi;
746 struct wb_writeback_work *work;
747 long wrote = 0;
748
749 set_bit(BDI_writeback_running, &wb->bdi->state);
750 while ((work = get_next_work_item(bdi)) != NULL) {
751 /*
752 * Override sync mode, in case we must wait for completion
753 * because this thread is exiting now.
754 */
755 if (force_wait)
756 work->sync_mode = WB_SYNC_ALL;
757
758 trace_writeback_exec(bdi, work);
759
760 wrote += wb_writeback(wb, work);
761
762 /*
763 * Notify the caller of completion if this is a synchronous
764 * work item, otherwise just free it.
765 */
766 if (work->done)
767 complete(work->done);
768 else
769 kfree(work);
770 }
771
772 /*
773 * Check for periodic writeback, kupdated() style
774 */
775 wrote += wb_check_old_data_flush(wb);
776 clear_bit(BDI_writeback_running, &wb->bdi->state);
777
778 return wrote;
779 }
780
781 /*
782 * Handle writeback of dirty data for the device backed by this bdi. Also
783 * wakes up periodically and does kupdated style flushing.
784 */
785 int bdi_writeback_thread(void *data)
786 {
787 struct bdi_writeback *wb = data;
788 struct backing_dev_info *bdi = wb->bdi;
789 long pages_written;
790
791 current->flags |= PF_FLUSHER | PF_SWAPWRITE;
792 set_freezable();
793 wb->last_active = jiffies;
794
795 /*
796 * Our parent may run at a different priority, just set us to normal
797 */
798 set_user_nice(current, 0);
799
800 trace_writeback_thread_start(bdi);
801
802 while (!kthread_should_stop()) {
803 /*
804 * Remove own delayed wake-up timer, since we are already awake
805 * and we'll take care of the periodic write-back.
806 */
807 del_timer(&wb->wakeup_timer);
808
809 pages_written = wb_do_writeback(wb, 0);
810
811 trace_writeback_pages_written(pages_written);
812
813 if (pages_written)
814 wb->last_active = jiffies;
815
816 set_current_state(TASK_INTERRUPTIBLE);
817 if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
818 __set_current_state(TASK_RUNNING);
819 continue;
820 }
821
822 if (wb_has_dirty_io(wb) && dirty_writeback_interval)
823 schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
824 else {
825 /*
826 * We have nothing to do, so can go sleep without any
827 * timeout and save power. When work is queued or
828 * something is made dirty - we will be woken up.
829 */
830 schedule();
831 }
832
833 try_to_freeze();
834 }
835
836 /* Flush any work that raced with us exiting */
837 if (!list_empty(&bdi->work_list))
838 wb_do_writeback(wb, 1);
839
840 trace_writeback_thread_stop(bdi);
841 return 0;
842 }
843
844
845 /*
846 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
847 * the whole world.
848 */
849 void wakeup_flusher_threads(long nr_pages)
850 {
851 struct backing_dev_info *bdi;
852
853 if (!nr_pages) {
854 nr_pages = global_page_state(NR_FILE_DIRTY) +
855 global_page_state(NR_UNSTABLE_NFS);
856 }
857
858 rcu_read_lock();
859 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
860 if (!bdi_has_dirty_io(bdi))
861 continue;
862 __bdi_start_writeback(bdi, nr_pages, false, false);
863 }
864 rcu_read_unlock();
865 }
866
867 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
868 {
869 if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
870 struct dentry *dentry;
871 const char *name = "?";
872
873 dentry = d_find_alias(inode);
874 if (dentry) {
875 spin_lock(&dentry->d_lock);
876 name = (const char *) dentry->d_name.name;
877 }
878 printk(KERN_DEBUG
879 "%s(%d): dirtied inode %lu (%s) on %s\n",
880 current->comm, task_pid_nr(current), inode->i_ino,
881 name, inode->i_sb->s_id);
882 if (dentry) {
883 spin_unlock(&dentry->d_lock);
884 dput(dentry);
885 }
886 }
887 }
888
889 /**
890 * __mark_inode_dirty - internal function
891 * @inode: inode to mark
892 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
893 * Mark an inode as dirty. Callers should use mark_inode_dirty or
894 * mark_inode_dirty_sync.
895 *
896 * Put the inode on the super block's dirty list.
897 *
898 * CAREFUL! We mark it dirty unconditionally, but move it onto the
899 * dirty list only if it is hashed or if it refers to a blockdev.
900 * If it was not hashed, it will never be added to the dirty list
901 * even if it is later hashed, as it will have been marked dirty already.
902 *
903 * In short, make sure you hash any inodes _before_ you start marking
904 * them dirty.
905 *
906 * This function *must* be atomic for the I_DIRTY_PAGES case -
907 * set_page_dirty() is called under spinlock in several places.
908 *
909 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
910 * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
911 * the kernel-internal blockdev inode represents the dirtying time of the
912 * blockdev's pages. This is why for I_DIRTY_PAGES we always use
913 * page->mapping->host, so the page-dirtying time is recorded in the internal
914 * blockdev inode.
915 */
916 void __mark_inode_dirty(struct inode *inode, int flags)
917 {
918 struct super_block *sb = inode->i_sb;
919 struct backing_dev_info *bdi = NULL;
920 bool wakeup_bdi = false;
921
922 /*
923 * Don't do this for I_DIRTY_PAGES - that doesn't actually
924 * dirty the inode itself
925 */
926 if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
927 if (sb->s_op->dirty_inode)
928 sb->s_op->dirty_inode(inode);
929 }
930
931 /*
932 * make sure that changes are seen by all cpus before we test i_state
933 * -- mikulas
934 */
935 smp_mb();
936
937 /* avoid the locking if we can */
938 if ((inode->i_state & flags) == flags)
939 return;
940
941 if (unlikely(block_dump))
942 block_dump___mark_inode_dirty(inode);
943
944 spin_lock(&inode_lock);
945 if ((inode->i_state & flags) != flags) {
946 const int was_dirty = inode->i_state & I_DIRTY;
947
948 inode->i_state |= flags;
949
950 /*
951 * If the inode is being synced, just update its dirty state.
952 * The unlocker will place the inode on the appropriate
953 * superblock list, based upon its state.
954 */
955 if (inode->i_state & I_SYNC)
956 goto out;
957
958 /*
959 * Only add valid (hashed) inodes to the superblock's
960 * dirty list. Add blockdev inodes as well.
961 */
962 if (!S_ISBLK(inode->i_mode)) {
963 if (inode_unhashed(inode))
964 goto out;
965 }
966 if (inode->i_state & I_FREEING)
967 goto out;
968
969 /*
970 * If the inode was already on b_dirty/b_io/b_more_io, don't
971 * reposition it (that would break b_dirty time-ordering).
972 */
973 if (!was_dirty) {
974 bdi = inode_to_bdi(inode);
975
976 if (bdi_cap_writeback_dirty(bdi)) {
977 WARN(!test_bit(BDI_registered, &bdi->state),
978 "bdi-%s not registered\n", bdi->name);
979
980 /*
981 * If this is the first dirty inode for this
982 * bdi, we have to wake-up the corresponding
983 * bdi thread to make sure background
984 * write-back happens later.
985 */
986 if (!wb_has_dirty_io(&bdi->wb))
987 wakeup_bdi = true;
988 }
989
990 inode->dirtied_when = jiffies;
991 list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
992 }
993 }
994 out:
995 spin_unlock(&inode_lock);
996
997 if (wakeup_bdi)
998 bdi_wakeup_thread_delayed(bdi);
999 }
1000 EXPORT_SYMBOL(__mark_inode_dirty);
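/*
 * For reference, the usual entry points are thin wrappers around
 * __mark_inode_dirty().  Roughly (see include/linux/fs.h for the
 * authoritative definitions):
 *
 *	mark_inode_dirty(inode)      => __mark_inode_dirty(inode, I_DIRTY);
 *	mark_inode_dirty_sync(inode) => __mark_inode_dirty(inode, I_DIRTY_SYNC);
 */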
1001
1002 /*
1003 * Write out a superblock's list of dirty inodes. A wait will be performed
1004 * upon no inodes, all inodes or the final one, depending upon sync_mode.
1005 *
1006 * If older_than_this is non-NULL, then only write out inodes which
1007 * had their first dirtying at a time earlier than *older_than_this.
1008 *
1009 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
1010 * This function assumes that the blockdev superblock's inodes are backed by
1011 * a variety of queues, so all inodes are searched. For other superblocks,
1012 * assume that all inodes are backed by the same queue.
1013 *
1014 * The inodes to be written are parked on bdi->b_io. They are moved back onto
1015 * bdi->b_dirty as they are selected for writing. This way, none can be missed
1016 * on the writer throttling path, and we get decent balancing between many
1017 * throttled threads: we don't want them all piling up on inode_sync_wait.
1018 */
1019 static void wait_sb_inodes(struct super_block *sb)
1020 {
1021 struct inode *inode, *old_inode = NULL;
1022
1023 /*
1024 * We need to be protected against the filesystem going from
1025 * r/o to r/w or vice versa.
1026 */
1027 WARN_ON(!rwsem_is_locked(&sb->s_umount));
1028
1029 spin_lock(&inode_lock);
1030
1031 /*
1032 * Data integrity sync. Must wait for all pages under writeback,
1033 * because there may have been pages dirtied before our sync
1034 * call, but which had writeout started before we write it out.
1035 * In which case, the inode may not be on the dirty list, but
1036 * we still have to wait for that writeout.
1037 */
1038 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1039 struct address_space *mapping;
1040
1041 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
1042 continue;
1043 mapping = inode->i_mapping;
1044 if (mapping->nrpages == 0)
1045 continue;
1046 __iget(inode);
1047 spin_unlock(&inode_lock);
1048 /*
1049 * We hold a reference to 'inode' so it couldn't have
1050 * been removed from s_inodes list while we dropped the
1051 * inode_lock. We cannot iput the inode now as we can
1052 * be holding the last reference and we cannot iput it
1053 * under inode_lock. So we keep the reference and iput
1054 * it later.
1055 */
1056 iput(old_inode);
1057 old_inode = inode;
1058
1059 filemap_fdatawait(mapping);
1060
1061 cond_resched();
1062
1063 spin_lock(&inode_lock);
1064 }
1065 spin_unlock(&inode_lock);
1066 iput(old_inode);
1067 }
1068
1069 /**
1070 * writeback_inodes_sb - writeback dirty inodes from given super_block
1071 * @sb: the superblock
1072 *
1073 * Start writeback on some inodes on this super_block. No guarantees are made
1074 * on how many (if any) will be written, and this function does not wait
1075 * for IO completion of submitted IO.
1077 */
1078 void writeback_inodes_sb(struct super_block *sb)
1079 {
1080 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1081 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1082 DECLARE_COMPLETION_ONSTACK(done);
1083 struct wb_writeback_work work = {
1084 .sb = sb,
1085 .sync_mode = WB_SYNC_NONE,
1086 .done = &done,
1087 };
1088
1089 WARN_ON(!rwsem_is_locked(&sb->s_umount));
1090
1091 work.nr_pages = nr_dirty + nr_unstable + get_nr_dirty_inodes();
1092
1093 bdi_queue_work(sb->s_bdi, &work);
1094 wait_for_completion(&done);
1095 }
1096 EXPORT_SYMBOL(writeback_inodes_sb);
1097
1098 /**
1099 * writeback_inodes_sb_if_idle - start writeback if none underway
1100 * @sb: the superblock
1101 *
1102 * Invoke writeback_inodes_sb if no writeback is currently underway.
1103 * Returns 1 if writeback was started, 0 if not.
1104 */
1105 int writeback_inodes_sb_if_idle(struct super_block *sb)
1106 {
1107 if (!writeback_in_progress(sb->s_bdi)) {
1108 down_read(&sb->s_umount);
1109 writeback_inodes_sb(sb);
1110 up_read(&sb->s_umount);
1111 return 1;
1112 } else
1113 return 0;
1114 }
1115 EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
1116
1117 /**
1118 * sync_inodes_sb - sync sb inode pages
1119 * @sb: the superblock
1120 *
1121 * This function writes and waits on any dirty inode belonging to this
1122 * super_block.
1123 */
1124 void sync_inodes_sb(struct super_block *sb)
1125 {
1126 DECLARE_COMPLETION_ONSTACK(done);
1127 struct wb_writeback_work work = {
1128 .sb = sb,
1129 .sync_mode = WB_SYNC_ALL,
1130 .nr_pages = LONG_MAX,
1131 .range_cyclic = 0,
1132 .done = &done,
1133 };
1134
1135 WARN_ON(!rwsem_is_locked(&sb->s_umount));
1136
1137 bdi_queue_work(sb->s_bdi, &work);
1138 wait_for_completion(&done);
1139
1140 wait_sb_inodes(sb);
1141 }
1142 EXPORT_SYMBOL(sync_inodes_sb);
1143
1144 /**
1145 * write_inode_now - write an inode to disk
1146 * @inode: inode to write to disk
1147 * @sync: whether the write should be synchronous or not
1148 *
1149 * This function commits an inode to disk immediately if it is dirty. This is
1150 * primarily needed by knfsd.
1151 *
1152 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
1153 */
1154 int write_inode_now(struct inode *inode, int sync)
1155 {
1156 int ret;
1157 struct writeback_control wbc = {
1158 .nr_to_write = LONG_MAX,
1159 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
1160 .range_start = 0,
1161 .range_end = LLONG_MAX,
1162 };
1163
1164 if (!mapping_cap_writeback_dirty(inode->i_mapping))
1165 wbc.nr_to_write = 0;
1166
1167 might_sleep();
1168 spin_lock(&inode_lock);
1169 ret = writeback_single_inode(inode, &wbc);
1170 spin_unlock(&inode_lock);
1171 if (sync)
1172 inode_sync_wait(inode);
1173 return ret;
1174 }
1175 EXPORT_SYMBOL(write_inode_now);
1176
1177 /**
1178 * sync_inode - write an inode and its pages to disk.
1179 * @inode: the inode to sync
1180 * @wbc: controls the writeback mode
1181 *
1182 * sync_inode() will write an inode and its pages to disk. It will also
1183 * correctly update the inode on its superblock's dirty inode lists and will
1184 * update inode->i_state.
1185 *
1186 * The caller must have a ref on the inode.
1187 */
1188 int sync_inode(struct inode *inode, struct writeback_control *wbc)
1189 {
1190 int ret;
1191
1192 spin_lock(&inode_lock);
1193 ret = writeback_single_inode(inode, wbc);
1194 spin_unlock(&inode_lock);
1195 return ret;
1196 }
1197 EXPORT_SYMBOL(sync_inode);
1198
1199 /**
1200 * sync_inode_metadata - write an inode to disk
1201 * @inode: the inode to sync
1202 * @wait: wait for I/O to complete.
1203 *
1204 * Write an inode to disk and adjust its dirty state after completion.
1205 *
1206 * Note: only writes the actual inode, no associated data or other metadata.
1207 */
1208 int sync_inode_metadata(struct inode *inode, int wait)
1209 {
1210 struct writeback_control wbc = {
1211 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1212 .nr_to_write = 0, /* metadata-only */
1213 };
1214
1215 return sync_inode(inode, &wbc);
1216 }
1217 EXPORT_SYMBOL(sync_inode_metadata);