/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	akpm@zip.com.au
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty thresholds each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;	/* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
	unsigned long nr_dirty;
	unsigned long nr_unstable;
	unsigned long nr_mapped;
	unsigned long nr_writeback;
};

static void get_writeback_state(struct writeback_state *wbs)
{
	wbs->nr_dirty = read_page_state(nr_dirty);
	wbs->nr_unstable = read_page_state(nr_unstable);
	wbs->nr_mapped = read_page_state(nr_mapped);
	wbs->nr_writeback = read_page_state(nr_writeback);
}

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
		struct address_space *mapping)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	int unmapped_ratio;
	long background;
	long dirty;
	unsigned long available_memory = total_pages;
	struct task_struct *tsk;

	get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
	/*
	 * If this mapping can only allocate from low memory,
	 * we exclude high memory from our count.
	 */
	if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
		available_memory -= totalhigh_pages;
#endif

	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;

	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}

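/*
 * Worked example (illustrative figures only, not measured on any particular
 * machine): assume total_pages = available_memory = 1,000,000 pages with no
 * highmem exclusion, and 400,000 of them mapped.  Then unmapped_ratio =
 * 100 - 40 = 60, so vm_dirty_ratio (40) is clamped to 60/2 = 30 while
 * dirty_background_ratio (10) already sits below it.  The resulting limits
 * are dirty = 300,000 pages and background = 100,000 pages; a
 * PF_LESS_THROTTLE or realtime caller gets each limit boosted by a further
 * 25%.
 */
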
/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	struct writeback_state wbs;
	long nr_reclaimable;
	long background_thresh;
	long dirty_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi = bdi,
			.sync_mode = WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write = write_chunk,
		};

		get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
			break;

		if (!dirty_exceeded)
			dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (e.g. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (nr_reclaimable) {
			writeback_inodes(&wbc);
			get_dirty_limits(&wbs, &background_thresh,
					&dirty_thresh, mapping);
			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
				break;
			pages_written += write_chunk - wbc.nr_to_write;
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		blk_congestion_wait(WRITE, HZ/10);
	}

	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
		dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	     (!laptop_mode && (nr_reclaimable > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting.  Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages().  Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

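/*
 * Illustrative caller sketch (the single-page wrapper lives in
 * <linux/writeback.h>, not in this file): a write path that has just dirtied
 * one page would do something like
 *
 *	... copy data into the page and mark it dirty ...
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * which expands to balance_dirty_pages_ratelimited_nr(mapping, 1) and so
 * feeds the per-CPU ratelimit counter above one page at a time.
 */
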
void throttle_vm_writeout(void)
{
	struct writeback_state wbs;
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
			break;
		blk_congestion_wait(WRITE, HZ/10);
	}
}


/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi = NULL,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write = 0,
		.nonblocking = 1,
	};

	for ( ; ; ) {
		struct writeback_state wbs;
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
				&& min_pages <= 0)
			break;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			blk_congestion_wait(WRITE, HZ/10);
			if (!wbc.encountered_congestion)
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0) {
		struct writeback_state wbs;

		get_writeback_state(&wbs);
		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
	}
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_state wbs;
	struct writeback_control wbc = {
		.bdi = NULL,
		.sync_mode = WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write = 0,
		.nonblocking = 1,
		.for_kupdate = 1,
	};

	sync_supers();

	get_writeback_state(&wbs);
	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion)
				blk_congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

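/*
 * Timing example with the default tunables (assuming HZ = 1000): wb_kupdate
 * runs every dirty_writeback_interval = 5 * HZ = 5000 jiffies (5 seconds) and
 * writes back data older than dirty_expire_interval = 30 * HZ (30 seconds),
 * so a dirtied page normally reaches disk within roughly 30-35 seconds unless
 * memory pressure flushes it sooner.
 */
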
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
		struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval) {
		mod_timer(&wb_timer,
			jiffies + dirty_writeback_interval);
	} else {
		del_timer(&wb_timer);
	}
	return 0;
}

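/*
 * Example: "echo 1500 > /proc/sys/vm/dirty_writeback_centisecs" asks for a
 * 15-second kupdate period; the userhz-to-jiffies conversion above stores it
 * in dirty_writeback_interval, while writing 0 disables periodic writeback by
 * deleting the timer.
 */
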
static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced.  The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

static void set_ratelimit(void)
{
	ratelimit_pages = total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

static int
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	set_ratelimit();
	return 0;
}

static struct notifier_block ratelimit_nb = {
	.notifier_call = ratelimit_handler,
	.next = NULL,
};

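/*
 * Worked example (illustrative, assuming 4KB pages): with total_pages =
 * 262,144 (1GB of pagecache-usable memory) and 4 online CPUs,
 * ratelimit_pages = 262144 / (4 * 32) = 2048 pages, i.e. 8MB.  That exceeds
 * the 4MB cap, so it is clamped to 1024 pages.  Each CPU then re-checks the
 * dirty limits after dirtying at most 1024 pages, and a throttled caller
 * attempts to write about 1.5x that (1536 pages, roughly 6MB) per episode -
 * see sync_writeback_pages().
 */
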
/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
	long buffer_pages = nr_free_buffer_pages();
	long correction;

	total_pages = nr_free_pagecache_pages();

	correction = (100 * 4 * buffer_pages) / total_pages;

	if (correction < 100) {
		dirty_background_ratio *= correction;
		dirty_background_ratio /= 100;
		vm_dirty_ratio *= correction;
		vm_dirty_ratio /= 100;

		if (dirty_background_ratio <= 0)
			dirty_background_ratio = 1;
		if (vm_dirty_ratio <= 0)
			vm_dirty_ratio = 1;
	}
	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);
}

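/*
 * Worked example (rough, illustrative figures): on a 32-bit box with 4GB of
 * RAM and roughly 896MB of lowmem, buffer_pages is about 220,000 out of
 * about 1,000,000 total_pages, so correction works out near 87 and the
 * default ratios scale from 10%/40% down to roughly 8%/34%.  With a still
 * larger highmem share the ratios shrink further, but never below 1%.
 */
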
int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (mapping) {
			write_lock_irq(&mapping->tree_lock);
			mapping2 = page_mapping(page);
			if (mapping2) { /* Race with truncate? */
				BUG_ON(mapping2 != mapping);
				if (mapping_cap_account_dirty(mapping))
					inc_page_state(nr_dirty);
				radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
			}
			write_unlock_irq(&mapping->tree_lock);
			if (mapping->host) {
				/* !PageAnon && !swapper_space */
				__mark_inode_dirty(mapping->host,
							I_DIRTY_PAGES);
			}
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage(), then unlock the page and return zero.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		if (spd)
			return (*spd)(page);
		return __set_page_dirty_buffers(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	if (mapping) {
		write_lock_irqsave(&mapping->tree_lock, flags);
		if (TestClearPageDirty(page)) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
			write_unlock_irqrestore(&mapping->tree_lock, flags);
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		write_unlock_irqrestore(&mapping->tree_lock, flags);
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping) {
		if (TestClearPageDirty(page)) {
			if (mapping_cap_account_dirty(mapping))
				dec_page_state(nr_dirty);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

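/*
 * Typical writeout sequence (sketch of a generic caller, not a function from
 * this file):
 *
 *	lock_page(page);
 *	if (clear_page_dirty_for_io(page)) {
 *		set_page_writeback(page);
 *		... submit the I/O ...
 *	}
 *	unlock_page(page);
 *
 * Between clear_page_dirty_for_io() and set_page_writeback() the page is
 * clean in its flags but still tagged PAGECACHE_TAG_DIRTY in the radix tree;
 * that is the transient incoherency described above.
 */
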
int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret)
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		unsigned long flags;

		write_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret)
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		write_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are tagged with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&mapping->tree_lock, flags);
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	read_unlock_irqrestore(&mapping->tree_lock, flags);
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);