Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
f30c2269 | 2 | * mm/page-writeback.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 2002, Linus Torvalds. | |
04fbfdc1 | 5 | * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
1da177e4 LT |
6 | * |
7 | * Contains functions related to writing back dirty pages at the | |
8 | * address_space level. | |
9 | * | |
e1f8e874 | 10 | * 10Apr2002 Andrew Morton |
1da177e4 LT |
11 | * Initial version |
12 | */ | |
13 | ||
14 | #include <linux/kernel.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/spinlock.h> | |
17 | #include <linux/fs.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/swap.h> | |
20 | #include <linux/slab.h> | |
21 | #include <linux/pagemap.h> | |
22 | #include <linux/writeback.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/backing-dev.h> | |
55e829af | 25 | #include <linux/task_io_accounting_ops.h> |
1da177e4 LT |
26 | #include <linux/blkdev.h> |
27 | #include <linux/mpage.h> | |
d08b3851 | 28 | #include <linux/rmap.h> |
1da177e4 LT |
29 | #include <linux/percpu.h> |
30 | #include <linux/notifier.h> | |
31 | #include <linux/smp.h> | |
32 | #include <linux/sysctl.h> | |
33 | #include <linux/cpu.h> | |
34 | #include <linux/syscalls.h> | |
cf9a2ae8 | 35 | #include <linux/buffer_head.h> |
811d736f | 36 | #include <linux/pagevec.h> |
028c2dd1 | 37 | #include <trace/events/writeback.h> |
1da177e4 | 38 | |
ffd1f609 WF |
39 | /* |
40 | * Sleep at most 200ms at a time in balance_dirty_pages(). | |
41 | */ | |
42 | #define MAX_PAUSE max(HZ/5, 1) | |
43 | ||
e98be2d5 WF |
44 | /* |
45 | * Estimate write bandwidth at 200ms intervals. | |
46 | */ | |
47 | #define BANDWIDTH_INTERVAL max(HZ/5, 1) | |
48 | ||
1da177e4 LT |
49 | /* |
50 | * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited | |
51 | * will look to see if it needs to force writeback or throttling. | |
52 | */ | |
53 | static long ratelimit_pages = 32; | |
54 | ||
1da177e4 LT |
55 | /* |
56 | * When balance_dirty_pages decides that the caller needs to perform some | |
57 | * non-background writeback, this is how many pages it will attempt to write. | |
3a2e9a5a | 58 | * It should be somewhat larger than the number of pages just dirtied, to ensure that reasonably
1da177e4 LT |
59 | * large amounts of I/O are submitted. |
60 | */ | |
3a2e9a5a | 61 | static inline long sync_writeback_pages(unsigned long dirtied) |
1da177e4 | 62 | { |
3a2e9a5a WF |
63 | if (dirtied < ratelimit_pages) |
64 | dirtied = ratelimit_pages; | |
65 | ||
66 | return dirtied + dirtied / 2; | |
1da177e4 LT |
67 | } |
68 | ||
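To make the chunk sizing above concrete, here is a minimal userspace sketch of the same arithmetic (the standalone program and its sample values are illustrative, not kernel code):

```c
#include <stdio.h>

/* Mirrors the kernel default above: a CPU may dirty this many pages
 * before balance_dirty_pages_ratelimited() takes a closer look. */
static long ratelimit_pages = 32;

/* Same arithmetic as sync_writeback_pages(): aim to write back 1.5x
 * the pages just dirtied, but never less than 1.5x ratelimit_pages. */
static long sync_writeback_pages(unsigned long dirtied)
{
        if (dirtied < ratelimit_pages)
                dirtied = ratelimit_pages;

        return dirtied + dirtied / 2;
}

int main(void)
{
        printf("%ld\n", sync_writeback_pages(8));   /* 48: rounded up to the ratelimit */
        printf("%ld\n", sync_writeback_pages(100)); /* 150: 1.5x the dirtied count */
        return 0;
}
```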
69 | /* The following parameters are exported via /proc/sys/vm */ | |
70 | ||
71 | /* | |
5b0830cb | 72 | * Start background writeback (via writeback threads) at this percentage |
1da177e4 | 73 | */ |
1b5e62b4 | 74 | int dirty_background_ratio = 10; |
1da177e4 | 75 | |
2da02997 DR |
76 | /* |
77 | * dirty_background_bytes starts at 0 (disabled) so that it is a function of | |
78 | * dirty_background_ratio * the amount of dirtyable memory | |
79 | */ | |
80 | unsigned long dirty_background_bytes; | |
81 | ||
195cf453 BG |
82 | /* |
83 | * free highmem will not be subtracted from the total free memory | |
84 | * for calculating free ratios if vm_highmem_is_dirtyable is true | |
85 | */ | |
86 | int vm_highmem_is_dirtyable; | |
87 | ||
1da177e4 LT |
88 | /* |
89 | * The generator of dirty data starts writeback at this percentage | |
90 | */ | |
1b5e62b4 | 91 | int vm_dirty_ratio = 20; |
1da177e4 | 92 | |
2da02997 DR |
93 | /* |
94 | * vm_dirty_bytes starts at 0 (disabled) so that it is a function of | |
95 | * vm_dirty_ratio * the amount of dirtyable memory | |
96 | */ | |
97 | unsigned long vm_dirty_bytes; | |
98 | ||
1da177e4 | 99 | /* |
704503d8 | 100 | * The interval between `kupdate'-style writebacks |
1da177e4 | 101 | */ |
22ef37ee | 102 | unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */ |
1da177e4 LT |
103 | |
104 | /* | |
704503d8 | 105 | * The longest time for which data is allowed to remain dirty |
1da177e4 | 106 | */ |
22ef37ee | 107 | unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */ |
1da177e4 LT |
108 | |
109 | /* | |
110 | * Flag that makes the machine dump writes/reads and block dirtyings. | |
111 | */ | |
112 | int block_dump; | |
113 | ||
114 | /* | |
ed5b43f1 BS |
115 | * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies: |
116 | * a full sync is triggered after this time elapses without any disk activity. | |
1da177e4 LT |
117 | */ |
118 | int laptop_mode; | |
119 | ||
120 | EXPORT_SYMBOL(laptop_mode); | |
121 | ||
122 | /* End of sysctl-exported parameters */ | |
123 | ||
c42843f2 | 124 | unsigned long global_dirty_limit; |
1da177e4 | 125 | |
04fbfdc1 PZ |
126 | /* |
127 | * Scale the writeback cache size in proportion to the relative writeout speeds.
128 | * | |
129 | * We do this by keeping a floating proportion between BDIs, based on page | |
130 | * writeback completions [end_page_writeback()]. Those devices that write out | |
131 | * pages fastest will get the larger share, while the slower will get a smaller | |
132 | * share. | |
133 | * | |
134 | * We use page writeout completions because we are interested in getting rid of | |
135 | * dirty pages. Having them written out is the primary goal. | |
136 | * | |
137 | * We introduce a concept of time, a period over which we measure these events, | |
138 | * because demand can/will vary over time. The length of this period itself is | |
139 | * measured in page writeback completions. | |
140 | * | |
141 | */ | |
142 | static struct prop_descriptor vm_completions; | |
3e26c149 | 143 | static struct prop_descriptor vm_dirties; |
04fbfdc1 | 144 | |
04fbfdc1 PZ |
145 | /* |
146 | * couple the period to the dirty_ratio: | |
147 | * | |
148 | * period/2 ~ roundup_pow_of_two(dirty limit) | |
149 | */ | |
150 | static int calc_period_shift(void) | |
151 | { | |
152 | unsigned long dirty_total; | |
153 | ||
2da02997 DR |
154 | if (vm_dirty_bytes) |
155 | dirty_total = vm_dirty_bytes / PAGE_SIZE; | |
156 | else | |
157 | dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / | |
158 | 100; | |
04fbfdc1 PZ |
159 | return 2 + ilog2(dirty_total - 1); |
160 | } | |
161 | ||
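As a worked example of the coupling above, a hedged userspace sketch (`ilog2_ul()` is a stand-in for the kernel's ilog2()):

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2(): floor(log2(n)). */
static int ilog2_ul(unsigned long n)
{
        int bit = -1;

        while (n) {
                n >>= 1;
                bit++;
        }
        return bit;
}

int main(void)
{
        unsigned long dirty_total = 104857;     /* pages, e.g. 20% of ~2 GiB */
        int shift = 2 + ilog2_ul(dirty_total - 1);

        /* The proportion period is 2^shift writeback completions, so
         * period/2 is the dirty limit rounded up to a power of two. */
        printf("shift=%d period=%lu period/2=%lu\n",
               shift, 1UL << shift, 1UL << (shift - 1));
        return 0;
}
```

With these numbers shift is 18, so the period is 262144 completions and period/2 is 131072, i.e. roundup_pow_of_two(104857).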
162 | /* | |
2da02997 | 163 | * update the period when the dirty threshold changes. |
04fbfdc1 | 164 | */ |
2da02997 DR |
165 | static void update_completion_period(void) |
166 | { | |
167 | int shift = calc_period_shift(); | |
168 | prop_change_shift(&vm_completions, shift); | |
169 | prop_change_shift(&vm_dirties, shift); | |
170 | } | |
171 | ||
172 | int dirty_background_ratio_handler(struct ctl_table *table, int write, | |
8d65af78 | 173 | void __user *buffer, size_t *lenp, |
2da02997 DR |
174 | loff_t *ppos) |
175 | { | |
176 | int ret; | |
177 | ||
8d65af78 | 178 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 DR |
179 | if (ret == 0 && write) |
180 | dirty_background_bytes = 0; | |
181 | return ret; | |
182 | } | |
183 | ||
184 | int dirty_background_bytes_handler(struct ctl_table *table, int write, | |
8d65af78 | 185 | void __user *buffer, size_t *lenp, |
2da02997 DR |
186 | loff_t *ppos) |
187 | { | |
188 | int ret; | |
189 | ||
8d65af78 | 190 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 DR |
191 | if (ret == 0 && write) |
192 | dirty_background_ratio = 0; | |
193 | return ret; | |
194 | } | |
195 | ||
04fbfdc1 | 196 | int dirty_ratio_handler(struct ctl_table *table, int write, |
8d65af78 | 197 | void __user *buffer, size_t *lenp, |
04fbfdc1 PZ |
198 | loff_t *ppos) |
199 | { | |
200 | int old_ratio = vm_dirty_ratio; | |
2da02997 DR |
201 | int ret; |
202 | ||
8d65af78 | 203 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
04fbfdc1 | 204 | if (ret == 0 && write && vm_dirty_ratio != old_ratio) { |
2da02997 DR |
205 | update_completion_period(); |
206 | vm_dirty_bytes = 0; | |
207 | } | |
208 | return ret; | |
209 | } | |
210 | ||
211 | ||
212 | int dirty_bytes_handler(struct ctl_table *table, int write, | |
8d65af78 | 213 | void __user *buffer, size_t *lenp, |
2da02997 DR |
214 | loff_t *ppos) |
215 | { | |
fc3501d4 | 216 | unsigned long old_bytes = vm_dirty_bytes; |
2da02997 DR |
217 | int ret; |
218 | ||
8d65af78 | 219 | ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
2da02997 DR |
220 | if (ret == 0 && write && vm_dirty_bytes != old_bytes) { |
221 | update_completion_period(); | |
222 | vm_dirty_ratio = 0; | |
04fbfdc1 PZ |
223 | } |
224 | return ret; | |
225 | } | |
226 | ||
227 | /* | |
228 | * Increment the BDI's writeout completion count and the global writeout | |
229 | * completion count. Called from test_clear_page_writeback(). | |
230 | */ | |
231 | static inline void __bdi_writeout_inc(struct backing_dev_info *bdi) | |
232 | { | |
f7d2b1ec | 233 | __inc_bdi_stat(bdi, BDI_WRITTEN); |
a42dde04 PZ |
234 | __prop_inc_percpu_max(&vm_completions, &bdi->completions, |
235 | bdi->max_prop_frac); | |
04fbfdc1 PZ |
236 | } |
237 | ||
dd5656e5 MS |
238 | void bdi_writeout_inc(struct backing_dev_info *bdi) |
239 | { | |
240 | unsigned long flags; | |
241 | ||
242 | local_irq_save(flags); | |
243 | __bdi_writeout_inc(bdi); | |
244 | local_irq_restore(flags); | |
245 | } | |
246 | EXPORT_SYMBOL_GPL(bdi_writeout_inc); | |
247 | ||
1cf6e7d8 | 248 | void task_dirty_inc(struct task_struct *tsk) |
3e26c149 PZ |
249 | { |
250 | prop_inc_single(&vm_dirties, &tsk->dirties); | |
251 | } | |
252 | ||
04fbfdc1 PZ |
253 | /* |
254 | * Obtain an accurate fraction of the BDI's share of the writeout activity.
255 | */ | |
256 | static void bdi_writeout_fraction(struct backing_dev_info *bdi, | |
257 | long *numerator, long *denominator) | |
258 | { | |
3efaf0fa | 259 | prop_fraction_percpu(&vm_completions, &bdi->completions, |
04fbfdc1 | 260 | numerator, denominator); |
04fbfdc1 PZ |
261 | } |
262 | ||
3e26c149 PZ |
263 | static inline void task_dirties_fraction(struct task_struct *tsk, |
264 | long *numerator, long *denominator) | |
265 | { | |
266 | prop_fraction_single(&vm_dirties, &tsk->dirties, | |
267 | numerator, denominator); | |
268 | } | |
269 | ||
270 | /* | |
1babe183 | 271 | * task_dirty_limit - scale down dirty throttling threshold for one task |
3e26c149 PZ |
272 | * |
273 | * task specific dirty limit: | |
274 | * | |
275 | * dirty -= (dirty/8) * p_{t} | |
1babe183 WF |
276 | * |
277 | * To protect light/slow dirtying tasks from heavier/fast ones, we start | |
278 | * throttling individual tasks before reaching the bdi dirty limit. | |
279 | * Relatively low thresholds will be allocated to heavy dirtiers. So when | |
280 | * dirty pages grow large, heavy dirtiers will be throttled first, which will | |
281 | * effectively curb the growth of dirty pages. Light dirtiers with high enough | |
282 | * dirty threshold may never get throttled. | |
3e26c149 | 283 | */ |
bcff25fc | 284 | #define TASK_LIMIT_FRACTION 8 |
16c4042f WF |
285 | static unsigned long task_dirty_limit(struct task_struct *tsk, |
286 | unsigned long bdi_dirty) | |
3e26c149 PZ |
287 | { |
288 | long numerator, denominator; | |
16c4042f | 289 | unsigned long dirty = bdi_dirty; |
bcff25fc | 290 | u64 inv = dirty / TASK_LIMIT_FRACTION; |
3e26c149 PZ |
291 | |
292 | task_dirties_fraction(tsk, &numerator, &denominator); | |
293 | inv *= numerator; | |
294 | do_div(inv, denominator); | |
295 | ||
296 | dirty -= inv; | |
3e26c149 | 297 | |
16c4042f | 298 | return max(dirty, bdi_dirty/2); |
3e26c149 PZ |
299 | } |
300 | ||
bcff25fc JK |
301 | /* Minimum limit for any task */ |
302 | static unsigned long task_min_dirty_limit(unsigned long bdi_dirty) | |
303 | { | |
304 | return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION; | |
305 | } | |
306 | ||
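To see the two helpers above in action, a small userspace sketch with made-up numbers (numerator/denominator stand in for the task's recent share of dirtying as reported by task_dirties_fraction()):

```c
#include <stdio.h>

#define TASK_LIMIT_FRACTION 8

/* Same arithmetic as task_dirty_limit() above. */
static unsigned long task_limit(unsigned long bdi_dirty,
                                long numerator, long denominator)
{
        unsigned long dirty = bdi_dirty;
        unsigned long inv = bdi_dirty / TASK_LIMIT_FRACTION;

        inv = inv * numerator / denominator;
        dirty -= inv;

        return dirty > bdi_dirty / 2 ? dirty : bdi_dirty / 2;
}

int main(void)
{
        /* With a 1000-page bdi threshold: */
        printf("%lu\n", task_limit(1000, 0, 1)); /* 1000: task dirtied nothing */
        printf("%lu\n", task_limit(1000, 1, 2)); /*  938: task did half the dirtying */
        printf("%lu\n", task_limit(1000, 1, 1)); /*  875: task did all of it */
        return 0;
}
```

The last value matches task_min_dirty_limit(1000) = 1000 - 1000/8 = 875: a task that does all of the dirtying is pushed down to the floor that every task is guaranteed.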
189d3c4a PZ |
307 | /* |
308 | * | |
309 | */ | |
189d3c4a PZ |
310 | static unsigned int bdi_min_ratio; |
311 | ||
312 | int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio) | |
313 | { | |
314 | int ret = 0; | |
189d3c4a | 315 | |
cfc4ba53 | 316 | spin_lock_bh(&bdi_lock); |
a42dde04 | 317 | if (min_ratio > bdi->max_ratio) { |
189d3c4a | 318 | ret = -EINVAL; |
a42dde04 PZ |
319 | } else { |
320 | min_ratio -= bdi->min_ratio; | |
321 | if (bdi_min_ratio + min_ratio < 100) { | |
322 | bdi_min_ratio += min_ratio; | |
323 | bdi->min_ratio += min_ratio; | |
324 | } else { | |
325 | ret = -EINVAL; | |
326 | } | |
327 | } | |
cfc4ba53 | 328 | spin_unlock_bh(&bdi_lock); |
a42dde04 PZ |
329 | |
330 | return ret; | |
331 | } | |
332 | ||
333 | int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio) | |
334 | { | |
a42dde04 PZ |
335 | int ret = 0; |
336 | ||
337 | if (max_ratio > 100) | |
338 | return -EINVAL; | |
339 | ||
cfc4ba53 | 340 | spin_lock_bh(&bdi_lock); |
a42dde04 PZ |
341 | if (bdi->min_ratio > max_ratio) { |
342 | ret = -EINVAL; | |
343 | } else { | |
344 | bdi->max_ratio = max_ratio; | |
345 | bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100; | |
346 | } | |
cfc4ba53 | 347 | spin_unlock_bh(&bdi_lock); |
189d3c4a PZ |
348 | |
349 | return ret; | |
350 | } | |
a42dde04 | 351 | EXPORT_SYMBOL(bdi_set_max_ratio); |
189d3c4a | 352 | |
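A hedged sketch of how a driver might use the two setters above; the function and the 10%/60% split are hypothetical, only bdi_set_min_ratio()/bdi_set_max_ratio() are the real calls shown in this file:

```c
#include <linux/backing-dev.h>

/* Hypothetical caller: reserve part of the global dirty limit for a
 * device that must not be starved, while capping how much it can take. */
static int reserve_bdi_share(struct backing_dev_info *bdi)
{
        int err;

        /* Guarantee this bdi at least 10% of the dirty limit... */
        err = bdi_set_min_ratio(bdi, 10);
        if (err)
                return err;

        /* ...but never let it claim more than 60% of it. */
        return bdi_set_max_ratio(bdi, 60);
}
```

The same knobs are normally reachable from userspace through the per-device bdi sysfs attributes (min_ratio/max_ratio under /sys/class/bdi/).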
1da177e4 LT |
353 | /* |
354 | * Work out the current dirty-memory clamping and background writeout | |
355 | * thresholds. | |
356 | * | |
357 | * The main aim here is to lower them aggressively if there is a lot of mapped
358 | * memory around, to avoid stressing page reclaim with lots of unreclaimable
359 | * pages. It is better to clamp down on writers than to start swapping and
360 | * performing lots of scanning.
361 | * | |
362 | * We only allow 1/2 of the currently-unmapped memory to be dirtied. | |
363 | * | |
364 | * We don't permit the clamping level to fall below 5% - that is getting rather | |
365 | * excessive. | |
366 | * | |
367 | * We make sure that the background writeout level is below the adjusted | |
368 | * clamping level. | |
369 | */ | |
1b424464 CL |
370 | |
371 | static unsigned long highmem_dirtyable_memory(unsigned long total) | |
372 | { | |
373 | #ifdef CONFIG_HIGHMEM | |
374 | int node; | |
375 | unsigned long x = 0; | |
376 | ||
37b07e41 | 377 | for_each_node_state(node, N_HIGH_MEMORY) { |
1b424464 CL |
378 | struct zone *z = |
379 | &NODE_DATA(node)->node_zones[ZONE_HIGHMEM]; | |
380 | ||
adea02a1 WF |
381 | x += zone_page_state(z, NR_FREE_PAGES) + |
382 | zone_reclaimable_pages(z); | |
1b424464 CL |
383 | } |
384 | /* | |
385 | * Make sure that the number of highmem pages is never larger | |
386 | * than the total amount of dirtyable memory. This can only
387 | * occur in very strange VM situations but we want to make sure | |
388 | * that this does not occur. | |
389 | */ | |
390 | return min(x, total); | |
391 | #else | |
392 | return 0; | |
393 | #endif | |
394 | } | |
395 | ||
3eefae99 SR |
396 | /** |
397 | * determine_dirtyable_memory - amount of memory that may be used | |
398 | * | |
399 | * Returns the number of pages that can currently be freed and used
400 | * by the kernel for direct mappings. | |
401 | */ | |
402 | unsigned long determine_dirtyable_memory(void) | |
1b424464 CL |
403 | { |
404 | unsigned long x; | |
405 | ||
adea02a1 | 406 | x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); |
195cf453 BG |
407 | |
408 | if (!vm_highmem_is_dirtyable) | |
409 | x -= highmem_dirtyable_memory(x); | |
410 | ||
1b424464 CL |
411 | return x + 1; /* Ensure that we never return 0 */ |
412 | } | |
413 | ||
ffd1f609 WF |
414 | static unsigned long hard_dirty_limit(unsigned long thresh) |
415 | { | |
416 | return max(thresh, global_dirty_limit); | |
417 | } | |
418 | ||
03ab450f | 419 | /* |
1babe183 WF |
420 | * global_dirty_limits - background-writeback and dirty-throttling thresholds |
421 | * | |
422 | * Calculate the dirty thresholds based on sysctl parameters | |
423 | * - vm.dirty_background_ratio or vm.dirty_background_bytes | |
424 | * - vm.dirty_ratio or vm.dirty_bytes | |
425 | * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (i.e. nfsd) and
ebd1373d | 426 | * real-time tasks. |
1babe183 | 427 | */ |
16c4042f | 428 | void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty) |
1da177e4 | 429 | { |
364aeb28 DR |
430 | unsigned long background; |
431 | unsigned long dirty; | |
240c879f | 432 | unsigned long uninitialized_var(available_memory); |
1da177e4 LT |
433 | struct task_struct *tsk; |
434 | ||
240c879f MK |
435 | if (!vm_dirty_bytes || !dirty_background_bytes) |
436 | available_memory = determine_dirtyable_memory(); | |
437 | ||
2da02997 DR |
438 | if (vm_dirty_bytes) |
439 | dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE); | |
4cbec4c8 WF |
440 | else |
441 | dirty = (vm_dirty_ratio * available_memory) / 100; | |
1da177e4 | 442 | |
2da02997 DR |
443 | if (dirty_background_bytes) |
444 | background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE); | |
445 | else | |
446 | background = (dirty_background_ratio * available_memory) / 100; | |
1da177e4 | 447 | |
2da02997 DR |
448 | if (background >= dirty) |
449 | background = dirty / 2; | |
1da177e4 LT |
450 | tsk = current; |
451 | if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) { | |
452 | background += background / 4; | |
453 | dirty += dirty / 4; | |
454 | } | |
455 | *pbackground = background; | |
456 | *pdirty = dirty; | |
e1cbe236 | 457 | trace_global_dirty_state(background, dirty); |
16c4042f | 458 | } |
04fbfdc1 | 459 | |
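For concreteness, the threshold arithmetic above with the default sysctls, as a hedged userspace sketch (the 1 GiB figure is just an example):

```c
#include <stdio.h>

int main(void)
{
        /* Assume 1 GiB of dirtyable memory in 4 KiB pages and the default
         * vm.dirty_ratio=20, vm.dirty_background_ratio=10. */
        unsigned long available_memory = 262144;
        unsigned long dirty = 20 * available_memory / 100;
        unsigned long background = 10 * available_memory / 100;

        if (background >= dirty)
                background = dirty / 2;

        /* PF_LESS_THROTTLE (e.g. nfsd) and real-time tasks get +25%. */
        printf("background=%lu dirty=%lu boosted=%lu pages\n",
               background, dirty, dirty + dirty / 4);
        return 0;
}
```

This prints background=26214 dirty=52428 boosted=65535, i.e. roughly 100 MiB, 200 MiB and 256 MiB.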
6f718656 | 460 | /** |
1babe183 | 461 | * bdi_dirty_limit - @bdi's share of dirty throttling threshold |
6f718656 WF |
462 | * @bdi: the backing_dev_info to query |
463 | * @dirty: global dirty limit in pages | |
1babe183 | 464 | * |
6f718656 WF |
465 | * Returns @bdi's dirty limit in pages. The term "dirty" in the context of |
466 | * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages. | |
467 | * The "limit" in the name is not treated as a hard limit in
468 | * balance_dirty_pages(). | |
1babe183 | 469 | * |
6f718656 | 470 | * It allocates high/low dirty limits to fast/slow devices, in order to prevent |
1babe183 WF |
471 | * - starving fast devices |
472 | * - piling up dirty pages (that will take long time to sync) on slow devices | |
473 | * | |
474 | * The bdi's share of dirty limit will be adapting to its throughput and | |
475 | * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set. | |
476 | */ | |
477 | unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty) | |
16c4042f WF |
478 | { |
479 | u64 bdi_dirty; | |
480 | long numerator, denominator; | |
04fbfdc1 | 481 | |
16c4042f WF |
482 | /* |
483 | * Calculate this BDI's share of the dirty ratio. | |
484 | */ | |
485 | bdi_writeout_fraction(bdi, &numerator, &denominator); | |
04fbfdc1 | 486 | |
16c4042f WF |
487 | bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100; |
488 | bdi_dirty *= numerator; | |
489 | do_div(bdi_dirty, denominator); | |
04fbfdc1 | 490 | |
16c4042f WF |
491 | bdi_dirty += (dirty * bdi->min_ratio) / 100; |
492 | if (bdi_dirty > (dirty * bdi->max_ratio) / 100) | |
493 | bdi_dirty = dirty * bdi->max_ratio / 100; | |
494 | ||
495 | return bdi_dirty; | |
1da177e4 LT |
496 | } |
497 | ||
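A worked example of the share computation above (userspace sketch; all numbers invented):

```c
#include <stdio.h>

int main(void)
{
        unsigned long dirty = 50000;    /* global dirty limit, in pages */
        unsigned int bdi_min_ratio = 0; /* no reservations system-wide */
        unsigned int min_ratio = 0, max_ratio = 100;
        /* Suppose this bdi completed 3 of every 4 recent writeouts. */
        long numerator = 3, denominator = 4;
        unsigned long bdi_dirty;

        bdi_dirty = dirty * (100 - bdi_min_ratio) / 100;
        bdi_dirty = bdi_dirty * numerator / denominator;
        bdi_dirty += dirty * min_ratio / 100;
        if (bdi_dirty > dirty * max_ratio / 100)
                bdi_dirty = dirty * max_ratio / 100;

        printf("bdi share = %lu pages\n", bdi_dirty);   /* 37500 */
        return 0;
}
```

A device doing 3/4 of the writeout gets 3/4 of the dirty limit; a slower device sharing the workload would correspondingly get the remaining quarter.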
e98be2d5 WF |
498 | static void bdi_update_write_bandwidth(struct backing_dev_info *bdi, |
499 | unsigned long elapsed, | |
500 | unsigned long written) | |
501 | { | |
502 | const unsigned long period = roundup_pow_of_two(3 * HZ); | |
503 | unsigned long avg = bdi->avg_write_bandwidth; | |
504 | unsigned long old = bdi->write_bandwidth; | |
505 | u64 bw; | |
506 | ||
507 | /* | |
508 | * bw = written * HZ / elapsed | |
509 | * | |
510 | * bw * elapsed + write_bandwidth * (period - elapsed) | |
511 | * write_bandwidth = --------------------------------------------------- | |
512 | * period | |
513 | */ | |
514 | bw = written - bdi->written_stamp; | |
515 | bw *= HZ; | |
516 | if (unlikely(elapsed > period)) { | |
517 | do_div(bw, elapsed); | |
518 | avg = bw; | |
519 | goto out; | |
520 | } | |
521 | bw += (u64)bdi->write_bandwidth * (period - elapsed); | |
522 | bw >>= ilog2(period); | |
523 | ||
524 | /* | |
525 | * one more level of smoothing, for filtering out sudden spikes | |
526 | */ | |
527 | if (avg > old && old >= (unsigned long)bw) | |
528 | avg -= (avg - old) >> 3; | |
529 | ||
530 | if (avg < old && old <= (unsigned long)bw) | |
531 | avg += (old - avg) >> 3; | |
532 | ||
533 | out: | |
534 | bdi->write_bandwidth = bw; | |
535 | bdi->avg_write_bandwidth = avg; | |
536 | } | |
537 | ||
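The first stage of the smoothing above can be tried in isolation; a hedged userspace sketch for HZ=1000 (the kernel's shift by ilog2(period) becomes a division by the power-of-two period):

```c
#include <stdio.h>

#define HZ      1000UL
#define PERIOD  4096UL  /* roundup_pow_of_two(3 * HZ) for HZ == 1000 */

/* Blend the bandwidth observed over `elapsed` jiffies (elapsed <= PERIOD)
 * into the running estimate, as bdi_update_write_bandwidth() does:
 * written * HZ is exactly the bw * elapsed term of the formula above. */
static unsigned long update_bw(unsigned long old_bw,
                               unsigned long written, unsigned long elapsed)
{
        unsigned long long bw = (unsigned long long)written * HZ;

        bw += (unsigned long long)old_bw * (PERIOD - elapsed);
        return bw / PERIOD;
}

int main(void)
{
        /* 10000 pages written in 200ms (50000 pages/s observed)
         * against a 20000 pages/s running estimate: */
        printf("%lu pages/s\n", update_bw(20000, 10000, HZ / 5)); /* 21464 */
        return 0;
}
```

Because only elapsed/PERIOD of the window is fresh data, a single burst moves the estimate a little; sustained throughput moves it all the way, and the second-stage filter above damps spikes further.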
c42843f2 WF |
538 | /* |
539 | * The global dirtyable memory and dirty threshold could be suddenly knocked | |
540 | * down by a large amount (e.g. on the startup of KVM in a swapless system).
541 | * This may throw the system into a deep dirty-exceeded state and throttle
542 | * heavy/light dirtiers alike. To retain good responsiveness, maintain
543 | * global_dirty_limit so that it tracks slowly down to the knocked-down dirty
544 | * threshold. | |
545 | */ | |
546 | static void update_dirty_limit(unsigned long thresh, unsigned long dirty) | |
547 | { | |
548 | unsigned long limit = global_dirty_limit; | |
549 | ||
550 | /* | |
551 | * Follow up in one step. | |
552 | */ | |
553 | if (limit < thresh) { | |
554 | limit = thresh; | |
555 | goto update; | |
556 | } | |
557 | ||
558 | /* | |
559 | * Follow down slowly. Use the higher one as the target, because thresh | |
560 | * may drop below dirty. This is exactly the reason to introduce | |
561 | * global_dirty_limit which is guaranteed to lie above the dirty pages. | |
562 | */ | |
563 | thresh = max(thresh, dirty); | |
564 | if (limit > thresh) { | |
565 | limit -= (limit - thresh) >> 5; | |
566 | goto update; | |
567 | } | |
568 | return; | |
569 | update: | |
570 | global_dirty_limit = limit; | |
571 | } | |
572 | ||
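To get a feel for "follow down slowly", a hedged userspace sketch: each 200ms update above closes 1/32 of the remaining gap, so recovering from a large drop takes on the order of a minute (all numbers invented):

```c
#include <stdio.h>

int main(void)
{
        unsigned long limit = 100000;   /* global_dirty_limit, pages */
        unsigned long thresh = 20000;   /* new, much lower threshold */
        unsigned long dirty = 60000;    /* pages currently dirty */
        unsigned long target = thresh > dirty ? thresh : dirty;
        int updates = 0;

        /* Same step as update_dirty_limit(): limit -= gap/32 per update,
         * never dropping below max(thresh, dirty). */
        while ((limit - target) >> 5) {
                limit -= (limit - target) >> 5;
                updates++;
        }
        printf("settled near %lu pages after %d updates (~%d s)\n",
               limit, updates, updates / 5);
        return 0;
}
```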
573 | static void global_update_bandwidth(unsigned long thresh, | |
574 | unsigned long dirty, | |
575 | unsigned long now) | |
576 | { | |
577 | static DEFINE_SPINLOCK(dirty_lock); | |
578 | static unsigned long update_time; | |
579 | ||
580 | /* | |
581 | * check locklessly first to optimize away locking for the most time | |
582 | */ | |
583 | if (time_before(now, update_time + BANDWIDTH_INTERVAL)) | |
584 | return; | |
585 | ||
586 | spin_lock(&dirty_lock); | |
587 | if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) { | |
588 | update_dirty_limit(thresh, dirty); | |
589 | update_time = now; | |
590 | } | |
591 | spin_unlock(&dirty_lock); | |
592 | } | |
593 | ||
e98be2d5 | 594 | void __bdi_update_bandwidth(struct backing_dev_info *bdi, |
c42843f2 WF |
595 | unsigned long thresh, |
596 | unsigned long dirty, | |
597 | unsigned long bdi_thresh, | |
598 | unsigned long bdi_dirty, | |
e98be2d5 WF |
599 | unsigned long start_time) |
600 | { | |
601 | unsigned long now = jiffies; | |
602 | unsigned long elapsed = now - bdi->bw_time_stamp; | |
603 | unsigned long written; | |
604 | ||
605 | /* | |
606 | * rate-limit, only update once every 200ms. | |
607 | */ | |
608 | if (elapsed < BANDWIDTH_INTERVAL) | |
609 | return; | |
610 | ||
611 | written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); | |
612 | ||
613 | /* | |
614 | * Skip quiet periods when disk bandwidth is under-utilized. | |
615 | * (at least 1s idle time between two flusher runs) | |
616 | */ | |
617 | if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) | |
618 | goto snapshot; | |
619 | ||
c42843f2 WF |
620 | if (thresh) |
621 | global_update_bandwidth(thresh, dirty, now); | |
622 | ||
e98be2d5 WF |
623 | bdi_update_write_bandwidth(bdi, elapsed, written); |
624 | ||
625 | snapshot: | |
626 | bdi->written_stamp = written; | |
627 | bdi->bw_time_stamp = now; | |
628 | } | |
629 | ||
630 | static void bdi_update_bandwidth(struct backing_dev_info *bdi, | |
c42843f2 WF |
631 | unsigned long thresh, |
632 | unsigned long dirty, | |
633 | unsigned long bdi_thresh, | |
634 | unsigned long bdi_dirty, | |
e98be2d5 WF |
635 | unsigned long start_time) |
636 | { | |
637 | if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) | |
638 | return; | |
639 | spin_lock(&bdi->wb.list_lock); | |
c42843f2 WF |
640 | __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty, |
641 | start_time); | |
e98be2d5 WF |
642 | spin_unlock(&bdi->wb.list_lock); |
643 | } | |
644 | ||
1da177e4 LT |
645 | /* |
646 | * balance_dirty_pages() must be called by processes which are generating dirty | |
647 | * data. It looks at the number of dirty pages in the machine and will force | |
648 | * the caller to perform writeback if the system is over `vm_dirty_ratio'. | |
5b0830cb JA |
649 | * If we're over `background_thresh' then the writeback threads are woken to |
650 | * perform some writeout. | |
1da177e4 | 651 | */ |
3a2e9a5a WF |
652 | static void balance_dirty_pages(struct address_space *mapping, |
653 | unsigned long write_chunk) | |
1da177e4 | 654 | { |
7762741e WF |
655 | unsigned long nr_reclaimable, bdi_nr_reclaimable; |
656 | unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ | |
657 | unsigned long bdi_dirty; | |
364aeb28 DR |
658 | unsigned long background_thresh; |
659 | unsigned long dirty_thresh; | |
660 | unsigned long bdi_thresh; | |
bcff25fc JK |
661 | unsigned long task_bdi_thresh; |
662 | unsigned long min_task_bdi_thresh; | |
1da177e4 | 663 | unsigned long pages_written = 0; |
87c6a9b2 | 664 | unsigned long pause = 1; |
e50e3720 | 665 | bool dirty_exceeded = false; |
bcff25fc | 666 | bool clear_dirty_exceeded = true; |
1da177e4 | 667 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
e98be2d5 | 668 | unsigned long start_time = jiffies; |
1da177e4 LT |
669 | |
670 | for (;;) { | |
5fce25a9 PZ |
671 | nr_reclaimable = global_page_state(NR_FILE_DIRTY) + |
672 | global_page_state(NR_UNSTABLE_NFS); | |
7762741e | 673 | nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); |
5fce25a9 | 674 | |
16c4042f WF |
675 | global_dirty_limits(&background_thresh, &dirty_thresh); |
676 | ||
677 | /* | |
678 | * Throttle it only when the background writeback cannot | |
679 | * catch up. This avoids (excessively) small writeouts
680 | * when the bdi limits are ramping up. | |
681 | */ | |
7762741e | 682 | if (nr_dirty <= (background_thresh + dirty_thresh) / 2) |
16c4042f WF |
683 | break; |
684 | ||
685 | bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); | |
bcff25fc JK |
686 | min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh); |
687 | task_bdi_thresh = task_dirty_limit(current, bdi_thresh); | |
16c4042f | 688 | |
e50e3720 WF |
689 | /* |
690 | * In order to avoid the stacked BDI deadlock we need | |
691 | * to ensure we accurately count the 'dirty' pages when | |
692 | * the threshold is low. | |
693 | * | |
694 | * Otherwise it would be possible to get thresh+n pages | |
695 | * reported dirty, even though there are thresh-m pages | |
696 | * actually dirty; with m+n sitting in the percpu | |
697 | * deltas. | |
698 | */ | |
bcff25fc | 699 | if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) { |
e50e3720 | 700 | bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); |
7762741e WF |
701 | bdi_dirty = bdi_nr_reclaimable + |
702 | bdi_stat_sum(bdi, BDI_WRITEBACK); | |
e50e3720 WF |
703 | } else { |
704 | bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); | |
7762741e WF |
705 | bdi_dirty = bdi_nr_reclaimable + |
706 | bdi_stat(bdi, BDI_WRITEBACK); | |
e50e3720 | 707 | } |
5fce25a9 | 708 | |
e50e3720 WF |
709 | /* |
710 | * The bdi thresh is a somewhat "soft" limit derived from the
711 | * global "hard" limit. The former helps to prevent a heavy-IO
712 | * bdi or process from holding back light ones; the latter is
713 | * the last-resort safeguard.
714 | */ | |
bcff25fc | 715 | dirty_exceeded = (bdi_dirty > task_bdi_thresh) || |
7762741e | 716 | (nr_dirty > dirty_thresh); |
bcff25fc JK |
717 | clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) && |
718 | (nr_dirty <= dirty_thresh); | |
e50e3720 WF |
719 | |
720 | if (!dirty_exceeded) | |
04fbfdc1 | 721 | break; |
1da177e4 | 722 | |
04fbfdc1 PZ |
723 | if (!bdi->dirty_exceeded) |
724 | bdi->dirty_exceeded = 1; | |
1da177e4 | 725 | |
c42843f2 WF |
726 | bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty, |
727 | bdi_thresh, bdi_dirty, start_time); | |
e98be2d5 | 728 | |
1da177e4 LT |
729 | /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. |
730 | * Unstable writes are a feature of certain networked | |
731 | * filesystems (i.e. NFS) in which data may have been | |
732 | * written to the server's write cache, but has not yet | |
733 | * been flushed to permanent storage. | |
d7831a0b RK |
734 | * Only move pages to writeback if this bdi is over its |
735 | * threshold otherwise wait until the disk writes catch | |
736 | * up. | |
1da177e4 | 737 | */ |
d46db3d5 | 738 | trace_balance_dirty_start(bdi); |
bcff25fc | 739 | if (bdi_nr_reclaimable > task_bdi_thresh) { |
d46db3d5 WF |
740 | pages_written += writeback_inodes_wb(&bdi->wb, |
741 | write_chunk); | |
742 | trace_balance_dirty_written(bdi, pages_written); | |
e50e3720 WF |
743 | if (pages_written >= write_chunk) |
744 | break; /* We've done our duty */ | |
04fbfdc1 | 745 | } |
d153ba64 | 746 | __set_current_state(TASK_UNINTERRUPTIBLE); |
d25105e8 | 747 | io_schedule_timeout(pause); |
d46db3d5 | 748 | trace_balance_dirty_wait(bdi); |
87c6a9b2 | 749 | |
ffd1f609 WF |
750 | dirty_thresh = hard_dirty_limit(dirty_thresh); |
751 | /* | |
752 | * max-pause area. If dirty exceeded but still within this | |
753 | * area, no need to sleep for more than 200ms: (a) 8 pages per | |
754 | * 200ms is typically more than enough to curb heavy dirtiers; | |
755 | * (b) the pause time limit makes the dirtiers more responsive. | |
756 | */ | |
bb082295 WF |
757 | if (nr_dirty < dirty_thresh && |
758 | bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && | |
ffd1f609 WF |
759 | time_after(jiffies, start_time + MAX_PAUSE)) |
760 | break; | |
87c6a9b2 JA |
761 | |
762 | /* | |
763 | * Increase the delay for each loop, up to our previous | |
764 | * default of taking a 100ms nap. | |
765 | */ | |
766 | pause <<= 1; | |
767 | if (pause > HZ / 10) | |
768 | pause = HZ / 10; | |
1da177e4 LT |
769 | } |
770 | ||
bcff25fc JK |
771 | /* Clear dirty_exceeded flag only when no task can exceed the limit */ |
772 | if (clear_dirty_exceeded && bdi->dirty_exceeded) | |
04fbfdc1 | 773 | bdi->dirty_exceeded = 0; |
1da177e4 LT |
774 | |
775 | if (writeback_in_progress(bdi)) | |
5b0830cb | 776 | return; |
1da177e4 LT |
777 | |
778 | /* | |
779 | * In laptop mode, we wait until hitting the higher threshold before | |
780 | * starting background writeout, and then write out all the way down | |
781 | * to the lower threshold. So slow writers cause minimal disk activity. | |
782 | * | |
783 | * In normal mode, we start background writeout at the lower | |
784 | * background_thresh, to keep the amount of dirty memory low. | |
785 | */ | |
786 | if ((laptop_mode && pages_written) || | |
e50e3720 | 787 | (!laptop_mode && (nr_reclaimable > background_thresh))) |
c5444198 | 788 | bdi_start_background_writeback(bdi); |
1da177e4 LT |
789 | } |
790 | ||
a200ee18 | 791 | void set_page_dirty_balance(struct page *page, int page_mkwrite) |
edc79b2a | 792 | { |
a200ee18 | 793 | if (set_page_dirty(page) || page_mkwrite) { |
edc79b2a PZ |
794 | struct address_space *mapping = page_mapping(page); |
795 | ||
796 | if (mapping) | |
797 | balance_dirty_pages_ratelimited(mapping); | |
798 | } | |
799 | } | |
800 | ||
245b2e70 TH |
801 | static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; |
802 | ||
1da177e4 | 803 | /** |
fa5a734e | 804 | * balance_dirty_pages_ratelimited_nr - balance dirty memory state |
67be2dd1 | 805 | * @mapping: address_space which was dirtied |
a580290c | 806 | * @nr_pages_dirtied: number of pages which the caller has just dirtied |
1da177e4 LT |
807 | * |
808 | * Processes which are dirtying memory should call in here once for each page | |
809 | * which was newly dirtied. The function will periodically check the system's | |
810 | * dirty state and will initiate writeback if needed. | |
811 | * | |
812 | * On really big machines, get_writeback_state is expensive, so try to avoid | |
813 | * calling it too often (ratelimiting). But once we're over the dirty memory | |
814 | * limit we decrease the ratelimiting by a lot, to prevent individual processes | |
815 | * from overshooting the limit by (ratelimit_pages) each. | |
816 | */ | |
fa5a734e AM |
817 | void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, |
818 | unsigned long nr_pages_dirtied) | |
1da177e4 | 819 | { |
36715cef | 820 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
fa5a734e AM |
821 | unsigned long ratelimit; |
822 | unsigned long *p; | |
1da177e4 | 823 | |
36715cef WF |
824 | if (!bdi_cap_account_dirty(bdi)) |
825 | return; | |
826 | ||
1da177e4 | 827 | ratelimit = ratelimit_pages; |
04fbfdc1 | 828 | if (mapping->backing_dev_info->dirty_exceeded) |
1da177e4 LT |
829 | ratelimit = 8; |
830 | ||
831 | /* | |
832 | * Check the rate limiting. Also, we do not want to throttle real-time | |
833 | * tasks in balance_dirty_pages(). Period. | |
834 | */ | |
fa5a734e | 835 | preempt_disable(); |
245b2e70 | 836 | p = &__get_cpu_var(bdp_ratelimits); |
fa5a734e AM |
837 | *p += nr_pages_dirtied; |
838 | if (unlikely(*p >= ratelimit)) { | |
3a2e9a5a | 839 | ratelimit = sync_writeback_pages(*p); |
fa5a734e AM |
840 | *p = 0; |
841 | preempt_enable(); | |
3a2e9a5a | 842 | balance_dirty_pages(mapping, ratelimit); |
1da177e4 LT |
843 | return; |
844 | } | |
fa5a734e | 845 | preempt_enable(); |
1da177e4 | 846 | } |
fa5a734e | 847 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); |
1da177e4 | 848 | |
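For context, a hedged sketch of the usual call site. Generic write paths call in once per newly dirtied page; the one-page wrapper balance_dirty_pages_ratelimited() (which expands to the _nr variant with nr_pages_dirtied == 1) is the common spelling. The surrounding function is hypothetical:

```c
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical copy-loop tail: after dirtying a locked pagecache page,
 * give the writeback code a chance to throttle this task. */
static void myfs_dirty_one_page(struct address_space *mapping,
                                struct page *page)
{
        set_page_dirty(page);
        unlock_page(page);

        /* Cheap in the common case; sleeps in balance_dirty_pages()
         * once this task has pushed the system over its limits. */
        balance_dirty_pages_ratelimited(mapping);
}
```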
232ea4d6 | 849 | void throttle_vm_writeout(gfp_t gfp_mask) |
1da177e4 | 850 | { |
364aeb28 DR |
851 | unsigned long background_thresh; |
852 | unsigned long dirty_thresh; | |
1da177e4 LT |
853 | |
854 | for ( ; ; ) { | |
16c4042f | 855 | global_dirty_limits(&background_thresh, &dirty_thresh); |
1da177e4 LT |
856 | |
857 | /* | |
858 | * Boost the allowable dirty threshold a bit for page | |
859 | * allocators so they don't get DoS'ed by heavy writers | |
860 | */ | |
861 | dirty_thresh += dirty_thresh / 10; /* wheeee... */ | |
862 | ||
c24f21bd CL |
863 | if (global_page_state(NR_UNSTABLE_NFS) + |
864 | global_page_state(NR_WRITEBACK) <= dirty_thresh) | |
865 | break; | |
8aa7e847 | 866 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
369f2389 FW |
867 | |
868 | /* | |
869 | * The caller might hold locks which can prevent IO completion | |
870 | * or progress in the filesystem. So we cannot just sit here | |
871 | * waiting for IO to complete. | |
872 | */ | |
873 | if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) | |
874 | break; | |
1da177e4 LT |
875 | } |
876 | } | |
877 | ||
1da177e4 LT |
878 | /* |
879 | * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs | |
880 | */ | |
881 | int dirty_writeback_centisecs_handler(ctl_table *table, int write, | |
8d65af78 | 882 | void __user *buffer, size_t *length, loff_t *ppos) |
1da177e4 | 883 | { |
8d65af78 | 884 | proc_dointvec(table, write, buffer, length, ppos); |
6423104b | 885 | bdi_arm_supers_timer(); |
1da177e4 LT |
886 | return 0; |
887 | } | |
888 | ||
c2c4986e | 889 | #ifdef CONFIG_BLOCK |
31373d09 | 890 | void laptop_mode_timer_fn(unsigned long data) |
1da177e4 | 891 | { |
31373d09 MG |
892 | struct request_queue *q = (struct request_queue *)data; |
893 | int nr_pages = global_page_state(NR_FILE_DIRTY) + | |
894 | global_page_state(NR_UNSTABLE_NFS); | |
1da177e4 | 895 | |
31373d09 MG |
896 | /* |
897 | * We want to write everything out, not just down to the dirty | |
898 | * threshold | |
899 | */ | |
31373d09 | 900 | if (bdi_has_dirty_io(&q->backing_dev_info)) |
c5444198 | 901 | bdi_start_writeback(&q->backing_dev_info, nr_pages); |
1da177e4 LT |
902 | } |
903 | ||
904 | /* | |
905 | * We've spun up the disk and we're in laptop mode: schedule writeback | |
906 | * of all dirty data a few seconds from now. If the flush is already scheduled | |
907 | * then push it back - the user is still using the disk. | |
908 | */ | |
31373d09 | 909 | void laptop_io_completion(struct backing_dev_info *info) |
1da177e4 | 910 | { |
31373d09 | 911 | mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode); |
1da177e4 LT |
912 | } |
913 | ||
914 | /* | |
915 | * We're in laptop mode and we've just synced. The sync's writes will have | |
916 | * caused another writeback to be scheduled by laptop_io_completion. | |
917 | * Nothing needs to be written back anymore, so we unschedule the writeback. | |
918 | */ | |
919 | void laptop_sync_completion(void) | |
920 | { | |
31373d09 MG |
921 | struct backing_dev_info *bdi; |
922 | ||
923 | rcu_read_lock(); | |
924 | ||
925 | list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) | |
926 | del_timer(&bdi->laptop_mode_wb_timer); | |
927 | ||
928 | rcu_read_unlock(); | |
1da177e4 | 929 | } |
c2c4986e | 930 | #endif |
1da177e4 LT |
931 | |
932 | /* | |
933 | * If ratelimit_pages is too high then we can get into dirty-data overload | |
934 | * if a large number of processes all perform writes at the same time. | |
935 | * If it is too low then SMP machines will call the (expensive) | |
936 | * get_writeback_state too often. | |
937 | * | |
938 | * Here we set ratelimit_pages to a level which ensures that when all CPUs are | |
939 | * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory | |
940 | * thresholds before writeback cuts in. | |
941 | * | |
942 | * But the limit should not be set too high. Because it also controls the | |
943 | * amount of memory which the balance_dirty_pages() caller has to write back. | |
944 | * If this is too large then the caller will block on the IO queue all the | |
945 | * time. So limit it to four megabytes - the balance_dirty_pages() caller | |
946 | * will write six megabyte chunks, max. | |
947 | */ | |
948 | ||
2d1d43f6 | 949 | void writeback_set_ratelimit(void) |
1da177e4 | 950 | { |
40c99aae | 951 | ratelimit_pages = vm_total_pages / (num_online_cpus() * 32); |
1da177e4 LT |
952 | if (ratelimit_pages < 16) |
953 | ratelimit_pages = 16; | |
954 | if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024) | |
955 | ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; | |
956 | } | |
957 | ||
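The clamping above, worked through for one plausible machine (a hedged userspace sketch; a PAGE_CACHE_SIZE of 4 KiB is assumed):

```c
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL

int main(void)
{
        /* e.g. 4 GiB of 4 KiB pages on a 4-CPU box */
        unsigned long vm_total_pages = 1048576;
        int num_online_cpus = 4;
        long ratelimit_pages = vm_total_pages / (num_online_cpus * 32);

        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;

        /* 8192 pages uncapped; the 4 MiB ceiling clamps it to 1024. */
        printf("ratelimit_pages = %ld\n", ratelimit_pages);
        return 0;
}
```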
26c2143b | 958 | static int __cpuinit |
1da177e4 LT |
959 | ratelimit_handler(struct notifier_block *self, unsigned long u, void *v) |
960 | { | |
2d1d43f6 | 961 | writeback_set_ratelimit(); |
aa0f0303 | 962 | return NOTIFY_DONE; |
1da177e4 LT |
963 | } |
964 | ||
74b85f37 | 965 | static struct notifier_block __cpuinitdata ratelimit_nb = { |
1da177e4 LT |
966 | .notifier_call = ratelimit_handler, |
967 | .next = NULL, | |
968 | }; | |
969 | ||
970 | /* | |
dc6e29da LT |
971 | * Called early on to tune the page writeback dirty limits. |
972 | * | |
973 | * We used to scale dirty pages according to how total memory | |
974 | * related to pages that could be allocated for buffers (by | |
975 | * comparing nr_free_buffer_pages() to vm_total_pages).
976 | * | |
977 | * However, that was when we used "dirty_ratio" to scale with | |
978 | * all memory, and we don't do that any more. "dirty_ratio" | |
979 | * is now applied to total non-HIGHPAGE memory (by subtracting | |
980 | * totalhigh_pages from vm_total_pages), and as such we can't | |
981 | * get into the old insane situation any more where we had | |
982 | * large amounts of dirty pages compared to a small amount of | |
983 | * non-HIGHMEM memory. | |
984 | * | |
985 | * But we might still want to scale the dirty_ratio by how | |
986 | * much memory the box has.. | |
1da177e4 LT |
987 | */ |
988 | void __init page_writeback_init(void) | |
989 | { | |
04fbfdc1 PZ |
990 | int shift; |
991 | ||
2d1d43f6 | 992 | writeback_set_ratelimit(); |
1da177e4 | 993 | register_cpu_notifier(&ratelimit_nb); |
04fbfdc1 PZ |
994 | |
995 | shift = calc_period_shift(); | |
996 | prop_descriptor_init(&vm_completions, shift); | |
3e26c149 | 997 | prop_descriptor_init(&vm_dirties, shift); |
1da177e4 LT |
998 | } |
999 | ||
f446daae JK |
1000 | /** |
1001 | * tag_pages_for_writeback - tag pages to be written by write_cache_pages | |
1002 | * @mapping: address space structure to write | |
1003 | * @start: starting page index | |
1004 | * @end: ending page index (inclusive) | |
1005 | * | |
1006 | * This function scans the page range from @start to @end (inclusive) and tags | |
1007 | * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is | |
1008 | * that write_cache_pages (or whoever calls this function) will then use | |
1009 | * TOWRITE tag to identify pages eligible for writeback. This mechanism is | |
1010 | * used to avoid livelocking of writeback by a process steadily creating new | |
1011 | * dirty pages in the file (thus it is important for this function to be quick | |
1012 | * so that it can tag pages faster than a dirtying process can create them). | |
1013 | */ | |
1014 | /* | |
1015 | * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency. | |
1016 | */ | |
f446daae JK |
1017 | void tag_pages_for_writeback(struct address_space *mapping, |
1018 | pgoff_t start, pgoff_t end) | |
1019 | { | |
3c111a07 | 1020 | #define WRITEBACK_TAG_BATCH 4096 |
f446daae JK |
1021 | unsigned long tagged; |
1022 | ||
1023 | do { | |
1024 | spin_lock_irq(&mapping->tree_lock); | |
1025 | tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, | |
1026 | &start, end, WRITEBACK_TAG_BATCH, | |
1027 | PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE); | |
1028 | spin_unlock_irq(&mapping->tree_lock); | |
1029 | WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH); | |
1030 | cond_resched(); | |
d5ed3a4a JK |
1031 | /* We check 'start' to handle wrapping when end == ~0UL */ |
1032 | } while (tagged >= WRITEBACK_TAG_BATCH && start); | |
f446daae JK |
1033 | } |
1034 | EXPORT_SYMBOL(tag_pages_for_writeback); | |
1035 | ||
811d736f | 1036 | /** |
0ea97180 | 1037 | * write_cache_pages - walk the list of dirty pages of the given address space and write all of them. |
811d736f DH |
1038 | * @mapping: address space structure to write |
1039 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | |
0ea97180 MS |
1040 | * @writepage: function called for each page |
1041 | * @data: data passed to writepage function | |
811d736f | 1042 | * |
0ea97180 | 1043 | * If a page is already under I/O, write_cache_pages() skips it, even |
811d736f DH |
1044 | * if it's dirty. This is desirable behaviour for memory-cleaning writeback, |
1045 | * but it is INCORRECT for data-integrity system calls such as fsync(). fsync() | |
1046 | * and msync() need to guarantee that all the data which was dirty at the time | |
1047 | * the call was made gets new I/O started against it. If wbc->sync_mode is
1048 | * WB_SYNC_ALL then we were called for data integrity and we must wait for | |
1049 | * existing IO to complete. | |
f446daae JK |
1050 | * |
1051 | * To avoid livelocks (when other process dirties new pages), we first tag | |
1052 | * pages which should be written back with TOWRITE tag and only then start | |
1053 | * writing them. For data-integrity sync we have to be careful so that we do | |
1054 | * not miss some pages (e.g., because some other process has cleared TOWRITE | |
1055 | * tag we set). The rule we follow is that TOWRITE tag can be cleared only | |
1056 | * by the process clearing the DIRTY tag (and submitting the page for IO). | |
811d736f | 1057 | */ |
0ea97180 MS |
1058 | int write_cache_pages(struct address_space *mapping, |
1059 | struct writeback_control *wbc, writepage_t writepage, | |
1060 | void *data) | |
811d736f | 1061 | { |
811d736f DH |
1062 | int ret = 0; |
1063 | int done = 0; | |
811d736f DH |
1064 | struct pagevec pvec; |
1065 | int nr_pages; | |
31a12666 | 1066 | pgoff_t uninitialized_var(writeback_index); |
811d736f DH |
1067 | pgoff_t index; |
1068 | pgoff_t end; /* Inclusive */ | |
bd19e012 | 1069 | pgoff_t done_index; |
31a12666 | 1070 | int cycled; |
811d736f | 1071 | int range_whole = 0; |
f446daae | 1072 | int tag; |
811d736f | 1073 | |
811d736f DH |
1074 | pagevec_init(&pvec, 0); |
1075 | if (wbc->range_cyclic) { | |
31a12666 NP |
1076 | writeback_index = mapping->writeback_index; /* prev offset */ |
1077 | index = writeback_index; | |
1078 | if (index == 0) | |
1079 | cycled = 1; | |
1080 | else | |
1081 | cycled = 0; | |
811d736f DH |
1082 | end = -1; |
1083 | } else { | |
1084 | index = wbc->range_start >> PAGE_CACHE_SHIFT; | |
1085 | end = wbc->range_end >> PAGE_CACHE_SHIFT; | |
1086 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | |
1087 | range_whole = 1; | |
31a12666 | 1088 | cycled = 1; /* ignore range_cyclic tests */ |
811d736f | 1089 | } |
6e6938b6 | 1090 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daae JK |
1091 | tag = PAGECACHE_TAG_TOWRITE; |
1092 | else | |
1093 | tag = PAGECACHE_TAG_DIRTY; | |
811d736f | 1094 | retry: |
6e6938b6 | 1095 | if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) |
f446daae | 1096 | tag_pages_for_writeback(mapping, index, end); |
bd19e012 | 1097 | done_index = index; |
5a3d5c98 NP |
1098 | while (!done && (index <= end)) { |
1099 | int i; | |
1100 | ||
f446daae | 1101 | nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, |
5a3d5c98 NP |
1102 | min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); |
1103 | if (nr_pages == 0) | |
1104 | break; | |
811d736f | 1105 | |
811d736f DH |
1106 | for (i = 0; i < nr_pages; i++) { |
1107 | struct page *page = pvec.pages[i]; | |
1108 | ||
1109 | /* | |
d5482cdf NP |
1110 | * At this point, the page may be truncated or |
1111 | * invalidated (changing page->mapping to NULL), or | |
1112 | * even swizzled back from swapper_space to tmpfs file | |
1113 | * mapping. However, page->index will not change | |
1114 | * because we have a reference on the page. | |
811d736f | 1115 | */ |
d5482cdf NP |
1116 | if (page->index > end) { |
1117 | /* | |
1118 | * can't be range_cyclic (1st pass) because | |
1119 | * end == -1 in that case. | |
1120 | */ | |
1121 | done = 1; | |
1122 | break; | |
1123 | } | |
1124 | ||
cf15b07c | 1125 | done_index = page->index; |
d5482cdf | 1126 | |
811d736f DH |
1127 | lock_page(page); |
1128 | ||
5a3d5c98 NP |
1129 | /* |
1130 | * Page truncated or invalidated. We can freely skip it | |
1131 | * then, even for data integrity operations: the page | |
1132 | * has disappeared concurrently, so there could be no | |
1133 | * real expectation of this data integrity operation,
1134 | * even if there is now a new, dirty page at the same | |
1135 | * pagecache address. | |
1136 | */ | |
811d736f | 1137 | if (unlikely(page->mapping != mapping)) { |
5a3d5c98 | 1138 | continue_unlock: |
811d736f DH |
1139 | unlock_page(page); |
1140 | continue; | |
1141 | } | |
1142 | ||
515f4a03 NP |
1143 | if (!PageDirty(page)) { |
1144 | /* someone wrote it for us */ | |
1145 | goto continue_unlock; | |
1146 | } | |
1147 | ||
1148 | if (PageWriteback(page)) { | |
1149 | if (wbc->sync_mode != WB_SYNC_NONE) | |
1150 | wait_on_page_writeback(page); | |
1151 | else | |
1152 | goto continue_unlock; | |
1153 | } | |
811d736f | 1154 | |
515f4a03 NP |
1155 | BUG_ON(PageWriteback(page)); |
1156 | if (!clear_page_dirty_for_io(page)) | |
5a3d5c98 | 1157 | goto continue_unlock; |
811d736f | 1158 | |
9e094383 | 1159 | trace_wbc_writepage(wbc, mapping->backing_dev_info); |
0ea97180 | 1160 | ret = (*writepage)(page, wbc, data); |
00266770 NP |
1161 | if (unlikely(ret)) { |
1162 | if (ret == AOP_WRITEPAGE_ACTIVATE) { | |
1163 | unlock_page(page); | |
1164 | ret = 0; | |
1165 | } else { | |
1166 | /* | |
1167 | * done_index is set past this page, | |
1168 | * so media errors will not choke | |
1169 | * background writeout for the entire | |
1170 | * file. This has consequences for | |
1171 | * range_cyclic semantics (ie. it may | |
1172 | * not be suitable for data integrity | |
1173 | * writeout). | |
1174 | */ | |
cf15b07c | 1175 | done_index = page->index + 1; |
00266770 NP |
1176 | done = 1; |
1177 | break; | |
1178 | } | |
0b564927 | 1179 | } |
00266770 | 1180 | |
546a1924 DC |
1181 | /* |
1182 | * We stop writing back only if we are not doing | |
1183 | * integrity sync. In case of integrity sync we have to | |
1184 | * keep going until we have written all the pages | |
1185 | * we tagged for writeback prior to entering this loop. | |
1186 | */ | |
1187 | if (--wbc->nr_to_write <= 0 && | |
1188 | wbc->sync_mode == WB_SYNC_NONE) { | |
1189 | done = 1; | |
1190 | break; | |
05fe478d | 1191 | } |
811d736f DH |
1192 | } |
1193 | pagevec_release(&pvec); | |
1194 | cond_resched(); | |
1195 | } | |
3a4c6800 | 1196 | if (!cycled && !done) { |
811d736f | 1197 | /* |
31a12666 | 1198 | * range_cyclic: |
811d736f DH |
1199 | * We hit the last page and there is more work to be done: wrap |
1200 | * back to the start of the file | |
1201 | */ | |
31a12666 | 1202 | cycled = 1; |
811d736f | 1203 | index = 0; |
31a12666 | 1204 | end = writeback_index - 1; |
811d736f DH |
1205 | goto retry; |
1206 | } | |
0b564927 DC |
1207 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) |
1208 | mapping->writeback_index = done_index; | |
06d6cf69 | 1209 | |
811d736f DH |
1210 | return ret; |
1211 | } | |
0ea97180 MS |
1212 | EXPORT_SYMBOL(write_cache_pages); |
1213 | ||
1214 | /* | |
1215 | * Function used by generic_writepages to call the real writepage | |
1216 | * function and set the mapping flags on error | |
1217 | */ | |
1218 | static int __writepage(struct page *page, struct writeback_control *wbc, | |
1219 | void *data) | |
1220 | { | |
1221 | struct address_space *mapping = data; | |
1222 | int ret = mapping->a_ops->writepage(page, wbc); | |
1223 | mapping_set_error(mapping, ret); | |
1224 | return ret; | |
1225 | } | |
1226 | ||
1227 | /** | |
1228 | * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them. | |
1229 | * @mapping: address space structure to write | |
1230 | * @wbc: subtract the number of written pages from *@wbc->nr_to_write | |
1231 | * | |
1232 | * This is a library function, which implements the writepages() | |
1233 | * address_space_operation. | |
1234 | */ | |
1235 | int generic_writepages(struct address_space *mapping, | |
1236 | struct writeback_control *wbc) | |
1237 | { | |
9b6096a6 SL |
1238 | struct blk_plug plug; |
1239 | int ret; | |
1240 | ||
0ea97180 MS |
1241 | /* deal with chardevs and other special file */ |
1242 | if (!mapping->a_ops->writepage) | |
1243 | return 0; | |
1244 | ||
9b6096a6 SL |
1245 | blk_start_plug(&plug); |
1246 | ret = write_cache_pages(mapping, wbc, __writepage, mapping); | |
1247 | blk_finish_plug(&plug); | |
1248 | return ret; | |
0ea97180 | 1249 | } |
811d736f DH |
1250 | |
1251 | EXPORT_SYMBOL(generic_writepages); | |
1252 | ||
1da177e4 LT |
1253 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc) |
1254 | { | |
22905f77 AM |
1255 | int ret; |
1256 | ||
1da177e4 LT |
1257 | if (wbc->nr_to_write <= 0) |
1258 | return 0; | |
1259 | if (mapping->a_ops->writepages) | |
d08b3851 | 1260 | ret = mapping->a_ops->writepages(mapping, wbc); |
22905f77 AM |
1261 | else |
1262 | ret = generic_writepages(mapping, wbc); | |
22905f77 | 1263 | return ret; |
1da177e4 LT |
1264 | } |
1265 | ||
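A hedged sketch of how a simple filesystem plugs into do_writepages() above: leave ->writepages NULL and supply only ->writepage, so the fallback to generic_writepages() drives write_cache_pages() over it (the myfs_* names are hypothetical; the stub does no real I/O):

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>

/* Hypothetical ->writepage: filesystem-specific I/O submission would
 * go here; the page arrives locked and already clean-for-io. */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
        /* ...submit the page for writeout... */
        unlock_page(page);
        return 0;
}

/* With ->writepages left NULL, do_writepages() falls back to
 * generic_writepages(), which walks the dirty pages for us. */
static const struct address_space_operations myfs_aops = {
        .writepage      = myfs_writepage,
        .set_page_dirty = __set_page_dirty_nobuffers,
};
```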
1266 | /** | |
1267 | * write_one_page - write out a single page and optionally wait on I/O | |
67be2dd1 MW |
1268 | * @page: the page to write |
1269 | * @wait: if true, wait on writeout | |
1da177e4 LT |
1270 | * |
1271 | * The page must be locked by the caller and will be unlocked upon return. | |
1272 | * | |
1273 | * write_one_page() returns a negative error code if I/O failed. | |
1274 | */ | |
1275 | int write_one_page(struct page *page, int wait) | |
1276 | { | |
1277 | struct address_space *mapping = page->mapping; | |
1278 | int ret = 0; | |
1279 | struct writeback_control wbc = { | |
1280 | .sync_mode = WB_SYNC_ALL, | |
1281 | .nr_to_write = 1, | |
1282 | }; | |
1283 | ||
1284 | BUG_ON(!PageLocked(page)); | |
1285 | ||
1286 | if (wait) | |
1287 | wait_on_page_writeback(page); | |
1288 | ||
1289 | if (clear_page_dirty_for_io(page)) { | |
1290 | page_cache_get(page); | |
1291 | ret = mapping->a_ops->writepage(page, &wbc); | |
1292 | if (ret == 0 && wait) { | |
1293 | wait_on_page_writeback(page); | |
1294 | if (PageError(page)) | |
1295 | ret = -EIO; | |
1296 | } | |
1297 | page_cache_release(page); | |
1298 | } else { | |
1299 | unlock_page(page); | |
1300 | } | |
1301 | return ret; | |
1302 | } | |
1303 | EXPORT_SYMBOL(write_one_page); | |
1304 | ||
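A hedged usage sketch for write_one_page() above (the helper name is hypothetical): the caller supplies a locked page and the function unlocks it, so the lock/flush pairing looks like this:

```c
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper: synchronously flush one pagecache page, e.g. a
 * just-updated metadata page, and surface any I/O error. */
static int flush_one_page_sync(struct page *page)
{
        lock_page(page);
        return write_one_page(page, 1); /* waits and unlocks the page */
}
```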
76719325 KC |
1305 | /* |
1306 | * For address_spaces which use neither buffers nor writeback.
1307 | */ | |
1308 | int __set_page_dirty_no_writeback(struct page *page) | |
1309 | { | |
1310 | if (!PageDirty(page)) | |
c3f0da63 | 1311 | return !TestSetPageDirty(page); |
76719325 KC |
1312 | return 0; |
1313 | } | |
1314 | ||
e3a7cca1 ES |
1315 | /* |
1316 | * Helper function for set_page_dirty family. | |
1317 | * NOTE: This relies on being atomic wrt interrupts. | |
1318 | */ | |
1319 | void account_page_dirtied(struct page *page, struct address_space *mapping) | |
1320 | { | |
1321 | if (mapping_cap_account_dirty(mapping)) { | |
1322 | __inc_zone_page_state(page, NR_FILE_DIRTY); | |
ea941f0e | 1323 | __inc_zone_page_state(page, NR_DIRTIED); |
e3a7cca1 ES |
1324 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
1325 | task_dirty_inc(current); | |
1326 | task_io_account_write(PAGE_CACHE_SIZE); | |
1327 | } | |
1328 | } | |
679ceace | 1329 | EXPORT_SYMBOL(account_page_dirtied); |
e3a7cca1 | 1330 | |
f629d1c9 MR |
1331 | /* |
1332 | * Helper function for set_page_writeback family. | |
1333 | * NOTE: Unlike account_page_dirtied this does not rely on being atomic | |
1334 | * wrt interrupts. | |
1335 | */ | |
1336 | void account_page_writeback(struct page *page) | |
1337 | { | |
1338 | inc_zone_page_state(page, NR_WRITEBACK); | |
1339 | } | |
1340 | EXPORT_SYMBOL(account_page_writeback); | |
1341 | ||
1da177e4 LT |
1342 | /* |
1343 | * For address_spaces which do not use buffers. Just tag the page as dirty in | |
1344 | * its radix tree. | |
1345 | * | |
1346 | * This is also used when a single buffer is being dirtied: we want to set the | |
1347 | * page dirty in that case, but not all the buffers. This is a "bottom-up" | |
1348 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. | |
1349 | * | |
1350 | * Most callers have locked the page, which pins the address_space in memory. | |
1351 | * But zap_pte_range() does not lock the page; in that case the
1352 | * mapping is pinned by the vma's ->vm_file reference. | |
1353 | * | |
1354 | * We take care to handle the case where the page was truncated from the | |
183ff22b | 1355 | * mapping by re-checking page_mapping() inside tree_lock. |
1da177e4 LT |
1356 | */ |
1357 | int __set_page_dirty_nobuffers(struct page *page) | |
1358 | { | |
1da177e4 LT |
1359 | if (!TestSetPageDirty(page)) { |
1360 | struct address_space *mapping = page_mapping(page); | |
1361 | struct address_space *mapping2; | |
1362 | ||
8c08540f AM |
1363 | if (!mapping) |
1364 | return 1; | |
1365 | ||
19fd6231 | 1366 | spin_lock_irq(&mapping->tree_lock); |
8c08540f AM |
1367 | mapping2 = page_mapping(page); |
1368 | if (mapping2) { /* Race with truncate? */ | |
1369 | BUG_ON(mapping2 != mapping); | |
787d2214 | 1370 | WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); |
e3a7cca1 | 1371 | account_page_dirtied(page, mapping); |
8c08540f AM |
1372 | radix_tree_tag_set(&mapping->page_tree, |
1373 | page_index(page), PAGECACHE_TAG_DIRTY); | |
1374 | } | |
19fd6231 | 1375 | spin_unlock_irq(&mapping->tree_lock); |
8c08540f AM |
1376 | if (mapping->host) { |
1377 | /* !PageAnon && !swapper_space */ | |
1378 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | |
1da177e4 | 1379 | } |
4741c9fd | 1380 | return 1; |
1da177e4 | 1381 | } |
4741c9fd | 1382 | return 0; |
1da177e4 LT |
1383 | } |
1384 | EXPORT_SYMBOL(__set_page_dirty_nobuffers); | |
1385 | ||
1386 | /* | |
1387 | * When a writepage implementation decides that it doesn't want to write this | |
1388 | * page for some reason, it should redirty the locked page via | |
1389 | * redirty_page_for_writepage() and it should then unlock the page and return 0 | |
1390 | */ | |
1391 | int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) | |
1392 | { | |
1393 | wbc->pages_skipped++; | |
1394 | return __set_page_dirty_nobuffers(page); | |
1395 | } | |
1396 | EXPORT_SYMBOL(redirty_page_for_writepage); | |
1397 | ||
1398 | /* | |
6746aff7 WF |
1399 | * Dirty a page. |
1400 | * | |
1401 | * For pages with a mapping this should be done under the page lock | |
1402 | * for the benefit of asynchronous memory-error handling, which prefers a
1403 | * consistent dirty state. This rule can be broken in some special cases,
1404 | * but it is better not to.
1405 | * | |
1da177e4 LT |
1406 | * If the mapping doesn't provide a set_page_dirty a_op, then |
1407 | * just fall through and assume that it wants buffer_heads. | |
1408 | */ | |
1cf6e7d8 | 1409 | int set_page_dirty(struct page *page) |
1da177e4 LT |
1410 | { |
1411 | struct address_space *mapping = page_mapping(page); | |
1412 | ||
1413 | if (likely(mapping)) { | |
1414 | int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; | |
278df9f4 MK |
1415 | /* |
1416 | * A page left over from readahead or lru_deactivate_page could still | |
1417 | * have PG_readahead/PG_reclaim set due to a race with end_page_writeback(). | |
1418 | * For readahead: if the page gets written, those flags are reset, so | |
1419 | * there is no problem. | |
1420 | * For lru_deactivate_page: if the page is redirtied, the flag is reset, | |
1421 | * so there is no problem. But if the page is later used for readahead, | |
1422 | * the stale flag confuses readahead and makes it restart the size | |
1423 | * ramp-up process. That is only a trivial problem, though. | |
1424 | */ | |
1425 | ClearPageReclaim(page); | |
9361401e DH |
1426 | #ifdef CONFIG_BLOCK |
1427 | if (!spd) | |
1428 | spd = __set_page_dirty_buffers; | |
1429 | #endif | |
1430 | return (*spd)(page); | |
1da177e4 | 1431 | } |
4741c9fd AM |
1432 | if (!PageDirty(page)) { |
1433 | if (!TestSetPageDirty(page)) | |
1434 | return 1; | |
1435 | } | |
1da177e4 LT |
1436 | return 0; |
1437 | } | |
1438 | EXPORT_SYMBOL(set_page_dirty); | |
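A typical in-kernel caller modifies the page while holding its lock and then marks it dirty, roughly like this hypothetical helper:

	static void demo_zero_page(struct page *page)
	{
		void *kaddr;

		lock_page(page);
		kaddr = kmap(page);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		flush_dcache_page(page);
		set_page_dirty(page);
		unlock_page(page);
	}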
1439 | ||
1440 | /* | |
1441 | * set_page_dirty() is racy if the caller has no reference against | |
1442 | * page->mapping->host, and if the page is unlocked. This is because another | |
1443 | * CPU could truncate the page off the mapping and then free the mapping. | |
1444 | * | |
1445 | * Usually, the page _is_ locked, or the caller is a user-space process which | |
1446 | * holds a reference on the inode by having an open file. | |
1447 | * | |
1448 | * In other cases, the page should be locked before running set_page_dirty(). | |
1449 | */ | |
1450 | int set_page_dirty_lock(struct page *page) | |
1451 | { | |
1452 | int ret; | |
1453 | ||
7eaceacc | 1454 | lock_page(page); |
1da177e4 LT |
1455 | ret = set_page_dirty(page); |
1456 | unlock_page(page); | |
1457 | return ret; | |
1458 | } | |
1459 | EXPORT_SYMBOL(set_page_dirty_lock); | |
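The classic user is a driver releasing pages pinned with get_user_pages() after a device has written into them; the pages are not locked at that point, so this locking variant is the safe one. A sketch (names hypothetical):

	static void demo_release_user_pages(struct page **pages, int nr, int dirtied)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (dirtied)
				set_page_dirty_lock(pages[i]);
			page_cache_release(pages[i]);
		}
	}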
1460 | ||
1da177e4 LT |
1461 | /* |
1462 | * Clear a page's dirty flag, while caring for dirty memory accounting. | |
1463 | * Returns true if the page was previously dirty. | |
1464 | * | |
1465 | * This is for preparing to put the page under writeout. We leave the page | |
1466 | * tagged as dirty in the radix tree so that a concurrent write-for-sync | |
1467 | * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage | |
1468 | * implementation will run either set_page_writeback() or set_page_dirty(), | |
1469 | * at which stage we bring the page's dirty flag and radix-tree dirty tag | |
1470 | * back into sync. | |
1471 | * | |
1472 | * This incoherency between the page's dirty flag and radix-tree tag is | |
1473 | * unfortunate, but it only exists while the page is locked. | |
1474 | */ | |
1475 | int clear_page_dirty_for_io(struct page *page) | |
1476 | { | |
1477 | struct address_space *mapping = page_mapping(page); | |
1478 | ||
79352894 NP |
1479 | BUG_ON(!PageLocked(page)); |
1480 | ||
7658cc28 LT |
1481 | if (mapping && mapping_cap_account_dirty(mapping)) { |
1482 | /* | |
1483 | * Yes, Virginia, this is indeed insane. | |
1484 | * | |
1485 | * We use this sequence to make sure that | |
1486 | * (a) we account for dirty stats properly | |
1487 | * (b) we tell the low-level filesystem to | |
1488 | * mark the whole page dirty if it was | |
1489 | * dirty in a pagetable. Only to then | |
1490 | * (c) clean the page again and return 1 to | |
1491 | * cause the writeback. | |
1492 | * | |
1493 | * This way we avoid all nasty races with the | |
1494 | * dirty bit in multiple places and clearing | |
1495 | * them concurrently from different threads. | |
1496 | * | |
1497 | * Note! Normally the "set_page_dirty(page)" | |
1498 | * has no effect on the actual dirty bit - since | |
1499 | * that will already usually be set. But we | |
1500 | * need the side effects, and it can help us | |
1501 | * avoid races. | |
1502 | * | |
1503 | * We basically use the page "master dirty bit" | |
1504 | * as a serialization point for all the different | |
1505 | * threads doing their things. | |
7658cc28 LT |
1506 | */ |
1507 | if (page_mkclean(page)) | |
1508 | set_page_dirty(page); | |
79352894 NP |
1509 | /* |
1510 | * We carefully synchronise fault handlers against | |
1511 | * installing a dirty pte and marking the page dirty | |
1512 | * at this point. We do this by having them hold the | |
1513 | * page lock at some point after installing their | |
1514 | * pte, but before marking the page dirty. | |
1515 | * Pages are always locked coming in here, so we get | |
1516 | * the desired exclusion. See mm/memory.c:do_wp_page() | |
1517 | * for more comments. | |
1518 | */ | |
7658cc28 | 1519 | if (TestClearPageDirty(page)) { |
8c08540f | 1520 | dec_zone_page_state(page, NR_FILE_DIRTY); |
c9e51e41 PZ |
1521 | dec_bdi_stat(mapping->backing_dev_info, |
1522 | BDI_RECLAIMABLE); | |
7658cc28 | 1523 | return 1; |
1da177e4 | 1524 | } |
7658cc28 | 1525 | return 0; |
1da177e4 | 1526 | } |
7658cc28 | 1527 | return TestClearPageDirty(page); |
1da177e4 | 1528 | } |
58bb01a9 | 1529 | EXPORT_SYMBOL(clear_page_dirty_for_io); |
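Putting the pieces together, a writeout path built on this helper looks roughly as follows; demo_submit_write() is a hypothetical I/O submission routine whose completion handler ends up in end_page_writeback():

	lock_page(page);
	if (clear_page_dirty_for_io(page)) {
		set_page_writeback(page);
		/* Queue the I/O; completion calls end_page_writeback(). */
		demo_submit_write(page);
	}
	unlock_page(page);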
1da177e4 LT |
1530 | |
1531 | int test_clear_page_writeback(struct page *page) | |
1532 | { | |
1533 | struct address_space *mapping = page_mapping(page); | |
1534 | int ret; | |
1535 | ||
1536 | if (mapping) { | |
69cb51d1 | 1537 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
1da177e4 LT |
1538 | unsigned long flags; |
1539 | ||
19fd6231 | 1540 | spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4 | 1541 | ret = TestClearPageWriteback(page); |
69cb51d1 | 1542 | if (ret) { |
1da177e4 LT |
1543 | radix_tree_tag_clear(&mapping->page_tree, |
1544 | page_index(page), | |
1545 | PAGECACHE_TAG_WRITEBACK); | |
e4ad08fe | 1546 | if (bdi_cap_account_writeback(bdi)) { |
69cb51d1 | 1547 | __dec_bdi_stat(bdi, BDI_WRITEBACK); |
04fbfdc1 PZ |
1548 | __bdi_writeout_inc(bdi); |
1549 | } | |
69cb51d1 | 1550 | } |
19fd6231 | 1551 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4 LT |
1552 | } else { |
1553 | ret = TestClearPageWriteback(page); | |
1554 | } | |
99b12e3d | 1555 | if (ret) { |
d688abf5 | 1556 | dec_zone_page_state(page, NR_WRITEBACK); |
99b12e3d WF |
1557 | inc_zone_page_state(page, NR_WRITTEN); |
1558 | } | |
1da177e4 LT |
1559 | return ret; |
1560 | } | |
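test_clear_page_writeback() is normally reached via end_page_writeback(), which clears the flag through this function and then wakes anyone sleeping in wait_on_page_writeback(). A sketch of a bio completion handler that ends writeback (demo_end_bio_write() is hypothetical):

	static void demo_end_bio_write(struct bio *bio, int err)
	{
		struct page *page = bio->bi_io_vec[0].bv_page;

		if (err)
			SetPageError(page);
		end_page_writeback(page);
		bio_put(bio);
	}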
1561 | ||
1562 | int test_set_page_writeback(struct page *page) | |
1563 | { | |
1564 | struct address_space *mapping = page_mapping(page); | |
1565 | int ret; | |
1566 | ||
1567 | if (mapping) { | |
69cb51d1 | 1568 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
1da177e4 LT |
1569 | unsigned long flags; |
1570 | ||
19fd6231 | 1571 | spin_lock_irqsave(&mapping->tree_lock, flags); |
1da177e4 | 1572 | ret = TestSetPageWriteback(page); |
69cb51d1 | 1573 | if (!ret) { |
1da177e4 LT |
1574 | radix_tree_tag_set(&mapping->page_tree, |
1575 | page_index(page), | |
1576 | PAGECACHE_TAG_WRITEBACK); | |
e4ad08fe | 1577 | if (bdi_cap_account_writeback(bdi)) |
69cb51d1 PZ |
1578 | __inc_bdi_stat(bdi, BDI_WRITEBACK); |
1579 | } | |
1da177e4 LT |
1580 | if (!PageDirty(page)) |
1581 | radix_tree_tag_clear(&mapping->page_tree, | |
1582 | page_index(page), | |
1583 | PAGECACHE_TAG_DIRTY); | |
f446daae JK |
1584 | radix_tree_tag_clear(&mapping->page_tree, |
1585 | page_index(page), | |
1586 | PAGECACHE_TAG_TOWRITE); | |
19fd6231 | 1587 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
1da177e4 LT |
1588 | } else { |
1589 | ret = TestSetPageWriteback(page); | |
1590 | } | |
d688abf5 | 1591 | if (!ret) |
f629d1c9 | 1592 | account_page_writeback(page); |
1da177e4 LT |
1593 | return ret; |
1594 | ||
1595 | } | |
1596 | EXPORT_SYMBOL(test_set_page_writeback); | |
1597 | ||
1598 | /* | |
00128188 | 1599 | * Return true if any of the pages in the mapping are marked with the |
1da177e4 LT |
1600 | * passed tag. |
1601 | */ | |
1602 | int mapping_tagged(struct address_space *mapping, int tag) | |
1603 | { | |
72c47832 | 1604 | return radix_tree_tagged(&mapping->page_tree, tag); |
1da177e4 LT |
1605 | } |
1606 | EXPORT_SYMBOL(mapping_tagged); |
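A quick example of the kind of check this enables, e.g. when deciding whether an inode still needs writeback attention (hypothetical helper):

	static int demo_mapping_needs_writeback(struct address_space *mapping)
	{
		return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
		       mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
	}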