/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/flex_proportions.h>

DECLARE_PER_CPU(int, dirty_throttle_leaks);

/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop and wait (possibly for
 * a long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
 */
#define DIRTY_SCOPE		8
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
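
/*
 * Illustrative sketch only (not part of the kernel API): the lower bound
 * of the smooth-throttling region described above, for a given dirty
 * threshold in pages.  The helper name is hypothetical.
 */
static inline unsigned long example_throttle_floor(unsigned long thresh)
{
	/*
	 * Dirtier tasks are throttled progressively once dirty pages enter
	 * (thresh - thresh/DIRTY_FULL_SCOPE, thresh), i.e. the top 1/4.
	 */
	return thresh - thresh / DIRTY_FULL_SCOPE;
}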

struct backing_dev_info;

/*
 * fs/fs-writeback.c
 */
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};

/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and its work is now done
	 * by the emergency worker.  However, this value is exposed to
	 * userland via tracepoints, and we keep reporting exactly the same
	 * information, hence the now-mismatched name.
	 */
	WB_REASON_FORKER_THREAD,

	WB_REASON_MAX,
};

/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if range_start or range_end is non-zero,
	 * this is a hint that the filesystem need only write out the pages
	 * inside that byte range.  The byte at `range_end' is included in
	 * the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};
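
/*
 * Illustrative sketch only: a writeback_control is built on the stack with
 * designated initialisers so that unspecified fields are zeroed, as the
 * comment above requires.  The helper name is hypothetical.
 */
static inline void example_writeback_control(void)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 1024,	/* write at most 1024 pages */
		.range_cyclic	= 1,	/* resume where the last pass stopped */
	};

	(void)wbc;	/* real callers pass &wbc to do_writepages() et al. */
}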

/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportional to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()].  Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages.  Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time.  The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (eg. on the startup of KVM in a
	 * swapless system).  This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike.  To
	 * retain good responsiveness, maintain global_dirty_limit, which
	 * tracks slowly down to the knocked-down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};
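
/*
 * Illustrative sketch only: reading a wb's share of a domain's writeout
 * completions via the flex_proportions API described above.  The
 * fprop_local_percpu argument stands in for a wb's completion counter
 * (cf. bdi_writeback) and is hypothetical here.
 */
static inline void example_wb_share(struct wb_domain *dom,
				    struct fprop_local_percpu *wb_completions,
				    unsigned long *num, unsigned long *denom)
{
	/* *num / *denom is this wb's fraction of recent completions */
	fprop_fraction_percpu(&dom->completions, wb_completions, num, denom);
}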

/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied leading to consecutive unnecessary OOMs and may get stuck in
 * that situation.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}

/*
 * fs/fs-writeback.c
 */
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
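
/*
 * Illustrative sketch only: kicking writeback for a super_block without
 * blocking, as filesystems do when they run low on free space.  The
 * helper name is hypothetical.
 */
static inline void example_kick_writeback(struct super_block *sb)
{
	/*
	 * Returns false if sb->s_umount could not be read-locked, in
	 * which case the writeback attempt is skipped.
	 */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}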

/* Wait until the __I_NEW bit in inode->i_state has been cleared. */
static inline void wait_on_inode(struct inode *inode)
{
	might_sleep();
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}

/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;
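
/*
 * Illustrative sketch only: a hypothetical caller setting up a private
 * wb_domain and later notifying it that its dirtyable memory changed.
 */
static inline int example_domain_setup(struct wb_domain *dom)
{
	int err = wb_domain_init(dom, GFP_KERNEL);

	if (err)
		return err;

	/* e.g. after the domain's dirtyable memory was resized */
	wb_domain_size_changed(dom);
	return 0;
}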

/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;

struct ctl_table;
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);
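
/*
 * Illustrative sketch only (cf. kernel/sysctl.c): how one of the handlers
 * above is wired into a sysctl table.  Assumes <linux/sysctl.h> for the
 * full struct ctl_table definition; the table name is hypothetical.
 */
#if 0	/* example, not compiled */
static struct ctl_table example_vm_table[] = {
	{
		.procname	= "dirty_ratio",
		.data		= &vm_dirty_ratio,
		.maxlen		= sizeof(vm_dirty_ratio),
		.mode		= 0644,
		.proc_handler	= dirty_ratio_handler,
	},
	{ }
};
#endif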

void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
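
/*
 * Illustrative sketch only: querying the global background and dirty
 * thresholds (both in pages).  The helper name is hypothetical.
 */
static inline unsigned long example_dirty_headroom(void)
{
	unsigned long background, dirty;

	global_dirty_limits(&background, &dirty);

	/* gap between the background and hard dirty thresholds, in pages */
	return dirty - background;
}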

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
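
/*
 * Illustrative sketch only: the canonical write-path pattern - dirty a
 * page, then let the core throttle this task if too many pages are dirty.
 * Assumes set_page_dirty() from <linux/mm.h>; the helper is hypothetical.
 */
static inline void example_dirty_and_throttle(struct address_space *mapping,
					      struct page *page)
{
	set_page_dirty(page);
	balance_dirty_pages_ratelimited(mapping);
}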

typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
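
/*
 * Illustrative sketch only: a hypothetical writepage_t callback for
 * write_cache_pages().  A real filesystem would submit I/O for the
 * locked page here.  Assumes unlock_page() from <linux/pagemap.h>.
 */
static inline int example_writepage(struct page *page,
				    struct writeback_control *wbc, void *data)
{
	/* the page is locked on entry; the callback must unlock it */
	unlock_page(page);
	return 0;
}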
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
void writeback_set_ratelimit(void);

void account_page_redirty(struct page *page);

#endif	/* WRITEBACK_H */