writeback: optimize periodic bdi thread wakeups
include/linux/backing-dev.h
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};
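
/*
 * Illustrative sketch, not part of this header: per the comment on
 * backing_dev_info.state below, these bits are only ever manipulated
 * with atomic bitops ("bdi" here is a hypothetical caller's pointer):
 *
 *	if (!test_bit(BDI_registered, &bdi->state))
 *		return;
 *	set_bit(BDI_async_congested, &bdi->state);
 *	...
 *	clear_bit(BDI_async_congested, &bdi->state);
 *
 * Core code normally goes through the set_bdi_congested() and
 * clear_bdi_congested() helpers declared later in this file rather
 * than using raw bitops.
 */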

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
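
/*
 * A minimal registration sketch (hedged; "my_bdi" and the name string
 * are hypothetical, and error handling is trimmed):
 *
 *	static struct backing_dev_info my_bdi;
 *
 *	err = bdi_setup_and_register(&my_bdi, "mydev", BDI_CAP_MAP_COPY);
 *	if (err)
 *		goto fail;
 *	...
 *	bdi_unregister(&my_bdi);
 *	bdi_destroy(&my_bdi);
 *
 * With this change, paths that dirty an inode are expected to call
 * bdi_wakeup_thread_delayed() rather than waking the bdi thread
 * directly, so repeated dirtyings within the timer period arm
 * wb.wakeup_timer once and coalesce into a single wakeup.
 */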

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}
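
/*
 * Accounting sketch (illustrative; the call sites and "bdi_thresh" are
 * hypothetical). Writeback bumps the per-bdi counters around I/O:
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);	page handed to the device
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	write completed
 *
 * Readers normally use the cheap, approximate bdi_stat(); only when the
 * result is within bdi_stat_error() of a decision threshold is the
 * exact but expensive bdi_stat_sum() worth calling:
 *
 *	if (bdi_stat(bdi, BDI_RECLAIMABLE) > bdi_thresh - bdi_stat_error(bdi))
 *		exceeded = bdi_stat_sum(bdi, BDI_RECLAIMABLE) > bdi_thresh;
 */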

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
 * three flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
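
/*
 * Capability sketch (hedged; "my_ramfs_bdi" is a hypothetical example,
 * not a bdi defined by the kernel): an in-RAM filesystem neither writes
 * pages back nor accounts them, but private mappings can be copied:
 *
 *	static struct backing_dev_info my_ramfs_bdi = {
 *		.name		= "my_ramfs",
 *		.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK |
 *				  BDI_CAP_MAP_COPY,
 *	};
 */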

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
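
/*
 * A common throttling pattern (illustrative only): submitters of bulk
 * async writes back off while the device signals congestion:
 *
 *	while (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 */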

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

#endif /* _LINUX_BACKING_DEV_H */