/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up to
 * high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <asm/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct list_head list;			/* hangs off the bdi */

	struct backing_dev_info *bdi;		/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;		/* last old data flush */

	struct task_struct *task;		/* writeback task */
	struct list_head b_dirty;		/* dirty inodes */
	struct list_head b_io;			/* parked for writeback */
	struct list_head b_more_io;		/* parked for more writeback */
};

struct backing_dev_info {
	struct list_head bdi_list;
	struct rcu_head rcu_head;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */
	void (*unplug_io_fn)(struct backing_dev_info *, struct page *);
	void *unplug_io_data;

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects update side of wb_list */
	struct list_head wb_list; /* the flusher threads hanging off this bdi */
	unsigned long wb_mask;	  /* bitmask of registered tasks */
	unsigned int wb_cnt;	  /* number of registered tasks */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};

int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
				long nr_pages, int sb_locked);
int bdi_writeback_task(struct bdi_writeback *wb);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
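
/*
 * Illustrative sketch, not part of the original header: one plausible
 * lifecycle for an embedded backing_dev_info, assuming the caller owns the
 * bdi and later tears it down with bdi_unregister()/bdi_destroy(). The
 * helper name and the "example" device name are hypothetical.
 */
static inline int bdi_example_setup(struct backing_dev_info *bdi,
				    struct device *parent)
{
	int err;

	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, parent, "example");
	if (err)
		bdi_destroy(bdi);

	return err;
}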

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);

/*
 * Maximal error of a stat counter: each CPU may defer up to BDI_STAT_BATCH
 * updates in its local counter before folding them into the global count,
 * so a cheap bdi_stat() read can be off by up to nr_cpu_ids * BDI_STAT_BATCH.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}
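
/*
 * Illustrative sketch, not part of the original header: callers comparing a
 * bdi stat against a threshold can usually live with the cheap, slightly
 * stale bdi_stat() read and only fall back to the exact (but expensive)
 * bdi_stat_sum() when the approximate value lies within bdi_stat_error()
 * of the threshold. The helper name is hypothetical.
 */
static inline bool bdi_example_over_thresh(struct backing_dev_info *bdi,
					   unsigned long thresh)
{
	s64 nr = bdi_stat(bdi, BDI_RECLAIMABLE) + bdi_stat(bdi, BDI_WRITEBACK);
	unsigned long error = bdi_stat_error(bdi);

	/* Clearly above or below the threshold: trust the cheap read */
	if (nr > thresh + error)
		return true;
	if (nr + error <= thresh)
		return false;

	/* Too close to call: recount exactly across all CPUs */
	nr = bdi_stat_sum(bdi, BDI_RECLAIMABLE) + bdi_stat_sum(bdi, BDI_WRITEBACK);
	return nr > thresh;
}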

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs).
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately. BDI_CAP_NO_ACCT_AND_WRITEBACK combines these three
 * flags into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:  Don't write pages back
 * BDI_CAP_NO_ACCT_WB:    Don't automatically account writeback pages
 *
 * The following flags let !MMU mmap() govern direct device mapping vs
 * immediate copying more easily for MAP_PRIVATE, especially for ROM
 * filesystems.
 *
 * BDI_CAP_MAP_COPY:   Can be mapped using a private copy (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:   Can be mapped for reading
 * BDI_CAP_WRITE_MAP:  Can be mapped for writing
 * BDI_CAP_EXEC_MAP:   Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED: Count shmem/tmpfs objects as swap-backed.
 *
 * An illustrative usage sketch follows the flag definitions below.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif
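
/*
 * Illustrative sketch, not part of the original header: how a ramfs-style
 * filesystem might describe a bdi whose dirty pages should neither be
 * written back nor counted in the VM's dirty accounting. The helper name is
 * hypothetical and the flag combination is only a plausible example.
 */
static inline void bdi_example_init_noacct(struct backing_dev_info *bdi)
{
	/* No writeback, no dirty/writeback accounting, private mmap copies OK */
	bdi->capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY;
}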

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}
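
/*
 * Illustrative sketch, not part of the original header: a stacked driver
 * (md/dm style) would typically point ->congested_fn at a helper like this
 * and stash its own context in ->congested_data, forwarding congestion
 * queries to the bdi of an underlying member device. Names are hypothetical.
 */
static inline int bdi_example_stacked_congested_fn(void *congested_data,
						   int bdi_bits)
{
	struct backing_dev_info *member_bdi = congested_data;

	return bdi_congested(member_bdi, bdi_bits);
}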

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
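
/*
 * Illustrative sketch, not part of the original header: a writer that finds
 * the async (write) queue congested typically backs off with
 * congestion_wait() before retrying rather than spinning. The helper name
 * and the timeout value are hypothetical.
 */
static inline void bdi_example_throttle_writes(struct backing_dev_info *bdi)
{
	while (bdi_write_congested(bdi))
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
}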

static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
				       struct page *page)
{
	if (bdi && bdi->unplug_io_fn)
		bdi->unplug_io_fn(bdi, page);
}

static inline void blk_run_address_space(struct address_space *mapping)
{
	if (mapping)
		blk_run_backing_dev(mapping->backing_dev_info, NULL);
}

#endif	/* _LINUX_BACKING_DEV_H */