/*
 * From the Linux kernel source tree: include/linux/backing-dev.h
 * (as of commit "bdi: make inode_to_bdi() inline")
 */
1 /*
2 * include/linux/backing-dev.h
3 *
4 * low-level device information and state which is propagated up through
5 * to high-level code.
6 */
7
8 #ifndef _LINUX_BACKING_DEV_H
9 #define _LINUX_BACKING_DEV_H
10
11 #include <linux/kernel.h>
12 #include <linux/fs.h>
13 #include <linux/sched.h>
14 #include <linux/blkdev.h>
15 #include <linux/writeback.h>
16 #include <linux/backing-dev-defs.h>
17
18 int __must_check bdi_init(struct backing_dev_info *bdi);
19 void bdi_destroy(struct backing_dev_info *bdi);
20
21 __printf(3, 4)
22 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
23 const char *fmt, ...);
24 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
25 void bdi_unregister(struct backing_dev_info *bdi);
26 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
27 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
28 enum wb_reason reason);
29 void bdi_start_background_writeback(struct backing_dev_info *bdi);
30 void wb_workfn(struct work_struct *work);
31 int bdi_has_dirty_io(struct backing_dev_info *bdi);
32 void wb_wakeup_delayed(struct bdi_writeback *wb);
33
34 extern spinlock_t bdi_lock;
35 extern struct list_head bdi_list;
36
37 extern struct workqueue_struct *bdi_wq;
38
39 static inline int wb_has_dirty_io(struct bdi_writeback *wb)
40 {
41 return !list_empty(&wb->b_dirty) ||
42 !list_empty(&wb->b_io) ||
43 !list_empty(&wb->b_more_io);
44 }
45
/*
 * __add_wb_stat - adjust a per-writeback statistic by @amount.
 *
 * Thin wrapper around __percpu_counter_add() using the WB_STAT_BATCH
 * batching threshold; the "__" prefix mirrors the percpu-counter
 * convention (caller is responsible for the required irq/preempt
 * context — see inc_wb_stat()/dec_wb_stat() for the irq-safe forms).
 */
static inline void __add_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item, s64 amount)
{
	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
}
51
/* Increment @item for @wb by one; caller handles irq/preempt context. */
static inline void __inc_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, 1);
}
57
/* Irq-safe variant of __inc_wb_stat(). */
static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	/* disable local irqs so the percpu update cannot be interleaved */
	local_irq_save(flags);
	__inc_wb_stat(wb, item);
	local_irq_restore(flags);
}
66
/* Decrement @item for @wb by one; caller handles irq/preempt context. */
static inline void __dec_wb_stat(struct bdi_writeback *wb,
				 enum wb_stat_item item)
{
	__add_wb_stat(wb, item, -1);
}
72
/* Irq-safe variant of __dec_wb_stat(). */
static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	unsigned long flags;

	/* disable local irqs so the percpu update cannot be interleaved */
	local_irq_save(flags);
	__dec_wb_stat(wb, item);
	local_irq_restore(flags);
}
81
/*
 * wb_stat - read the approximate value of a writeback statistic.
 *
 * Uses the cheap (non-summing) percpu-counter read; the result may be
 * off by up to wb_stat_error() but is clamped to be non-negative.
 */
static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}
86
/*
 * __wb_stat_sum - exact (all-CPU) sum of a writeback statistic.
 *
 * More expensive than wb_stat(); result clamped to be non-negative.
 * Caller handles irq context — see wb_stat_sum() for the irq-safe form.
 */
static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
				enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
92
/* Irq-safe variant of __wb_stat_sum(). */
static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	s64 sum;
	unsigned long flags;

	/* disable local irqs while walking the per-cpu deltas */
	local_irq_save(flags);
	sum = __wb_stat_sum(wb, item);
	local_irq_restore(flags);

	return sum;
}
104
105 extern void wb_writeout_inc(struct bdi_writeback *wb);
106
/*
 * maximal error of a stat counter: on SMP each possible CPU may hold up
 * to WB_STAT_BATCH uncommitted deltas in its local counter; on UP the
 * counter is exact up to rounding, hence 1.
 */
static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
118
119 int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
120 int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
121
122 /*
123 * Flags in backing_dev_info::capability
124 *
125 * The first three flags control whether dirty pages will contribute to the
126 * VM's accounting and whether writepages() should be called for dirty pages
127 * (something that would not, for example, be appropriate for ramfs)
128 *
129 * WARNING: these flags are closely related and should not normally be
130 * used separately. The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
131 * three flags into a single convenience macro.
132 *
133 * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting
134 * BDI_CAP_NO_WRITEBACK: Don't write pages back
135 * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages
136 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold.
137 */
138 #define BDI_CAP_NO_ACCT_DIRTY 0x00000001
139 #define BDI_CAP_NO_WRITEBACK 0x00000002
140 #define BDI_CAP_NO_ACCT_WB 0x00000004
141 #define BDI_CAP_STABLE_WRITES 0x00000008
142 #define BDI_CAP_STRICTLIMIT 0x00000010
143
144 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
145 (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
146
147 extern struct backing_dev_info noop_backing_dev_info;
148
149 int writeback_in_progress(struct backing_dev_info *bdi);
150
/*
 * inode_to_bdi - look up the backing_dev_info @inode's pages are
 * written back against.
 *
 * A NULL inode maps to noop_backing_dev_info.  For inodes that belong
 * to the blockdev pseudo-superblock the bdi comes from the underlying
 * block device's request queue rather than from the superblock.
 */
static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb;

	if (!inode)
		return &noop_backing_dev_info;

	sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	/* blockdev inodes: ask the block layer for the queue's bdi */
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
165
166 static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
167 {
168 if (bdi->congested_fn)
169 return bdi->congested_fn(bdi->congested_data, bdi_bits);
170 return (bdi->wb.state & bdi_bits);
171 }
172
173 static inline int bdi_read_congested(struct backing_dev_info *bdi)
174 {
175 return bdi_congested(bdi, 1 << WB_sync_congested);
176 }
177
178 static inline int bdi_write_congested(struct backing_dev_info *bdi)
179 {
180 return bdi_congested(bdi, 1 << WB_async_congested);
181 }
182
183 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
184 {
185 return bdi_congested(bdi, (1 << WB_sync_congested) |
186 (1 << WB_async_congested));
187 }
188
189 long congestion_wait(int sync, long timeout);
190 long wait_iff_congested(struct zone *zone, int sync, long timeout);
191 int pdflush_proc_obsolete(struct ctl_table *table, int write,
192 void __user *buffer, size_t *lenp, loff_t *ppos);
193
194 static inline bool bdi_cap_stable_pages_required(struct backing_dev_info *bdi)
195 {
196 return bdi->capabilities & BDI_CAP_STABLE_WRITES;
197 }
198
199 static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
200 {
201 return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
202 }
203
204 static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
205 {
206 return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
207 }
208
209 static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
210 {
211 /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
212 return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
213 BDI_CAP_NO_WRITEBACK));
214 }
215
216 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
217 {
218 return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
219 }
220
221 static inline bool mapping_cap_account_dirty(struct address_space *mapping)
222 {
223 return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
224 }
225
/*
 * Yield the CPU and report "not interrupted" (0).  NOTE(review): the
 * signature matches a wait_on_bit() action callback, where @word is the
 * bit address (unused here) — confirm against callers.
 */
static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}
231
232 #endif /* _LINUX_BACKING_DEV_H */
/* (end of file) */