/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* Zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }

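/*
 * The flag/name pairs above are laid out for symbolic flag decoding in
 * tracepoints. A sketch of the intended use, assuming the standard
 * __print_flags() helper from the kernel tracing infrastructure:
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 *
 * which renders, e.g., (XBF_READ | XBF_ASYNC) as "READ|ASYNC".
 */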

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */

/*
 * The xfs_buftarg contains two notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The former is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct backing_dev_info	*bt_bdi;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;
} xfs_buftarg_t;
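
/*
 * The sector masks above are the matching sector sizes minus one, so byte
 * alignment checks reduce to a bitwise AND. A minimal sketch, assuming a
 * hypothetical helper (not part of this header):
 *
 *	static inline bool
 *	xfs_buftarg_meta_aligned(struct xfs_buftarg *btp, xfs_off_t offset)
 *	{
 *		return (offset & btp->bt_meta_sectormask) == 0;
 *	}
 */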

struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);


#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
};
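
/*
 * A table of verifier callbacks is normally defined per metadata type and
 * passed to the read routines below. A minimal sketch (the names here are
 * illustrative only, not part of this header):
 *
 *	static void xfs_foo_read_verify(struct xfs_buf *bp)  { ... }
 *	static void xfs_foo_write_verify(struct xfs_buf *bp) { ... }
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */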

typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rb_node		b_rbnode;	/* rbtree node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_state is protected by b_lock
	 * and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_fspriv;
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure, then incremented on each subsequent failure; when it
	 * exceeds the configured maximum without an intervening success, the
	 * write is considered permanently failed and the iodone handler will
	 * take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
				struct xfs_buf_map *map, int nmaps,
				xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
				 struct xfs_buf_map *map, int nmaps,
				 xfs_buf_flags_t flags,
				 const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			   struct xfs_buf_map *map, int nmaps,
			   const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}
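
/*
 * A sketch of the typical synchronous read pattern: read the buffer
 * through the cache, check for errors, then unlock and release it when
 * done. (Error handling details vary by caller; the flags and ops shown
 * here are caller-specific.)
 *
 *	bp = xfs_buf_read(target, blkno, numblks, 0, ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */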

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				     int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)
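
/*
 * A sketch of the locking discipline: take the blocking lock for normal
 * use, and trylock where the caller must not sleep (the -EAGAIN policy
 * below is a hypothetical caller choice, not mandated here):
 *
 *	xfs_buf_lock(bp);
 *	... modify buffer ...
 *	xfs_buf_unlock(bp);
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 */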

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
			   xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
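
/*
 * Delwri queues are caller-owned lists. A sketch of the usual pattern:
 * queue buffers onto a local list, then submit them in one batch.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */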

/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify
 * the IO map directly. Uncached buffers are not allowed to be
 * discontiguous, so this is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to
 * the io request function and hence these macros will go away at that point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
	atomic_set(&bp->b_lru_ref, lru_ref);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
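
/*
 * These helpers are intended for use in verify_read/verify_write
 * callbacks. A sketch of a read verifier, assuming a hypothetical
 * XFS_FOO_CRC_OFF checksum offset for the structure being verified:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			xfs_buf_ioerror(bp, -EFSBADCRC);
 *	}
 */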

/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */