/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

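/*
 * Worked example (assuming 4K pages and 64-bit longs): each page is split
 * into PAGE_CACHE_SIZE / BITS_PER_LONG = 64 regions of 64 bytes each, and
 * bit N of page->private records whether region N is uptodate.
 * page_region_mask() below builds the bitmask covering the regions spanned
 * by an (offset, length) range within a single page.
 */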
STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

STATIC inline void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC inline int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

/*
 * Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
	xfs_buf_t	*bp,
	xfs_buftarg_t	*target,
	xfs_off_t	range_base,
	size_t		range_length,
	xfs_buf_flags_t	flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t	*bp,
	int		page_count,
	xfs_buf_flags_t	flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
	}
}

/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			ASSERT(!PagePrivate(page));
			page_cache_release(page);
		}
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		/*
		 * XXX(hch): bp->b_count_desired might be incorrect (see
		 * xfs_buf_associate_memory for details), but fortunately
		 * the Linux version of kmem_free ignores the len argument..
		 */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}

/*
 *	Finds all pages for the buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		ASSERT(!PagePrivate(page));
		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t	*bp,
	uint		flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Look up, and create if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t	*btp,	/* block device target		*/
	xfs_off_t	ioff,	/* starting offset of range	*/
	size_t		isize,	/* length of range		*/
	xfs_buf_flags_t	flags,
	xfs_buf_t	*new_bp)
{
	xfs_off_t	range_base;
	size_t		range_length;
	xfs_bufhash_t	*hash;
	xfs_buf_t	*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 *	Assembles a buffer covering the specified range.
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t	*target,/* target for buffer		*/
	xfs_off_t	ioff,	/* starting offset of range	*/
	size_t		isize,	/* length of range		*/
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp, *new_bp;
	int		error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize,
	xfs_buf_flags_t	flags)
{
	xfs_buf_t	*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t	*target,
	xfs_off_t	ioff,
	size_t		isize,
	xfs_buf_flags_t	flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

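/*
 *	Get an empty, uninitialised buffer of the given length.  The buffer
 *	has no block number and no backing pages yet; storage is attached
 *	separately by the caller.
 */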
xfs_buf_t *
xfs_buf_get_empty(
	size_t		len,
	xfs_buftarg_t	*target)
{
	xfs_buf_t	*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

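/*
 *	Return the struct page backing an arbitrary kernel address, using
 *	virt_to_page() for directly mapped memory and vmalloc_to_page() for
 *	addresses inside the vmalloc area.
 */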
static inline struct page *
mem_to_page(
	void		*addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t	*bp,
	void		*mem,
	size_t		len)
{
	int		rval;
	int		i = 0;
	size_t		ptr;
	size_t		end, end_cur;
	off_t		offset;
	int		page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

xfs_buf_t *
xfs_buf_get_noaddr(
	size_t		len,
	xfs_buftarg_t	*target)
{
	size_t		malloc_len = len;
	xfs_buf_t	*bp;
	void		*data;
	int		error;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	if (unlikely(data == NULL))
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	}

	error = xfs_buf_associate_memory(bp, data, len);
	if (error)
		goto fail_free_mem;
	bp->b_flags |= _XBF_KMEM_ALLOC;

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", data);
	return bp;
 fail_free_mem:
	kmem_free(data, malloc_len);
 fail_free_buf:
	xfs_buf_free(bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t	*bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}

/*
 *	Releases a hold on the specified buffer.  If the hold count is 1,
 *	calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t	*bp)
{
	xfs_bufhash_t	*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}

/*
 *	Mutual exclusion on buffers.  Locking model:
 *
 *	Buffers associated with inodes for which buffer locking
 *	is not enabled are not protected by semaphores, and are
 *	assumed to be exclusively owned by the caller.  There is a
 *	spinlock in the buffer, used by the caller when concurrent
 *	access is possible.
 */

/*
 *	Locks a buffer object, if it is not already locked.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t	*bp)
{
	int		locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t	*bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif

/*
 *	Locks a buffer object.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t	*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}

/*
 *	Pinning Buffer Storage in Memory
 *	Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t	*bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t	*bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t	*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t	*bp)
{
	DECLARE_WAITQUEUE(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	void		*v)
{
	xfs_buf_t	*bp = (xfs_buf_t *)v;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t	*bp,
	int		schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(bp);
		}
	} else {
		up(&bp->b_iodonesema);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t	*bp,
	int		error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}

/*
 *	Initiate I/O on a buffer, based on the flags supplied.
 *	The b_iodone routine in the buffer supplied will only be called
 *	when all of the subsidiary I/O requests, if any, have been completed.
 */
int
xfs_buf_iostart(
	xfs_buf_t	*bp,
	xfs_buf_flags_t	flags)
{
	int		status = 0;

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);
		return status;
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI |
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC |
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);

	return status;
}

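/*
 *	Helpers for I/O completion: _xfs_buf_iolocked() reports whether the
 *	buffer's pages were locked for a read, and _xfs_buf_ioend() drops one
 *	reference on b_io_remaining, finishing the buffer when the last
 *	outstanding bio completes.
 */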
STATIC __inline__ int
_xfs_buf_iolocked(
	xfs_buf_t	*bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC __inline__ void
_xfs_buf_ioend(
	xfs_buf_t	*bp,
	int		schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
		xfs_buf_ioend(bp, schedule);
	}
}

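/*
 *	bio completion handler: record any I/O error on the buffer, update
 *	the page/region uptodate state for each bio_vec, unlock pages that
 *	were locked for the transfer, and drop this bio's I/O reference.
 */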
STATIC int
xfs_buf_bio_end_io(
	struct bio	*bio,
	unsigned int	bytes_done,
	int		error)
{
	xfs_buf_t	*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int	blocksize = bp->b_target->bt_bsize;
	struct bio_vec	*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		bp->b_error = EIO;

	do {
		struct page	*page = bvec->bv_page;

		ASSERT(!PagePrivate(page));
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
	return 0;
}

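/*
 *	Build and submit the bio(s) needed to cover the buffer's pages,
 *	choosing the request type from the buffer flags (barrier, sync or
 *	plain read/write) and splitting into multiple bios when a single
 *	one cannot map all the pages.
 */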
STATIC void
_xfs_buf_ioapply(
	xfs_buf_t	*bp)
{
	int		i, rw, map_i, total_nr_pages, nr_pages;
	struct bio	*bio;
	int		offset = bp->b_offset;
	int		size = bp->b_count_desired;
	sector_t	sector = bp->b_bn;
	unsigned int	blocksize = bp->b_target->bt_bsize;
	int		locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
	}

	/* Special code path for reading a sub page size buffer in --
	 * we populate up the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = bp->b_pages[i];

			if (nbytes > size)
				nbytes = size;

			lock_page(page);

			size -= nbytes;
			offset = 0;
		}
		offset = bp->b_offset;
		size = bp->b_count_desired;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "iorequest", 0);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t	*bp)
{
	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_iodonesema);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);
	return bp->b_error;
}

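/*
 *	Return a kernel pointer to the byte at 'offset' within the buffer,
 *	either via the mapped virtual address or by indexing into the
 *	backing page array.
 */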
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t	*bp,
	size_t		offset)
{
	struct page	*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t	*bp,	/* buffer to process		*/
	size_t		boff,	/* starting buffer offset	*/
	size_t		bsize,	/* length to copy		*/
	caddr_t		data,	/* data address			*/
	xfs_buf_rw_t	mode)	/* read/write/zero flag		*/
{
	size_t		bend, cpoff, csize;
	struct page	*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 *	Wait for any bufs with callbacks that have been submitted but
 *	have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
 *	Allocate buffer hash table for a given target.
 *	For devices containing metadata (i.e. not the log/realtime devices)
 *	we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t	*btp,
	int		external)
{
	unsigned int	i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t	*btp)
{
	kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;
}

/*
 *	buftarg list for delwrite queue processing
 */
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t	*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t	*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	xfs_buftarg_t	*btp,
	int		external)
{
	xfs_flush_buftarg(btp, 1);
	if (external)
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp, sizeof(*btp));
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize,
	int		verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so don't know what sized sectors
 *	are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t	*btp,
	unsigned int	blocksize,
	unsigned int	sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static const struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
		.migratepage = fail_migrate_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}

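/*
 *	Set up the per-target delayed write queue and start the xfsbufd
 *	thread that flushes it, then add the target to the global buftarg
 *	list so that xfsbufd_wakeup() can find it.
 */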
STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t	*btp)
{
	int	error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t	*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp, sizeof(*btp));
	return NULL;
}

/*
 *	Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t	*bp,
	int		unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t	*bp)
{
	spinlock_t	*dwlk = &bp->b_target->bt_delwrite_lock;
	int		dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
}

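/*
 *	Wait for all previously queued work on the given I/O completion
 *	workqueue to finish.
 */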
STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

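/*
 *	Memory shaker callback: ask every registered buftarg's xfsbufd to
 *	flush its delayed write queue, skipping targets whose thread is
 *	currently sleeping for a freeze/suspend.
 */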
STATIC int
xfsbufd_wakeup(
	int		priority,
	gfp_t		mask)
{
	xfs_buftarg_t	*btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}

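/*
 *	Per-target delayed write flush daemon: periodically (or when kicked
 *	by xfsbufd_wakeup()) moves aged, unpinned delwri buffers off the
 *	queue and issues their writes.
 */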
STATIC int
xfsbufd(
	void		*data)
{
	struct list_head	tmp;
	unsigned long		age;
	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
	xfs_buf_t		*bp, *n;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;
	int			count;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		count = 0;
		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		spin_lock(dwlk);
		list_for_each_entry_safe(bp, n, dwq, b_list) {
			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
			ASSERT(bp->b_flags & XBF_DELWRI);

			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
				if (!test_bit(XBT_FORCE_FLUSH,
						&target->bt_flags) &&
				    time_before(jiffies,
						bp->b_queuetime + age)) {
					xfs_buf_unlock(bp);
					break;
				}

				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
						 _XBF_RUN_QUEUES);
				bp->b_flags |= XBF_WRITE;
				list_move_tail(&bp->b_list, &tmp);
				count++;
			}
		}
		spin_unlock(dwlk);

		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
		}

		if (as_list_len > 0)
			purge_addresses();
		if (count)
			blk_run_address_space(target->bt_mapping);

		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	} while (!kthread_should_stop());

	return 0;
}

/*
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	struct list_head	tmp;
	xfs_buf_t		*bp, *n;
	int			pincount = 0;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_target == target);
		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
		if (xfs_buf_ispin(bp)) {
			pincount++;
			continue;
		}

		list_move_tail(&bp->b_list, &tmp);
	}
	spin_unlock(dwlk);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		xfs_buf_lock(bp);
		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|_XBF_RUN_QUEUES);
		bp->b_flags |= XBF_WRITE;
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	return pincount;
}

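/*
 *	Module init/teardown for the buffer cache: create the buffer zone,
 *	the log and data I/O completion workqueues and the memory shaker
 *	hook, undoing everything on failure or at unload time.
 */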
int __init
xfs_buf_init(void)
{
#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif

	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
	if (!xfs_buf_shake)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out_free_trace_buf:
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_shake_deregister(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
}