/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include "xfs_linux.h"

STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;

#ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else
#define XB_TRACE(bp, id, data)	do { } while (0)
#endif

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
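/*
 * So, for example, a read-ahead allocation maps to
 * (__GFP_NORETRY | __GFP_NOWARN) -- speculative I/O gives up quietly
 * rather than retry -- XBF_DONT_BLOCK maps to (GFP_NOFS | __GFP_NOWARN)
 * to avoid recursing back into the filesystem, and everything else
 * gets (GFP_KERNEL | __GFP_NOWARN).
 */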

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

/*
 * Page Region interfaces.
 *
 * For pages in filesystems where the blocksize is smaller than the
 * pagesize, we use the page->private field (long) to hold a bitmap
 * of uptodate regions within the page.
 *
 * Each such region is "bytes per page / bits per long" bytes long.
 *
 * NBPPR == number-of-bytes-per-page-region
 * BTOPR == bytes-to-page-region (rounded up)
 * BTOPRT == bytes-to-page-region-truncated (rounded down)
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first + 1);
	mask >>= BITS_PER_LONG - (final + 1);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first + 1) <= BITS_PER_LONG);

	return mask;
}
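/*
 * Worked example (64-bit, 4K pages, so NBPPR == 64): for offset == 512
 * and length == 1024, first == BTOPR(512) == 8 and
 * final == BTOPRT(1535) == 23, giving a mask with bits 8..23 set --
 * one bit per fully covered 64-byte region.  A full page (offset 0,
 * length PAGE_CACHE_SIZE) yields ~0UL, which is what lets
 * set_page_region() below mark the page uptodate.
 */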

STATIC inline void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}

STATIC inline int
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

	return (mask && (page_private(page) & mask) == mask);
}

/*
 * Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);

/*
 * Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}
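/*
 * Note that free_address() only defers the vunmap: the addresses are
 * actually released by purge_addresses(), which runs from xfsbufd()
 * and from _xfs_buf_map_pages() once more than 64 entries have queued
 * up, so one pass pays the unmap/TLB-flush cost for the whole batch.
 */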

/*
 * Internal xfs_buf_t object manipulation
 */

STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}

/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 * Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages,
			  bp->b_page_count * sizeof(struct page *));
	}
}

/*
 * Releases the specified buffer.
 *
 * The modification state of any associated pages is left unchanged.
 * The buffer must not be on any hash - use xfs_buf_rele instead for
 * hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		uint		i;

		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++)
			page_cache_release(bp->b_pages[i]);
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		 /*
		  * XXX(hch): bp->b_count_desired might be incorrect (see
		  * xfs_buf_associate_memory for details), but fortunately
		  * the Linux version of kmem_free ignores the len argument..
		  */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}

/*
 * Finds all pages for buffer in question and builds its page list.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			blk_congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		if (!PageUptodate(page)) {
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;
	}

	if (!bp->b_locked) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}

/*
 * Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}
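/*
 * Note the b_offset bias above: for a mapped buffer b_addr points at
 * the start of the data rather than of the first page, which is why
 * xfs_buf_free() subtracts b_offset again before handing the mapping
 * back to free_address().
 */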

/*
 * Finding and Reading Buffers
 */

/*
 * Looks up, and creates if absent, a lockable buffer for
 * a given range of an inode. The buffer is returned
 * locked. If other overlapping buffers exist, they are
 * released before the new buffer is created and locked,
 * which may imply that this call will block until those buffers
 * are unlocked. No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	xfs_bufhash_t		*hash;
	xfs_buf_t		*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range.
 * Storage in memory for all portions of the buffer will be allocated,
 * although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0, i;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
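/*
 * A minimal usage sketch (illustrative only; the target, block number
 * and length below are made-up values, not taken from a real caller):
 *
 *	bp = xfs_buf_read_flags(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		if (!XFS_BUF_GETERROR(bp))
 *			(examine bp->b_addr, b_count_desired bytes)
 *		xfs_buf_relse(bp);	(unlock and drop the hold)
 *	}
 */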

/*
 * If we are not low on memory then do the readahead in a deadlock
 * safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	struct backing_dev_info *bdi;

	bdi = target->bt_mapping->backing_dev_info;
	if (bdi_read_congested(bdi))
		return;

	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
	xfs_buf_read_flags(target, ioff, isize, flags);
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (((unsigned long)addr < VMALLOC_START) ||
	    ((unsigned long)addr >= VMALLOC_END)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	size_t			ptr;
	size_t			end, end_cur;
	off_t			offset;
	int			page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
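/*
 * Page-count example for the code above: with 4K pages, mem starting
 * 512 bytes into a page and len == 8192 gives
 * PAGE_CACHE_ALIGN(8192) >> PAGE_CACHE_SHIFT == 2, and the non-zero
 * offset spills the range into a third page -- hence the page_count++.
 */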

xfs_buf_t *
xfs_buf_get_noaddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	size_t			malloc_len = len;
	xfs_buf_t		*bp;
	void			*data;
	int			error;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
	if (unlikely(data == NULL))
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	}

	error = xfs_buf_associate_memory(bp, data, len);
	if (error)
		goto fail_free_mem;
	bp->b_flags |= _XBF_KMEM_ALLOC;

	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", data);
	return bp;
 fail_free_mem:
	kmem_free(data, malloc_len);
 fail_free_buf:
	xfs_buf_free(bp);
 fail:
	return NULL;
}

/*
 * Increment reference count on buffer, to hold the buffer concurrently
 * with another thread which may release (free) the buffer asynchronously.
 * Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}

/*
 * Releases a hold on the specified buffer. If the
 * hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	xfs_bufhash_t		*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}


/*
 * Mutual exclusion on buffers.  Locking model:
 *
 * Buffers associated with inodes for which buffer locking
 * is not enabled are not protected by semaphores, and are
 * assumed to be exclusively owned by the caller.  There is a
 * spinlock in the buffer, used by the caller when concurrent
 * access is possible.
 */

/*
 * Locks a buffer object, if it is not already locked.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
int
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
	}
	XB_TRACE(bp, "cond_lock", (long)locked);
	return locked ? 0 : -EBUSY;
}

#if defined(DEBUG) || defined(XFS_BLI_TRACE)
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif

/*
 * Locks a buffer object.
 * Note that this in no way locks the underlying pages, so it is only
 * useful for synchronizing concurrent use of buffer objects, not for
 * synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "lock", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}

/*
 * Releases the lock on the buffer object.
 * If the buffer is marked delwri but is not queued, do so before we
 * unlock the buffer as we need to set flags correctly. We also need to
 * take a reference for the delwri queue because the unlocker is going to
 * drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	xfs_buf_t		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
	XB_TRACE(bp, "unlock", 0);
}
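/*
 * To summarize the protocol: _xfs_buf_find() and xfs_buf_get_flags()
 * hand back the buffer already locked, xfs_buf_cond_lock() is the
 * trylock variant (returning 0 on success and -EBUSY otherwise), and
 * xfs_buf_unlock() both queues a delwri buffer that is not yet on its
 * target's delwri queue and then drops b_sema.
 */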


/*
 * Pinning Buffer Storage in Memory
 * Ensure that no attempt to force a buffer to disk will succeed.
 */
void
xfs_buf_pin(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_pin_count);
	XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}

void
xfs_buf_unpin(
	xfs_buf_t		*bp)
{
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
	XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}

int
xfs_buf_ispin(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_pin_count);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
		schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 * Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	void			*v)
{
	xfs_buf_t		*bp = (xfs_buf_t *)v;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	bp->b_flags &= ~(XBF_READ | XBF_WRITE);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "iodone", bp->b_iodone);

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(bp);
		}
	} else {
		up(&bp->b_iodonesema);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	XB_TRACE(bp, "ioerror", (unsigned long)error);
}

/*
 * Initiate I/O on a buffer, based on the flags supplied.
 * The b_iodone routine in the buffer supplied will only be called
 * when all of the subsidiary I/O requests, if any, have been completed.
 */
int
xfs_buf_iostart(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status = 0;

	XB_TRACE(bp, "iostart", (unsigned long)flags);

	if (flags & XBF_DELWRI) {
		bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
		bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
		xfs_buf_delwri_queue(bp, 1);
		return status;
	}

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI |
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC |
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
	 * a shutdown situation, for example).
	 */
	status = (flags & XBF_WRITE) ?
		xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

	/* Wait for I/O if we are not an async request.
	 * Note: async I/O request completion will release the buffer,
	 * and that can already be done by this point.  So using the
	 * buffer pointer from here on, after async I/O, is invalid.
	 */
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);

	return status;
}
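/*
 * For example, xfs_buf_iostart(bp, XBF_READ) issues the read and,
 * since XBF_ASYNC is clear, blocks in xfs_buf_iowait() and returns
 * b_error, while xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC) merely
 * queues the buffer for xfsbufd() to push out later.
 */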

STATIC __inline__ int
_xfs_buf_iolocked(
	xfs_buf_t		*bp)
{
	ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
	if (bp->b_flags & XBF_READ)
		return bp->b_locked;
	return 0;
}

STATIC __inline__ void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_locked = 0;
		xfs_buf_ioend(bp, schedule);
	}
}

STATIC int
xfs_buf_bio_end_io(
	struct bio		*bio,
	unsigned int		bytes_done,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	if (bio->bi_size)
		return 1;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		bp->b_error = EIO;

	do {
		struct page	*page = bvec->bv_page;

		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
				ClearPageUptodate(page);
			SetPageError(page);
		} else if (blocksize >= PAGE_CACHE_SIZE) {
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
				(bp->b_flags & _XBF_PAGE_CACHE)) {
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (_xfs_buf_iolocked(bp)) {
			unlock_page(page);
		}
	} while (bvec >= bio->bi_io_vec);

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
	return 0;
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			i, rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
	int			locking = _xfs_buf_iolocked(bp);

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & _XBF_RUN_QUEUES) {
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
	} else {
		rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
	}

	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
		rw = WRITE_BARRIER;
	}

	/* Special code path for reading a sub page size buffer -- we
	 * populate the whole page, and hence the other metadata in the
	 * same page.  This optimization is only valid when the
	 * filesystem block size is not smaller than the page size.
	 */
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
	    (bp->b_flags & XBF_READ) && locking &&
	    (blocksize >= PAGE_CACHE_SIZE)) {
		bio = bio_alloc(GFP_NOIO, 1);

		bio->bi_bdev = bp->b_target->bt_bdev;
		bio->bi_sector = sector - (offset >> BBSHIFT);
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;

		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
		size = 0;

		atomic_inc(&bp->b_io_remaining);

		goto submit_io;
	}

	/* Lock down the pages which we need to for the request */
	if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
		for (i = 0; size; i++) {
			int		nbytes = PAGE_CACHE_SIZE - offset;
			struct page	*page = bp->b_pages[i];

			if (nbytes > size)
				nbytes = size;

			lock_page(page);

			size -= nbytes;
			offset = 0;
		}
		offset = bp->b_offset;
		size = bp->b_count_desired;
	}

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
		xfs_buf_ioerror(bp, EIO);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iorequest", 0);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially; this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}
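/*
 * Completion accounting, traced for one possible ordering where
 * _xfs_buf_ioapply() splits the buffer into two bios:
 *
 *	xfs_buf_iorequest:	b_io_remaining = 1
 *	per-chunk increments:	-> 3
 *	_xfs_buf_ioend(bp, 0):	-> 2	(drops the initial count)
 *	bio completions:	-> 1 -> 0, and the final decrement
 *				calls xfs_buf_ioend()
 *
 * The initial count of 1 is what keeps an early bio completion from
 * finishing the buffer while later bios are still being submitted.
 */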

/*
 * Waits for I/O to complete on the buffer supplied.
 * It returns immediately if no I/O is pending.
 * It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "iowait", 0);
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_iodonesema);
	XB_TRACE(bp, "iowaited", (long)bp->b_error);
	return bp->b_error;
}

xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
}

/*
 * Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 * Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but
 * have not yet returned... walk the hash list for the target.
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
				spin_unlock(&hash->bh_lock);
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
				BUG_ON(bp->b_bn == 0);
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
 * Allocate buffer hash table for a given target.
 * For devices containing metadata (i.e. not the log/realtime devices)
 * we need to allocate a much larger hash table.
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t		*btp,
	int			external)
{
	unsigned int		i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
					sizeof(xfs_bufhash_t), KM_SLEEP);
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}
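/*
 * With the sizes above a data device gets 256 hash buckets and an
 * external log or realtime device gets 8; _xfs_buf_find() then picks
 * a bucket with hash_long(ioff, bt_hashshift), so only buffers whose
 * offsets hash together ever contend on a given bh_lock.
 */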

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t		*btp)
{
	kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
	btp->bt_hash = NULL;
}

/*
 * buftarg list for delwrite queue processing
 */
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t		*btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

void
xfs_free_buftarg(
	xfs_buftarg_t		*btp,
	int			external)
{
	xfs_flush_buftarg(btp, 1);
	if (external)
		xfs_blkdev_put(btp->bt_bdev);
	xfs_free_bufhash(btp);
	iput(btp->bt_mapping->host);

	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

	kmem_free(btp, sizeof(*btp));
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}

/*
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what size sectors
 * are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
	static struct address_space_operations mapping_aops = {
		.sync_page = block_sync_page,
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
	btp->bt_mapping = mapping;
	return 0;
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp)
{
	int			error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
	kmem_free(btp, sizeof(*btp));
	return NULL;
}


/*
 * Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	XB_TRACE(bp, "delwri_q", (long)unlock);
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}
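/*
 * Lifecycle sketch: a buffer marked XBF_DELWRI is (re)queued at the
 * tail with a fresh b_queuetime; xfsbufd() writes it back once it is
 * older than xfs_buf_age_centisecs, or immediately when memory
 * pressure sets XBT_FORCE_FLUSH via xfsbufd_wakeup() below.
 */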

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	XB_TRACE(bp, "delwri_dq", (long)dequeued);
}

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

STATIC int
xfsbufd_wakeup(
	int			priority,
	gfp_t			mask)
{
	xfs_buftarg_t		*btp;

	spin_lock(&xfs_buftarg_lock);
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
			continue;
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
	return 0;
}

STATIC int
xfsbufd(
	void			*data)
{
	struct list_head	tmp;
	unsigned long		age;
	xfs_buftarg_t		*target = (xfs_buftarg_t *)data;
	xfs_buf_t		*bp, *n;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	current->flags |= PF_MEMALLOC;

	INIT_LIST_HEAD(&tmp);
	do {
		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));

		age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		spin_lock(dwlk);
		list_for_each_entry_safe(bp, n, dwq, b_list) {
			XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
			ASSERT(bp->b_flags & XBF_DELWRI);

			if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
				if (!test_bit(XBT_FORCE_FLUSH,
						&target->bt_flags) &&
				    time_before(jiffies,
						bp->b_queuetime + age)) {
					xfs_buf_unlock(bp);
					break;
				}

				bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
				bp->b_flags |= XBF_WRITE;
				list_move(&bp->b_list, &tmp);
			}
		}
		spin_unlock(dwlk);

		while (!list_empty(&tmp)) {
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);

			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);

			blk_run_address_space(target->bt_mapping);
		}

		if (as_list_len > 0)
			purge_addresses();

		clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	} while (!kthread_should_stop());

	return 0;
}

/*
 * Go through all incore buffers, and release buffers if they belong to
 * the given device. This is used in filesystem error handling to
 * preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t		*target,
	int			wait)
{
	struct list_head	tmp;
	xfs_buf_t		*bp, *n;
	int			pincount = 0;
	struct list_head	*dwq = &target->bt_delwrite_queue;
	spinlock_t		*dwlk = &target->bt_delwrite_lock;

	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	INIT_LIST_HEAD(&tmp);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_target == target);
		ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
		XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
		if (xfs_buf_ispin(bp)) {
			pincount++;
			continue;
		}

		list_move(&bp->b_list, &tmp);
	}
	spin_unlock(dwlk);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
		xfs_buf_lock(bp);
		bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
		bp->b_flags |= XBF_WRITE;
		if (wait)
			bp->b_flags &= ~XBF_ASYNC;
		else
			list_del_init(&bp->b_list);

		xfs_buf_iostrategy(bp);
	}

	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
		bp = list_entry(tmp.next, xfs_buf_t, b_list);

		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
	}

	if (wait)
		blk_run_address_space(target->bt_mapping);

	return pincount;
}

int __init
xfs_buf_init(void)
{
	int		error = -ENOMEM;

#ifdef XFS_BUF_TRACE
	xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif

	xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
	if (!xfs_buf_zone)
		goto out_free_trace_buf;

	xfslogd_workqueue = create_workqueue("xfslogd");
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = create_workqueue("xfsdatad");
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
	if (!xfs_buf_shake)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out_free_trace_buf:
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
	return error;
}

void
xfs_buf_terminate(void)
{
	kmem_shake_deregister(xfs_buf_shake);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
	ktrace_free(xfs_buf_trace_buf);
#endif
}