commit cb329edc925b915ae5e590edeb4840810f811326
fs/xfs/linux-2.6/xfs_buf.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/slab.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 static kmem_zone_t *xfs_buf_zone;
38 STATIC int xfsbufd(void *);
39 STATIC int xfsbufd_wakeup(int, gfp_t);
40 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
41 static struct shrinker xfs_buf_shake = {
42 .shrink = xfsbufd_wakeup,
43 .seeks = DEFAULT_SEEKS,
44 };
45
46 static struct workqueue_struct *xfslogd_workqueue;
47 struct workqueue_struct *xfsdatad_workqueue;
48
49 #ifdef XFS_BUF_TRACE
50 void
51 xfs_buf_trace(
52 xfs_buf_t *bp,
53 char *id,
54 void *data,
55 void *ra)
56 {
57 ktrace_enter(xfs_buf_trace_buf,
58 bp, id,
59 (void *)(unsigned long)bp->b_flags,
60 (void *)(unsigned long)bp->b_hold.counter,
61 (void *)(unsigned long)bp->b_sema.count,
62 (void *)current,
63 data, ra,
64 (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
65 (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
66 (void *)(unsigned long)bp->b_buffer_length,
67 NULL, NULL, NULL, NULL, NULL);
68 }
69 ktrace_t *xfs_buf_trace_buf;
70 #define XFS_BUF_TRACE_SIZE 4096
71 #define XB_TRACE(bp, id, data) \
72 xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
73 #else
74 #define XB_TRACE(bp, id, data) do { } while (0)
75 #endif
76
77 #ifdef XFS_BUF_LOCK_TRACKING
78 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
79 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
80 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
81 #else
82 # define XB_SET_OWNER(bp) do { } while (0)
83 # define XB_CLEAR_OWNER(bp) do { } while (0)
84 # define XB_GET_OWNER(bp) do { } while (0)
85 #endif
86
87 #define xb_to_gfp(flags) \
88 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
89 ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
90
91 #define xb_to_km(flags) \
92 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
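
/*
 * Worked examples of the two mappings above (illustrative only, derived
 * directly from the macros): xb_to_gfp(XBF_READ_AHEAD) evaluates to
 * __GFP_NORETRY | __GFP_NOWARN, xb_to_gfp(XBF_DONT_BLOCK) to
 * GFP_NOFS | __GFP_NOWARN, and xb_to_gfp(0) to GFP_KERNEL | __GFP_NOWARN;
 * xb_to_km(XBF_DONT_BLOCK) is KM_NOFS while xb_to_km(0) is KM_SLEEP.
 */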
93
94 #define xfs_buf_allocate(flags) \
95 kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
96 #define xfs_buf_deallocate(bp) \
97 kmem_zone_free(xfs_buf_zone, (bp));
98
99 /*
100 * Page Region interfaces.
101 *
102 * For pages in filesystems where the blocksize is smaller than the
103 * pagesize, we use the page->private field (long) to hold a bitmap
104 * of uptodate regions within the page.
105 *
106 * Each such region is "bytes per page / bits per long" bytes long.
107 *
108 * NBPPR == number-of-bytes-per-page-region
109 * BTOPR == bytes-to-page-region (rounded up)
110 * BTOPRT == bytes-to-page-region-truncated (rounded down)
111 */
112 #if (BITS_PER_LONG == 32)
113 #define PRSHIFT (PAGE_CACHE_SHIFT - 5) /* (32 == 1<<5) */
114 #elif (BITS_PER_LONG == 64)
115 #define PRSHIFT (PAGE_CACHE_SHIFT - 6) /* (64 == 1<<6) */
116 #else
117 #error BITS_PER_LONG must be 32 or 64
118 #endif
119 #define NBPPR (PAGE_CACHE_SIZE/BITS_PER_LONG)
120 #define BTOPR(b) (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
121 #define BTOPRT(b) (((unsigned int)(b) >> PRSHIFT))
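
/*
 * Worked numbers (illustrative, assuming a 4096-byte PAGE_CACHE_SIZE and
 * 64-bit longs): PRSHIFT = 12 - 6 = 6 and NBPPR = 4096 / 64 = 64, so each
 * bit of page->private covers a 64-byte region of the page.
 * BTOPR(100) = (100 + 63) >> 6 = 2, while BTOPRT(100) = 100 >> 6 = 1.
 */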
122
123 STATIC unsigned long
124 page_region_mask(
125 size_t offset,
126 size_t length)
127 {
128 unsigned long mask;
129 int first, final;
130
131 first = BTOPR(offset);
132 final = BTOPRT(offset + length - 1);
133 first = min(first, final);
134
135 mask = ~0UL;
136 mask <<= BITS_PER_LONG - (final - first);
137 mask >>= BITS_PER_LONG - (final);
138
139 ASSERT(offset + length <= PAGE_CACHE_SIZE);
140 ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
141
142 return mask;
143 }
144
145 STATIC_INLINE void
146 set_page_region(
147 struct page *page,
148 size_t offset,
149 size_t length)
150 {
151 set_page_private(page,
152 page_private(page) | page_region_mask(offset, length));
153 if (page_private(page) == ~0UL)
154 SetPageUptodate(page);
155 }
156
157 STATIC_INLINE int
158 test_page_region(
159 struct page *page,
160 size_t offset,
161 size_t length)
162 {
163 unsigned long mask = page_region_mask(offset, length);
164
165 return (mask && (page_private(page) & mask) == mask);
166 }
167
168 /*
169 * Mapping of multi-page buffers into contiguous virtual space
170 */
171
172 typedef struct a_list {
173 void *vm_addr;
174 struct a_list *next;
175 } a_list_t;
176
177 static a_list_t *as_free_head;
178 static int as_list_len;
179 static DEFINE_SPINLOCK(as_lock);
180
181 /*
182 * Try to batch vunmaps because they are costly.
183 */
184 STATIC void
185 free_address(
186 void *addr)
187 {
188 a_list_t *aentry;
189
190 #ifdef CONFIG_XEN
191 /*
192 * Xen needs to be able to make sure it can get an exclusive
193 * RO mapping of pages it wants to turn into a pagetable. If
194 * a newly allocated page is also still being vmap()ed by xfs,
195 * it will cause pagetable construction to fail. This is a
196 * quick workaround to always eagerly unmap pages so that Xen
197 * is happy.
198 */
199 vunmap(addr);
200 return;
201 #endif
202
203 aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
204 if (likely(aentry)) {
205 spin_lock(&as_lock);
206 aentry->next = as_free_head;
207 aentry->vm_addr = addr;
208 as_free_head = aentry;
209 as_list_len++;
210 spin_unlock(&as_lock);
211 } else {
212 vunmap(addr);
213 }
214 }
215
216 STATIC void
217 purge_addresses(void)
218 {
219 a_list_t *aentry, *old;
220
221 if (as_free_head == NULL)
222 return;
223
224 spin_lock(&as_lock);
225 aentry = as_free_head;
226 as_free_head = NULL;
227 as_list_len = 0;
228 spin_unlock(&as_lock);
229
230 while ((old = aentry) != NULL) {
231 vunmap(aentry->vm_addr);
232 aentry = aentry->next;
233 kfree(old);
234 }
235 }
236
237 /*
238 * Internal xfs_buf_t object manipulation
239 */
240
241 STATIC void
242 _xfs_buf_initialize(
243 xfs_buf_t *bp,
244 xfs_buftarg_t *target,
245 xfs_off_t range_base,
246 size_t range_length,
247 xfs_buf_flags_t flags)
248 {
249 /*
250 * We don't want certain flags to appear in b_flags.
251 */
252 flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
253
254 memset(bp, 0, sizeof(xfs_buf_t));
255 atomic_set(&bp->b_hold, 1);
256 init_completion(&bp->b_iowait);
257 INIT_LIST_HEAD(&bp->b_list);
258 INIT_LIST_HEAD(&bp->b_hash_list);
259 init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
260 XB_SET_OWNER(bp);
261 bp->b_target = target;
262 bp->b_file_offset = range_base;
263 /*
264 * Set buffer_length and count_desired to the same value initially.
265 * I/O routines should use count_desired, which will be the same in
266 * most cases but may be reset (e.g. XFS recovery).
267 */
268 bp->b_buffer_length = bp->b_count_desired = range_length;
269 bp->b_flags = flags;
270 bp->b_bn = XFS_BUF_DADDR_NULL;
271 atomic_set(&bp->b_pin_count, 0);
272 init_waitqueue_head(&bp->b_waiters);
273
274 XFS_STATS_INC(xb_create);
275 XB_TRACE(bp, "initialize", target);
276 }
277
278 /*
279 * Allocate a page array capable of holding a specified number
280 * of pages, and point the page buf at it.
281 */
282 STATIC int
283 _xfs_buf_get_pages(
284 xfs_buf_t *bp,
285 int page_count,
286 xfs_buf_flags_t flags)
287 {
288 /* Make sure that we have a page list */
289 if (bp->b_pages == NULL) {
290 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
291 bp->b_page_count = page_count;
292 if (page_count <= XB_PAGES) {
293 bp->b_pages = bp->b_page_array;
294 } else {
295 bp->b_pages = kmem_alloc(sizeof(struct page *) *
296 page_count, xb_to_km(flags));
297 if (bp->b_pages == NULL)
298 return -ENOMEM;
299 }
300 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
301 }
302 return 0;
303 }
304
305 /*
306 * Frees b_pages if it was allocated.
307 */
308 STATIC void
309 _xfs_buf_free_pages(
310 xfs_buf_t *bp)
311 {
312 if (bp->b_pages != bp->b_page_array) {
313 kmem_free(bp->b_pages);
314 }
315 }
316
317 /*
318 * Releases the specified buffer.
319 *
320 * The modification state of any associated pages is left unchanged.
321  * The buffer must not be on any hash - use xfs_buf_rele instead for
322  * hashed and refcounted buffers.
323 */
324 void
325 xfs_buf_free(
326 xfs_buf_t *bp)
327 {
328 XB_TRACE(bp, "free", 0);
329
330 ASSERT(list_empty(&bp->b_hash_list));
331
332 if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
333 uint i;
334
335 if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
336 free_address(bp->b_addr - bp->b_offset);
337
338 for (i = 0; i < bp->b_page_count; i++) {
339 struct page *page = bp->b_pages[i];
340
341 if (bp->b_flags & _XBF_PAGE_CACHE)
342 ASSERT(!PagePrivate(page));
343 page_cache_release(page);
344 }
345 _xfs_buf_free_pages(bp);
346 }
347
348 xfs_buf_deallocate(bp);
349 }
350
351 /*
352  *	Finds all pages for the buffer in question and builds its page list.
353 */
354 STATIC int
355 _xfs_buf_lookup_pages(
356 xfs_buf_t *bp,
357 uint flags)
358 {
359 struct address_space *mapping = bp->b_target->bt_mapping;
360 size_t blocksize = bp->b_target->bt_bsize;
361 size_t size = bp->b_count_desired;
362 size_t nbytes, offset;
363 gfp_t gfp_mask = xb_to_gfp(flags);
364 unsigned short page_count, i;
365 pgoff_t first;
366 xfs_off_t end;
367 int error;
368
369 end = bp->b_file_offset + bp->b_buffer_length;
370 page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
371
372 error = _xfs_buf_get_pages(bp, page_count, flags);
373 if (unlikely(error))
374 return error;
375 bp->b_flags |= _XBF_PAGE_CACHE;
376
377 offset = bp->b_offset;
378 first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
379
380 for (i = 0; i < bp->b_page_count; i++) {
381 struct page *page;
382 uint retries = 0;
383
384 retry:
385 page = find_or_create_page(mapping, first + i, gfp_mask);
386 if (unlikely(page == NULL)) {
387 if (flags & XBF_READ_AHEAD) {
388 bp->b_page_count = i;
389 for (i = 0; i < bp->b_page_count; i++)
390 unlock_page(bp->b_pages[i]);
391 return -ENOMEM;
392 }
393
394 /*
395 * This could deadlock.
396 *
397 * But until all the XFS lowlevel code is revamped to
398 * handle buffer allocation failures we can't do much.
399 */
400 if (!(++retries % 100))
401 printk(KERN_ERR
402 "XFS: possible memory allocation "
403 "deadlock in %s (mode:0x%x)\n",
404 __func__, gfp_mask);
405
406 XFS_STATS_INC(xb_page_retries);
407 xfsbufd_wakeup(0, gfp_mask);
408 congestion_wait(WRITE, HZ/50);
409 goto retry;
410 }
411
412 XFS_STATS_INC(xb_page_found);
413
414 nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
415 size -= nbytes;
416
417 ASSERT(!PagePrivate(page));
418 if (!PageUptodate(page)) {
419 page_count--;
420 if (blocksize >= PAGE_CACHE_SIZE) {
421 if (flags & XBF_READ)
422 bp->b_flags |= _XBF_PAGE_LOCKED;
423 } else if (!PagePrivate(page)) {
424 if (test_page_region(page, offset, nbytes))
425 page_count++;
426 }
427 }
428
429 bp->b_pages[i] = page;
430 offset = 0;
431 }
432
433 if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
434 for (i = 0; i < bp->b_page_count; i++)
435 unlock_page(bp->b_pages[i]);
436 }
437
438 if (page_count == bp->b_page_count)
439 bp->b_flags |= XBF_DONE;
440
441 XB_TRACE(bp, "lookup_pages", (long)page_count);
442 return error;
443 }
444
445 /*
446  *	Map buffer into kernel address-space if necessary.
447 */
448 STATIC int
449 _xfs_buf_map_pages(
450 xfs_buf_t *bp,
451 uint flags)
452 {
453 /* A single page buffer is always mappable */
454 if (bp->b_page_count == 1) {
455 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
456 bp->b_flags |= XBF_MAPPED;
457 } else if (flags & XBF_MAPPED) {
458 if (as_list_len > 64)
459 purge_addresses();
460 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
461 VM_MAP, PAGE_KERNEL);
462 if (unlikely(bp->b_addr == NULL))
463 return -ENOMEM;
464 bp->b_addr += bp->b_offset;
465 bp->b_flags |= XBF_MAPPED;
466 }
467
468 return 0;
469 }
470
471 /*
472 * Finding and Reading Buffers
473 */
474
475 /*
476  *	Looks up, and creates if absent, a lockable buffer for
477 * a given range of an inode. The buffer is returned
478 * locked. If other overlapping buffers exist, they are
479 * released before the new buffer is created and locked,
480 * which may imply that this call will block until those buffers
481 * are unlocked. No I/O is implied by this call.
482 */
483 xfs_buf_t *
484 _xfs_buf_find(
485 xfs_buftarg_t *btp, /* block device target */
486 xfs_off_t ioff, /* starting offset of range */
487 size_t isize, /* length of range */
488 xfs_buf_flags_t flags,
489 xfs_buf_t *new_bp)
490 {
491 xfs_off_t range_base;
492 size_t range_length;
493 xfs_bufhash_t *hash;
494 xfs_buf_t *bp, *n;
495
496 range_base = (ioff << BBSHIFT);
497 range_length = (isize << BBSHIFT);
498
499 /* Check for IOs smaller than the sector size / not sector aligned */
500 ASSERT(!(range_length < (1 << btp->bt_sshift)));
501 ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
502
503 hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
504
505 spin_lock(&hash->bh_lock);
506
507 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
508 ASSERT(btp == bp->b_target);
509 if (bp->b_file_offset == range_base &&
510 bp->b_buffer_length == range_length) {
511 /*
512 * If we look at something, bring it to the
513 * front of the list for next time.
514 */
515 atomic_inc(&bp->b_hold);
516 list_move(&bp->b_hash_list, &hash->bh_list);
517 goto found;
518 }
519 }
520
521 /* No match found */
522 if (new_bp) {
523 _xfs_buf_initialize(new_bp, btp, range_base,
524 range_length, flags);
525 new_bp->b_hash = hash;
526 list_add(&new_bp->b_hash_list, &hash->bh_list);
527 } else {
528 XFS_STATS_INC(xb_miss_locked);
529 }
530
531 spin_unlock(&hash->bh_lock);
532 return new_bp;
533
534 found:
535 spin_unlock(&hash->bh_lock);
536
537 /* Attempt to get the semaphore without sleeping,
538 * if this does not work then we need to drop the
539 * spinlock and do a hard attempt on the semaphore.
540 */
541 if (down_trylock(&bp->b_sema)) {
542 if (!(flags & XBF_TRYLOCK)) {
543 /* wait for buffer ownership */
544 XB_TRACE(bp, "get_lock", 0);
545 xfs_buf_lock(bp);
546 XFS_STATS_INC(xb_get_locked_waited);
547 } else {
548 	/* We asked for a trylock and failed; no need
549 	 * to look at file offset and length here. We
550 	 * know that this buffer at least overlaps our
551 	 * buffer and is locked, so our buffer either
552 	 * does not exist or is this buffer.
553 	 */
554 xfs_buf_rele(bp);
555 XFS_STATS_INC(xb_busy_locked);
556 return NULL;
557 }
558 } else {
559 /* trylock worked */
560 XB_SET_OWNER(bp);
561 }
562
563 if (bp->b_flags & XBF_STALE) {
564 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
565 bp->b_flags &= XBF_MAPPED;
566 }
567 XB_TRACE(bp, "got_lock", 0);
568 XFS_STATS_INC(xb_get_locked);
569 return bp;
570 }
571
572 /*
573 * Assembles a buffer covering the specified range.
574 * Storage in memory for all portions of the buffer will be allocated,
575 * although backing storage may not be.
576 */
577 xfs_buf_t *
578 xfs_buf_get_flags(
579 xfs_buftarg_t *target,/* target for buffer */
580 xfs_off_t ioff, /* starting offset of range */
581 size_t isize, /* length of range */
582 xfs_buf_flags_t flags)
583 {
584 xfs_buf_t *bp, *new_bp;
585 int error = 0, i;
586
587 new_bp = xfs_buf_allocate(flags);
588 if (unlikely(!new_bp))
589 return NULL;
590
591 bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
592 if (bp == new_bp) {
593 error = _xfs_buf_lookup_pages(bp, flags);
594 if (error)
595 goto no_buffer;
596 } else {
597 xfs_buf_deallocate(new_bp);
598 if (unlikely(bp == NULL))
599 return NULL;
600 }
601
602 for (i = 0; i < bp->b_page_count; i++)
603 mark_page_accessed(bp->b_pages[i]);
604
605 if (!(bp->b_flags & XBF_MAPPED)) {
606 error = _xfs_buf_map_pages(bp, flags);
607 if (unlikely(error)) {
608 printk(KERN_WARNING "%s: failed to map pages\n",
609 __func__);
610 goto no_buffer;
611 }
612 }
613
614 XFS_STATS_INC(xb_get);
615
616 /*
617 	 * Always fill in the block number now; the mapped cases can do
618 * their own overlay of this later.
619 */
620 bp->b_bn = ioff;
621 bp->b_count_desired = bp->b_buffer_length;
622
623 XB_TRACE(bp, "get", (unsigned long)flags);
624 return bp;
625
626 no_buffer:
627 if (flags & (XBF_LOCK | XBF_TRYLOCK))
628 xfs_buf_unlock(bp);
629 xfs_buf_rele(bp);
630 return NULL;
631 }
632
633 STATIC int
634 _xfs_buf_read(
635 xfs_buf_t *bp,
636 xfs_buf_flags_t flags)
637 {
638 int status;
639
640 XB_TRACE(bp, "_xfs_buf_read", (unsigned long)flags);
641
642 ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
643 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
644
645 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
646 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
647 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
648 XBF_READ_AHEAD | _XBF_RUN_QUEUES);
649
650 status = xfs_buf_iorequest(bp);
651 if (!status && !(flags & XBF_ASYNC))
652 status = xfs_buf_iowait(bp);
653 return status;
654 }
655
656 xfs_buf_t *
657 xfs_buf_read_flags(
658 xfs_buftarg_t *target,
659 xfs_off_t ioff,
660 size_t isize,
661 xfs_buf_flags_t flags)
662 {
663 xfs_buf_t *bp;
664
665 flags |= XBF_READ;
666
667 bp = xfs_buf_get_flags(target, ioff, isize, flags);
668 if (bp) {
669 if (!XFS_BUF_ISDONE(bp)) {
670 XB_TRACE(bp, "read", (unsigned long)flags);
671 XFS_STATS_INC(xb_get_read);
672 _xfs_buf_read(bp, flags);
673 } else if (flags & XBF_ASYNC) {
674 XB_TRACE(bp, "read_async", (unsigned long)flags);
675 /*
676 * Read ahead call which is already satisfied,
677 * drop the buffer
678 */
679 goto no_buffer;
680 } else {
681 XB_TRACE(bp, "read_done", (unsigned long)flags);
682 /* We do not want read in the flags */
683 bp->b_flags &= ~XBF_READ;
684 }
685 }
686
687 return bp;
688
689 no_buffer:
690 if (flags & (XBF_LOCK | XBF_TRYLOCK))
691 xfs_buf_unlock(bp);
692 xfs_buf_rele(bp);
693 return NULL;
694 }
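
/*
 * Illustrative usage sketch, not part of the original file: a typical
 * synchronous, locked metadata read and release. "mp", "blkno" and
 * "numblks" are assumed caller-side values; the offset and length are in
 * 512-byte basic blocks (they are shifted by BBSHIFT in _xfs_buf_find).
 *
 *	bp = xfs_buf_read_flags(mp->m_ddev_targp, blkno, numblks, XBF_LOCK);
 *	if (bp && !XFS_BUF_ISERROR(bp)) {
 *		... work on the data via xfs_buf_offset(bp, 0) ...
 *		xfs_buf_relse(bp);	(unlocks and drops the hold)
 *	}
 */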
695
696 /*
697 * If we are not low on memory then do the readahead in a deadlock
698 * safe manner.
699 */
700 void
701 xfs_buf_readahead(
702 xfs_buftarg_t *target,
703 xfs_off_t ioff,
704 size_t isize,
705 xfs_buf_flags_t flags)
706 {
707 struct backing_dev_info *bdi;
708
709 bdi = target->bt_mapping->backing_dev_info;
710 if (bdi_read_congested(bdi))
711 return;
712
713 flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
714 xfs_buf_read_flags(target, ioff, isize, flags);
715 }
716
717 xfs_buf_t *
718 xfs_buf_get_empty(
719 size_t len,
720 xfs_buftarg_t *target)
721 {
722 xfs_buf_t *bp;
723
724 bp = xfs_buf_allocate(0);
725 if (bp)
726 _xfs_buf_initialize(bp, target, 0, len, 0);
727 return bp;
728 }
729
730 static inline struct page *
731 mem_to_page(
732 void *addr)
733 {
734 if ((!is_vmalloc_addr(addr))) {
735 return virt_to_page(addr);
736 } else {
737 return vmalloc_to_page(addr);
738 }
739 }
740
741 int
742 xfs_buf_associate_memory(
743 xfs_buf_t *bp,
744 void *mem,
745 size_t len)
746 {
747 int rval;
748 int i = 0;
749 unsigned long pageaddr;
750 unsigned long offset;
751 size_t buflen;
752 int page_count;
753
754 pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
755 offset = (unsigned long)mem - pageaddr;
756 buflen = PAGE_CACHE_ALIGN(len + offset);
757 page_count = buflen >> PAGE_CACHE_SHIFT;
758
759 /* Free any previous set of page pointers */
760 if (bp->b_pages)
761 _xfs_buf_free_pages(bp);
762
763 bp->b_pages = NULL;
764 bp->b_addr = mem;
765
766 rval = _xfs_buf_get_pages(bp, page_count, 0);
767 if (rval)
768 return rval;
769
770 bp->b_offset = offset;
771
772 for (i = 0; i < bp->b_page_count; i++) {
773 bp->b_pages[i] = mem_to_page((void *)pageaddr);
774 pageaddr += PAGE_CACHE_SIZE;
775 }
776
777 bp->b_count_desired = len;
778 bp->b_buffer_length = buflen;
779 bp->b_flags |= XBF_MAPPED;
780 bp->b_flags &= ~_XBF_PAGE_LOCKED;
781
782 return 0;
783 }
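
/*
 * Illustrative sequence (assumed caller code, not from this file): a
 * caller that already owns a memory region can wrap it in a buffer by
 * pairing xfs_buf_get_empty() with xfs_buf_associate_memory():
 *
 *	bp = xfs_buf_get_empty(len, target);
 *	if (bp && xfs_buf_associate_memory(bp, mem, len) == 0)
 *		... bp->b_addr now points at "mem" ...
 */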
784
785 xfs_buf_t *
786 xfs_buf_get_noaddr(
787 size_t len,
788 xfs_buftarg_t *target)
789 {
790 unsigned long page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
791 int error, i;
792 xfs_buf_t *bp;
793
794 bp = xfs_buf_allocate(0);
795 if (unlikely(bp == NULL))
796 goto fail;
797 _xfs_buf_initialize(bp, target, 0, len, 0);
798
799 error = _xfs_buf_get_pages(bp, page_count, 0);
800 if (error)
801 goto fail_free_buf;
802
803 for (i = 0; i < page_count; i++) {
804 bp->b_pages[i] = alloc_page(GFP_KERNEL);
805 if (!bp->b_pages[i])
806 goto fail_free_mem;
807 }
808 bp->b_flags |= _XBF_PAGES;
809
810 error = _xfs_buf_map_pages(bp, XBF_MAPPED);
811 if (unlikely(error)) {
812 printk(KERN_WARNING "%s: failed to map pages\n",
813 __func__);
814 goto fail_free_mem;
815 }
816
817 xfs_buf_unlock(bp);
818
819 XB_TRACE(bp, "no_daddr", len);
820 return bp;
821
822 fail_free_mem:
823 while (--i >= 0)
824 __free_page(bp->b_pages[i]);
825 _xfs_buf_free_pages(bp);
826 fail_free_buf:
827 xfs_buf_deallocate(bp);
828 fail:
829 return NULL;
830 }
831
832 /*
833 * Increment reference count on buffer, to hold the buffer concurrently
834 * with another thread which may release (free) the buffer asynchronously.
835 * Must hold the buffer already to call this function.
836 */
837 void
838 xfs_buf_hold(
839 xfs_buf_t *bp)
840 {
841 atomic_inc(&bp->b_hold);
842 XB_TRACE(bp, "hold", 0);
843 }
844
845 /*
846  *	Releases a hold on the specified buffer. If the
847  *	hold count is 1, calls xfs_buf_free.
848 */
849 void
850 xfs_buf_rele(
851 xfs_buf_t *bp)
852 {
853 xfs_bufhash_t *hash = bp->b_hash;
854
855 XB_TRACE(bp, "rele", bp->b_relse);
856
857 if (unlikely(!hash)) {
858 ASSERT(!bp->b_relse);
859 if (atomic_dec_and_test(&bp->b_hold))
860 xfs_buf_free(bp);
861 return;
862 }
863
864 ASSERT(atomic_read(&bp->b_hold) > 0);
865 if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
866 if (bp->b_relse) {
867 atomic_inc(&bp->b_hold);
868 spin_unlock(&hash->bh_lock);
869 (*(bp->b_relse)) (bp);
870 } else if (bp->b_flags & XBF_FS_MANAGED) {
871 spin_unlock(&hash->bh_lock);
872 } else {
873 ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
874 list_del_init(&bp->b_hash_list);
875 spin_unlock(&hash->bh_lock);
876 xfs_buf_free(bp);
877 }
878 }
879 }
880
881
882 /*
883 * Mutual exclusion on buffers. Locking model:
884 *
885 * Buffers associated with inodes for which buffer locking
886 * is not enabled are not protected by semaphores, and are
887 * assumed to be exclusively owned by the caller. There is a
888 * spinlock in the buffer, used by the caller when concurrent
889 * access is possible.
890 */
891
892 /*
893 * Locks a buffer object, if it is not already locked.
894 * Note that this in no way locks the underlying pages, so it is only
895 * useful for synchronizing concurrent use of buffer objects, not for
896 * synchronizing independent access to the underlying pages.
897 */
898 int
899 xfs_buf_cond_lock(
900 xfs_buf_t *bp)
901 {
902 int locked;
903
904 locked = down_trylock(&bp->b_sema) == 0;
905 if (locked) {
906 XB_SET_OWNER(bp);
907 }
908 XB_TRACE(bp, "cond_lock", (long)locked);
909 return locked ? 0 : -EBUSY;
910 }
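
/*
 * Illustrative note (not part of the original file): a zero return means
 * the trylock succeeded, so callers typically test it like
 *
 *	if (xfs_buf_cond_lock(bp) == 0) {
 *		... caller now owns the buffer lock ...
 *		xfs_buf_unlock(bp);
 *	}
 */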
911
912 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
913 int
914 xfs_buf_lock_value(
915 xfs_buf_t *bp)
916 {
917 return bp->b_sema.count;
918 }
919 #endif
920
921 /*
922 * Locks a buffer object.
923 * Note that this in no way locks the underlying pages, so it is only
924 * useful for synchronizing concurrent use of buffer objects, not for
925 * synchronizing independent access to the underlying pages.
926 */
927 void
928 xfs_buf_lock(
929 xfs_buf_t *bp)
930 {
931 XB_TRACE(bp, "lock", 0);
932 if (atomic_read(&bp->b_io_remaining))
933 blk_run_address_space(bp->b_target->bt_mapping);
934 down(&bp->b_sema);
935 XB_SET_OWNER(bp);
936 XB_TRACE(bp, "locked", 0);
937 }
938
939 /*
940 * Releases the lock on the buffer object.
941 * If the buffer is marked delwri but is not queued, do so before we
942 * unlock the buffer as we need to set flags correctly. We also need to
943 * take a reference for the delwri queue because the unlocker is going to
944  * drop theirs and they don't know we just queued it.
945 */
946 void
947 xfs_buf_unlock(
948 xfs_buf_t *bp)
949 {
950 if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
951 atomic_inc(&bp->b_hold);
952 bp->b_flags |= XBF_ASYNC;
953 xfs_buf_delwri_queue(bp, 0);
954 }
955
956 XB_CLEAR_OWNER(bp);
957 up(&bp->b_sema);
958 XB_TRACE(bp, "unlock", 0);
959 }
960
961
962 /*
963 * Pinning Buffer Storage in Memory
964 * Ensure that no attempt to force a buffer to disk will succeed.
965 */
966 void
967 xfs_buf_pin(
968 xfs_buf_t *bp)
969 {
970 atomic_inc(&bp->b_pin_count);
971 XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
972 }
973
974 void
975 xfs_buf_unpin(
976 xfs_buf_t *bp)
977 {
978 if (atomic_dec_and_test(&bp->b_pin_count))
979 wake_up_all(&bp->b_waiters);
980 XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
981 }
982
983 int
984 xfs_buf_ispin(
985 xfs_buf_t *bp)
986 {
987 return atomic_read(&bp->b_pin_count);
988 }
989
990 STATIC void
991 xfs_buf_wait_unpin(
992 xfs_buf_t *bp)
993 {
994 DECLARE_WAITQUEUE (wait, current);
995
996 if (atomic_read(&bp->b_pin_count) == 0)
997 return;
998
999 add_wait_queue(&bp->b_waiters, &wait);
1000 for (;;) {
1001 set_current_state(TASK_UNINTERRUPTIBLE);
1002 if (atomic_read(&bp->b_pin_count) == 0)
1003 break;
1004 if (atomic_read(&bp->b_io_remaining))
1005 blk_run_address_space(bp->b_target->bt_mapping);
1006 schedule();
1007 }
1008 remove_wait_queue(&bp->b_waiters, &wait);
1009 set_current_state(TASK_RUNNING);
1010 }
1011
1012 /*
1013 * Buffer Utility Routines
1014 */
1015
1016 STATIC void
1017 xfs_buf_iodone_work(
1018 struct work_struct *work)
1019 {
1020 xfs_buf_t *bp =
1021 container_of(work, xfs_buf_t, b_iodone_work);
1022
1023 /*
1024 	 * We can get an EOPNOTSUPP on ordered writes.  Here we clear the
1025 * ordered flag and reissue them. Because we can't tell the higher
1026 * layers directly that they should not issue ordered I/O anymore, they
1027 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
1028 */
1029 if ((bp->b_error == EOPNOTSUPP) &&
1030 (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
1031 XB_TRACE(bp, "ordered_retry", bp->b_iodone);
1032 bp->b_flags &= ~XBF_ORDERED;
1033 bp->b_flags |= _XFS_BARRIER_FAILED;
1034 xfs_buf_iorequest(bp);
1035 } else if (bp->b_iodone)
1036 (*(bp->b_iodone))(bp);
1037 else if (bp->b_flags & XBF_ASYNC)
1038 xfs_buf_relse(bp);
1039 }
1040
1041 void
1042 xfs_buf_ioend(
1043 xfs_buf_t *bp,
1044 int schedule)
1045 {
1046 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1047 if (bp->b_error == 0)
1048 bp->b_flags |= XBF_DONE;
1049
1050 XB_TRACE(bp, "iodone", bp->b_iodone);
1051
1052 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1053 if (schedule) {
1054 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1055 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1056 } else {
1057 xfs_buf_iodone_work(&bp->b_iodone_work);
1058 }
1059 } else {
1060 complete(&bp->b_iowait);
1061 }
1062 }
1063
1064 void
1065 xfs_buf_ioerror(
1066 xfs_buf_t *bp,
1067 int error)
1068 {
1069 ASSERT(error >= 0 && error <= 0xffff);
1070 bp->b_error = (unsigned short)error;
1071 XB_TRACE(bp, "ioerror", (unsigned long)error);
1072 }
1073
1074 int
1075 xfs_bawrite(
1076 void *mp,
1077 struct xfs_buf *bp)
1078 {
1079 XB_TRACE(bp, "bawrite", 0);
1080
1081 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
1082
1083 xfs_buf_delwri_dequeue(bp);
1084
1085 bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
1086 bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
1087
1088 bp->b_mount = mp;
1089 bp->b_strat = xfs_bdstrat_cb;
1090 return xfs_bdstrat_cb(bp);
1091 }
1092
1093 void
1094 xfs_bdwrite(
1095 void *mp,
1096 struct xfs_buf *bp)
1097 {
1098 XB_TRACE(bp, "bdwrite", 0);
1099
1100 bp->b_strat = xfs_bdstrat_cb;
1101 bp->b_mount = mp;
1102
1103 bp->b_flags &= ~XBF_READ;
1104 bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
1105
1106 xfs_buf_delwri_queue(bp, 1);
1107 }
1108
1109 STATIC_INLINE void
1110 _xfs_buf_ioend(
1111 xfs_buf_t *bp,
1112 int schedule)
1113 {
1114 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1115 bp->b_flags &= ~_XBF_PAGE_LOCKED;
1116 xfs_buf_ioend(bp, schedule);
1117 }
1118 }
1119
1120 STATIC void
1121 xfs_buf_bio_end_io(
1122 struct bio *bio,
1123 int error)
1124 {
1125 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1126 unsigned int blocksize = bp->b_target->bt_bsize;
1127 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1128
1129 xfs_buf_ioerror(bp, -error);
1130
1131 do {
1132 struct page *page = bvec->bv_page;
1133
1134 ASSERT(!PagePrivate(page));
1135 if (unlikely(bp->b_error)) {
1136 if (bp->b_flags & XBF_READ)
1137 ClearPageUptodate(page);
1138 } else if (blocksize >= PAGE_CACHE_SIZE) {
1139 SetPageUptodate(page);
1140 } else if (!PagePrivate(page) &&
1141 (bp->b_flags & _XBF_PAGE_CACHE)) {
1142 set_page_region(page, bvec->bv_offset, bvec->bv_len);
1143 }
1144
1145 if (--bvec >= bio->bi_io_vec)
1146 prefetchw(&bvec->bv_page->flags);
1147
1148 if (bp->b_flags & _XBF_PAGE_LOCKED)
1149 unlock_page(page);
1150 } while (bvec >= bio->bi_io_vec);
1151
1152 _xfs_buf_ioend(bp, 1);
1153 bio_put(bio);
1154 }
1155
1156 STATIC void
1157 _xfs_buf_ioapply(
1158 xfs_buf_t *bp)
1159 {
1160 int rw, map_i, total_nr_pages, nr_pages;
1161 struct bio *bio;
1162 int offset = bp->b_offset;
1163 int size = bp->b_count_desired;
1164 sector_t sector = bp->b_bn;
1165 unsigned int blocksize = bp->b_target->bt_bsize;
1166
1167 total_nr_pages = bp->b_page_count;
1168 map_i = 0;
1169
1170 if (bp->b_flags & XBF_ORDERED) {
1171 ASSERT(!(bp->b_flags & XBF_READ));
1172 rw = WRITE_BARRIER;
1173 } else if (bp->b_flags & _XBF_RUN_QUEUES) {
1174 ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
1175 bp->b_flags &= ~_XBF_RUN_QUEUES;
1176 rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1177 } else {
1178 rw = (bp->b_flags & XBF_WRITE) ? WRITE :
1179 (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1180 }
1181
1182 	/* Special code path for reading a sub-page-size buffer: we
1183 	 * populate the whole page, and hence the other metadata
1184 * in the same page. This optimization is only valid when the
1185 * filesystem block size is not smaller than the page size.
1186 */
1187 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1188 ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
1189 (XBF_READ|_XBF_PAGE_LOCKED)) &&
1190 (blocksize >= PAGE_CACHE_SIZE)) {
1191 bio = bio_alloc(GFP_NOIO, 1);
1192
1193 bio->bi_bdev = bp->b_target->bt_bdev;
1194 bio->bi_sector = sector - (offset >> BBSHIFT);
1195 bio->bi_end_io = xfs_buf_bio_end_io;
1196 bio->bi_private = bp;
1197
1198 bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
1199 size = 0;
1200
1201 atomic_inc(&bp->b_io_remaining);
1202
1203 goto submit_io;
1204 }
1205
1206 next_chunk:
1207 atomic_inc(&bp->b_io_remaining);
1208 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1209 if (nr_pages > total_nr_pages)
1210 nr_pages = total_nr_pages;
1211
1212 bio = bio_alloc(GFP_NOIO, nr_pages);
1213 bio->bi_bdev = bp->b_target->bt_bdev;
1214 bio->bi_sector = sector;
1215 bio->bi_end_io = xfs_buf_bio_end_io;
1216 bio->bi_private = bp;
1217
1218 for (; size && nr_pages; nr_pages--, map_i++) {
1219 int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
1220
1221 if (nbytes > size)
1222 nbytes = size;
1223
1224 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1225 if (rbytes < nbytes)
1226 break;
1227
1228 offset = 0;
1229 sector += nbytes >> BBSHIFT;
1230 size -= nbytes;
1231 total_nr_pages--;
1232 }
1233
1234 submit_io:
1235 if (likely(bio->bi_size)) {
1236 submit_bio(rw, bio);
1237 if (size)
1238 goto next_chunk;
1239 } else {
1240 bio_put(bio);
1241 xfs_buf_ioerror(bp, EIO);
1242 }
1243 }
1244
1245 int
1246 xfs_buf_iorequest(
1247 xfs_buf_t *bp)
1248 {
1249 XB_TRACE(bp, "iorequest", 0);
1250
1251 if (bp->b_flags & XBF_DELWRI) {
1252 xfs_buf_delwri_queue(bp, 1);
1253 return 0;
1254 }
1255
1256 if (bp->b_flags & XBF_WRITE) {
1257 xfs_buf_wait_unpin(bp);
1258 }
1259
1260 xfs_buf_hold(bp);
1261
1262 	/* Set the count to 1 initially; this will stop an I/O
1263 * completion callout which happens before we have started
1264 * all the I/O from calling xfs_buf_ioend too early.
1265 */
1266 atomic_set(&bp->b_io_remaining, 1);
1267 _xfs_buf_ioapply(bp);
1268 _xfs_buf_ioend(bp, 0);
1269
1270 xfs_buf_rele(bp);
1271 return 0;
1272 }
1273
1274 /*
1275 * Waits for I/O to complete on the buffer supplied.
1276 * It returns immediately if no I/O is pending.
1277 * It returns the I/O error code, if any, or 0 if there was no error.
1278 */
1279 int
1280 xfs_buf_iowait(
1281 xfs_buf_t *bp)
1282 {
1283 XB_TRACE(bp, "iowait", 0);
1284 if (atomic_read(&bp->b_io_remaining))
1285 blk_run_address_space(bp->b_target->bt_mapping);
1286 wait_for_completion(&bp->b_iowait);
1287 XB_TRACE(bp, "iowaited", (long)bp->b_error);
1288 return bp->b_error;
1289 }
1290
1291 xfs_caddr_t
1292 xfs_buf_offset(
1293 xfs_buf_t *bp,
1294 size_t offset)
1295 {
1296 struct page *page;
1297
1298 if (bp->b_flags & XBF_MAPPED)
1299 return XFS_BUF_PTR(bp) + offset;
1300
1301 offset += bp->b_offset;
1302 page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1303 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1304 }
1305
1306 /*
1307 * Move data into or out of a buffer.
1308 */
1309 void
1310 xfs_buf_iomove(
1311 xfs_buf_t *bp, /* buffer to process */
1312 size_t boff, /* starting buffer offset */
1313 size_t bsize, /* length to copy */
1314 caddr_t data, /* data address */
1315 xfs_buf_rw_t mode) /* read/write/zero flag */
1316 {
1317 size_t bend, cpoff, csize;
1318 struct page *page;
1319
1320 bend = boff + bsize;
1321 while (boff < bend) {
1322 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1323 cpoff = xfs_buf_poff(boff + bp->b_offset);
1324 csize = min_t(size_t,
1325 PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1326
1327 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1328
1329 switch (mode) {
1330 case XBRW_ZERO:
1331 memset(page_address(page) + cpoff, 0, csize);
1332 break;
1333 case XBRW_READ:
1334 memcpy(data, page_address(page) + cpoff, csize);
1335 break;
1336 case XBRW_WRITE:
1337 memcpy(page_address(page) + cpoff, data, csize);
1338 }
1339
1340 boff += csize;
1341 data += csize;
1342 }
1343 }
1344
1345 /*
1346 * Handling of buffer targets (buftargs).
1347 */
1348
1349 /*
1350 * Wait for any bufs with callbacks that have been submitted but
1351 * have not yet returned... walk the hash list for the target.
1352 */
1353 void
1354 xfs_wait_buftarg(
1355 xfs_buftarg_t *btp)
1356 {
1357 xfs_buf_t *bp, *n;
1358 xfs_bufhash_t *hash;
1359 uint i;
1360
1361 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1362 hash = &btp->bt_hash[i];
1363 again:
1364 spin_lock(&hash->bh_lock);
1365 list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
1366 ASSERT(btp == bp->b_target);
1367 if (!(bp->b_flags & XBF_FS_MANAGED)) {
1368 spin_unlock(&hash->bh_lock);
1369 /*
1370 * Catch superblock reference count leaks
1371 * immediately
1372 */
1373 BUG_ON(bp->b_bn == 0);
1374 delay(100);
1375 goto again;
1376 }
1377 }
1378 spin_unlock(&hash->bh_lock);
1379 }
1380 }
1381
1382 /*
1383 * Allocate buffer hash table for a given target.
1384 * For devices containing metadata (i.e. not the log/realtime devices)
1385 * we need to allocate a much larger hash table.
1386 */
1387 STATIC void
1388 xfs_alloc_bufhash(
1389 xfs_buftarg_t *btp,
1390 int external)
1391 {
1392 unsigned int i;
1393
1394 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
1395 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1396 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1397 sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
1398 for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1399 spin_lock_init(&btp->bt_hash[i].bh_lock);
1400 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1401 }
1402 }
1403
1404 STATIC void
1405 xfs_free_bufhash(
1406 xfs_buftarg_t *btp)
1407 {
1408 kmem_free(btp->bt_hash);
1409 btp->bt_hash = NULL;
1410 }
1411
1412 /*
1413 * buftarg list for delwrite queue processing
1414 */
1415 static LIST_HEAD(xfs_buftarg_list);
1416 static DEFINE_SPINLOCK(xfs_buftarg_lock);
1417
1418 STATIC void
1419 xfs_register_buftarg(
1420 xfs_buftarg_t *btp)
1421 {
1422 spin_lock(&xfs_buftarg_lock);
1423 list_add(&btp->bt_list, &xfs_buftarg_list);
1424 spin_unlock(&xfs_buftarg_lock);
1425 }
1426
1427 STATIC void
1428 xfs_unregister_buftarg(
1429 xfs_buftarg_t *btp)
1430 {
1431 spin_lock(&xfs_buftarg_lock);
1432 list_del(&btp->bt_list);
1433 spin_unlock(&xfs_buftarg_lock);
1434 }
1435
1436 void
1437 xfs_free_buftarg(
1438 xfs_buftarg_t *btp)
1439 {
1440 xfs_flush_buftarg(btp, 1);
1441 xfs_blkdev_issue_flush(btp);
1442 xfs_free_bufhash(btp);
1443 iput(btp->bt_mapping->host);
1444
1445 /* Unregister the buftarg first so that we don't get a
1446 * wakeup finding a non-existent task
1447 */
1448 xfs_unregister_buftarg(btp);
1449 kthread_stop(btp->bt_task);
1450
1451 kmem_free(btp);
1452 }
1453
1454 STATIC int
1455 xfs_setsize_buftarg_flags(
1456 xfs_buftarg_t *btp,
1457 unsigned int blocksize,
1458 unsigned int sectorsize,
1459 int verbose)
1460 {
1461 btp->bt_bsize = blocksize;
1462 btp->bt_sshift = ffs(sectorsize) - 1;
1463 btp->bt_smask = sectorsize - 1;
1464
1465 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1466 printk(KERN_WARNING
1467 "XFS: Cannot set_blocksize to %u on device %s\n",
1468 sectorsize, XFS_BUFTARG_NAME(btp));
1469 return EINVAL;
1470 }
1471
1472 if (verbose &&
1473 (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
1474 printk(KERN_WARNING
1475 "XFS: %u byte sectors in use on device %s. "
1476 "This is suboptimal; %u or greater is ideal.\n",
1477 sectorsize, XFS_BUFTARG_NAME(btp),
1478 (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
1479 }
1480
1481 return 0;
1482 }
1483
1484 /*
1485 * When allocating the initial buffer target we have not yet
1486 * read in the superblock, so don't know what sized sectors
1487  *	are being used at this early stage.  Play safe.
1488 */
1489 STATIC int
1490 xfs_setsize_buftarg_early(
1491 xfs_buftarg_t *btp,
1492 struct block_device *bdev)
1493 {
1494 return xfs_setsize_buftarg_flags(btp,
1495 PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
1496 }
1497
1498 int
1499 xfs_setsize_buftarg(
1500 xfs_buftarg_t *btp,
1501 unsigned int blocksize,
1502 unsigned int sectorsize)
1503 {
1504 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1505 }
1506
1507 STATIC int
1508 xfs_mapping_buftarg(
1509 xfs_buftarg_t *btp,
1510 struct block_device *bdev)
1511 {
1512 struct backing_dev_info *bdi;
1513 struct inode *inode;
1514 struct address_space *mapping;
1515 static const struct address_space_operations mapping_aops = {
1516 .sync_page = block_sync_page,
1517 .migratepage = fail_migrate_page,
1518 };
1519
1520 inode = new_inode(bdev->bd_inode->i_sb);
1521 if (!inode) {
1522 printk(KERN_WARNING
1523 "XFS: Cannot allocate mapping inode for device %s\n",
1524 XFS_BUFTARG_NAME(btp));
1525 return ENOMEM;
1526 }
1527 inode->i_mode = S_IFBLK;
1528 inode->i_bdev = bdev;
1529 inode->i_rdev = bdev->bd_dev;
1530 bdi = blk_get_backing_dev_info(bdev);
1531 if (!bdi)
1532 bdi = &default_backing_dev_info;
1533 mapping = &inode->i_data;
1534 mapping->a_ops = &mapping_aops;
1535 mapping->backing_dev_info = bdi;
1536 mapping_set_gfp_mask(mapping, GFP_NOFS);
1537 btp->bt_mapping = mapping;
1538 return 0;
1539 }
1540
1541 STATIC int
1542 xfs_alloc_delwrite_queue(
1543 xfs_buftarg_t *btp)
1544 {
1545 int error = 0;
1546
1547 INIT_LIST_HEAD(&btp->bt_list);
1548 INIT_LIST_HEAD(&btp->bt_delwrite_queue);
1549 spin_lock_init(&btp->bt_delwrite_lock);
1550 btp->bt_flags = 0;
1551 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
1552 if (IS_ERR(btp->bt_task)) {
1553 error = PTR_ERR(btp->bt_task);
1554 goto out_error;
1555 }
1556 xfs_register_buftarg(btp);
1557 out_error:
1558 return error;
1559 }
1560
1561 xfs_buftarg_t *
1562 xfs_alloc_buftarg(
1563 struct block_device *bdev,
1564 int external)
1565 {
1566 xfs_buftarg_t *btp;
1567
1568 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1569
1570 btp->bt_dev = bdev->bd_dev;
1571 btp->bt_bdev = bdev;
1572 if (xfs_setsize_buftarg_early(btp, bdev))
1573 goto error;
1574 if (xfs_mapping_buftarg(btp, bdev))
1575 goto error;
1576 if (xfs_alloc_delwrite_queue(btp))
1577 goto error;
1578 xfs_alloc_bufhash(btp, external);
1579 return btp;
1580
1581 error:
1582 kmem_free(btp);
1583 return NULL;
1584 }
1585
1586
1587 /*
1588 * Delayed write buffer handling
1589 */
1590 STATIC void
1591 xfs_buf_delwri_queue(
1592 xfs_buf_t *bp,
1593 int unlock)
1594 {
1595 struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
1596 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1597
1598 XB_TRACE(bp, "delwri_q", (long)unlock);
1599 ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
1600
1601 spin_lock(dwlk);
1602 /* If already in the queue, dequeue and place at tail */
1603 if (!list_empty(&bp->b_list)) {
1604 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1605 if (unlock)
1606 atomic_dec(&bp->b_hold);
1607 list_del(&bp->b_list);
1608 }
1609
1610 bp->b_flags |= _XBF_DELWRI_Q;
1611 list_add_tail(&bp->b_list, dwq);
1612 bp->b_queuetime = jiffies;
1613 spin_unlock(dwlk);
1614
1615 if (unlock)
1616 xfs_buf_unlock(bp);
1617 }
1618
1619 void
1620 xfs_buf_delwri_dequeue(
1621 xfs_buf_t *bp)
1622 {
1623 spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
1624 int dequeued = 0;
1625
1626 spin_lock(dwlk);
1627 if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
1628 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1629 list_del_init(&bp->b_list);
1630 dequeued = 1;
1631 }
1632 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1633 spin_unlock(dwlk);
1634
1635 if (dequeued)
1636 xfs_buf_rele(bp);
1637
1638 XB_TRACE(bp, "delwri_dq", (long)dequeued);
1639 }
1640
1641 STATIC void
1642 xfs_buf_runall_queues(
1643 struct workqueue_struct *queue)
1644 {
1645 flush_workqueue(queue);
1646 }
1647
1648 STATIC int
1649 xfsbufd_wakeup(
1650 int priority,
1651 gfp_t mask)
1652 {
1653 xfs_buftarg_t *btp;
1654
1655 spin_lock(&xfs_buftarg_lock);
1656 list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1657 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1658 continue;
1659 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1660 wake_up_process(btp->bt_task);
1661 }
1662 spin_unlock(&xfs_buftarg_lock);
1663 return 0;
1664 }
1665
1666 /*
1667 * Move as many buffers as specified to the supplied list,
1668 * indicating if we skipped any buffers to prevent deadlocks.
1669 */
1670 STATIC int
1671 xfs_buf_delwri_split(
1672 xfs_buftarg_t *target,
1673 struct list_head *list,
1674 unsigned long age)
1675 {
1676 xfs_buf_t *bp, *n;
1677 struct list_head *dwq = &target->bt_delwrite_queue;
1678 spinlock_t *dwlk = &target->bt_delwrite_lock;
1679 int skipped = 0;
1680 int force;
1681
1682 force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1683 INIT_LIST_HEAD(list);
1684 spin_lock(dwlk);
1685 list_for_each_entry_safe(bp, n, dwq, b_list) {
1686 XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
1687 ASSERT(bp->b_flags & XBF_DELWRI);
1688
1689 if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1690 if (!force &&
1691 time_before(jiffies, bp->b_queuetime + age)) {
1692 xfs_buf_unlock(bp);
1693 break;
1694 }
1695
1696 bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
1697 _XBF_RUN_QUEUES);
1698 bp->b_flags |= XBF_WRITE;
1699 list_move_tail(&bp->b_list, list);
1700 } else
1701 skipped++;
1702 }
1703 spin_unlock(dwlk);
1704
1705 return skipped;
1706
1707 }
1708
1709 STATIC int
1710 xfsbufd(
1711 void *data)
1712 {
1713 struct list_head tmp;
1714 xfs_buftarg_t *target = (xfs_buftarg_t *)data;
1715 int count;
1716 xfs_buf_t *bp;
1717
1718 current->flags |= PF_MEMALLOC;
1719
1720 set_freezable();
1721
1722 do {
1723 if (unlikely(freezing(current))) {
1724 set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1725 refrigerator();
1726 } else {
1727 clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1728 }
1729
1730 schedule_timeout_interruptible(
1731 xfs_buf_timer_centisecs * msecs_to_jiffies(10));
1732
1733 xfs_buf_delwri_split(target, &tmp,
1734 xfs_buf_age_centisecs * msecs_to_jiffies(10));
1735
1736 count = 0;
1737 while (!list_empty(&tmp)) {
1738 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1739 ASSERT(target == bp->b_target);
1740
1741 list_del_init(&bp->b_list);
1742 xfs_buf_iostrategy(bp);
1743 count++;
1744 }
1745
1746 if (as_list_len > 0)
1747 purge_addresses();
1748 if (count)
1749 blk_run_address_space(target->bt_mapping);
1750
1751 } while (!kthread_should_stop());
1752
1753 return 0;
1754 }
1755
1756 /*
1757 * Go through all incore buffers, and release buffers if they belong to
1758 * the given device. This is used in filesystem error handling to
1759 * preserve the consistency of its metadata.
1760 */
1761 int
1762 xfs_flush_buftarg(
1763 xfs_buftarg_t *target,
1764 int wait)
1765 {
1766 struct list_head tmp;
1767 xfs_buf_t *bp, *n;
1768 int pincount = 0;
1769
1770 xfs_buf_runall_queues(xfsdatad_workqueue);
1771 xfs_buf_runall_queues(xfslogd_workqueue);
1772
1773 set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1774 pincount = xfs_buf_delwri_split(target, &tmp, 0);
1775
1776 /*
1777 	 * Dropped the delayed write list lock; now walk the temporary list.
1778 */
1779 list_for_each_entry_safe(bp, n, &tmp, b_list) {
1780 ASSERT(target == bp->b_target);
1781 if (wait)
1782 bp->b_flags &= ~XBF_ASYNC;
1783 else
1784 list_del_init(&bp->b_list);
1785
1786 xfs_buf_iostrategy(bp);
1787 }
1788
1789 if (wait)
1790 blk_run_address_space(target->bt_mapping);
1791
1792 /*
1793 * Remaining list items must be flushed before returning
1794 */
1795 while (!list_empty(&tmp)) {
1796 bp = list_entry(tmp.next, xfs_buf_t, b_list);
1797
1798 list_del_init(&bp->b_list);
1799 xfs_iowait(bp);
1800 xfs_buf_relse(bp);
1801 }
1802
1803 return pincount;
1804 }
1805
1806 int __init
1807 xfs_buf_init(void)
1808 {
1809 #ifdef XFS_BUF_TRACE
1810 xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_NOFS);
1811 #endif
1812
1813 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1814 KM_ZONE_HWALIGN, NULL);
1815 if (!xfs_buf_zone)
1816 goto out_free_trace_buf;
1817
1818 xfslogd_workqueue = create_workqueue("xfslogd");
1819 if (!xfslogd_workqueue)
1820 goto out_free_buf_zone;
1821
1822 xfsdatad_workqueue = create_workqueue("xfsdatad");
1823 if (!xfsdatad_workqueue)
1824 goto out_destroy_xfslogd_workqueue;
1825
1826 register_shrinker(&xfs_buf_shake);
1827 return 0;
1828
1829 out_destroy_xfslogd_workqueue:
1830 destroy_workqueue(xfslogd_workqueue);
1831 out_free_buf_zone:
1832 kmem_zone_destroy(xfs_buf_zone);
1833 out_free_trace_buf:
1834 #ifdef XFS_BUF_TRACE
1835 ktrace_free(xfs_buf_trace_buf);
1836 #endif
1837 return -ENOMEM;
1838 }
1839
1840 void
1841 xfs_buf_terminate(void)
1842 {
1843 unregister_shrinker(&xfs_buf_shake);
1844 destroy_workqueue(xfsdatad_workqueue);
1845 destroy_workqueue(xfslogd_workqueue);
1846 kmem_zone_destroy(xfs_buf_zone);
1847 #ifdef XFS_BUF_TRACE
1848 ktrace_free(xfs_buf_trace_buf);
1849 #endif
1850 }
1851
1852 #ifdef CONFIG_KDB_MODULES
1853 struct list_head *
1854 xfs_get_buftarg_list(void)
1855 {
1856 return &xfs_buftarg_list;
1857 }
1858 #endif