fs/xfs/xfs_buf.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_sb.h"
38 #include "xfs_trans_resv.h"
39 #include "xfs_log.h"
40 #include "xfs_ag.h"
41 #include "xfs_mount.h"
42 #include "xfs_trace.h"
43
44 static kmem_zone_t *xfs_buf_zone;
45
46 static struct workqueue_struct *xfslogd_workqueue;
47
48 #ifdef XFS_BUF_LOCK_TRACKING
49 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
50 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
51 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
52 #else
53 # define XB_SET_OWNER(bp) do { } while (0)
54 # define XB_CLEAR_OWNER(bp) do { } while (0)
55 # define XB_GET_OWNER(bp) do { } while (0)
56 #endif
57
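/*
 * Map buffer flags to an allocation mode. Readahead is speculative, so it
 * may fail fast (__GFP_NORETRY) rather than block; all other allocations
 * use GFP_NOFS to avoid recursing into the filesystem from memory reclaim.
 * __GFP_NOWARN suppresses allocation failure warnings in both cases.
 */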
58 #define xb_to_gfp(flags) \
59 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
60
61
62 static inline int
63 xfs_buf_is_vmapped(
64 struct xfs_buf *bp)
65 {
66 /*
67 * Return true if the buffer is vmapped.
68 *
69 * b_addr is null if the buffer is not mapped, but single-page buffers
70 * are addressed directly rather than vmapped, so we have to check both
71 * b_addr and bp->b_page_count > 1.
72 */
73 return bp->b_addr && bp->b_page_count > 1;
74 }
75
76 static inline int
77 xfs_buf_vmap_len(
78 struct xfs_buf *bp)
79 {
80 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
81 }
82
83 /*
84 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
85 * b_lru_ref count so that the buffer is freed immediately when the buffer
86 * reference count falls to zero. If the buffer is already on the LRU, we need
87 * to remove the reference that the LRU holds on the buffer.
88 *
89 * This prevents build-up of stale buffers on the LRU.
90 */
91 void
92 xfs_buf_stale(
93 struct xfs_buf *bp)
94 {
95 ASSERT(xfs_buf_islocked(bp));
96
97 bp->b_flags |= XBF_STALE;
98
99 /*
100 * Clear the delwri status so that a delwri queue walker will not
101 * flush this buffer to disk now that it is stale. The delwri queue has
102 * a reference to the buffer, so this is safe to do.
103 */
104 bp->b_flags &= ~_XBF_DELWRI_Q;
105
106 spin_lock(&bp->b_lock);
107 atomic_set(&bp->b_lru_ref, 0);
108 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
109 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
110 atomic_dec(&bp->b_hold);
111
112 ASSERT(atomic_read(&bp->b_hold) >= 1);
113 spin_unlock(&bp->b_lock);
114 }
115
116 static int
117 xfs_buf_get_maps(
118 struct xfs_buf *bp,
119 int map_count)
120 {
121 ASSERT(bp->b_maps == NULL);
122 bp->b_map_count = map_count;
123
124 if (map_count == 1) {
125 bp->b_maps = &bp->__b_map;
126 return 0;
127 }
128
129 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
130 KM_NOFS);
131 if (!bp->b_maps)
132 return ENOMEM;
133 return 0;
134 }
135
136 /*
137 * Frees b_maps if it was allocated.
138 */
139 static void
140 xfs_buf_free_maps(
141 struct xfs_buf *bp)
142 {
143 if (bp->b_maps != &bp->__b_map) {
144 kmem_free(bp->b_maps);
145 bp->b_maps = NULL;
146 }
147 }
148
149 struct xfs_buf *
150 _xfs_buf_alloc(
151 struct xfs_buftarg *target,
152 struct xfs_buf_map *map,
153 int nmaps,
154 xfs_buf_flags_t flags)
155 {
156 struct xfs_buf *bp;
157 int error;
158 int i;
159
160 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
161 if (unlikely(!bp))
162 return NULL;
163
164 /*
165 * We don't want certain flags to appear in b_flags unless they are
166 * specifically set by later operations on the buffer.
167 */
168 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
169
170 atomic_set(&bp->b_hold, 1);
171 atomic_set(&bp->b_lru_ref, 1);
172 init_completion(&bp->b_iowait);
173 INIT_LIST_HEAD(&bp->b_lru);
174 INIT_LIST_HEAD(&bp->b_list);
175 RB_CLEAR_NODE(&bp->b_rbnode);
176 sema_init(&bp->b_sema, 0); /* held, no waiters */
177 spin_lock_init(&bp->b_lock);
178 XB_SET_OWNER(bp);
179 bp->b_target = target;
180 bp->b_flags = flags;
181
182 /*
183 * Set length and io_length to the same value initially.
184 * I/O routines should use io_length, which will be the same in
185 * most cases but may be reset (e.g. XFS recovery).
186 */
187 error = xfs_buf_get_maps(bp, nmaps);
188 if (error) {
189 kmem_zone_free(xfs_buf_zone, bp);
190 return NULL;
191 }
192
193 bp->b_bn = map[0].bm_bn;
194 bp->b_length = 0;
195 for (i = 0; i < nmaps; i++) {
196 bp->b_maps[i].bm_bn = map[i].bm_bn;
197 bp->b_maps[i].bm_len = map[i].bm_len;
198 bp->b_length += map[i].bm_len;
199 }
200 bp->b_io_length = bp->b_length;
201
202 atomic_set(&bp->b_pin_count, 0);
203 init_waitqueue_head(&bp->b_waiters);
204
205 XFS_STATS_INC(xb_create);
206 trace_xfs_buf_init(bp, _RET_IP_);
207
208 return bp;
209 }
210
211 /*
212 * Allocate a page array capable of holding a specified number
213 * of pages, and point the page buf at it.
214 */
215 STATIC int
216 _xfs_buf_get_pages(
217 xfs_buf_t *bp,
218 int page_count,
219 xfs_buf_flags_t flags)
220 {
221 /* Make sure that we have a page list */
222 if (bp->b_pages == NULL) {
223 bp->b_page_count = page_count;
224 if (page_count <= XB_PAGES) {
225 bp->b_pages = bp->b_page_array;
226 } else {
227 bp->b_pages = kmem_alloc(sizeof(struct page *) *
228 page_count, KM_NOFS);
229 if (bp->b_pages == NULL)
230 return -ENOMEM;
231 }
232 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
233 }
234 return 0;
235 }
236
237 /*
238 * Frees b_pages if it was allocated.
239 */
240 STATIC void
241 _xfs_buf_free_pages(
242 xfs_buf_t *bp)
243 {
244 if (bp->b_pages != bp->b_page_array) {
245 kmem_free(bp->b_pages);
246 bp->b_pages = NULL;
247 }
248 }
249
250 /*
251 * Releases the specified buffer.
252 *
253 * The modification state of any associated pages is left unchanged.
254 * The buffer must not be on any hash - use xfs_buf_rele instead for
255 * hashed and refcounted buffers
256 */
257 void
258 xfs_buf_free(
259 xfs_buf_t *bp)
260 {
261 trace_xfs_buf_free(bp, _RET_IP_);
262
263 ASSERT(list_empty(&bp->b_lru));
264
265 if (bp->b_flags & _XBF_PAGES) {
266 uint i;
267
268 if (xfs_buf_is_vmapped(bp))
269 vm_unmap_ram(bp->b_addr - bp->b_offset,
270 bp->b_page_count);
271
272 for (i = 0; i < bp->b_page_count; i++) {
273 struct page *page = bp->b_pages[i];
274
275 __free_page(page);
276 }
277 } else if (bp->b_flags & _XBF_KMEM)
278 kmem_free(bp->b_addr);
279 _xfs_buf_free_pages(bp);
280 xfs_buf_free_maps(bp);
281 kmem_zone_free(xfs_buf_zone, bp);
282 }
283
284 /*
285 * Allocates all the pages for the buffer in question and builds its page list.
286 */
287 STATIC int
288 xfs_buf_allocate_memory(
289 xfs_buf_t *bp,
290 uint flags)
291 {
292 size_t size;
293 size_t nbytes, offset;
294 gfp_t gfp_mask = xb_to_gfp(flags);
295 unsigned short page_count, i;
296 xfs_off_t start, end;
297 int error;
298
299 /*
300 * for buffers that are contained within a single page, just allocate
301 * the memory from the heap - there's no need for the complexity of
302 * page arrays to keep allocation down to order 0.
303 */
304 size = BBTOB(bp->b_length);
305 if (size < PAGE_SIZE) {
306 bp->b_addr = kmem_alloc(size, KM_NOFS);
307 if (!bp->b_addr) {
308 /* low memory - use alloc_page loop instead */
309 goto use_alloc_page;
310 }
311
312 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
313 ((unsigned long)bp->b_addr & PAGE_MASK)) {
314 /* b_addr spans two pages - use alloc_page instead */
315 kmem_free(bp->b_addr);
316 bp->b_addr = NULL;
317 goto use_alloc_page;
318 }
319 bp->b_offset = offset_in_page(bp->b_addr);
320 bp->b_pages = bp->b_page_array;
321 bp->b_pages[0] = virt_to_page(bp->b_addr);
322 bp->b_page_count = 1;
323 bp->b_flags |= _XBF_KMEM;
324 return 0;
325 }
326
327 use_alloc_page:
328 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
329 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
330 >> PAGE_SHIFT;
331 page_count = end - start;
332 error = _xfs_buf_get_pages(bp, page_count, flags);
333 if (unlikely(error))
334 return error;
335
336 offset = bp->b_offset;
337 bp->b_flags |= _XBF_PAGES;
338
339 for (i = 0; i < bp->b_page_count; i++) {
340 struct page *page;
341 uint retries = 0;
342 retry:
343 page = alloc_page(gfp_mask);
344 if (unlikely(page == NULL)) {
345 if (flags & XBF_READ_AHEAD) {
346 bp->b_page_count = i;
347 error = ENOMEM;
348 goto out_free_pages;
349 }
350
351 /*
352 * This could deadlock.
353 *
354 * But until all the XFS low-level code is revamped to
355 * handle buffer allocation failures we can't do much.
356 */
357 if (!(++retries % 100))
358 xfs_err(NULL,
359 "possible memory allocation deadlock in %s (mode:0x%x)",
360 __func__, gfp_mask);
361
362 XFS_STATS_INC(xb_page_retries);
363 congestion_wait(BLK_RW_ASYNC, HZ/50);
364 goto retry;
365 }
366
367 XFS_STATS_INC(xb_page_found);
368
369 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
370 size -= nbytes;
371 bp->b_pages[i] = page;
372 offset = 0;
373 }
374 return 0;
375
376 out_free_pages:
377 for (i = 0; i < bp->b_page_count; i++)
378 __free_page(bp->b_pages[i]);
379 return error;
380 }
381
382 /*
383 * Map buffer into kernel address-space if necessary.
384 */
385 STATIC int
386 _xfs_buf_map_pages(
387 xfs_buf_t *bp,
388 uint flags)
389 {
390 ASSERT(bp->b_flags & _XBF_PAGES);
391 if (bp->b_page_count == 1) {
392 /* A single page buffer is always mappable */
393 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
394 } else if (flags & XBF_UNMAPPED) {
395 bp->b_addr = NULL;
396 } else {
397 int retried = 0;
398
399 do {
400 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
401 -1, PAGE_KERNEL);
402 if (bp->b_addr)
403 break;
404 vm_unmap_aliases();
405 } while (retried++ <= 1);
406
407 if (!bp->b_addr)
408 return -ENOMEM;
409 bp->b_addr += bp->b_offset;
410 }
411
412 return 0;
413 }
414
415 /*
416 * Finding and Reading Buffers
417 */
418
419 /*
420 * Looks up, and creates if absent, a lockable buffer for
421 * a given range of the underlying device. The buffer is returned
422 * locked. No I/O is implied by this call.
423 */
424 xfs_buf_t *
425 _xfs_buf_find(
426 struct xfs_buftarg *btp,
427 struct xfs_buf_map *map,
428 int nmaps,
429 xfs_buf_flags_t flags,
430 xfs_buf_t *new_bp)
431 {
432 size_t numbytes;
433 struct xfs_perag *pag;
434 struct rb_node **rbp;
435 struct rb_node *parent;
436 xfs_buf_t *bp;
437 xfs_daddr_t blkno = map[0].bm_bn;
438 xfs_daddr_t eofs;
439 int numblks = 0;
440 int i;
441
442 for (i = 0; i < nmaps; i++)
443 numblks += map[i].bm_len;
444 numbytes = BBTOB(numblks);
445
446 /* Check for IOs smaller than the sector size / not sector aligned */
447 ASSERT(!(numbytes < (1 << btp->bt_sshift)));
448 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
449
450 /*
451 * Corrupted block numbers can get through to here, unfortunately, so we
452 * have to check that the buffer falls within the filesystem bounds.
453 */
454 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
455 if (blkno >= eofs) {
456 /*
457 * XXX (dgc): we should really be returning EFSCORRUPTED here,
458 * but none of the higher level infrastructure supports
459 * returning a specific error on buffer lookup failures.
460 */
461 xfs_alert(btp->bt_mount,
462 "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
463 __func__, blkno, eofs);
464 WARN_ON(1);
465 return NULL;
466 }
467
468 /* get tree root */
469 pag = xfs_perag_get(btp->bt_mount,
470 xfs_daddr_to_agno(btp->bt_mount, blkno));
471
472 /* walk tree */
473 spin_lock(&pag->pag_buf_lock);
474 rbp = &pag->pag_buf_tree.rb_node;
475 parent = NULL;
476 bp = NULL;
477 while (*rbp) {
478 parent = *rbp;
479 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
480
481 if (blkno < bp->b_bn)
482 rbp = &(*rbp)->rb_left;
483 else if (blkno > bp->b_bn)
484 rbp = &(*rbp)->rb_right;
485 else {
486 /*
487 * found a block number match. If the range doesn't
488 * match, the only way this is allowed is if the buffer
489 * in the cache is stale and the transaction that made
490 * it stale has not yet committed. i.e. we are
491 * reallocating a busy extent. Skip this buffer and
492 * continue searching to the right for an exact match.
493 */
494 if (bp->b_length != numblks) {
495 ASSERT(bp->b_flags & XBF_STALE);
496 rbp = &(*rbp)->rb_right;
497 continue;
498 }
499 atomic_inc(&bp->b_hold);
500 goto found;
501 }
502 }
503
504 /* No match found */
505 if (new_bp) {
506 rb_link_node(&new_bp->b_rbnode, parent, rbp);
507 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
508 /* the buffer keeps the perag reference until it is freed */
509 new_bp->b_pag = pag;
510 spin_unlock(&pag->pag_buf_lock);
511 } else {
512 XFS_STATS_INC(xb_miss_locked);
513 spin_unlock(&pag->pag_buf_lock);
514 xfs_perag_put(pag);
515 }
516 return new_bp;
517
518 found:
519 spin_unlock(&pag->pag_buf_lock);
520 xfs_perag_put(pag);
521
522 if (!xfs_buf_trylock(bp)) {
523 if (flags & XBF_TRYLOCK) {
524 xfs_buf_rele(bp);
525 XFS_STATS_INC(xb_busy_locked);
526 return NULL;
527 }
528 xfs_buf_lock(bp);
529 XFS_STATS_INC(xb_get_locked_waited);
530 }
531
532 /*
533 * if the buffer is stale, clear all the external state associated with
534 * it. We need to keep the flags that record how the buffer memory was
535 * allocated intact here.
536 */
537 if (bp->b_flags & XBF_STALE) {
538 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
539 ASSERT(bp->b_iodone == NULL);
540 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
541 bp->b_ops = NULL;
542 }
543
544 trace_xfs_buf_find(bp, flags, _RET_IP_);
545 XFS_STATS_INC(xb_get_locked);
546 return bp;
547 }
548
549 /*
550 * Assembles a buffer covering the specified range. The code is optimised for
551 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
552 * more hits than misses.
553 */
554 struct xfs_buf *
555 xfs_buf_get_map(
556 struct xfs_buftarg *target,
557 struct xfs_buf_map *map,
558 int nmaps,
559 xfs_buf_flags_t flags)
560 {
561 struct xfs_buf *bp;
562 struct xfs_buf *new_bp;
563 int error = 0;
564
565 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
566 if (likely(bp))
567 goto found;
568
569 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
570 if (unlikely(!new_bp))
571 return NULL;
572
573 error = xfs_buf_allocate_memory(new_bp, flags);
574 if (error) {
575 xfs_buf_free(new_bp);
576 return NULL;
577 }
578
579 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
580 if (!bp) {
581 xfs_buf_free(new_bp);
582 return NULL;
583 }
584
585 if (bp != new_bp)
586 xfs_buf_free(new_bp);
587
588 found:
589 if (!bp->b_addr) {
590 error = _xfs_buf_map_pages(bp, flags);
591 if (unlikely(error)) {
592 xfs_warn(target->bt_mount,
593 "%s: failed to map pages\n", __func__);
594 xfs_buf_relse(bp);
595 return NULL;
596 }
597 }
598
599 XFS_STATS_INC(xb_get);
600 trace_xfs_buf_get(bp, flags, _RET_IP_);
601 return bp;
602 }
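
/*
 * Illustrative usage (a hypothetical caller sketch, not part of this file):
 * look up a single-extent buffer through the map interface. The names
 * target, blkno and numblks are assumed to come from the caller.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get_map(target, &map, 1, XBF_TRYLOCK);
 *	if (bp) {
 *		... use the locked buffer via bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */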
603
604 STATIC int
605 _xfs_buf_read(
606 xfs_buf_t *bp,
607 xfs_buf_flags_t flags)
608 {
609 ASSERT(!(flags & XBF_WRITE));
610 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
611
612 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
613 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
614
615 xfs_buf_iorequest(bp);
616 if (flags & XBF_ASYNC)
617 return 0;
618 return xfs_buf_iowait(bp);
619 }
620
621 xfs_buf_t *
622 xfs_buf_read_map(
623 struct xfs_buftarg *target,
624 struct xfs_buf_map *map,
625 int nmaps,
626 xfs_buf_flags_t flags,
627 const struct xfs_buf_ops *ops)
628 {
629 struct xfs_buf *bp;
630
631 flags |= XBF_READ;
632
633 bp = xfs_buf_get_map(target, map, nmaps, flags);
634 if (bp) {
635 trace_xfs_buf_read(bp, flags, _RET_IP_);
636
637 if (!XFS_BUF_ISDONE(bp)) {
638 XFS_STATS_INC(xb_get_read);
639 bp->b_ops = ops;
640 _xfs_buf_read(bp, flags);
641 } else if (flags & XBF_ASYNC) {
642 /*
643 * Read ahead call which is already satisfied,
644 * drop the buffer
645 */
646 xfs_buf_relse(bp);
647 return NULL;
648 } else {
649 /* We do not want read in the flags */
650 bp->b_flags &= ~XBF_READ;
651 }
652 }
653
654 return bp;
655 }
656
657 /*
658 * If we are not low on memory then do the readahead in a
659 * deadlock-safe manner.
660 */
661 void
662 xfs_buf_readahead_map(
663 struct xfs_buftarg *target,
664 struct xfs_buf_map *map,
665 int nmaps,
666 const struct xfs_buf_ops *ops)
667 {
668 if (bdi_read_congested(target->bt_bdi))
669 return;
670
671 xfs_buf_read_map(target, map, nmaps,
672 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
673 }
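
/*
 * Illustrative usage (hypothetical caller sketch, not part of this file):
 * kick off speculative readahead for blocks we may need soon. No buffer is
 * returned and failures are silently ignored; blkno and numblks are assumed.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *
 *	xfs_buf_readahead_map(target, &map, 1, NULL);
 */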
674
675 /*
676 * Read an uncached buffer from disk. Allocates and returns a locked
677 * buffer containing the disk contents or nothing.
678 */
679 struct xfs_buf *
680 xfs_buf_read_uncached(
681 struct xfs_buftarg *target,
682 xfs_daddr_t daddr,
683 size_t numblks,
684 int flags,
685 const struct xfs_buf_ops *ops)
686 {
687 struct xfs_buf *bp;
688
689 bp = xfs_buf_get_uncached(target, numblks, flags);
690 if (!bp)
691 return NULL;
692
693 /* set up the buffer for a read IO */
694 ASSERT(bp->b_map_count == 1);
695 bp->b_bn = daddr;
696 bp->b_maps[0].bm_bn = daddr;
697 bp->b_flags |= XBF_READ;
698 bp->b_ops = ops;
699
700 xfsbdstrat(target->bt_mount, bp);
701 xfs_buf_iowait(bp);
702 return bp;
703 }
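
/*
 * Illustrative usage (hypothetical caller sketch, not part of this file):
 * read a device range that is not indexed in the buffer cache, as one-off
 * mount-time reads do. The names target, daddr and numblks are assumed.
 *
 *	bp = xfs_buf_read_uncached(target, daddr, numblks, 0, NULL);
 *	if (bp) {
 *		... check bp->b_error, then inspect bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */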
704
705 /*
706 * Return a buffer allocated as an empty buffer and associated with external
707 * memory via xfs_buf_associate_memory() back to its empty state.
708 */
709 void
710 xfs_buf_set_empty(
711 struct xfs_buf *bp,
712 size_t numblks)
713 {
714 if (bp->b_pages)
715 _xfs_buf_free_pages(bp);
716
717 bp->b_pages = NULL;
718 bp->b_page_count = 0;
719 bp->b_addr = NULL;
720 bp->b_length = numblks;
721 bp->b_io_length = numblks;
722
723 ASSERT(bp->b_map_count == 1);
724 bp->b_bn = XFS_BUF_DADDR_NULL;
725 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
726 bp->b_maps[0].bm_len = bp->b_length;
727 }
728
729 static inline struct page *
730 mem_to_page(
731 void *addr)
732 {
733 if ((!is_vmalloc_addr(addr))) {
734 return virt_to_page(addr);
735 } else {
736 return vmalloc_to_page(addr);
737 }
738 }
739
740 int
741 xfs_buf_associate_memory(
742 xfs_buf_t *bp,
743 void *mem,
744 size_t len)
745 {
746 int rval;
747 int i = 0;
748 unsigned long pageaddr;
749 unsigned long offset;
750 size_t buflen;
751 int page_count;
752
753 pageaddr = (unsigned long)mem & PAGE_MASK;
754 offset = (unsigned long)mem - pageaddr;
755 buflen = PAGE_ALIGN(len + offset);
756 page_count = buflen >> PAGE_SHIFT;
757
758 /* Free any previous set of page pointers */
759 if (bp->b_pages)
760 _xfs_buf_free_pages(bp);
761
762 bp->b_pages = NULL;
763 bp->b_addr = mem;
764
765 rval = _xfs_buf_get_pages(bp, page_count, 0);
766 if (rval)
767 return rval;
768
769 bp->b_offset = offset;
770
771 for (i = 0; i < bp->b_page_count; i++) {
772 bp->b_pages[i] = mem_to_page((void *)pageaddr);
773 pageaddr += PAGE_SIZE;
774 }
775
776 bp->b_io_length = BTOBB(len);
777 bp->b_length = BTOBB(buflen);
778
779 return 0;
780 }
781
782 xfs_buf_t *
783 xfs_buf_get_uncached(
784 struct xfs_buftarg *target,
785 size_t numblks,
786 int flags)
787 {
788 unsigned long page_count;
789 int error, i;
790 struct xfs_buf *bp;
791 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
792
793 bp = _xfs_buf_alloc(target, &map, 1, 0);
794 if (unlikely(bp == NULL))
795 goto fail;
796
797 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
798 error = _xfs_buf_get_pages(bp, page_count, 0);
799 if (error)
800 goto fail_free_buf;
801
802 for (i = 0; i < page_count; i++) {
803 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
804 if (!bp->b_pages[i])
805 goto fail_free_mem;
806 }
807 bp->b_flags |= _XBF_PAGES;
808
809 error = _xfs_buf_map_pages(bp, 0);
810 if (unlikely(error)) {
811 xfs_warn(target->bt_mount,
812 "%s: failed to map pages\n", __func__);
813 goto fail_free_mem;
814 }
815
816 trace_xfs_buf_get_uncached(bp, _RET_IP_);
817 return bp;
818
819 fail_free_mem:
820 while (--i >= 0)
821 __free_page(bp->b_pages[i]);
822 _xfs_buf_free_pages(bp);
823 fail_free_buf:
824 xfs_buf_free_maps(bp);
825 kmem_zone_free(xfs_buf_zone, bp);
826 fail:
827 return NULL;
828 }
829
830 /*
831 * Increment reference count on buffer, to hold the buffer concurrently
832 * with another thread which may release (free) the buffer asynchronously.
833 * Must hold the buffer already to call this function.
834 */
835 void
836 xfs_buf_hold(
837 xfs_buf_t *bp)
838 {
839 trace_xfs_buf_hold(bp, _RET_IP_);
840 atomic_inc(&bp->b_hold);
841 }
842
843 /*
844 * Releases a hold on the specified buffer. If the
845 * hold count is 1, calls xfs_buf_free.
846 */
847 void
848 xfs_buf_rele(
849 xfs_buf_t *bp)
850 {
851 struct xfs_perag *pag = bp->b_pag;
852
853 trace_xfs_buf_rele(bp, _RET_IP_);
854
855 if (!pag) {
856 ASSERT(list_empty(&bp->b_lru));
857 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
858 if (atomic_dec_and_test(&bp->b_hold))
859 xfs_buf_free(bp);
860 return;
861 }
862
863 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
864
865 ASSERT(atomic_read(&bp->b_hold) > 0);
866 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
867 spin_lock(&bp->b_lock);
868 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
869 /*
870 * If the buffer is added to the LRU take a new
871 * reference to the buffer for the LRU and clear the
872 * (now stale) dispose list state flag
873 */
874 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
875 bp->b_state &= ~XFS_BSTATE_DISPOSE;
876 atomic_inc(&bp->b_hold);
877 }
878 spin_unlock(&bp->b_lock);
879 spin_unlock(&pag->pag_buf_lock);
880 } else {
881 /*
882 * most of the time buffers will already be removed from
883 * the LRU, so optimise that case by checking for the
884 * XFS_BSTATE_DISPOSE flag indicating the last list the
885 * buffer was on was the disposal list
886 */
887 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
888 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
889 } else {
890 ASSERT(list_empty(&bp->b_lru));
891 }
892 spin_unlock(&bp->b_lock);
893
894 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
895 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
896 spin_unlock(&pag->pag_buf_lock);
897 xfs_perag_put(pag);
898 xfs_buf_free(bp);
899 }
900 }
901 }
902
903
904 /*
905 * Lock a buffer object, if it is not already locked.
906 *
907 * If we come across a stale, pinned, locked buffer, we know that we are
908 * being asked to lock a buffer that has been reallocated. Because it is
909 * pinned, we know that the log has not been pushed to disk and hence it
910 * will still be locked. Rather than continuing to have trylock attempts
911 * fail until someone else pushes the log, push it ourselves before
912 * returning. This means that the xfsaild will not get stuck trying
913 * to push on stale inode buffers.
914 */
915 int
916 xfs_buf_trylock(
917 struct xfs_buf *bp)
918 {
919 int locked;
920
921 locked = down_trylock(&bp->b_sema) == 0;
922 if (locked)
923 XB_SET_OWNER(bp);
924
925 trace_xfs_buf_trylock(bp, _RET_IP_);
926 return locked;
927 }
928
929 /*
930 * Lock a buffer object.
931 *
932 * If we come across a stale, pinned, locked buffer, we know that we
933 * are being asked to lock a buffer that has been reallocated. Because
934 * it is pinned, we know that the log has not been pushed to disk and
935 * hence it will still be locked. Rather than sleeping until someone
936 * else pushes the log, push it ourselves before trying to get the lock.
937 */
938 void
939 xfs_buf_lock(
940 struct xfs_buf *bp)
941 {
942 trace_xfs_buf_lock(bp, _RET_IP_);
943
944 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
945 xfs_log_force(bp->b_target->bt_mount, 0);
946 down(&bp->b_sema);
947 XB_SET_OWNER(bp);
948
949 trace_xfs_buf_lock_done(bp, _RET_IP_);
950 }
951
952 void
953 xfs_buf_unlock(
954 struct xfs_buf *bp)
955 {
956 XB_CLEAR_OWNER(bp);
957 up(&bp->b_sema);
958
959 trace_xfs_buf_unlock(bp, _RET_IP_);
960 }
961
962 STATIC void
963 xfs_buf_wait_unpin(
964 xfs_buf_t *bp)
965 {
966 DECLARE_WAITQUEUE(wait, current);
967
968 if (atomic_read(&bp->b_pin_count) == 0)
969 return;
970
971 add_wait_queue(&bp->b_waiters, &wait);
972 for (;;) {
973 set_current_state(TASK_UNINTERRUPTIBLE);
974 if (atomic_read(&bp->b_pin_count) == 0)
975 break;
976 io_schedule();
977 }
978 remove_wait_queue(&bp->b_waiters, &wait);
979 set_current_state(TASK_RUNNING);
980 }
981
982 /*
983 * Buffer Utility Routines
984 */
985
986 STATIC void
987 xfs_buf_iodone_work(
988 struct work_struct *work)
989 {
990 struct xfs_buf *bp =
991 container_of(work, xfs_buf_t, b_iodone_work);
992 bool read = !!(bp->b_flags & XBF_READ);
993
994 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
995
996 /* only validate buffers that were read without errors */
997 if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE))
998 bp->b_ops->verify_read(bp);
999
1000 if (bp->b_iodone)
1001 (*(bp->b_iodone))(bp);
1002 else if (bp->b_flags & XBF_ASYNC)
1003 xfs_buf_relse(bp);
1004 else {
1005 ASSERT(read && bp->b_ops);
1006 complete(&bp->b_iowait);
1007 }
1008 }
1009
1010 void
1011 xfs_buf_ioend(
1012 struct xfs_buf *bp,
1013 int schedule)
1014 {
1015 bool read = !!(bp->b_flags & XBF_READ);
1016
1017 trace_xfs_buf_iodone(bp, _RET_IP_);
1018
1019 if (bp->b_error == 0)
1020 bp->b_flags |= XBF_DONE;
1021
1022 if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) {
1023 if (schedule) {
1024 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1025 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1026 } else {
1027 xfs_buf_iodone_work(&bp->b_iodone_work);
1028 }
1029 } else {
1030 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1031 complete(&bp->b_iowait);
1032 }
1033 }
1034
1035 void
1036 xfs_buf_ioerror(
1037 xfs_buf_t *bp,
1038 int error)
1039 {
1040 ASSERT(error >= 0 && error <= 0xffff);
1041 bp->b_error = (unsigned short)error;
1042 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1043 }
1044
1045 void
1046 xfs_buf_ioerror_alert(
1047 struct xfs_buf *bp,
1048 const char *func)
1049 {
1050 xfs_alert(bp->b_target->bt_mount,
1051 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1052 (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
1053 }
1054
1055 /*
1056 * Called when we want to stop a buffer from getting written or read.
1057 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1058 * so that the proper iodone callbacks get called.
1059 */
1060 STATIC int
1061 xfs_bioerror(
1062 xfs_buf_t *bp)
1063 {
1064 #ifdef XFSERRORDEBUG
1065 ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1066 #endif
1067
1068 /*
1069 * No need to wait until the buffer is unpinned, we aren't flushing it.
1070 */
1071 xfs_buf_ioerror(bp, EIO);
1072
1073 /*
1074 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1075 */
1076 XFS_BUF_UNREAD(bp);
1077 XFS_BUF_UNDONE(bp);
1078 xfs_buf_stale(bp);
1079
1080 xfs_buf_ioend(bp, 0);
1081
1082 return EIO;
1083 }
1084
1085 /*
1086 * Same as xfs_bioerror, except that we are releasing the buffer
1087 * here ourselves, and avoiding the xfs_buf_ioend call.
1088 * This is meant for userdata errors; metadata bufs come with
1089 * iodone functions attached, so that we can track down errors.
1090 */
1091 STATIC int
1092 xfs_bioerror_relse(
1093 struct xfs_buf *bp)
1094 {
1095 int64_t fl = bp->b_flags;
1096 /*
1097 * No need to wait until the buffer is unpinned.
1098 * We aren't flushing it.
1099 *
1100 * chunkhold expects B_DONE to be set, whether
1101 * we actually finish the I/O or not. We don't want to
1102 * change that interface.
1103 */
1104 XFS_BUF_UNREAD(bp);
1105 XFS_BUF_DONE(bp);
1106 xfs_buf_stale(bp);
1107 bp->b_iodone = NULL;
1108 if (!(fl & XBF_ASYNC)) {
1109 /*
1110 * Mark b_error and B_ERROR _both_.
1111 * Lots of chunkcache code assumes that.
1112 * There's no reason to mark error for
1113 * ASYNC buffers.
1114 */
1115 xfs_buf_ioerror(bp, EIO);
1116 complete(&bp->b_iowait);
1117 } else {
1118 xfs_buf_relse(bp);
1119 }
1120
1121 return EIO;
1122 }
1123
1124 STATIC int
1125 xfs_bdstrat_cb(
1126 struct xfs_buf *bp)
1127 {
1128 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1129 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1130 /*
1131 * Metadata write that didn't get logged but
1132 * written delayed anyway. These aren't associated
1133 * with a transaction, and can be ignored.
1134 */
1135 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1136 return xfs_bioerror_relse(bp);
1137 else
1138 return xfs_bioerror(bp);
1139 }
1140
1141 xfs_buf_iorequest(bp);
1142 return 0;
1143 }
1144
1145 int
1146 xfs_bwrite(
1147 struct xfs_buf *bp)
1148 {
1149 int error;
1150
1151 ASSERT(xfs_buf_islocked(bp));
1152
1153 bp->b_flags |= XBF_WRITE;
1154 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
1155
1156 xfs_bdstrat_cb(bp);
1157
1158 error = xfs_buf_iowait(bp);
1159 if (error) {
1160 xfs_force_shutdown(bp->b_target->bt_mount,
1161 SHUTDOWN_META_IO_ERROR);
1162 }
1163 return error;
1164 }
1165
1166 /*
1167 * Wrapper around bdstrat so that we can stop data from going to disk in case
1168 * we are shutting down the filesystem. Typically user data goes through this
1169 * path; one of the exceptions is the superblock.
1170 */
1171 void
1172 xfsbdstrat(
1173 struct xfs_mount *mp,
1174 struct xfs_buf *bp)
1175 {
1176 if (XFS_FORCED_SHUTDOWN(mp)) {
1177 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1178 xfs_bioerror_relse(bp);
1179 return;
1180 }
1181
1182 xfs_buf_iorequest(bp);
1183 }
1184
1185 STATIC void
1186 _xfs_buf_ioend(
1187 xfs_buf_t *bp,
1188 int schedule)
1189 {
1190 if (atomic_dec_and_test(&bp->b_io_remaining))
1191 xfs_buf_ioend(bp, schedule);
1192 }
1193
1194 STATIC void
1195 xfs_buf_bio_end_io(
1196 struct bio *bio,
1197 int error)
1198 {
1199 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1200
1201 /*
1202 * don't overwrite existing errors - otherwise we can lose errors on
1203 * buffers that require multiple bios to complete.
1204 */
1205 if (!bp->b_error)
1206 xfs_buf_ioerror(bp, -error);
1207
1208 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1209 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1210
1211 _xfs_buf_ioend(bp, 1);
1212 bio_put(bio);
1213 }
1214
1215 static void
1216 xfs_buf_ioapply_map(
1217 struct xfs_buf *bp,
1218 int map,
1219 int *buf_offset,
1220 int *count,
1221 int rw)
1222 {
1223 int page_index;
1224 int total_nr_pages = bp->b_page_count;
1225 int nr_pages;
1226 struct bio *bio;
1227 sector_t sector = bp->b_maps[map].bm_bn;
1228 int size;
1229 int offset;
1230
1231 total_nr_pages = bp->b_page_count;
1232
1233 /* skip the pages in the buffer before the start offset */
1234 page_index = 0;
1235 offset = *buf_offset;
1236 while (offset >= PAGE_SIZE) {
1237 page_index++;
1238 offset -= PAGE_SIZE;
1239 }
1240
1241 /*
1242 * Limit the IO size to the length of the current vector, and update the
1243 * remaining IO count for the next time around.
1244 */
1245 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1246 *count -= size;
1247 *buf_offset += size;
1248
1249 next_chunk:
1250 atomic_inc(&bp->b_io_remaining);
1251 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1252 if (nr_pages > total_nr_pages)
1253 nr_pages = total_nr_pages;
1254
1255 bio = bio_alloc(GFP_NOIO, nr_pages);
1256 bio->bi_bdev = bp->b_target->bt_bdev;
1257 bio->bi_sector = sector;
1258 bio->bi_end_io = xfs_buf_bio_end_io;
1259 bio->bi_private = bp;
1260
1261
1262 for (; size && nr_pages; nr_pages--, page_index++) {
1263 int rbytes, nbytes = PAGE_SIZE - offset;
1264
1265 if (nbytes > size)
1266 nbytes = size;
1267
1268 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1269 offset);
1270 if (rbytes < nbytes)
1271 break;
1272
1273 offset = 0;
1274 sector += BTOBB(nbytes);
1275 size -= nbytes;
1276 total_nr_pages--;
1277 }
1278
1279 if (likely(bio->bi_size)) {
1280 if (xfs_buf_is_vmapped(bp)) {
1281 flush_kernel_vmap_range(bp->b_addr,
1282 xfs_buf_vmap_len(bp));
1283 }
1284 submit_bio(rw, bio);
1285 if (size)
1286 goto next_chunk;
1287 } else {
1288 /*
1289 * This is guaranteed not to be the last io reference count
1290 * because the caller (xfs_buf_iorequest) holds a count itself.
1291 */
1292 atomic_dec(&bp->b_io_remaining);
1293 xfs_buf_ioerror(bp, EIO);
1294 bio_put(bio);
1295 }
1296
1297 }
1298
1299 STATIC void
1300 _xfs_buf_ioapply(
1301 struct xfs_buf *bp)
1302 {
1303 struct blk_plug plug;
1304 int rw;
1305 int offset;
1306 int size;
1307 int i;
1308
1309 /*
1310 * Make sure we capture only current IO errors rather than stale errors
1311 * left over from previous use of the buffer (e.g. failed readahead).
1312 */
1313 bp->b_error = 0;
1314
1315 if (bp->b_flags & XBF_WRITE) {
1316 if (bp->b_flags & XBF_SYNCIO)
1317 rw = WRITE_SYNC;
1318 else
1319 rw = WRITE;
1320 if (bp->b_flags & XBF_FUA)
1321 rw |= REQ_FUA;
1322 if (bp->b_flags & XBF_FLUSH)
1323 rw |= REQ_FLUSH;
1324
1325 /*
1326 * Run the write verifier callback function if it exists. If
1327 * this function fails it will mark the buffer with an error and
1328 * the IO should not be dispatched.
1329 */
1330 if (bp->b_ops) {
1331 bp->b_ops->verify_write(bp);
1332 if (bp->b_error) {
1333 xfs_force_shutdown(bp->b_target->bt_mount,
1334 SHUTDOWN_CORRUPT_INCORE);
1335 return;
1336 }
1337 }
1338 } else if (bp->b_flags & XBF_READ_AHEAD) {
1339 rw = READA;
1340 } else {
1341 rw = READ;
1342 }
1343
1344 /* we only use the buffer cache for meta-data */
1345 rw |= REQ_META;
1346
1347 /*
1348 * Walk all the vectors issuing IO on them. Set up the initial offset
1349 * into the buffer and the desired IO size before we start -
1350 * xfs_buf_ioapply_map() will modify them appropriately for each
1351 * subsequent call.
1352 */
1353 offset = bp->b_offset;
1354 size = BBTOB(bp->b_io_length);
1355 blk_start_plug(&plug);
1356 for (i = 0; i < bp->b_map_count; i++) {
1357 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1358 if (bp->b_error)
1359 break;
1360 if (size <= 0)
1361 break; /* all done */
1362 }
1363 blk_finish_plug(&plug);
1364 }
1365
1366 void
1367 xfs_buf_iorequest(
1368 xfs_buf_t *bp)
1369 {
1370 trace_xfs_buf_iorequest(bp, _RET_IP_);
1371
1372 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1373
1374 if (bp->b_flags & XBF_WRITE)
1375 xfs_buf_wait_unpin(bp);
1376 xfs_buf_hold(bp);
1377
1378 /* Set the count to 1 initially; this will stop an I/O
1379 * completion callout which happens before we have started
1380 * all the I/O from calling xfs_buf_ioend too early.
1381 */
1382 atomic_set(&bp->b_io_remaining, 1);
1383 _xfs_buf_ioapply(bp);
1384 _xfs_buf_ioend(bp, 1);
1385
1386 xfs_buf_rele(bp);
1387 }
1388
1389 /*
1390 * Waits for I/O to complete on the buffer supplied. It returns immediately if
1391 * no I/O is pending or there is already a pending error on the buffer. It
1392 * returns the I/O error code, if any, or 0 if there was no error.
1393 */
1394 int
1395 xfs_buf_iowait(
1396 xfs_buf_t *bp)
1397 {
1398 trace_xfs_buf_iowait(bp, _RET_IP_);
1399
1400 if (!bp->b_error)
1401 wait_for_completion(&bp->b_iowait);
1402
1403 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1404 return bp->b_error;
1405 }
1406
1407 xfs_caddr_t
1408 xfs_buf_offset(
1409 xfs_buf_t *bp,
1410 size_t offset)
1411 {
1412 struct page *page;
1413
1414 if (bp->b_addr)
1415 return bp->b_addr + offset;
1416
1417 offset += bp->b_offset;
1418 page = bp->b_pages[offset >> PAGE_SHIFT];
1419 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1420 }
1421
1422 /*
1423 * Move data into or out of a buffer.
1424 */
1425 void
1426 xfs_buf_iomove(
1427 xfs_buf_t *bp, /* buffer to process */
1428 size_t boff, /* starting buffer offset */
1429 size_t bsize, /* length to copy */
1430 void *data, /* data address */
1431 xfs_buf_rw_t mode) /* read/write/zero flag */
1432 {
1433 size_t bend;
1434
1435 bend = boff + bsize;
1436 while (boff < bend) {
1437 struct page *page;
1438 int page_index, page_offset, csize;
1439
1440 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1441 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1442 page = bp->b_pages[page_index];
1443 csize = min_t(size_t, PAGE_SIZE - page_offset,
1444 BBTOB(bp->b_io_length) - boff);
1445
1446 ASSERT((csize + page_offset) <= PAGE_SIZE);
1447
1448 switch (mode) {
1449 case XBRW_ZERO:
1450 memset(page_address(page) + page_offset, 0, csize);
1451 break;
1452 case XBRW_READ:
1453 memcpy(data, page_address(page) + page_offset, csize);
1454 break;
1455 case XBRW_WRITE:
1456 memcpy(page_address(page) + page_offset, data, csize);
1457 }
1458
1459 boff += csize;
1460 data += csize;
1461 }
1462 }
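
/*
 * Illustrative usage (hypothetical caller sketch, not part of this file):
 * zero a byte range of a buffer. XBRW_ZERO never dereferences the data
 * pointer, so NULL is passed; boff and bsize are byte offsets and lengths
 * within the buffer.
 *
 *	xfs_buf_iomove(bp, boff, bsize, NULL, XBRW_ZERO);
 */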
1463
1464 /*
1465 * Handling of buffer targets (buftargs).
1466 */
1467
1468 /*
1469 * Wait for any bufs with callbacks that have been submitted but have not yet
1470 * returned. These buffers will have an elevated hold count, so wait on those
1471 * while freeing all the buffers only held by the LRU.
1472 */
1473 static enum lru_status
1474 xfs_buftarg_wait_rele(
1475 struct list_head *item,
1476 spinlock_t *lru_lock,
1477 void *arg)
1478
1479 {
1480 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1481 struct list_head *dispose = arg;
1482
1483 if (atomic_read(&bp->b_hold) > 1) {
1484 /* need to wait, so skip it this pass */
1485 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1486 return LRU_SKIP;
1487 }
1488 if (!spin_trylock(&bp->b_lock))
1489 return LRU_SKIP;
1490
1491 /*
1492 * clear the LRU reference count so the buffer doesn't get
1493 * ignored in xfs_buf_rele().
1494 */
1495 atomic_set(&bp->b_lru_ref, 0);
1496 bp->b_state |= XFS_BSTATE_DISPOSE;
1497 list_move(item, dispose);
1498 spin_unlock(&bp->b_lock);
1499 return LRU_REMOVED;
1500 }
1501
1502 void
1503 xfs_wait_buftarg(
1504 struct xfs_buftarg *btp)
1505 {
1506 LIST_HEAD(dispose);
1507 int loop = 0;
1508
1509 /* loop until there is nothing left on the lru list. */
1510 while (list_lru_count(&btp->bt_lru)) {
1511 list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1512 &dispose, LONG_MAX);
1513
1514 while (!list_empty(&dispose)) {
1515 struct xfs_buf *bp;
1516 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1517 list_del_init(&bp->b_lru);
1518 xfs_buf_rele(bp);
1519 }
1520 if (loop++ != 0)
1521 delay(100);
1522 }
1523 }
1524
1525 static enum lru_status
1526 xfs_buftarg_isolate(
1527 struct list_head *item,
1528 spinlock_t *lru_lock,
1529 void *arg)
1530 {
1531 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1532 struct list_head *dispose = arg;
1533
1534 /*
1535 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1536 * If we fail to get the lock, just skip it.
1537 */
1538 if (!spin_trylock(&bp->b_lock))
1539 return LRU_SKIP;
1540 /*
1541 * Decrement the b_lru_ref count unless the value is already
1542 * zero. If the value is already zero, we need to reclaim the
1543 * buffer, otherwise it gets another trip through the LRU.
1544 */
1545 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1546 spin_unlock(&bp->b_lock);
1547 return LRU_ROTATE;
1548 }
1549
1550 bp->b_state |= XFS_BSTATE_DISPOSE;
1551 list_move(item, dispose);
1552 spin_unlock(&bp->b_lock);
1553 return LRU_REMOVED;
1554 }
1555
1556 static unsigned long
1557 xfs_buftarg_shrink_scan(
1558 struct shrinker *shrink,
1559 struct shrink_control *sc)
1560 {
1561 struct xfs_buftarg *btp = container_of(shrink,
1562 struct xfs_buftarg, bt_shrinker);
1563 LIST_HEAD(dispose);
1564 unsigned long freed;
1565 unsigned long nr_to_scan = sc->nr_to_scan;
1566
1567 freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
1568 &dispose, &nr_to_scan);
1569
1570 while (!list_empty(&dispose)) {
1571 struct xfs_buf *bp;
1572 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1573 list_del_init(&bp->b_lru);
1574 xfs_buf_rele(bp);
1575 }
1576
1577 return freed;
1578 }
1579
1580 static unsigned long
1581 xfs_buftarg_shrink_count(
1582 struct shrinker *shrink,
1583 struct shrink_control *sc)
1584 {
1585 struct xfs_buftarg *btp = container_of(shrink,
1586 struct xfs_buftarg, bt_shrinker);
1587 return list_lru_count_node(&btp->bt_lru, sc->nid);
1588 }
1589
1590 void
1591 xfs_free_buftarg(
1592 struct xfs_mount *mp,
1593 struct xfs_buftarg *btp)
1594 {
1595 unregister_shrinker(&btp->bt_shrinker);
1596 list_lru_destroy(&btp->bt_lru);
1597
1598 if (mp->m_flags & XFS_MOUNT_BARRIER)
1599 xfs_blkdev_issue_flush(btp);
1600
1601 kmem_free(btp);
1602 }
1603
1604 STATIC int
1605 xfs_setsize_buftarg_flags(
1606 xfs_buftarg_t *btp,
1607 unsigned int blocksize,
1608 unsigned int sectorsize,
1609 int verbose)
1610 {
1611 btp->bt_bsize = blocksize;
1612 btp->bt_sshift = ffs(sectorsize) - 1;
1613 btp->bt_smask = sectorsize - 1;
1614
1615 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1616 char name[BDEVNAME_SIZE];
1617
1618 bdevname(btp->bt_bdev, name);
1619
1620 xfs_warn(btp->bt_mount,
1621 "Cannot set_blocksize to %u on device %s\n",
1622 sectorsize, name);
1623 return EINVAL;
1624 }
1625
1626 return 0;
1627 }
1628
1629 /*
1630 * When allocating the initial buffer target we have not yet
1631 * read in the superblock, so we don't know what sector size
1632 * is being used at this early stage. Play safe.
1633 */
1634 STATIC int
1635 xfs_setsize_buftarg_early(
1636 xfs_buftarg_t *btp,
1637 struct block_device *bdev)
1638 {
1639 return xfs_setsize_buftarg_flags(btp,
1640 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1641 }
1642
1643 int
1644 xfs_setsize_buftarg(
1645 xfs_buftarg_t *btp,
1646 unsigned int blocksize,
1647 unsigned int sectorsize)
1648 {
1649 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1650 }
1651
1652 xfs_buftarg_t *
1653 xfs_alloc_buftarg(
1654 struct xfs_mount *mp,
1655 struct block_device *bdev,
1656 int external,
1657 const char *fsname)
1658 {
1659 xfs_buftarg_t *btp;
1660
1661 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
1662
1663 btp->bt_mount = mp;
1664 btp->bt_dev = bdev->bd_dev;
1665 btp->bt_bdev = bdev;
1666 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1667 if (!btp->bt_bdi)
1668 goto error;
1669
1670 if (xfs_setsize_buftarg_early(btp, bdev))
1671 goto error;
1672
1673 if (list_lru_init(&btp->bt_lru))
1674 goto error;
1675
1676 btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
1677 btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1678 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1679 btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1680 register_shrinker(&btp->bt_shrinker);
1681 return btp;
1682
1683 error:
1684 kmem_free(btp);
1685 return NULL;
1686 }
1687
1688 /*
1689 * Add a buffer to the delayed write list.
1690 *
1691 * This queues a buffer for writeout if it hasn't already been. Note that
1692 * neither this routine nor the buffer list submission functions perform
1693 * any internal synchronization. It is expected that the lists are thread-local
1694 * to the callers.
1695 *
1696 * Returns true if we queued up the buffer, or false if it already had
1697 * been on the buffer list.
1698 */
1699 bool
1700 xfs_buf_delwri_queue(
1701 struct xfs_buf *bp,
1702 struct list_head *list)
1703 {
1704 ASSERT(xfs_buf_islocked(bp));
1705 ASSERT(!(bp->b_flags & XBF_READ));
1706
1707 /*
1708 * If the buffer is already marked delwri it has already been queued up
1709 * by someone else for immediate writeout. Just ignore it in that
1710 * case.
1711 */
1712 if (bp->b_flags & _XBF_DELWRI_Q) {
1713 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1714 return false;
1715 }
1716
1717 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1718
1719 /*
1720 * If a buffer gets written out synchronously or marked stale while it
1721 * is on a delwri list we lazily remove it. To do this, the other party
1722 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1723 * It remains referenced and on the list. In a rare corner case it
1724 * might get re-added to a delwri list after the synchronous writeout, in
1725 * which case we just need to re-add the flag here.
1726 */
1727 bp->b_flags |= _XBF_DELWRI_Q;
1728 if (list_empty(&bp->b_list)) {
1729 atomic_inc(&bp->b_hold);
1730 list_add_tail(&bp->b_list, list);
1731 }
1732
1733 return true;
1734 }
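
/*
 * Illustrative delwri pattern (hypothetical caller sketch, not part of this
 * file): queue locked buffers on a caller-private list, release them (the
 * list holds its own reference), then write the whole batch out with
 * xfs_buf_delwri_submit() below.
 *
 *	LIST_HEAD(buffer_list);
 *	int error;
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */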
1735
1736 /*
1737 * The compare function is more complex than it needs to be because
1738 * the return value is only 32 bits and we are doing comparisons
1739 * on 64 bit values
1740 */
1741 static int
1742 xfs_buf_cmp(
1743 void *priv,
1744 struct list_head *a,
1745 struct list_head *b)
1746 {
1747 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1748 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1749 xfs_daddr_t diff;
1750
1751 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1752 if (diff < 0)
1753 return -1;
1754 if (diff > 0)
1755 return 1;
1756 return 0;
1757 }
1758
1759 static int
1760 __xfs_buf_delwri_submit(
1761 struct list_head *buffer_list,
1762 struct list_head *io_list,
1763 bool wait)
1764 {
1765 struct blk_plug plug;
1766 struct xfs_buf *bp, *n;
1767 int pinned = 0;
1768
1769 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1770 if (!wait) {
1771 if (xfs_buf_ispinned(bp)) {
1772 pinned++;
1773 continue;
1774 }
1775 if (!xfs_buf_trylock(bp))
1776 continue;
1777 } else {
1778 xfs_buf_lock(bp);
1779 }
1780
1781 /*
1782 * Someone else might have written the buffer synchronously or
1783 * marked it stale in the meantime. In that case only the
1784 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1785 * reference and remove it from the list here.
1786 */
1787 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1788 list_del_init(&bp->b_list);
1789 xfs_buf_relse(bp);
1790 continue;
1791 }
1792
1793 list_move_tail(&bp->b_list, io_list);
1794 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1795 }
1796
1797 list_sort(NULL, io_list, xfs_buf_cmp);
1798
1799 blk_start_plug(&plug);
1800 list_for_each_entry_safe(bp, n, io_list, b_list) {
1801 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1802 bp->b_flags |= XBF_WRITE;
1803
1804 if (!wait) {
1805 bp->b_flags |= XBF_ASYNC;
1806 list_del_init(&bp->b_list);
1807 }
1808 xfs_bdstrat_cb(bp);
1809 }
1810 blk_finish_plug(&plug);
1811
1812 return pinned;
1813 }
1814
1815 /*
1816 * Write out a buffer list asynchronously.
1817 *
1818 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1819 * out and not wait for I/O completion on any of the buffers. This interface
1820 * is only safely usable for callers that can track I/O completion by higher
1821 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1822 * function.
1823 */
1824 int
1825 xfs_buf_delwri_submit_nowait(
1826 struct list_head *buffer_list)
1827 {
1828 LIST_HEAD(io_list);
1829 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1830 }
1831
1832 /*
1833 * Write out a buffer list synchronously.
1834 *
1835 * This will take the @buffer_list, write all buffers out and wait for I/O
1836 * completion on all of the buffers. @buffer_list is consumed by the function,
1837 * so callers must have some other way of tracking buffers if they require such
1838 * functionality.
1839 */
1840 int
1841 xfs_buf_delwri_submit(
1842 struct list_head *buffer_list)
1843 {
1844 LIST_HEAD(io_list);
1845 int error = 0, error2;
1846 struct xfs_buf *bp;
1847
1848 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1849
1850 /* Wait for IO to complete. */
1851 while (!list_empty(&io_list)) {
1852 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1853
1854 list_del_init(&bp->b_list);
1855 error2 = xfs_buf_iowait(bp);
1856 xfs_buf_relse(bp);
1857 if (!error)
1858 error = error2;
1859 }
1860
1861 return error;
1862 }
1863
1864 int __init
1865 xfs_buf_init(void)
1866 {
1867 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1868 KM_ZONE_HWALIGN, NULL);
1869 if (!xfs_buf_zone)
1870 goto out;
1871
1872 xfslogd_workqueue = alloc_workqueue("xfslogd",
1873 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1874 if (!xfslogd_workqueue)
1875 goto out_free_buf_zone;
1876
1877 return 0;
1878
1879 out_free_buf_zone:
1880 kmem_zone_destroy(xfs_buf_zone);
1881 out:
1882 return -ENOMEM;
1883 }
1884
1885 void
1886 xfs_buf_terminate(void)
1887 {
1888 destroy_workqueue(xfslogd_workqueue);
1889 kmem_zone_destroy(xfs_buf_zone);
1890 }