fs/xfs/xfs_buf.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include <linux/stddef.h>
20 #include <linux/errno.h>
21 #include <linux/gfp.h>
22 #include <linux/pagemap.h>
23 #include <linux/init.h>
24 #include <linux/vmalloc.h>
25 #include <linux/bio.h>
26 #include <linux/sysctl.h>
27 #include <linux/proc_fs.h>
28 #include <linux/workqueue.h>
29 #include <linux/percpu.h>
30 #include <linux/blkdev.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/migrate.h>
34 #include <linux/backing-dev.h>
35 #include <linux/freezer.h>
36
37 #include "xfs_sb.h"
38 #include "xfs_log.h"
39 #include "xfs_ag.h"
40 #include "xfs_mount.h"
41 #include "xfs_trace.h"
42
43 static kmem_zone_t *xfs_buf_zone;
44
45 static struct workqueue_struct *xfslogd_workqueue;
46
47 #ifdef XFS_BUF_LOCK_TRACKING
48 # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
49 # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
50 # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
51 #else
52 # define XB_SET_OWNER(bp) do { } while (0)
53 # define XB_CLEAR_OWNER(bp) do { } while (0)
54 # define XB_GET_OWNER(bp) do { } while (0)
55 #endif
56
57 #define xb_to_gfp(flags) \
58 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
59
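/*
 * A short note on the gfp mapping above (a reading of the macro, not new
 * policy): read-ahead allocations deliberately use a mask that fails fast
 * instead of entering reclaim, normal allocations use GFP_NOFS so the
 * allocator cannot recurse back into the filesystem, and __GFP_NOWARN is
 * set in both cases because the callers below handle allocation failure
 * themselves.
 */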
60
61 static inline int
62 xfs_buf_is_vmapped(
63 struct xfs_buf *bp)
64 {
65 /*
66 * Return true if the buffer is vmapped.
67 *
68 * b_addr is null if the buffer is not mapped, but the code is clever
69 * enough to know it doesn't have to map a single page, so the check has
70 * to be for both b_addr and bp->b_page_count > 1.
71 */
72 return bp->b_addr && bp->b_page_count > 1;
73 }
74
75 static inline int
76 xfs_buf_vmap_len(
77 struct xfs_buf *bp)
78 {
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
80 }
81
82 /*
83 * xfs_buf_lru_add - add a buffer to the LRU.
84 *
85 * The LRU takes a new reference to the buffer so that it will only be freed
86 * once the shrinker takes the buffer off the LRU.
87 */
88 STATIC void
89 xfs_buf_lru_add(
90 struct xfs_buf *bp)
91 {
92 struct xfs_buftarg *btp = bp->b_target;
93
94 spin_lock(&btp->bt_lru_lock);
95 if (list_empty(&bp->b_lru)) {
96 atomic_inc(&bp->b_hold);
97 list_add_tail(&bp->b_lru, &btp->bt_lru);
98 btp->bt_lru_nr++;
99 }
100 spin_unlock(&btp->bt_lru_lock);
101 }
102
103 /*
104 * xfs_buf_lru_del - remove a buffer from the LRU
105 *
106 * The unlocked check is safe here because it only occurs when there are no
107 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
108 * there to optimise the shrinker removing the buffer from the LRU and
109 * calling xfs_buf_free(), i.e. it avoids an unnecessary round trip on the
110 * bt_lru_lock.
111 */
112 STATIC void
113 xfs_buf_lru_del(
114 struct xfs_buf *bp)
115 {
116 struct xfs_buftarg *btp = bp->b_target;
117
118 if (list_empty(&bp->b_lru))
119 return;
120
121 spin_lock(&btp->bt_lru_lock);
122 if (!list_empty(&bp->b_lru)) {
123 list_del_init(&bp->b_lru);
124 btp->bt_lru_nr--;
125 }
126 spin_unlock(&btp->bt_lru_lock);
127 }
128
129 /*
130 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
131 * b_lru_ref count so that the buffer is freed immediately when the buffer
132 * reference count falls to zero. If the buffer is already on the LRU, we need
133 * to remove the reference that LRU holds on the buffer.
134 *
135 * This prevents build-up of stale buffers on the LRU.
136 */
137 void
138 xfs_buf_stale(
139 struct xfs_buf *bp)
140 {
141 ASSERT(xfs_buf_islocked(bp));
142
143 bp->b_flags |= XBF_STALE;
144
145 /*
146 * Clear the delwri status so that a delwri queue walker will not
147 * flush this buffer to disk now that it is stale. The delwri queue has
148 * a reference to the buffer, so this is safe to do.
149 */
150 bp->b_flags &= ~_XBF_DELWRI_Q;
151
152 atomic_set(&(bp)->b_lru_ref, 0);
153 if (!list_empty(&bp->b_lru)) {
154 struct xfs_buftarg *btp = bp->b_target;
155
156 spin_lock(&btp->bt_lru_lock);
157 if (!list_empty(&bp->b_lru)) {
158 list_del_init(&bp->b_lru);
159 btp->bt_lru_nr--;
160 atomic_dec(&bp->b_hold);
161 }
162 spin_unlock(&btp->bt_lru_lock);
163 }
164 ASSERT(atomic_read(&bp->b_hold) >= 1);
165 }
166
167 struct xfs_buf *
168 xfs_buf_alloc(
169 struct xfs_buftarg *target,
170 xfs_daddr_t blkno,
171 size_t numblks,
172 xfs_buf_flags_t flags)
173 {
174 struct xfs_buf *bp;
175
176 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
177 if (unlikely(!bp))
178 return NULL;
179
180 /*
181 * We don't want certain flags to appear in b_flags unless they are
182 * specifically set by later operations on the buffer.
183 */
184 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
185
186 atomic_set(&bp->b_hold, 1);
187 atomic_set(&bp->b_lru_ref, 1);
188 init_completion(&bp->b_iowait);
189 INIT_LIST_HEAD(&bp->b_lru);
190 INIT_LIST_HEAD(&bp->b_list);
191 RB_CLEAR_NODE(&bp->b_rbnode);
192 sema_init(&bp->b_sema, 0); /* held, no waiters */
193 XB_SET_OWNER(bp);
194 bp->b_target = target;
195
196 /*
197 * Set length and io_length to the same value initially.
198 * I/O routines should use io_length, which will be the same in
199 * most cases but may be reset (e.g. XFS recovery).
200 */
201 bp->b_length = numblks;
202 bp->b_io_length = numblks;
203 bp->b_flags = flags;
204
205 /*
206 * We do not set the block number here in the buffer because we have not
207 * finished initialising the buffer. We insert the buffer into the cache
208 * in this state, so this ensures that we are unable to do IO on a
209 * buffer that hasn't been fully initialised.
210 */
211 bp->b_bn = XFS_BUF_DADDR_NULL;
212 atomic_set(&bp->b_pin_count, 0);
213 init_waitqueue_head(&bp->b_waiters);
214
215 XFS_STATS_INC(xb_create);
216 trace_xfs_buf_init(bp, _RET_IP_);
217
218 return bp;
219 }
220
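/*
 * For orientation, a rough sketch of how xfs_buf_alloc() above and the
 * helpers below fit together on a cache miss in xfs_buf_get(). This mirrors
 * the code in this file; it is an outline, not an additional interface:
 *
 *	bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);  miss
 *	new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
 *	xfs_buf_allocate_memory(new_bp, flags);
 *	bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp); insert
 *	_xfs_buf_map_pages(bp, flags);	if a mapping is needed
 */
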
221 /*
222 * Allocate a page array capable of holding a specified number
223 * of pages, and point the page buf at it.
224 */
225 STATIC int
226 _xfs_buf_get_pages(
227 xfs_buf_t *bp,
228 int page_count,
229 xfs_buf_flags_t flags)
230 {
231 /* Make sure that we have a page list */
232 if (bp->b_pages == NULL) {
233 bp->b_page_count = page_count;
234 if (page_count <= XB_PAGES) {
235 bp->b_pages = bp->b_page_array;
236 } else {
237 bp->b_pages = kmem_alloc(sizeof(struct page *) *
238 page_count, KM_NOFS);
239 if (bp->b_pages == NULL)
240 return -ENOMEM;
241 }
242 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
243 }
244 return 0;
245 }
246
247 /*
248 * Frees b_pages if it was allocated.
249 */
250 STATIC void
251 _xfs_buf_free_pages(
252 xfs_buf_t *bp)
253 {
254 if (bp->b_pages != bp->b_page_array) {
255 kmem_free(bp->b_pages);
256 bp->b_pages = NULL;
257 }
258 }
259
260 /*
261 * Releases the specified buffer.
262 *
263 * The modification state of any associated pages is left unchanged.
264 * The buffer must not be on any hash - use xfs_buf_rele instead for
265 * hashed and refcounted buffers.
266 */
267 void
268 xfs_buf_free(
269 xfs_buf_t *bp)
270 {
271 trace_xfs_buf_free(bp, _RET_IP_);
272
273 ASSERT(list_empty(&bp->b_lru));
274
275 if (bp->b_flags & _XBF_PAGES) {
276 uint i;
277
278 if (xfs_buf_is_vmapped(bp))
279 vm_unmap_ram(bp->b_addr - bp->b_offset,
280 bp->b_page_count);
281
282 for (i = 0; i < bp->b_page_count; i++) {
283 struct page *page = bp->b_pages[i];
284
285 __free_page(page);
286 }
287 } else if (bp->b_flags & _XBF_KMEM)
288 kmem_free(bp->b_addr);
289 _xfs_buf_free_pages(bp);
290 kmem_zone_free(xfs_buf_zone, bp);
291 }
292
293 /*
294 * Allocates all the pages for the buffer in question and builds its page list.
295 */
296 STATIC int
297 xfs_buf_allocate_memory(
298 xfs_buf_t *bp,
299 uint flags)
300 {
301 size_t size;
302 size_t nbytes, offset;
303 gfp_t gfp_mask = xb_to_gfp(flags);
304 unsigned short page_count, i;
305 xfs_off_t start, end;
306 int error;
307
308 /*
309 * for buffers that are contained within a single page, just allocate
310 * the memory from the heap - there's no need for the complexity of
311 * page arrays to keep allocation down to order 0.
312 */
313 size = BBTOB(bp->b_length);
314 if (size < PAGE_SIZE) {
315 bp->b_addr = kmem_alloc(size, KM_NOFS);
316 if (!bp->b_addr) {
317 /* low memory - use alloc_page loop instead */
318 goto use_alloc_page;
319 }
320
321 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
322 ((unsigned long)bp->b_addr & PAGE_MASK)) {
323 /* b_addr spans two pages - use alloc_page instead */
324 kmem_free(bp->b_addr);
325 bp->b_addr = NULL;
326 goto use_alloc_page;
327 }
328 bp->b_offset = offset_in_page(bp->b_addr);
329 bp->b_pages = bp->b_page_array;
330 bp->b_pages[0] = virt_to_page(bp->b_addr);
331 bp->b_page_count = 1;
332 bp->b_flags |= _XBF_KMEM;
333 return 0;
334 }
335
336 use_alloc_page:
337 start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
338 end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
339 page_count = end - start;
340 error = _xfs_buf_get_pages(bp, page_count, flags);
341 if (unlikely(error))
342 return error;
343
344 offset = bp->b_offset;
345 bp->b_flags |= _XBF_PAGES;
346
347 for (i = 0; i < bp->b_page_count; i++) {
348 struct page *page;
349 uint retries = 0;
350 retry:
351 page = alloc_page(gfp_mask);
352 if (unlikely(page == NULL)) {
353 if (flags & XBF_READ_AHEAD) {
354 bp->b_page_count = i;
355 error = ENOMEM;
356 goto out_free_pages;
357 }
358
359 /*
360 * This could deadlock.
361 *
362 * But until all the XFS low-level code is revamped to
363 * handle buffer allocation failures we can't do much.
364 */
365 if (!(++retries % 100))
366 xfs_err(NULL,
367 "possible memory allocation deadlock in %s (mode:0x%x)",
368 __func__, gfp_mask);
369
370 XFS_STATS_INC(xb_page_retries);
371 congestion_wait(BLK_RW_ASYNC, HZ/50);
372 goto retry;
373 }
374
375 XFS_STATS_INC(xb_page_found);
376
377 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
378 size -= nbytes;
379 bp->b_pages[i] = page;
380 offset = 0;
381 }
382 return 0;
383
384 out_free_pages:
385 for (i = 0; i < bp->b_page_count; i++)
386 __free_page(bp->b_pages[i]);
387 return error;
388 }
389
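/*
 * The allocation strategy above in practice: a single-sector (512 byte)
 * buffer is served straight from the heap and tagged _XBF_KMEM, while a
 * buffer of PAGE_SIZE or larger (or a heap allocation that happened to
 * straddle a page boundary) falls back to the alloc_page() loop and is
 * tagged _XBF_PAGES. This is a summary of the code above, not a new rule.
 */
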
390 /*
391 * Map buffer into kernel address-space if necessary.
392 */
393 STATIC int
394 _xfs_buf_map_pages(
395 xfs_buf_t *bp,
396 uint flags)
397 {
398 ASSERT(bp->b_flags & _XBF_PAGES);
399 if (bp->b_page_count == 1) {
400 /* A single page buffer is always mappable */
401 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
402 } else if (flags & XBF_UNMAPPED) {
403 bp->b_addr = NULL;
404 } else {
405 int retried = 0;
406
407 do {
408 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
409 -1, PAGE_KERNEL);
410 if (bp->b_addr)
411 break;
412 vm_unmap_aliases();
413 } while (retried++ <= 1);
414
415 if (!bp->b_addr)
416 return -ENOMEM;
417 bp->b_addr += bp->b_offset;
418 }
419
420 return 0;
421 }
422
423 /*
424 * Finding and Reading Buffers
425 */
426
427 /*
428 * Look up a lockable buffer for a given disk address range, inserting
429 * the supplied new buffer if no match is found. The buffer is returned
430 * locked. No I/O is implied by this call.
431 */
432 xfs_buf_t *
433 _xfs_buf_find(
434 struct xfs_buftarg *btp,
435 xfs_daddr_t blkno,
436 size_t numblks,
437 xfs_buf_flags_t flags,
438 xfs_buf_t *new_bp)
439 {
440 size_t numbytes;
441 struct xfs_perag *pag;
442 struct rb_node **rbp;
443 struct rb_node *parent;
444 xfs_buf_t *bp;
445
446 numbytes = BBTOB(numblks);
447
448 /* Check for IOs smaller than the sector size / not sector aligned */
449 ASSERT(!(numbytes < (1 << btp->bt_sshift)));
450 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
451
452 /* get tree root */
453 pag = xfs_perag_get(btp->bt_mount,
454 xfs_daddr_to_agno(btp->bt_mount, blkno));
455
456 /* walk tree */
457 spin_lock(&pag->pag_buf_lock);
458 rbp = &pag->pag_buf_tree.rb_node;
459 parent = NULL;
460 bp = NULL;
461 while (*rbp) {
462 parent = *rbp;
463 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
464
465 if (blkno < bp->b_bn)
466 rbp = &(*rbp)->rb_left;
467 else if (blkno > bp->b_bn)
468 rbp = &(*rbp)->rb_right;
469 else {
470 /*
471 * found a block number match. If the range doesn't
472 * match, the only way this is allowed is if the buffer
473 * in the cache is stale and the transaction that made
474 * it stale has not yet committed. i.e. we are
475 * reallocating a busy extent. Skip this buffer and
476 * continue searching to the right for an exact match.
477 */
478 if (bp->b_length != numblks) {
479 ASSERT(bp->b_flags & XBF_STALE);
480 rbp = &(*rbp)->rb_right;
481 continue;
482 }
483 atomic_inc(&bp->b_hold);
484 goto found;
485 }
486 }
487
488 /* No match found */
489 if (new_bp) {
490 rb_link_node(&new_bp->b_rbnode, parent, rbp);
491 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
492 /* the buffer keeps the perag reference until it is freed */
493 new_bp->b_pag = pag;
494 spin_unlock(&pag->pag_buf_lock);
495 } else {
496 XFS_STATS_INC(xb_miss_locked);
497 spin_unlock(&pag->pag_buf_lock);
498 xfs_perag_put(pag);
499 }
500 return new_bp;
501
502 found:
503 spin_unlock(&pag->pag_buf_lock);
504 xfs_perag_put(pag);
505
506 if (!xfs_buf_trylock(bp)) {
507 if (flags & XBF_TRYLOCK) {
508 xfs_buf_rele(bp);
509 XFS_STATS_INC(xb_busy_locked);
510 return NULL;
511 }
512 xfs_buf_lock(bp);
513 XFS_STATS_INC(xb_get_locked_waited);
514 }
515
516 /*
517 * if the buffer is stale, clear all the external state associated with
518 * it. We need to keep flags such as how we allocated the buffer memory
519 * intact here.
520 */
521 if (bp->b_flags & XBF_STALE) {
522 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
523 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
524 }
525
526 trace_xfs_buf_find(bp, flags, _RET_IP_);
527 XFS_STATS_INC(xb_get_locked);
528 return bp;
529 }
530
531 /*
532 * Assembles a buffer covering the specified range. The code is optimised for
533 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
534 * more hits than misses.
535 */
536 struct xfs_buf *
537 xfs_buf_get(
538 xfs_buftarg_t *target,
539 xfs_daddr_t blkno,
540 size_t numblks,
541 xfs_buf_flags_t flags)
542 {
543 struct xfs_buf *bp;
544 struct xfs_buf *new_bp;
545 int error = 0;
546
547 bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
548 if (likely(bp))
549 goto found;
550
551 new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
552 if (unlikely(!new_bp))
553 return NULL;
554
555 error = xfs_buf_allocate_memory(new_bp, flags);
556 if (error) {
557 kmem_zone_free(xfs_buf_zone, new_bp);
558 return NULL;
559 }
560
561 bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
562 if (!bp) {
563 xfs_buf_free(new_bp);
564 return NULL;
565 }
566
567 if (bp != new_bp)
568 xfs_buf_free(new_bp);
569
570 /*
571 * Now we have a workable buffer, fill in the block number so
572 * that we can do IO on it.
573 */
574 bp->b_bn = blkno;
575 bp->b_io_length = bp->b_length;
576
577 found:
578 if (!bp->b_addr) {
579 error = _xfs_buf_map_pages(bp, flags);
580 if (unlikely(error)) {
581 xfs_warn(target->bt_mount,
582 "%s: failed to map pages\n", __func__);
583 xfs_buf_relse(bp);
584 return NULL;
585 }
586 }
587
588 XFS_STATS_INC(xb_get);
589 trace_xfs_buf_get(bp, flags, _RET_IP_);
590 return bp;
591 }
592
593 STATIC int
594 _xfs_buf_read(
595 xfs_buf_t *bp,
596 xfs_buf_flags_t flags)
597 {
598 ASSERT(!(flags & XBF_WRITE));
599 ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
600
601 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
602 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
603
604 xfs_buf_iorequest(bp);
605 if (flags & XBF_ASYNC)
606 return 0;
607 return xfs_buf_iowait(bp);
608 }
609
610 xfs_buf_t *
611 xfs_buf_read(
612 xfs_buftarg_t *target,
613 xfs_daddr_t blkno,
614 size_t numblks,
615 xfs_buf_flags_t flags)
616 {
617 xfs_buf_t *bp;
618
619 flags |= XBF_READ;
620
621 bp = xfs_buf_get(target, blkno, numblks, flags);
622 if (bp) {
623 trace_xfs_buf_read(bp, flags, _RET_IP_);
624
625 if (!XFS_BUF_ISDONE(bp)) {
626 XFS_STATS_INC(xb_get_read);
627 _xfs_buf_read(bp, flags);
628 } else if (flags & XBF_ASYNC) {
629 /*
630 * Read ahead call which is already satisfied,
631 * drop the buffer
632 */
633 xfs_buf_relse(bp);
634 return NULL;
635 } else {
636 /* We do not want read in the flags */
637 bp->b_flags &= ~XBF_READ;
638 }
639 }
640
641 return bp;
642 }
643
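/*
 * A minimal sketch of a synchronous metadata read through this cache. The
 * mount pointer "mp" and the choice of the data device target are
 * illustrative assumptions; error handling is reduced to the essentials:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0);
 *	if (bp) {
 *		if (!bp->b_error) {
 *			... inspect bp->b_addr ...
 *		}
 *		xfs_buf_relse(bp);
 *	}
 */
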
644 /*
645 * If we are not low on memory then do the readahead in a deadlock
646 * safe manner.
647 */
648 void
649 xfs_buf_readahead(
650 xfs_buftarg_t *target,
651 xfs_daddr_t blkno,
652 size_t numblks)
653 {
654 if (bdi_read_congested(target->bt_bdi))
655 return;
656
657 xfs_buf_read(target, blkno, numblks,
658 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
659 }
660
661 /*
662 * Read an uncached buffer from disk. Allocates and returns a locked
663 * buffer containing the disk contents or nothing.
664 */
665 struct xfs_buf *
666 xfs_buf_read_uncached(
667 struct xfs_buftarg *target,
668 xfs_daddr_t daddr,
669 size_t numblks,
670 int flags)
671 {
672 xfs_buf_t *bp;
673 int error;
674
675 bp = xfs_buf_get_uncached(target, numblks, flags);
676 if (!bp)
677 return NULL;
678
679 /* set up the buffer for a read IO */
680 XFS_BUF_SET_ADDR(bp, daddr);
681 XFS_BUF_READ(bp);
682
683 xfsbdstrat(target->bt_mount, bp);
684 error = xfs_buf_iowait(bp);
685 if (error) {
686 xfs_buf_relse(bp);
687 return NULL;
688 }
689 return bp;
690 }
691
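/*
 * Uncached buffers sit outside the per-AG rbtree cache, so the caller is
 * responsible for the whole life cycle. A hedged usage sketch (the daddr
 * and length values are purely illustrative):
 *
 *	bp = xfs_buf_read_uncached(target, daddr, numblks, 0);
 *	if (bp) {
 *		... consume the contents at bp->b_addr ...
 *		xfs_buf_relse(bp);
 *	}
 */
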
692 /*
693 * Return a buffer allocated as an empty buffer and associated to external
694 * memory via xfs_buf_associate_memory() back to its empty state.
695 */
696 void
697 xfs_buf_set_empty(
698 struct xfs_buf *bp,
699 size_t numblks)
700 {
701 if (bp->b_pages)
702 _xfs_buf_free_pages(bp);
703
704 bp->b_pages = NULL;
705 bp->b_page_count = 0;
706 bp->b_addr = NULL;
707 bp->b_length = numblks;
708 bp->b_io_length = numblks;
709 bp->b_bn = XFS_BUF_DADDR_NULL;
710 }
711
712 static inline struct page *
713 mem_to_page(
714 void *addr)
715 {
716 if ((!is_vmalloc_addr(addr))) {
717 return virt_to_page(addr);
718 } else {
719 return vmalloc_to_page(addr);
720 }
721 }
722
723 int
724 xfs_buf_associate_memory(
725 xfs_buf_t *bp,
726 void *mem,
727 size_t len)
728 {
729 int rval;
730 int i = 0;
731 unsigned long pageaddr;
732 unsigned long offset;
733 size_t buflen;
734 int page_count;
735
736 pageaddr = (unsigned long)mem & PAGE_MASK;
737 offset = (unsigned long)mem - pageaddr;
738 buflen = PAGE_ALIGN(len + offset);
739 page_count = buflen >> PAGE_SHIFT;
740
741 /* Free any previous set of page pointers */
742 if (bp->b_pages)
743 _xfs_buf_free_pages(bp);
744
745 bp->b_pages = NULL;
746 bp->b_addr = mem;
747
748 rval = _xfs_buf_get_pages(bp, page_count, 0);
749 if (rval)
750 return rval;
751
752 bp->b_offset = offset;
753
754 for (i = 0; i < bp->b_page_count; i++) {
755 bp->b_pages[i] = mem_to_page((void *)pageaddr);
756 pageaddr += PAGE_SIZE;
757 }
758
759 bp->b_io_length = BTOBB(len);
760 bp->b_length = BTOBB(buflen);
761
762 return 0;
763 }
764
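/*
 * A hedged sketch of pointing a buffer at externally owned memory; the
 * name "ext_mem", its length and the caller are assumptions made purely
 * for illustration:
 *
 *	bp = xfs_buf_alloc(target, 0, numblks, 0);
 *	if (bp && xfs_buf_associate_memory(bp, ext_mem, len) == 0) {
 *		bp->b_addr now points at ext_mem; the buffer owns no pages
 *	}
 */
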
765 xfs_buf_t *
766 xfs_buf_get_uncached(
767 struct xfs_buftarg *target,
768 size_t numblks,
769 int flags)
770 {
771 unsigned long page_count;
772 int error, i;
773 xfs_buf_t *bp;
774
775 bp = xfs_buf_alloc(target, 0, numblks, 0);
776 if (unlikely(bp == NULL))
777 goto fail;
778
779 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
780 error = _xfs_buf_get_pages(bp, page_count, 0);
781 if (error)
782 goto fail_free_buf;
783
784 for (i = 0; i < page_count; i++) {
785 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
786 if (!bp->b_pages[i])
787 goto fail_free_mem;
788 }
789 bp->b_flags |= _XBF_PAGES;
790
791 error = _xfs_buf_map_pages(bp, 0);
792 if (unlikely(error)) {
793 xfs_warn(target->bt_mount,
794 "%s: failed to map pages\n", __func__);
795 goto fail_free_mem;
796 }
797
798 trace_xfs_buf_get_uncached(bp, _RET_IP_);
799 return bp;
800
801 fail_free_mem:
802 while (--i >= 0)
803 __free_page(bp->b_pages[i]);
804 _xfs_buf_free_pages(bp);
805 fail_free_buf:
806 kmem_zone_free(xfs_buf_zone, bp);
807 fail:
808 return NULL;
809 }
810
811 /*
812 * Increment reference count on buffer, to hold the buffer concurrently
813 * with another thread which may release (free) the buffer asynchronously.
814 * Must hold the buffer already to call this function.
815 */
816 void
817 xfs_buf_hold(
818 xfs_buf_t *bp)
819 {
820 trace_xfs_buf_hold(bp, _RET_IP_);
821 atomic_inc(&bp->b_hold);
822 }
823
824 /*
825 * Releases a hold on the specified buffer. If the
826 * hold count is 1, calls xfs_buf_free.
827 */
828 void
829 xfs_buf_rele(
830 xfs_buf_t *bp)
831 {
832 struct xfs_perag *pag = bp->b_pag;
833
834 trace_xfs_buf_rele(bp, _RET_IP_);
835
836 if (!pag) {
837 ASSERT(list_empty(&bp->b_lru));
838 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
839 if (atomic_dec_and_test(&bp->b_hold))
840 xfs_buf_free(bp);
841 return;
842 }
843
844 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
845
846 ASSERT(atomic_read(&bp->b_hold) > 0);
847 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
848 if (!(bp->b_flags & XBF_STALE) &&
849 atomic_read(&bp->b_lru_ref)) {
850 xfs_buf_lru_add(bp);
851 spin_unlock(&pag->pag_buf_lock);
852 } else {
853 xfs_buf_lru_del(bp);
854 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
855 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
856 spin_unlock(&pag->pag_buf_lock);
857 xfs_perag_put(pag);
858 xfs_buf_free(bp);
859 }
860 }
861 }
862
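/*
 * Reference counting recap (describing the functions above, not adding new
 * rules): every lookup or creation returns with b_hold elevated, extra
 * short-term users take xfs_buf_hold(), and each hold is dropped with
 * xfs_buf_rele(). The last release either parks the buffer on the LRU
 * (b_lru_ref > 0 and not stale; the LRU takes its own hold) or removes it
 * from the cache and tears it down via xfs_buf_free().
 *
 *	xfs_buf_hold(bp);
 *	... hand bp to asynchronous completion, etc ...
 *	xfs_buf_rele(bp);
 */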
863
864 /*
865 * Lock a buffer object, if it is not already locked.
866 *
867 * If we come across a stale, pinned, locked buffer, we know that we are
868 * being asked to lock a buffer that has been reallocated. Because it is
869 * pinned, we know that the log has not been pushed to disk and hence it
870 * will still be locked. Rather than continuing to have trylock attempts
871 * fail until someone else pushes the log, push it ourselves before
872 * returning. This means that the xfsaild will not get stuck trying
873 * to push on stale inode buffers.
874 */
875 int
876 xfs_buf_trylock(
877 struct xfs_buf *bp)
878 {
879 int locked;
880
881 locked = down_trylock(&bp->b_sema) == 0;
882 if (locked)
883 XB_SET_OWNER(bp);
884 else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
885 xfs_log_force(bp->b_target->bt_mount, 0);
886
887 trace_xfs_buf_trylock(bp, _RET_IP_);
888 return locked;
889 }
890
891 /*
892 * Lock a buffer object.
893 *
894 * If we come across a stale, pinned, locked buffer, we know that we
895 * are being asked to lock a buffer that has been reallocated. Because
896 * it is pinned, we know that the log has not been pushed to disk and
897 * hence it will still be locked. Rather than sleeping until someone
898 * else pushes the log, push it ourselves before trying to get the lock.
899 */
900 void
901 xfs_buf_lock(
902 struct xfs_buf *bp)
903 {
904 trace_xfs_buf_lock(bp, _RET_IP_);
905
906 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
907 xfs_log_force(bp->b_target->bt_mount, 0);
908 down(&bp->b_sema);
909 XB_SET_OWNER(bp);
910
911 trace_xfs_buf_lock_done(bp, _RET_IP_);
912 }
913
914 void
915 xfs_buf_unlock(
916 struct xfs_buf *bp)
917 {
918 XB_CLEAR_OWNER(bp);
919 up(&bp->b_sema);
920
921 trace_xfs_buf_unlock(bp, _RET_IP_);
922 }
923
924 STATIC void
925 xfs_buf_wait_unpin(
926 xfs_buf_t *bp)
927 {
928 DECLARE_WAITQUEUE (wait, current);
929
930 if (atomic_read(&bp->b_pin_count) == 0)
931 return;
932
933 add_wait_queue(&bp->b_waiters, &wait);
934 for (;;) {
935 set_current_state(TASK_UNINTERRUPTIBLE);
936 if (atomic_read(&bp->b_pin_count) == 0)
937 break;
938 io_schedule();
939 }
940 remove_wait_queue(&bp->b_waiters, &wait);
941 set_current_state(TASK_RUNNING);
942 }
943
944 /*
945 * Buffer Utility Routines
946 */
947
948 STATIC void
949 xfs_buf_iodone_work(
950 struct work_struct *work)
951 {
952 xfs_buf_t *bp =
953 container_of(work, xfs_buf_t, b_iodone_work);
954
955 if (bp->b_iodone)
956 (*(bp->b_iodone))(bp);
957 else if (bp->b_flags & XBF_ASYNC)
958 xfs_buf_relse(bp);
959 }
960
961 void
962 xfs_buf_ioend(
963 xfs_buf_t *bp,
964 int schedule)
965 {
966 trace_xfs_buf_iodone(bp, _RET_IP_);
967
968 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
969 if (bp->b_error == 0)
970 bp->b_flags |= XBF_DONE;
971
972 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
973 if (schedule) {
974 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
975 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
976 } else {
977 xfs_buf_iodone_work(&bp->b_iodone_work);
978 }
979 } else {
980 complete(&bp->b_iowait);
981 }
982 }
983
984 void
985 xfs_buf_ioerror(
986 xfs_buf_t *bp,
987 int error)
988 {
989 ASSERT(error >= 0 && error <= 0xffff);
990 bp->b_error = (unsigned short)error;
991 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
992 }
993
994 void
995 xfs_buf_ioerror_alert(
996 struct xfs_buf *bp,
997 const char *func)
998 {
999 xfs_alert(bp->b_target->bt_mount,
1000 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1001 (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
1002 }
1003
1004 int
1005 xfs_bwrite(
1006 struct xfs_buf *bp)
1007 {
1008 int error;
1009
1010 ASSERT(xfs_buf_islocked(bp));
1011
1012 bp->b_flags |= XBF_WRITE;
1013 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
1014
1015 xfs_bdstrat_cb(bp);
1016
1017 error = xfs_buf_iowait(bp);
1018 if (error) {
1019 xfs_force_shutdown(bp->b_target->bt_mount,
1020 SHUTDOWN_META_IO_ERROR);
1021 }
1022 return error;
1023 }
1024
1025 /*
1026 * Called when we want to stop a buffer from getting written or read.
1027 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1028 * so that the proper iodone callbacks get called.
1029 */
1030 STATIC int
1031 xfs_bioerror(
1032 xfs_buf_t *bp)
1033 {
1034 #ifdef XFSERRORDEBUG
1035 ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1036 #endif
1037
1038 /*
1039 * No need to wait until the buffer is unpinned, we aren't flushing it.
1040 */
1041 xfs_buf_ioerror(bp, EIO);
1042
1043 /*
1044 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1045 */
1046 XFS_BUF_UNREAD(bp);
1047 XFS_BUF_UNDONE(bp);
1048 xfs_buf_stale(bp);
1049
1050 xfs_buf_ioend(bp, 0);
1051
1052 return EIO;
1053 }
1054
1055 /*
1056 * Same as xfs_bioerror, except that we are releasing the buffer
1057 * here ourselves, and avoiding the xfs_buf_ioend call.
1058 * This is meant for userdata errors; metadata bufs come with
1059 * iodone functions attached, so that we can track down errors.
1060 */
1061 STATIC int
1062 xfs_bioerror_relse(
1063 struct xfs_buf *bp)
1064 {
1065 int64_t fl = bp->b_flags;
1066 /*
1067 * No need to wait until the buffer is unpinned.
1068 * We aren't flushing it.
1069 *
1070 * chunkhold expects B_DONE to be set, whether
1071 * we actually finish the I/O or not. We don't want to
1072 * change that interface.
1073 */
1074 XFS_BUF_UNREAD(bp);
1075 XFS_BUF_DONE(bp);
1076 xfs_buf_stale(bp);
1077 bp->b_iodone = NULL;
1078 if (!(fl & XBF_ASYNC)) {
1079 /*
1080 * Mark b_error and B_ERROR _both_.
1081	 * Lots of chunkcache code assumes that.
1082 * There's no reason to mark error for
1083 * ASYNC buffers.
1084 */
1085 xfs_buf_ioerror(bp, EIO);
1086 complete(&bp->b_iowait);
1087 } else {
1088 xfs_buf_relse(bp);
1089 }
1090
1091 return EIO;
1092 }
1093
1094
1095 /*
1096 * All xfs metadata buffers except log state machine buffers
1097 * get this attached as their b_bdstrat callback function.
1098 * This is so that we can catch a buffer after we have prematurely
1099 * unpinned it to forcibly shut down the filesystem.
1100 */
1101 int
1102 xfs_bdstrat_cb(
1103 struct xfs_buf *bp)
1104 {
1105 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1106 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1107 /*
1108 * Metadata write that didn't get logged but
1109 * written delayed anyway. These aren't associated
1110 * with a transaction, and can be ignored.
1111 */
1112 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1113 return xfs_bioerror_relse(bp);
1114 else
1115 return xfs_bioerror(bp);
1116 }
1117
1118 xfs_buf_iorequest(bp);
1119 return 0;
1120 }
1121
1122 /*
1123 * Wrapper around bdstrat so that we can stop data from going to disk in case
1124 * we are shutting down the filesystem. Typically user data goes through this
1125 * path; one of the exceptions is the superblock.
1126 */
1127 void
1128 xfsbdstrat(
1129 struct xfs_mount *mp,
1130 struct xfs_buf *bp)
1131 {
1132 if (XFS_FORCED_SHUTDOWN(mp)) {
1133 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1134 xfs_bioerror_relse(bp);
1135 return;
1136 }
1137
1138 xfs_buf_iorequest(bp);
1139 }
1140
1141 STATIC void
1142 _xfs_buf_ioend(
1143 xfs_buf_t *bp,
1144 int schedule)
1145 {
1146 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1147 xfs_buf_ioend(bp, schedule);
1148 }
1149
1150 STATIC void
1151 xfs_buf_bio_end_io(
1152 struct bio *bio,
1153 int error)
1154 {
1155 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1156
1157 xfs_buf_ioerror(bp, -error);
1158
1159 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1160 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1161
1162 _xfs_buf_ioend(bp, 1);
1163 bio_put(bio);
1164 }
1165
1166 STATIC void
1167 _xfs_buf_ioapply(
1168 xfs_buf_t *bp)
1169 {
1170 int rw, map_i, total_nr_pages, nr_pages;
1171 struct bio *bio;
1172 int offset = bp->b_offset;
1173 int size = BBTOB(bp->b_io_length);
1174 sector_t sector = bp->b_bn;
1175
1176 total_nr_pages = bp->b_page_count;
1177 map_i = 0;
1178
1179 if (bp->b_flags & XBF_WRITE) {
1180 if (bp->b_flags & XBF_SYNCIO)
1181 rw = WRITE_SYNC;
1182 else
1183 rw = WRITE;
1184 if (bp->b_flags & XBF_FUA)
1185 rw |= REQ_FUA;
1186 if (bp->b_flags & XBF_FLUSH)
1187 rw |= REQ_FLUSH;
1188 } else if (bp->b_flags & XBF_READ_AHEAD) {
1189 rw = READA;
1190 } else {
1191 rw = READ;
1192 }
1193
1194 /* we only use the buffer cache for meta-data */
1195 rw |= REQ_META;
1196
1197 next_chunk:
1198 atomic_inc(&bp->b_io_remaining);
1199 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1200 if (nr_pages > total_nr_pages)
1201 nr_pages = total_nr_pages;
1202
1203 bio = bio_alloc(GFP_NOIO, nr_pages);
1204 bio->bi_bdev = bp->b_target->bt_bdev;
1205 bio->bi_sector = sector;
1206 bio->bi_end_io = xfs_buf_bio_end_io;
1207 bio->bi_private = bp;
1208
1209
1210 for (; size && nr_pages; nr_pages--, map_i++) {
1211 int rbytes, nbytes = PAGE_SIZE - offset;
1212
1213 if (nbytes > size)
1214 nbytes = size;
1215
1216 rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
1217 if (rbytes < nbytes)
1218 break;
1219
1220 offset = 0;
1221 sector += BTOBB(nbytes);
1222 size -= nbytes;
1223 total_nr_pages--;
1224 }
1225
1226 if (likely(bio->bi_size)) {
1227 if (xfs_buf_is_vmapped(bp)) {
1228 flush_kernel_vmap_range(bp->b_addr,
1229 xfs_buf_vmap_len(bp));
1230 }
1231 submit_bio(rw, bio);
1232 if (size)
1233 goto next_chunk;
1234 } else {
1235 xfs_buf_ioerror(bp, EIO);
1236 bio_put(bio);
1237 }
1238 }
1239
1240 void
1241 xfs_buf_iorequest(
1242 xfs_buf_t *bp)
1243 {
1244 trace_xfs_buf_iorequest(bp, _RET_IP_);
1245
1246 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1247
1248 if (bp->b_flags & XBF_WRITE)
1249 xfs_buf_wait_unpin(bp);
1250 xfs_buf_hold(bp);
1251
1252	/* Set the count to 1 initially; this stops an I/O
1253	 * completion callout that happens before we have started
1254	 * all the I/O from calling xfs_buf_ioend too early.
1255 */
1256 atomic_set(&bp->b_io_remaining, 1);
1257 _xfs_buf_ioapply(bp);
1258 _xfs_buf_ioend(bp, 0);
1259
1260 xfs_buf_rele(bp);
1261 }
1262
1263 /*
1264 * Waits for I/O to complete on the buffer supplied. It returns immediately if
1265 * no I/O is pending or there is already a pending error on the buffer. It
1266 * returns the I/O error code, if any, or 0 if there was no error.
1267 */
1268 int
1269 xfs_buf_iowait(
1270 xfs_buf_t *bp)
1271 {
1272 trace_xfs_buf_iowait(bp, _RET_IP_);
1273
1274 if (!bp->b_error)
1275 wait_for_completion(&bp->b_iowait);
1276
1277 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1278 return bp->b_error;
1279 }
1280
1281 xfs_caddr_t
1282 xfs_buf_offset(
1283 xfs_buf_t *bp,
1284 size_t offset)
1285 {
1286 struct page *page;
1287
1288 if (bp->b_addr)
1289 return bp->b_addr + offset;
1290
1291 offset += bp->b_offset;
1292 page = bp->b_pages[offset >> PAGE_SHIFT];
1293 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1294 }
1295
1296 /*
1297 * Move data into or out of a buffer.
1298 */
1299 void
1300 xfs_buf_iomove(
1301 xfs_buf_t *bp, /* buffer to process */
1302 size_t boff, /* starting buffer offset */
1303 size_t bsize, /* length to copy */
1304 void *data, /* data address */
1305 xfs_buf_rw_t mode) /* read/write/zero flag */
1306 {
1307 size_t bend;
1308
1309 bend = boff + bsize;
1310 while (boff < bend) {
1311 struct page *page;
1312 int page_index, page_offset, csize;
1313
1314 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1315 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1316 page = bp->b_pages[page_index];
1317 csize = min_t(size_t, PAGE_SIZE - page_offset,
1318 BBTOB(bp->b_io_length) - boff);
1319
1320 ASSERT((csize + page_offset) <= PAGE_SIZE);
1321
1322 switch (mode) {
1323 case XBRW_ZERO:
1324 memset(page_address(page) + page_offset, 0, csize);
1325 break;
1326 case XBRW_READ:
1327 memcpy(data, page_address(page) + page_offset, csize);
1328 break;
1329 case XBRW_WRITE:
1330 memcpy(page_address(page) + page_offset, data, csize);
1331 }
1332
1333 boff += csize;
1334 data += csize;
1335 }
1336 }
1337
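/*
 * A small, hedged example of xfs_buf_iomove(): zero the first 512 bytes of
 * a buffer without caring how its pages are laid out (the offset and length
 * are arbitrary illustration values):
 *
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);
 *
 * XBRW_READ copies out of the buffer into "data", XBRW_WRITE copies from
 * "data" into the buffer.
 */
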
1338 /*
1339 * Handling of buffer targets (buftargs).
1340 */
1341
1342 /*
1343 * Wait for any bufs with callbacks that have been submitted but have not yet
1344 * returned. These buffers will have an elevated hold count, so wait on those
1345 * while freeing all the buffers only held by the LRU.
1346 */
1347 void
1348 xfs_wait_buftarg(
1349 struct xfs_buftarg *btp)
1350 {
1351 struct xfs_buf *bp;
1352
1353 restart:
1354 spin_lock(&btp->bt_lru_lock);
1355 while (!list_empty(&btp->bt_lru)) {
1356 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1357 if (atomic_read(&bp->b_hold) > 1) {
1358 spin_unlock(&btp->bt_lru_lock);
1359 delay(100);
1360 goto restart;
1361 }
1362 /*
1363 * clear the LRU reference count so the buffer doesn't get
1364 * ignored in xfs_buf_rele().
1365 */
1366 atomic_set(&bp->b_lru_ref, 0);
1367 spin_unlock(&btp->bt_lru_lock);
1368 xfs_buf_rele(bp);
1369 spin_lock(&btp->bt_lru_lock);
1370 }
1371 spin_unlock(&btp->bt_lru_lock);
1372 }
1373
1374 int
1375 xfs_buftarg_shrink(
1376 struct shrinker *shrink,
1377 struct shrink_control *sc)
1378 {
1379 struct xfs_buftarg *btp = container_of(shrink,
1380 struct xfs_buftarg, bt_shrinker);
1381 struct xfs_buf *bp;
1382 int nr_to_scan = sc->nr_to_scan;
1383 LIST_HEAD(dispose);
1384
1385 if (!nr_to_scan)
1386 return btp->bt_lru_nr;
1387
1388 spin_lock(&btp->bt_lru_lock);
1389 while (!list_empty(&btp->bt_lru)) {
1390 if (nr_to_scan-- <= 0)
1391 break;
1392
1393 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1394
1395 /*
1396 * Decrement the b_lru_ref count unless the value is already
1397 * zero. If the value is already zero, we need to reclaim the
1398 * buffer, otherwise it gets another trip through the LRU.
1399 */
1400 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1401 list_move_tail(&bp->b_lru, &btp->bt_lru);
1402 continue;
1403 }
1404
1405 /*
1406 * remove the buffer from the LRU now to avoid needing another
1407 * lock round trip inside xfs_buf_rele().
1408 */
1409 list_move(&bp->b_lru, &dispose);
1410 btp->bt_lru_nr--;
1411 }
1412 spin_unlock(&btp->bt_lru_lock);
1413
1414 while (!list_empty(&dispose)) {
1415 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1416 list_del_init(&bp->b_lru);
1417 xfs_buf_rele(bp);
1418 }
1419
1420 return btp->bt_lru_nr;
1421 }
1422
1423 void
1424 xfs_free_buftarg(
1425 struct xfs_mount *mp,
1426 struct xfs_buftarg *btp)
1427 {
1428 unregister_shrinker(&btp->bt_shrinker);
1429
1430 if (mp->m_flags & XFS_MOUNT_BARRIER)
1431 xfs_blkdev_issue_flush(btp);
1432
1433 kmem_free(btp);
1434 }
1435
1436 STATIC int
1437 xfs_setsize_buftarg_flags(
1438 xfs_buftarg_t *btp,
1439 unsigned int blocksize,
1440 unsigned int sectorsize,
1441 int verbose)
1442 {
1443 btp->bt_bsize = blocksize;
1444 btp->bt_sshift = ffs(sectorsize) - 1;
1445 btp->bt_smask = sectorsize - 1;
1446
1447 if (set_blocksize(btp->bt_bdev, sectorsize)) {
1448 char name[BDEVNAME_SIZE];
1449
1450 bdevname(btp->bt_bdev, name);
1451
1452 xfs_warn(btp->bt_mount,
1453 "Cannot set_blocksize to %u on device %s\n",
1454 sectorsize, name);
1455 return EINVAL;
1456 }
1457
1458 return 0;
1459 }
1460
1461 /*
1462 * When allocating the initial buffer target we have not yet
1463 * read in the superblock, so we don't know what sector sizes
1464 * are being used at this early stage. Play safe.
1465 */
1466 STATIC int
1467 xfs_setsize_buftarg_early(
1468 xfs_buftarg_t *btp,
1469 struct block_device *bdev)
1470 {
1471 return xfs_setsize_buftarg_flags(btp,
1472 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1473 }
1474
1475 int
1476 xfs_setsize_buftarg(
1477 xfs_buftarg_t *btp,
1478 unsigned int blocksize,
1479 unsigned int sectorsize)
1480 {
1481 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1482 }
1483
1484 xfs_buftarg_t *
1485 xfs_alloc_buftarg(
1486 struct xfs_mount *mp,
1487 struct block_device *bdev,
1488 int external,
1489 const char *fsname)
1490 {
1491 xfs_buftarg_t *btp;
1492
1493 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1494
1495 btp->bt_mount = mp;
1496 btp->bt_dev = bdev->bd_dev;
1497 btp->bt_bdev = bdev;
1498 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1499 if (!btp->bt_bdi)
1500 goto error;
1501
1502 INIT_LIST_HEAD(&btp->bt_lru);
1503 spin_lock_init(&btp->bt_lru_lock);
1504 if (xfs_setsize_buftarg_early(btp, bdev))
1505 goto error;
1506 btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1507 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1508 register_shrinker(&btp->bt_shrinker);
1509 return btp;
1510
1511 error:
1512 kmem_free(btp);
1513 return NULL;
1514 }
1515
1516 /*
1517 * Add a buffer to the delayed write list.
1518 *
1519 * This queues a buffer for writeout if it hasn't already been. Note that
1520 * neither this routine nor the buffer list submission functions perform
1521 * any internal synchronization. It is expected that the lists are thread-local
1522 * to the callers.
1523 *
1524 * Returns true if we queued up the buffer, or false if it already had
1525 * been on the buffer list.
1526 */
1527 bool
1528 xfs_buf_delwri_queue(
1529 struct xfs_buf *bp,
1530 struct list_head *list)
1531 {
1532 ASSERT(xfs_buf_islocked(bp));
1533 ASSERT(!(bp->b_flags & XBF_READ));
1534
1535 /*
1536	 * If the buffer is already marked delwri it is already queued up
1537	 * by someone else for immediate writeout. Just ignore it in that
1538 * case.
1539 */
1540 if (bp->b_flags & _XBF_DELWRI_Q) {
1541 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1542 return false;
1543 }
1544
1545 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1546
1547 /*
1548 * If a buffer gets written out synchronously or marked stale while it
1549 * is on a delwri list we lazily remove it. To do this, the other party
1550 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1551 * It remains referenced and on the list. In a rare corner case it
1552	 * might get re-added to a delwri list after the synchronous writeout, in
1553	 * which case we just need to re-add the flag here.
1554 */
1555 bp->b_flags |= _XBF_DELWRI_Q;
1556 if (list_empty(&bp->b_list)) {
1557 atomic_inc(&bp->b_hold);
1558 list_add_tail(&bp->b_list, list);
1559 }
1560
1561 return true;
1562 }
1563
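/*
 * A hedged sketch of the intended delwri pattern: the caller keeps a
 * private list, queues buffers while it holds them locked, and submits the
 * whole list later (see xfs_buf_delwri_submit() below). The list name is
 * illustrative:
 *
 *	LIST_HEAD(my_delwri_list);
 *
 *	xfs_buf_delwri_queue(bp, &my_delwri_list);	list takes its own hold
 *	xfs_buf_relse(bp);				drop the caller's lock and hold
 *	... queue more buffers ...
 *	error = xfs_buf_delwri_submit(&my_delwri_list);
 */
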
1564 /*
1565 * The compare function is more complex than it needs to be because
1566 * the return value is only 32 bits and we are doing comparisons
1567 * on 64 bit values.
1568 */
1569 static int
1570 xfs_buf_cmp(
1571 void *priv,
1572 struct list_head *a,
1573 struct list_head *b)
1574 {
1575 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1576 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1577 xfs_daddr_t diff;
1578
1579 diff = ap->b_bn - bp->b_bn;
1580 if (diff < 0)
1581 return -1;
1582 if (diff > 0)
1583 return 1;
1584 return 0;
1585 }
1586
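/*
 * Worked example of why xfs_buf_cmp() cannot simply return the raw
 * difference: with 64 bit daddrs, ap->b_bn - bp->b_bn could be
 * 0x100000000, which truncated to a 32 bit int compares as 0 and would
 * wrongly report the two buffers as equal. Testing the sign of the full
 * 64 bit difference avoids that.
 */
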
1587 static int
1588 __xfs_buf_delwri_submit(
1589 struct list_head *buffer_list,
1590 struct list_head *io_list,
1591 bool wait)
1592 {
1593 struct blk_plug plug;
1594 struct xfs_buf *bp, *n;
1595 int pinned = 0;
1596
1597 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1598 if (!wait) {
1599 if (xfs_buf_ispinned(bp)) {
1600 pinned++;
1601 continue;
1602 }
1603 if (!xfs_buf_trylock(bp))
1604 continue;
1605 } else {
1606 xfs_buf_lock(bp);
1607 }
1608
1609 /*
1610 * Someone else might have written the buffer synchronously or
1611 * marked it stale in the meantime. In that case only the
1612 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1613 * reference and remove it from the list here.
1614 */
1615 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1616 list_del_init(&bp->b_list);
1617 xfs_buf_relse(bp);
1618 continue;
1619 }
1620
1621 list_move_tail(&bp->b_list, io_list);
1622 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1623 }
1624
1625 list_sort(NULL, io_list, xfs_buf_cmp);
1626
1627 blk_start_plug(&plug);
1628 list_for_each_entry_safe(bp, n, io_list, b_list) {
1629 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1630 bp->b_flags |= XBF_WRITE;
1631
1632 if (!wait) {
1633 bp->b_flags |= XBF_ASYNC;
1634 list_del_init(&bp->b_list);
1635 }
1636 xfs_bdstrat_cb(bp);
1637 }
1638 blk_finish_plug(&plug);
1639
1640 return pinned;
1641 }
1642
1643 /*
1644 * Write out a buffer list asynchronously.
1645 *
1646 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1647 * out and not wait for I/O completion on any of the buffers. This interface
1648 * is only safely usable for callers that can track I/O completion by higher
1649 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1650 * function.
1651 */
1652 int
1653 xfs_buf_delwri_submit_nowait(
1654 struct list_head *buffer_list)
1655 {
1656 LIST_HEAD (io_list);
1657 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1658 }
1659
1660 /*
1661 * Write out a buffer list synchronously.
1662 *
1663 * This will take the @buffer_list, write all buffers out and wait for I/O
1664 * completion on all of the buffers. @buffer_list is consumed by the function,
1665 * so callers must have some other way of tracking buffers if they require such
1666 * functionality.
1667 */
1668 int
1669 xfs_buf_delwri_submit(
1670 struct list_head *buffer_list)
1671 {
1672 LIST_HEAD (io_list);
1673 int error = 0, error2;
1674 struct xfs_buf *bp;
1675
1676 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1677
1678 /* Wait for IO to complete. */
1679 while (!list_empty(&io_list)) {
1680 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
1681
1682 list_del_init(&bp->b_list);
1683 error2 = xfs_buf_iowait(bp);
1684 xfs_buf_relse(bp);
1685 if (!error)
1686 error = error2;
1687 }
1688
1689 return error;
1690 }
1691
1692 int __init
1693 xfs_buf_init(void)
1694 {
1695 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1696 KM_ZONE_HWALIGN, NULL);
1697 if (!xfs_buf_zone)
1698 goto out;
1699
1700 xfslogd_workqueue = alloc_workqueue("xfslogd",
1701 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1702 if (!xfslogd_workqueue)
1703 goto out_free_buf_zone;
1704
1705 return 0;
1706
1707 out_free_buf_zone:
1708 kmem_zone_destroy(xfs_buf_zone);
1709 out:
1710 return -ENOMEM;
1711 }
1712
1713 void
1714 xfs_buf_terminate(void)
1715 {
1716 destroy_workqueue(xfslogd_workqueue);
1717 kmem_zone_destroy(xfs_buf_zone);
1718 }