xfs: don't defer metadata allocation to the workqueue
[deliverable/linux.git] / fs / xfs / xfs_buf.c
1da177e4 1/*
f07c2250 2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
7b718769 3 * All Rights Reserved.
1da177e4 4 *
7b718769
NS
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
1da177e4
LT
7 * published by the Free Software Foundation.
8 *
7b718769
NS
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
1da177e4 13 *
7b718769
NS
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 17 */
93c189c1 18#include "xfs.h"
1da177e4
LT
19#include <linux/stddef.h>
20#include <linux/errno.h>
5a0e3ad6 21#include <linux/gfp.h>
1da177e4
LT
22#include <linux/pagemap.h>
23#include <linux/init.h>
24#include <linux/vmalloc.h>
25#include <linux/bio.h>
26#include <linux/sysctl.h>
27#include <linux/proc_fs.h>
28#include <linux/workqueue.h>
29#include <linux/percpu.h>
30#include <linux/blkdev.h>
31#include <linux/hash.h>
4df08c52 32#include <linux/kthread.h>
b20a3503 33#include <linux/migrate.h>
3fcfab16 34#include <linux/backing-dev.h>
7dfb7103 35#include <linux/freezer.h>
1da177e4 36
b7963133 37#include "xfs_sb.h"
ed3b4d6c 38#include "xfs_log.h"
b7963133 39#include "xfs_ag.h"
b7963133 40#include "xfs_mount.h"
0b1b213f 41#include "xfs_trace.h"
b7963133 42
7989cb8e 43static kmem_zone_t *xfs_buf_zone;
23ea4032 44
7989cb8e 45static struct workqueue_struct *xfslogd_workqueue;
1da177e4 46
ce8e922c
NS
47#ifdef XFS_BUF_LOCK_TRACKING
48# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
49# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
50# define XB_GET_OWNER(bp) ((bp)->b_last_holder)
1da177e4 51#else
ce8e922c
NS
52# define XB_SET_OWNER(bp) do { } while (0)
53# define XB_CLEAR_OWNER(bp) do { } while (0)
54# define XB_GET_OWNER(bp) do { } while (0)
1da177e4
LT
55#endif
56
ce8e922c 57#define xb_to_gfp(flags) \
aa5c158e 58 ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
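/*
 * Worked example of the mapping above (illustrative note, not part of the
 * original source): a read-ahead buffer allocates with
 * (__GFP_NORETRY | __GFP_NOWARN), so it gives up quickly under memory
 * pressure rather than stalling, while every other buffer allocates with
 * (GFP_NOFS | __GFP_NOWARN), which may block but never recurses back into
 * filesystem reclaim.
 */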
1da177e4 59
1da177e4 60
73c77e2c
JB
61static inline int
62xfs_buf_is_vmapped(
63 struct xfs_buf *bp)
64{
65 /*
66 * Return true if the buffer is vmapped.
67 *
611c9946
DC
68 * b_addr is null if the buffer is not mapped, but the code is clever
 69 * enough to know it doesn't have to map a single-page buffer, so the
 70 * check has to test both b_addr and bp->b_page_count > 1.
73c77e2c 71 */
611c9946 72 return bp->b_addr && bp->b_page_count > 1;
73c77e2c
JB
73}
74
75static inline int
76xfs_buf_vmap_len(
77 struct xfs_buf *bp)
78{
79 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
80}
81
1da177e4 82/*
430cbeb8
DC
83 * xfs_buf_lru_add - add a buffer to the LRU.
84 *
85 * The LRU takes a new reference to the buffer so that it will only be freed
86 * once the shrinker takes the buffer off the LRU.
87 */
88STATIC void
89xfs_buf_lru_add(
90 struct xfs_buf *bp)
91{
92 struct xfs_buftarg *btp = bp->b_target;
93
94 spin_lock(&btp->bt_lru_lock);
95 if (list_empty(&bp->b_lru)) {
96 atomic_inc(&bp->b_hold);
97 list_add_tail(&bp->b_lru, &btp->bt_lru);
98 btp->bt_lru_nr++;
99 }
100 spin_unlock(&btp->bt_lru_lock);
101}
102
103/*
104 * xfs_buf_lru_del - remove a buffer from the LRU
105 *
 106 * The unlocked check is safe here because it only occurs when there are no
 107 * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
108 * to optimise the shrinker removing the buffer from the LRU and calling
25985edc 109 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
430cbeb8 110 * bt_lru_lock.
1da177e4 111 */
430cbeb8
DC
112STATIC void
113xfs_buf_lru_del(
114 struct xfs_buf *bp)
115{
116 struct xfs_buftarg *btp = bp->b_target;
117
118 if (list_empty(&bp->b_lru))
119 return;
120
121 spin_lock(&btp->bt_lru_lock);
122 if (!list_empty(&bp->b_lru)) {
123 list_del_init(&bp->b_lru);
124 btp->bt_lru_nr--;
125 }
126 spin_unlock(&btp->bt_lru_lock);
127}
128
129/*
130 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
131 * b_lru_ref count so that the buffer is freed immediately when the buffer
132 * reference count falls to zero. If the buffer is already on the LRU, we need
133 * to remove the reference that LRU holds on the buffer.
134 *
135 * This prevents build-up of stale buffers on the LRU.
136 */
137void
138xfs_buf_stale(
139 struct xfs_buf *bp)
140{
43ff2122
CH
141 ASSERT(xfs_buf_islocked(bp));
142
430cbeb8 143 bp->b_flags |= XBF_STALE;
43ff2122
CH
144
145 /*
146 * Clear the delwri status so that a delwri queue walker will not
147 * flush this buffer to disk now that it is stale. The delwri queue has
148 * a reference to the buffer, so this is safe to do.
149 */
150 bp->b_flags &= ~_XBF_DELWRI_Q;
151
430cbeb8
DC
152 atomic_set(&(bp)->b_lru_ref, 0);
153 if (!list_empty(&bp->b_lru)) {
154 struct xfs_buftarg *btp = bp->b_target;
155
156 spin_lock(&btp->bt_lru_lock);
157 if (!list_empty(&bp->b_lru)) {
158 list_del_init(&bp->b_lru);
159 btp->bt_lru_nr--;
160 atomic_dec(&bp->b_hold);
161 }
162 spin_unlock(&btp->bt_lru_lock);
163 }
164 ASSERT(atomic_read(&bp->b_hold) >= 1);
165}
1da177e4 166
3e85c868
DC
167static int
168xfs_buf_get_maps(
169 struct xfs_buf *bp,
170 int map_count)
171{
172 ASSERT(bp->b_maps == NULL);
173 bp->b_map_count = map_count;
174
175 if (map_count == 1) {
176 bp->b_maps = &bp->b_map;
177 return 0;
178 }
179
180 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
181 KM_NOFS);
182 if (!bp->b_maps)
183 return ENOMEM;
184 return 0;
185}
186
187/*
188 * Frees b_pages if it was allocated.
189 */
190static void
191xfs_buf_free_maps(
192 struct xfs_buf *bp)
193{
194 if (bp->b_maps != &bp->b_map) {
195 kmem_free(bp->b_maps);
196 bp->b_maps = NULL;
197 }
198}
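/*
 * Illustrative sketch (not part of the original file): how a caller might
 * describe a discontiguous buffer built from two extents before handing the
 * map array to the lookup/allocation interfaces later in this file. The
 * block numbers and lengths are made-up placeholder values.
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	struct xfs_buf *bp = xfs_buf_get_map(target, map, 2, 0);
 */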
199
4347b9d7 200struct xfs_buf *
3e85c868 201_xfs_buf_alloc(
4347b9d7 202 struct xfs_buftarg *target,
3e85c868
DC
203 struct xfs_buf_map *map,
204 int nmaps,
ce8e922c 205 xfs_buf_flags_t flags)
1da177e4 206{
4347b9d7 207 struct xfs_buf *bp;
3e85c868
DC
208 int error;
209 int i;
4347b9d7 210
aa5c158e 211 bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
4347b9d7
CH
212 if (unlikely(!bp))
213 return NULL;
214
1da177e4 215 /*
12bcb3f7
DC
216 * We don't want certain flags to appear in b_flags unless they are
217 * specifically set by later operations on the buffer.
1da177e4 218 */
611c9946 219 flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
ce8e922c 220
ce8e922c 221 atomic_set(&bp->b_hold, 1);
430cbeb8 222 atomic_set(&bp->b_lru_ref, 1);
b4dd330b 223 init_completion(&bp->b_iowait);
430cbeb8 224 INIT_LIST_HEAD(&bp->b_lru);
ce8e922c 225 INIT_LIST_HEAD(&bp->b_list);
74f75a0c 226 RB_CLEAR_NODE(&bp->b_rbnode);
a731cd11 227 sema_init(&bp->b_sema, 0); /* held, no waiters */
ce8e922c
NS
228 XB_SET_OWNER(bp);
229 bp->b_target = target;
3e85c868 230 bp->b_flags = flags;
de1cbee4 231
1da177e4 232 /*
aa0e8833
DC
233 * Set length and io_length to the same value initially.
234 * I/O routines should use io_length, which will be the same in
1da177e4
LT
235 * most cases but may be reset (e.g. XFS recovery).
236 */
3e85c868
DC
237 error = xfs_buf_get_maps(bp, nmaps);
238 if (error) {
239 kmem_zone_free(xfs_buf_zone, bp);
240 return NULL;
241 }
242
243 bp->b_bn = map[0].bm_bn;
244 bp->b_length = 0;
245 for (i = 0; i < nmaps; i++) {
246 bp->b_maps[i].bm_bn = map[i].bm_bn;
247 bp->b_maps[i].bm_len = map[i].bm_len;
248 bp->b_length += map[i].bm_len;
249 }
250 bp->b_io_length = bp->b_length;
251
ce8e922c
NS
252 atomic_set(&bp->b_pin_count, 0);
253 init_waitqueue_head(&bp->b_waiters);
254
255 XFS_STATS_INC(xb_create);
0b1b213f 256 trace_xfs_buf_init(bp, _RET_IP_);
4347b9d7
CH
257
258 return bp;
1da177e4
LT
259}
260
261/*
ce8e922c
NS
262 * Allocate a page array capable of holding a specified number
263 * of pages, and point the page buf at it.
1da177e4
LT
264 */
265STATIC int
ce8e922c
NS
266_xfs_buf_get_pages(
267 xfs_buf_t *bp,
1da177e4 268 int page_count,
ce8e922c 269 xfs_buf_flags_t flags)
1da177e4
LT
270{
271 /* Make sure that we have a page list */
ce8e922c 272 if (bp->b_pages == NULL) {
ce8e922c
NS
273 bp->b_page_count = page_count;
274 if (page_count <= XB_PAGES) {
275 bp->b_pages = bp->b_page_array;
1da177e4 276 } else {
ce8e922c 277 bp->b_pages = kmem_alloc(sizeof(struct page *) *
aa5c158e 278 page_count, KM_NOFS);
ce8e922c 279 if (bp->b_pages == NULL)
1da177e4
LT
280 return -ENOMEM;
281 }
ce8e922c 282 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
1da177e4
LT
283 }
284 return 0;
285}
286
287/*
ce8e922c 288 * Frees b_pages if it was allocated.
1da177e4
LT
289 */
290STATIC void
ce8e922c 291_xfs_buf_free_pages(
1da177e4
LT
292 xfs_buf_t *bp)
293{
ce8e922c 294 if (bp->b_pages != bp->b_page_array) {
f0e2d93c 295 kmem_free(bp->b_pages);
3fc98b1a 296 bp->b_pages = NULL;
1da177e4
LT
297 }
298}
299
300/*
301 * Releases the specified buffer.
302 *
303 * The modification state of any associated pages is left unchanged.
ce8e922c 304 * The buffer must not be on any hash - use xfs_buf_rele instead for
1da177e4
LT
305 * hashed and refcounted buffers
306 */
307void
ce8e922c 308xfs_buf_free(
1da177e4
LT
309 xfs_buf_t *bp)
310{
0b1b213f 311 trace_xfs_buf_free(bp, _RET_IP_);
1da177e4 312
430cbeb8
DC
313 ASSERT(list_empty(&bp->b_lru));
314
0e6e847f 315 if (bp->b_flags & _XBF_PAGES) {
1da177e4
LT
316 uint i;
317
73c77e2c 318 if (xfs_buf_is_vmapped(bp))
8a262e57
AE
319 vm_unmap_ram(bp->b_addr - bp->b_offset,
320 bp->b_page_count);
1da177e4 321
948ecdb4
NS
322 for (i = 0; i < bp->b_page_count; i++) {
323 struct page *page = bp->b_pages[i];
324
0e6e847f 325 __free_page(page);
948ecdb4 326 }
0e6e847f
DC
327 } else if (bp->b_flags & _XBF_KMEM)
328 kmem_free(bp->b_addr);
3fc98b1a 329 _xfs_buf_free_pages(bp);
3e85c868 330 xfs_buf_free_maps(bp);
4347b9d7 331 kmem_zone_free(xfs_buf_zone, bp);
1da177e4
LT
332}
333
334/*
0e6e847f 335 * Allocates all the pages for the buffer in question and builds its page list.
1da177e4
LT
336 */
337STATIC int
0e6e847f 338xfs_buf_allocate_memory(
1da177e4
LT
339 xfs_buf_t *bp,
340 uint flags)
341{
aa0e8833 342 size_t size;
1da177e4 343 size_t nbytes, offset;
ce8e922c 344 gfp_t gfp_mask = xb_to_gfp(flags);
1da177e4 345 unsigned short page_count, i;
795cac72 346 xfs_off_t start, end;
1da177e4
LT
347 int error;
348
0e6e847f
DC
349 /*
350 * for buffers that are contained within a single page, just allocate
351 * the memory from the heap - there's no need for the complexity of
352 * page arrays to keep allocation down to order 0.
353 */
795cac72
DC
354 size = BBTOB(bp->b_length);
355 if (size < PAGE_SIZE) {
aa5c158e 356 bp->b_addr = kmem_alloc(size, KM_NOFS);
0e6e847f
DC
357 if (!bp->b_addr) {
358 /* low memory - use alloc_page loop instead */
359 goto use_alloc_page;
360 }
361
795cac72 362 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
0e6e847f
DC
363 ((unsigned long)bp->b_addr & PAGE_MASK)) {
364 /* b_addr spans two pages - use alloc_page instead */
365 kmem_free(bp->b_addr);
366 bp->b_addr = NULL;
367 goto use_alloc_page;
368 }
369 bp->b_offset = offset_in_page(bp->b_addr);
370 bp->b_pages = bp->b_page_array;
371 bp->b_pages[0] = virt_to_page(bp->b_addr);
372 bp->b_page_count = 1;
611c9946 373 bp->b_flags |= _XBF_KMEM;
0e6e847f
DC
374 return 0;
375 }
376
377use_alloc_page:
cbb7baab
DC
378 start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
379 end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
380 >> PAGE_SHIFT;
795cac72 381 page_count = end - start;
ce8e922c 382 error = _xfs_buf_get_pages(bp, page_count, flags);
1da177e4
LT
383 if (unlikely(error))
384 return error;
1da177e4 385
ce8e922c 386 offset = bp->b_offset;
0e6e847f 387 bp->b_flags |= _XBF_PAGES;
1da177e4 388
ce8e922c 389 for (i = 0; i < bp->b_page_count; i++) {
1da177e4
LT
390 struct page *page;
391 uint retries = 0;
0e6e847f
DC
392retry:
393 page = alloc_page(gfp_mask);
1da177e4 394 if (unlikely(page == NULL)) {
ce8e922c
NS
395 if (flags & XBF_READ_AHEAD) {
396 bp->b_page_count = i;
0e6e847f
DC
397 error = ENOMEM;
398 goto out_free_pages;
1da177e4
LT
399 }
400
401 /*
402 * This could deadlock.
403 *
404 * But until all the XFS lowlevel code is revamped to
405 * handle buffer allocation failures we can't do much.
406 */
407 if (!(++retries % 100))
4f10700a
DC
408 xfs_err(NULL,
409 "possible memory allocation deadlock in %s (mode:0x%x)",
34a622b2 410 __func__, gfp_mask);
1da177e4 411
ce8e922c 412 XFS_STATS_INC(xb_page_retries);
8aa7e847 413 congestion_wait(BLK_RW_ASYNC, HZ/50);
1da177e4
LT
414 goto retry;
415 }
416
ce8e922c 417 XFS_STATS_INC(xb_page_found);
1da177e4 418
0e6e847f 419 nbytes = min_t(size_t, size, PAGE_SIZE - offset);
1da177e4 420 size -= nbytes;
ce8e922c 421 bp->b_pages[i] = page;
1da177e4
LT
422 offset = 0;
423 }
0e6e847f 424 return 0;
1da177e4 425
0e6e847f
DC
426out_free_pages:
427 for (i = 0; i < bp->b_page_count; i++)
428 __free_page(bp->b_pages[i]);
1da177e4
LT
429 return error;
430}
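/*
 * Worked example of the allocation policy above (illustrative): a single
 * 512-byte sector buffer is smaller than PAGE_SIZE, so it is normally served
 * from the heap and flagged _XBF_KMEM (unless the heap chunk happens to
 * straddle a page boundary, in which case it falls back); a buffer spanning
 * several pages always goes through the alloc_page() loop and is flagged
 * _XBF_PAGES.
 */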
431
432/*
25985edc 433 * Map buffer into kernel address-space if necessary.
1da177e4
LT
434 */
435STATIC int
ce8e922c 436_xfs_buf_map_pages(
1da177e4
LT
437 xfs_buf_t *bp,
438 uint flags)
439{
0e6e847f 440 ASSERT(bp->b_flags & _XBF_PAGES);
ce8e922c 441 if (bp->b_page_count == 1) {
0e6e847f 442 /* A single page buffer is always mappable */
ce8e922c 443 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
611c9946
DC
444 } else if (flags & XBF_UNMAPPED) {
445 bp->b_addr = NULL;
446 } else {
a19fb380
DC
447 int retried = 0;
448
449 do {
450 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
451 -1, PAGE_KERNEL);
452 if (bp->b_addr)
453 break;
454 vm_unmap_aliases();
455 } while (retried++ <= 1);
456
457 if (!bp->b_addr)
1da177e4 458 return -ENOMEM;
ce8e922c 459 bp->b_addr += bp->b_offset;
1da177e4
LT
460 }
461
462 return 0;
463}
464
465/*
466 * Finding and Reading Buffers
467 */
468
469/*
ce8e922c 470 * Look up, and create if absent, a lockable buffer for
1da177e4 471 * a given range of an inode. The buffer is returned
eabbaf11 472 * locked. No I/O is implied by this call.
1da177e4
LT
473 */
474xfs_buf_t *
ce8e922c 475_xfs_buf_find(
e70b73f8 476 struct xfs_buftarg *btp,
3e85c868
DC
477 struct xfs_buf_map *map,
478 int nmaps,
ce8e922c
NS
479 xfs_buf_flags_t flags,
480 xfs_buf_t *new_bp)
1da177e4 481{
e70b73f8 482 size_t numbytes;
74f75a0c
DC
483 struct xfs_perag *pag;
484 struct rb_node **rbp;
485 struct rb_node *parent;
486 xfs_buf_t *bp;
3e85c868
DC
487 xfs_daddr_t blkno = map[0].bm_bn;
488 int numblks = 0;
489 int i;
1da177e4 490
3e85c868
DC
491 for (i = 0; i < nmaps; i++)
492 numblks += map[i].bm_len;
e70b73f8 493 numbytes = BBTOB(numblks);
1da177e4
LT
494
495 /* Check for IOs smaller than the sector size / not sector aligned */
e70b73f8 496 ASSERT(!(numbytes < (1 << btp->bt_sshift)));
de1cbee4 497 ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask));
1da177e4 498
74f75a0c
DC
499 /* get tree root */
500 pag = xfs_perag_get(btp->bt_mount,
e70b73f8 501 xfs_daddr_to_agno(btp->bt_mount, blkno));
74f75a0c
DC
502
503 /* walk tree */
504 spin_lock(&pag->pag_buf_lock);
505 rbp = &pag->pag_buf_tree.rb_node;
506 parent = NULL;
507 bp = NULL;
508 while (*rbp) {
509 parent = *rbp;
510 bp = rb_entry(parent, struct xfs_buf, b_rbnode);
511
de1cbee4 512 if (blkno < bp->b_bn)
74f75a0c 513 rbp = &(*rbp)->rb_left;
de1cbee4 514 else if (blkno > bp->b_bn)
74f75a0c
DC
515 rbp = &(*rbp)->rb_right;
516 else {
517 /*
de1cbee4 518 * found a block number match. If the range doesn't
74f75a0c
DC
519 * match, the only way this is allowed is if the buffer
520 * in the cache is stale and the transaction that made
521 * it stale has not yet committed. i.e. we are
522 * reallocating a busy extent. Skip this buffer and
523 * continue searching to the right for an exact match.
524 */
4e94b71b 525 if (bp->b_length != numblks) {
74f75a0c
DC
526 ASSERT(bp->b_flags & XBF_STALE);
527 rbp = &(*rbp)->rb_right;
528 continue;
529 }
ce8e922c 530 atomic_inc(&bp->b_hold);
1da177e4
LT
531 goto found;
532 }
533 }
534
535 /* No match found */
ce8e922c 536 if (new_bp) {
74f75a0c
DC
537 rb_link_node(&new_bp->b_rbnode, parent, rbp);
538 rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
539 /* the buffer keeps the perag reference until it is freed */
540 new_bp->b_pag = pag;
541 spin_unlock(&pag->pag_buf_lock);
1da177e4 542 } else {
ce8e922c 543 XFS_STATS_INC(xb_miss_locked);
74f75a0c
DC
544 spin_unlock(&pag->pag_buf_lock);
545 xfs_perag_put(pag);
1da177e4 546 }
ce8e922c 547 return new_bp;
1da177e4
LT
548
549found:
74f75a0c
DC
550 spin_unlock(&pag->pag_buf_lock);
551 xfs_perag_put(pag);
1da177e4 552
0c842ad4
CH
553 if (!xfs_buf_trylock(bp)) {
554 if (flags & XBF_TRYLOCK) {
ce8e922c
NS
555 xfs_buf_rele(bp);
556 XFS_STATS_INC(xb_busy_locked);
557 return NULL;
1da177e4 558 }
0c842ad4
CH
559 xfs_buf_lock(bp);
560 XFS_STATS_INC(xb_get_locked_waited);
1da177e4
LT
561 }
562
0e6e847f
DC
563 /*
564 * if the buffer is stale, clear all the external state associated with
565 * it. We need to keep flags such as how we allocated the buffer memory
566 * intact here.
567 */
ce8e922c
NS
568 if (bp->b_flags & XBF_STALE) {
569 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
611c9946 570 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
2f926587 571 }
0b1b213f
CH
572
573 trace_xfs_buf_find(bp, flags, _RET_IP_);
ce8e922c
NS
574 XFS_STATS_INC(xb_get_locked);
575 return bp;
1da177e4
LT
576}
577
578/*
3815832a
DC
579 * Assembles a buffer covering the specified range. The code is optimised for
580 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
581 * more hits than misses.
1da177e4 582 */
3815832a 583struct xfs_buf *
6dde2707
DC
584xfs_buf_get_map(
585 struct xfs_buftarg *target,
586 struct xfs_buf_map *map,
587 int nmaps,
ce8e922c 588 xfs_buf_flags_t flags)
1da177e4 589{
3815832a
DC
590 struct xfs_buf *bp;
591 struct xfs_buf *new_bp;
0e6e847f 592 int error = 0;
1da177e4 593
6dde2707 594 bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
3815832a
DC
595 if (likely(bp))
596 goto found;
597
6dde2707 598 new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
ce8e922c 599 if (unlikely(!new_bp))
1da177e4
LT
600 return NULL;
601
fe2429b0
DC
602 error = xfs_buf_allocate_memory(new_bp, flags);
603 if (error) {
3e85c868 604 xfs_buf_free(new_bp);
fe2429b0
DC
605 return NULL;
606 }
607
6dde2707 608 bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
3815832a 609 if (!bp) {
fe2429b0 610 xfs_buf_free(new_bp);
3815832a
DC
611 return NULL;
612 }
613
fe2429b0
DC
614 if (bp != new_bp)
615 xfs_buf_free(new_bp);
1da177e4 616
3815832a 617found:
611c9946 618 if (!bp->b_addr) {
ce8e922c 619 error = _xfs_buf_map_pages(bp, flags);
1da177e4 620 if (unlikely(error)) {
4f10700a
DC
621 xfs_warn(target->bt_mount,
622 "%s: failed to map pages\n", __func__);
a8acad70
DC
623 xfs_buf_relse(bp);
624 return NULL;
1da177e4
LT
625 }
626 }
627
ce8e922c 628 XFS_STATS_INC(xb_get);
0b1b213f 629 trace_xfs_buf_get(bp, flags, _RET_IP_);
ce8e922c 630 return bp;
1da177e4
LT
631}
632
5d765b97
CH
633STATIC int
634_xfs_buf_read(
635 xfs_buf_t *bp,
636 xfs_buf_flags_t flags)
637{
43ff2122 638 ASSERT(!(flags & XBF_WRITE));
cbb7baab 639 ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
5d765b97 640
43ff2122 641 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
1d5ae5df 642 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
5d765b97 643
0e95f19a
DC
644 xfs_buf_iorequest(bp);
645 if (flags & XBF_ASYNC)
646 return 0;
ec53d1db 647 return xfs_buf_iowait(bp);
5d765b97
CH
648}
649
1da177e4 650xfs_buf_t *
6dde2707
DC
651xfs_buf_read_map(
652 struct xfs_buftarg *target,
653 struct xfs_buf_map *map,
654 int nmaps,
ce8e922c 655 xfs_buf_flags_t flags)
1da177e4 656{
6dde2707 657 struct xfs_buf *bp;
ce8e922c
NS
658
659 flags |= XBF_READ;
660
6dde2707 661 bp = xfs_buf_get_map(target, map, nmaps, flags);
ce8e922c 662 if (bp) {
0b1b213f
CH
663 trace_xfs_buf_read(bp, flags, _RET_IP_);
664
ce8e922c 665 if (!XFS_BUF_ISDONE(bp)) {
ce8e922c 666 XFS_STATS_INC(xb_get_read);
5d765b97 667 _xfs_buf_read(bp, flags);
ce8e922c 668 } else if (flags & XBF_ASYNC) {
1da177e4
LT
669 /*
670 * Read ahead call which is already satisfied,
671 * drop the buffer
672 */
a8acad70
DC
673 xfs_buf_relse(bp);
674 return NULL;
1da177e4 675 } else {
1da177e4 676 /* We do not want read in the flags */
ce8e922c 677 bp->b_flags &= ~XBF_READ;
1da177e4
LT
678 }
679 }
680
ce8e922c 681 return bp;
1da177e4
LT
682}
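/*
 * Illustrative usage sketch (not part of the original file): a synchronous
 * metadata read through the map-based interface above. 'target', 'blkno'
 * and 'numblks' are assumed to be supplied by the caller; the buffer comes
 * back locked and held, so it is released with xfs_buf_relse() when done.
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_map(target, &map, 1, 0);
 *	if (bp) {
 *		... inspect the data via xfs_buf_offset(bp, 0) ...
 *		xfs_buf_relse(bp);
 *	}
 */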
683
1da177e4 684/*
ce8e922c
NS
685 * If we are not low on memory then do the readahead in a deadlock
686 * safe manner.
1da177e4
LT
687 */
688void
6dde2707
DC
689xfs_buf_readahead_map(
690 struct xfs_buftarg *target,
691 struct xfs_buf_map *map,
692 int nmaps)
1da177e4 693{
0e6e847f 694 if (bdi_read_congested(target->bt_bdi))
1da177e4
LT
695 return;
696
6dde2707 697 xfs_buf_read_map(target, map, nmaps,
aa5c158e 698 XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
1da177e4
LT
699}
700
5adc94c2
DC
701/*
702 * Read an uncached buffer from disk. Allocates and returns a locked
703 * buffer containing the disk contents or nothing.
704 */
705struct xfs_buf *
706xfs_buf_read_uncached(
5adc94c2
DC
707 struct xfs_buftarg *target,
708 xfs_daddr_t daddr,
e70b73f8 709 size_t numblks,
5adc94c2
DC
710 int flags)
711{
712 xfs_buf_t *bp;
713 int error;
714
e70b73f8 715 bp = xfs_buf_get_uncached(target, numblks, flags);
5adc94c2
DC
716 if (!bp)
717 return NULL;
718
719 /* set up the buffer for a read IO */
3e85c868
DC
720 ASSERT(bp->b_map_count == 1);
721 bp->b_bn = daddr;
722 bp->b_maps[0].bm_bn = daddr;
cbb7baab 723 bp->b_flags |= XBF_READ;
5adc94c2 724
e70b73f8 725 xfsbdstrat(target->bt_mount, bp);
1a1a3e97 726 error = xfs_buf_iowait(bp);
0e95f19a 727 if (error) {
5adc94c2
DC
728 xfs_buf_relse(bp);
729 return NULL;
730 }
731 return bp;
1da177e4
LT
732}
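/*
 * Illustrative sketch (not part of the original file): reading a block that
 * must bypass the buffer cache, e.g. probing a device address during mount.
 * 'target' and 'daddr' are assumed to be supplied by the caller.
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read_uncached(target, daddr, 1, 0);
 *	if (!bp)
 *		... handle the error ...
 *	... inspect bp->b_addr ...
 *	xfs_buf_relse(bp);
 */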
733
44396476
DC
734/*
 735 * Return a buffer that was allocated as an empty buffer and associated with
 736 * external memory via xfs_buf_associate_memory() back to its empty state.
737 */
738void
739xfs_buf_set_empty(
740 struct xfs_buf *bp,
e70b73f8 741 size_t numblks)
44396476
DC
742{
743 if (bp->b_pages)
744 _xfs_buf_free_pages(bp);
745
746 bp->b_pages = NULL;
747 bp->b_page_count = 0;
748 bp->b_addr = NULL;
4e94b71b 749 bp->b_length = numblks;
aa0e8833 750 bp->b_io_length = numblks;
3e85c868
DC
751
752 ASSERT(bp->b_map_count == 1);
44396476 753 bp->b_bn = XFS_BUF_DADDR_NULL;
3e85c868
DC
754 bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
755 bp->b_maps[0].bm_len = bp->b_length;
44396476
DC
756}
757
1da177e4
LT
758static inline struct page *
759mem_to_page(
760 void *addr)
761{
9e2779fa 762 if ((!is_vmalloc_addr(addr))) {
1da177e4
LT
763 return virt_to_page(addr);
764 } else {
765 return vmalloc_to_page(addr);
766 }
767}
768
769int
ce8e922c
NS
770xfs_buf_associate_memory(
771 xfs_buf_t *bp,
1da177e4
LT
772 void *mem,
773 size_t len)
774{
775 int rval;
776 int i = 0;
d1afb678
LM
777 unsigned long pageaddr;
778 unsigned long offset;
779 size_t buflen;
1da177e4
LT
780 int page_count;
781
0e6e847f 782 pageaddr = (unsigned long)mem & PAGE_MASK;
d1afb678 783 offset = (unsigned long)mem - pageaddr;
0e6e847f
DC
784 buflen = PAGE_ALIGN(len + offset);
785 page_count = buflen >> PAGE_SHIFT;
1da177e4
LT
786
787 /* Free any previous set of page pointers */
ce8e922c
NS
788 if (bp->b_pages)
789 _xfs_buf_free_pages(bp);
1da177e4 790
ce8e922c
NS
791 bp->b_pages = NULL;
792 bp->b_addr = mem;
1da177e4 793
aa5c158e 794 rval = _xfs_buf_get_pages(bp, page_count, 0);
1da177e4
LT
795 if (rval)
796 return rval;
797
ce8e922c 798 bp->b_offset = offset;
d1afb678
LM
799
800 for (i = 0; i < bp->b_page_count; i++) {
801 bp->b_pages[i] = mem_to_page((void *)pageaddr);
0e6e847f 802 pageaddr += PAGE_SIZE;
1da177e4 803 }
1da177e4 804
aa0e8833 805 bp->b_io_length = BTOBB(len);
4e94b71b 806 bp->b_length = BTOBB(buflen);
1da177e4
LT
807
808 return 0;
809}
810
811xfs_buf_t *
686865f7
DC
812xfs_buf_get_uncached(
813 struct xfs_buftarg *target,
e70b73f8 814 size_t numblks,
686865f7 815 int flags)
1da177e4 816{
e70b73f8 817 unsigned long page_count;
1fa40b01 818 int error, i;
3e85c868
DC
819 struct xfs_buf *bp;
820 DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
1da177e4 821
3e85c868 822 bp = _xfs_buf_alloc(target, &map, 1, 0);
1da177e4
LT
823 if (unlikely(bp == NULL))
824 goto fail;
1da177e4 825
e70b73f8 826 page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
1fa40b01
CH
827 error = _xfs_buf_get_pages(bp, page_count, 0);
828 if (error)
1da177e4
LT
829 goto fail_free_buf;
830
1fa40b01 831 for (i = 0; i < page_count; i++) {
686865f7 832 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
1fa40b01
CH
833 if (!bp->b_pages[i])
834 goto fail_free_mem;
1da177e4 835 }
1fa40b01 836 bp->b_flags |= _XBF_PAGES;
1da177e4 837
611c9946 838 error = _xfs_buf_map_pages(bp, 0);
1fa40b01 839 if (unlikely(error)) {
4f10700a
DC
840 xfs_warn(target->bt_mount,
841 "%s: failed to map pages\n", __func__);
1da177e4 842 goto fail_free_mem;
1fa40b01 843 }
1da177e4 844
686865f7 845 trace_xfs_buf_get_uncached(bp, _RET_IP_);
1da177e4 846 return bp;
1fa40b01 847
1da177e4 848 fail_free_mem:
1fa40b01
CH
849 while (--i >= 0)
850 __free_page(bp->b_pages[i]);
ca165b88 851 _xfs_buf_free_pages(bp);
1da177e4 852 fail_free_buf:
3e85c868 853 xfs_buf_free_maps(bp);
4347b9d7 854 kmem_zone_free(xfs_buf_zone, bp);
1da177e4
LT
855 fail:
856 return NULL;
857}
858
859/*
1da177e4
LT
860 * Increment reference count on buffer, to hold the buffer concurrently
861 * with another thread which may release (free) the buffer asynchronously.
1da177e4
LT
862 * Must hold the buffer already to call this function.
863 */
864void
ce8e922c
NS
865xfs_buf_hold(
866 xfs_buf_t *bp)
1da177e4 867{
0b1b213f 868 trace_xfs_buf_hold(bp, _RET_IP_);
ce8e922c 869 atomic_inc(&bp->b_hold);
1da177e4
LT
870}
871
872/*
ce8e922c
NS
873 * Releases a hold on the specified buffer. If the
 874 * hold count is 1, calls xfs_buf_free.
1da177e4
LT
875 */
876void
ce8e922c
NS
877xfs_buf_rele(
878 xfs_buf_t *bp)
1da177e4 879{
74f75a0c 880 struct xfs_perag *pag = bp->b_pag;
1da177e4 881
0b1b213f 882 trace_xfs_buf_rele(bp, _RET_IP_);
1da177e4 883
74f75a0c 884 if (!pag) {
430cbeb8 885 ASSERT(list_empty(&bp->b_lru));
74f75a0c 886 ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
fad3aa1e
NS
887 if (atomic_dec_and_test(&bp->b_hold))
888 xfs_buf_free(bp);
889 return;
890 }
891
74f75a0c 892 ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
430cbeb8 893
3790689f 894 ASSERT(atomic_read(&bp->b_hold) > 0);
74f75a0c 895 if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
bfc60177 896 if (!(bp->b_flags & XBF_STALE) &&
430cbeb8
DC
897 atomic_read(&bp->b_lru_ref)) {
898 xfs_buf_lru_add(bp);
899 spin_unlock(&pag->pag_buf_lock);
1da177e4 900 } else {
430cbeb8 901 xfs_buf_lru_del(bp);
43ff2122 902 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
74f75a0c
DC
903 rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
904 spin_unlock(&pag->pag_buf_lock);
905 xfs_perag_put(pag);
ce8e922c 906 xfs_buf_free(bp);
1da177e4
LT
907 }
908 }
909}
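/*
 * Illustrative pairing (not part of the original file): a caller that needs
 * to keep a buffer alive across an asynchronous completion takes an extra
 * hold and drops it again when it is finished with the buffer.
 *
 *	xfs_buf_hold(bp);
 *	... hand bp off to the async context ...
 *	xfs_buf_rele(bp);
 */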
910
911
912/*
0e6e847f 913 * Lock a buffer object, if it is not already locked.
90810b9e
DC
914 *
915 * If we come across a stale, pinned, locked buffer, we know that we are
916 * being asked to lock a buffer that has been reallocated. Because it is
917 * pinned, we know that the log has not been pushed to disk and hence it
918 * will still be locked. Rather than continuing to have trylock attempts
919 * fail until someone else pushes the log, push it ourselves before
920 * returning. This means that the xfsaild will not get stuck trying
921 * to push on stale inode buffers.
1da177e4
LT
922 */
923int
0c842ad4
CH
924xfs_buf_trylock(
925 struct xfs_buf *bp)
1da177e4
LT
926{
927 int locked;
928
ce8e922c 929 locked = down_trylock(&bp->b_sema) == 0;
0b1b213f 930 if (locked)
ce8e922c 931 XB_SET_OWNER(bp);
90810b9e
DC
932 else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
933 xfs_log_force(bp->b_target->bt_mount, 0);
0b1b213f 934
0c842ad4
CH
935 trace_xfs_buf_trylock(bp, _RET_IP_);
936 return locked;
1da177e4 937}
1da177e4
LT
938
939/*
0e6e847f 940 * Lock a buffer object.
ed3b4d6c
DC
941 *
942 * If we come across a stale, pinned, locked buffer, we know that we
943 * are being asked to lock a buffer that has been reallocated. Because
944 * it is pinned, we know that the log has not been pushed to disk and
945 * hence it will still be locked. Rather than sleeping until someone
946 * else pushes the log, push it ourselves before trying to get the lock.
1da177e4 947 */
ce8e922c
NS
948void
949xfs_buf_lock(
0c842ad4 950 struct xfs_buf *bp)
1da177e4 951{
0b1b213f
CH
952 trace_xfs_buf_lock(bp, _RET_IP_);
953
ed3b4d6c 954 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
ebad861b 955 xfs_log_force(bp->b_target->bt_mount, 0);
ce8e922c
NS
956 down(&bp->b_sema);
957 XB_SET_OWNER(bp);
0b1b213f
CH
958
959 trace_xfs_buf_lock_done(bp, _RET_IP_);
1da177e4
LT
960}
961
1da177e4 962void
ce8e922c 963xfs_buf_unlock(
0c842ad4 964 struct xfs_buf *bp)
1da177e4 965{
ce8e922c
NS
966 XB_CLEAR_OWNER(bp);
967 up(&bp->b_sema);
0b1b213f
CH
968
969 trace_xfs_buf_unlock(bp, _RET_IP_);
1da177e4
LT
970}
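/*
 * Illustrative sketch (not part of the original file): the non-blocking and
 * blocking locking patterns used around the three routines above. The
 * 'flags' test mirrors what _xfs_buf_find() does for XBF_TRYLOCK callers.
 *
 *	if (!xfs_buf_trylock(bp)) {
 *		if (flags & XBF_TRYLOCK)
 *			return NULL;		caller backs off
 *		xfs_buf_lock(bp);		otherwise block for the lock
 *	}
 *	... use the buffer ...
 *	xfs_buf_unlock(bp);
 */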
971
ce8e922c
NS
972STATIC void
973xfs_buf_wait_unpin(
974 xfs_buf_t *bp)
1da177e4
LT
975{
976 DECLARE_WAITQUEUE (wait, current);
977
ce8e922c 978 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4
LT
979 return;
980
ce8e922c 981 add_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
982 for (;;) {
983 set_current_state(TASK_UNINTERRUPTIBLE);
ce8e922c 984 if (atomic_read(&bp->b_pin_count) == 0)
1da177e4 985 break;
7eaceacc 986 io_schedule();
1da177e4 987 }
ce8e922c 988 remove_wait_queue(&bp->b_waiters, &wait);
1da177e4
LT
989 set_current_state(TASK_RUNNING);
990}
991
992/*
993 * Buffer Utility Routines
994 */
995
1da177e4 996STATIC void
ce8e922c 997xfs_buf_iodone_work(
c4028958 998 struct work_struct *work)
1da177e4 999{
c4028958
DH
1000 xfs_buf_t *bp =
1001 container_of(work, xfs_buf_t, b_iodone_work);
1da177e4 1002
80f6c29d 1003 if (bp->b_iodone)
ce8e922c
NS
1004 (*(bp->b_iodone))(bp);
1005 else if (bp->b_flags & XBF_ASYNC)
1da177e4
LT
1006 xfs_buf_relse(bp);
1007}
1008
1009void
ce8e922c
NS
1010xfs_buf_ioend(
1011 xfs_buf_t *bp,
1da177e4
LT
1012 int schedule)
1013{
0b1b213f
CH
1014 trace_xfs_buf_iodone(bp, _RET_IP_);
1015
77be55a5 1016 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
ce8e922c
NS
1017 if (bp->b_error == 0)
1018 bp->b_flags |= XBF_DONE;
1da177e4 1019
ce8e922c 1020 if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
1da177e4 1021 if (schedule) {
c4028958 1022 INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
ce8e922c 1023 queue_work(xfslogd_workqueue, &bp->b_iodone_work);
1da177e4 1024 } else {
c4028958 1025 xfs_buf_iodone_work(&bp->b_iodone_work);
1da177e4
LT
1026 }
1027 } else {
b4dd330b 1028 complete(&bp->b_iowait);
1da177e4
LT
1029 }
1030}
1031
1da177e4 1032void
ce8e922c
NS
1033xfs_buf_ioerror(
1034 xfs_buf_t *bp,
1035 int error)
1da177e4
LT
1036{
1037 ASSERT(error >= 0 && error <= 0xffff);
ce8e922c 1038 bp->b_error = (unsigned short)error;
0b1b213f 1039 trace_xfs_buf_ioerror(bp, error, _RET_IP_);
1da177e4
LT
1040}
1041
901796af
CH
1042void
1043xfs_buf_ioerror_alert(
1044 struct xfs_buf *bp,
1045 const char *func)
1046{
1047 xfs_alert(bp->b_target->bt_mount,
aa0e8833
DC
1048"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
1049 (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
901796af
CH
1050}
1051
1da177e4 1052int
64e0bc7d 1053xfs_bwrite(
5d765b97 1054 struct xfs_buf *bp)
1da177e4 1055{
8c38366f 1056 int error;
1da177e4 1057
43ff2122
CH
1058 ASSERT(xfs_buf_islocked(bp));
1059
64e0bc7d 1060 bp->b_flags |= XBF_WRITE;
43ff2122 1061 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
1da177e4 1062
939d723b 1063 xfs_bdstrat_cb(bp);
1da177e4 1064
8c38366f 1065 error = xfs_buf_iowait(bp);
c2b006c1
CH
1066 if (error) {
1067 xfs_force_shutdown(bp->b_target->bt_mount,
1068 SHUTDOWN_META_IO_ERROR);
1069 }
64e0bc7d 1070 return error;
5d765b97 1071}
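/*
 * Illustrative sketch (not part of the original file): a synchronous write
 * of a cached buffer with xfs_bwrite() above. The buffer must already be
 * locked; it stays locked and held across the call, so the caller releases
 * it afterwards.
 *
 *	bp = xfs_buf_get_map(target, &map, 1, 0);	returned locked
 *	... modify the buffer contents ...
 *	error = xfs_bwrite(bp);
 *	xfs_buf_relse(bp);
 */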
1da177e4 1072
4e23471a
CH
1073/*
1074 * Called when we want to stop a buffer from getting written or read.
1a1a3e97 1075 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
4e23471a
CH
1076 * so that the proper iodone callbacks get called.
1077 */
1078STATIC int
1079xfs_bioerror(
1080 xfs_buf_t *bp)
1081{
1082#ifdef XFSERRORDEBUG
1083 ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
1084#endif
1085
1086 /*
1087 * No need to wait until the buffer is unpinned, we aren't flushing it.
1088 */
5a52c2a5 1089 xfs_buf_ioerror(bp, EIO);
4e23471a
CH
1090
1091 /*
1a1a3e97 1092 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
4e23471a
CH
1093 */
1094 XFS_BUF_UNREAD(bp);
4e23471a 1095 XFS_BUF_UNDONE(bp);
c867cb61 1096 xfs_buf_stale(bp);
4e23471a 1097
1a1a3e97 1098 xfs_buf_ioend(bp, 0);
4e23471a
CH
1099
1100 return EIO;
1101}
1102
1103/*
1104 * Same as xfs_bioerror, except that we are releasing the buffer
1a1a3e97 1105 * here ourselves, and avoiding the xfs_buf_ioend call.
4e23471a
CH
1106 * This is meant for userdata errors; metadata bufs come with
1107 * iodone functions attached, so that we can track down errors.
1108 */
1109STATIC int
1110xfs_bioerror_relse(
1111 struct xfs_buf *bp)
1112{
ed43233b 1113 int64_t fl = bp->b_flags;
4e23471a
CH
1114 /*
1115 * No need to wait until the buffer is unpinned.
1116 * We aren't flushing it.
1117 *
1118 * chunkhold expects B_DONE to be set, whether
1119 * we actually finish the I/O or not. We don't want to
1120 * change that interface.
1121 */
1122 XFS_BUF_UNREAD(bp);
4e23471a 1123 XFS_BUF_DONE(bp);
c867cb61 1124 xfs_buf_stale(bp);
cb669ca5 1125 bp->b_iodone = NULL;
0cadda1c 1126 if (!(fl & XBF_ASYNC)) {
4e23471a
CH
1127 /*
1128 * Mark b_error and B_ERROR _both_.
 1129 * Lots of chunkcache code assumes that.
1130 * There's no reason to mark error for
1131 * ASYNC buffers.
1132 */
5a52c2a5 1133 xfs_buf_ioerror(bp, EIO);
5fde0326 1134 complete(&bp->b_iowait);
4e23471a
CH
1135 } else {
1136 xfs_buf_relse(bp);
1137 }
1138
1139 return EIO;
1140}
1141
1142
1143/*
1144 * All xfs metadata buffers except log state machine buffers
1145 * get this attached as their b_bdstrat callback function.
1146 * This is so that we can catch a buffer
1147 * after prematurely unpinning it to forcibly shutdown the filesystem.
1148 */
1149int
1150xfs_bdstrat_cb(
1151 struct xfs_buf *bp)
1152{
ebad861b 1153 if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
4e23471a
CH
1154 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1155 /*
1156 * Metadata write that didn't get logged but
1157 * written delayed anyway. These aren't associated
1158 * with a transaction, and can be ignored.
1159 */
1160 if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
1161 return xfs_bioerror_relse(bp);
1162 else
1163 return xfs_bioerror(bp);
1164 }
1165
1166 xfs_buf_iorequest(bp);
1167 return 0;
1168}
1169
1170/*
1171 * Wrapper around bdstrat so that we can stop data from going to disk in case
 1172 * we are shutting down the filesystem. Typically user data goes through this
1173 * path; one of the exceptions is the superblock.
1174 */
1175void
1176xfsbdstrat(
1177 struct xfs_mount *mp,
1178 struct xfs_buf *bp)
1179{
1180 if (XFS_FORCED_SHUTDOWN(mp)) {
1181 trace_xfs_bdstrat_shut(bp, _RET_IP_);
1182 xfs_bioerror_relse(bp);
1183 return;
1184 }
1185
1186 xfs_buf_iorequest(bp);
1187}
1188
b8f82a4a 1189STATIC void
ce8e922c
NS
1190_xfs_buf_ioend(
1191 xfs_buf_t *bp,
1da177e4
LT
1192 int schedule)
1193{
0e6e847f 1194 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
ce8e922c 1195 xfs_buf_ioend(bp, schedule);
1da177e4
LT
1196}
1197
782e3b3b 1198STATIC void
ce8e922c 1199xfs_buf_bio_end_io(
1da177e4 1200 struct bio *bio,
1da177e4
LT
1201 int error)
1202{
ce8e922c 1203 xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
1da177e4 1204
cfbe5267 1205 xfs_buf_ioerror(bp, -error);
1da177e4 1206
73c77e2c
JB
1207 if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1208 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1209
ce8e922c 1210 _xfs_buf_ioend(bp, 1);
1da177e4 1211 bio_put(bio);
1da177e4
LT
1212}
1213
3e85c868
DC
1214static void
1215xfs_buf_ioapply_map(
1216 struct xfs_buf *bp,
1217 int map,
1218 int *buf_offset,
1219 int *count,
1220 int rw)
1da177e4 1221{
3e85c868
DC
1222 int page_index;
1223 int total_nr_pages = bp->b_page_count;
1224 int nr_pages;
1225 struct bio *bio;
1226 sector_t sector = bp->b_maps[map].bm_bn;
1227 int size;
1228 int offset;
1da177e4 1229
ce8e922c 1230 total_nr_pages = bp->b_page_count;
1da177e4 1231
3e85c868
DC
1232 /* skip the pages in the buffer before the start offset */
1233 page_index = 0;
1234 offset = *buf_offset;
1235 while (offset >= PAGE_SIZE) {
1236 page_index++;
1237 offset -= PAGE_SIZE;
f538d4da
CH
1238 }
1239
3e85c868
DC
1240 /*
1241 * Limit the IO size to the length of the current vector, and update the
1242 * remaining IO count for the next time around.
1243 */
1244 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1245 *count -= size;
1246 *buf_offset += size;
34951f5c 1247
1da177e4 1248next_chunk:
ce8e922c 1249 atomic_inc(&bp->b_io_remaining);
1da177e4
LT
1250 nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
1251 if (nr_pages > total_nr_pages)
1252 nr_pages = total_nr_pages;
1253
1254 bio = bio_alloc(GFP_NOIO, nr_pages);
ce8e922c 1255 bio->bi_bdev = bp->b_target->bt_bdev;
1da177e4 1256 bio->bi_sector = sector;
ce8e922c
NS
1257 bio->bi_end_io = xfs_buf_bio_end_io;
1258 bio->bi_private = bp;
1da177e4 1259
0e6e847f 1260
3e85c868 1261 for (; size && nr_pages; nr_pages--, page_index++) {
0e6e847f 1262 int rbytes, nbytes = PAGE_SIZE - offset;
1da177e4
LT
1263
1264 if (nbytes > size)
1265 nbytes = size;
1266
3e85c868
DC
1267 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1268 offset);
ce8e922c 1269 if (rbytes < nbytes)
1da177e4
LT
1270 break;
1271
1272 offset = 0;
aa0e8833 1273 sector += BTOBB(nbytes);
1da177e4
LT
1274 size -= nbytes;
1275 total_nr_pages--;
1276 }
1277
1da177e4 1278 if (likely(bio->bi_size)) {
73c77e2c
JB
1279 if (xfs_buf_is_vmapped(bp)) {
1280 flush_kernel_vmap_range(bp->b_addr,
1281 xfs_buf_vmap_len(bp));
1282 }
1da177e4
LT
1283 submit_bio(rw, bio);
1284 if (size)
1285 goto next_chunk;
1286 } else {
ce8e922c 1287 xfs_buf_ioerror(bp, EIO);
ec53d1db 1288 bio_put(bio);
1da177e4 1289 }
3e85c868
DC
1290
1291}
1292
1293STATIC void
1294_xfs_buf_ioapply(
1295 struct xfs_buf *bp)
1296{
1297 struct blk_plug plug;
1298 int rw;
1299 int offset;
1300 int size;
1301 int i;
1302
1303 if (bp->b_flags & XBF_WRITE) {
1304 if (bp->b_flags & XBF_SYNCIO)
1305 rw = WRITE_SYNC;
1306 else
1307 rw = WRITE;
1308 if (bp->b_flags & XBF_FUA)
1309 rw |= REQ_FUA;
1310 if (bp->b_flags & XBF_FLUSH)
1311 rw |= REQ_FLUSH;
1312 } else if (bp->b_flags & XBF_READ_AHEAD) {
1313 rw = READA;
1314 } else {
1315 rw = READ;
1316 }
1317
1318 /* we only use the buffer cache for meta-data */
1319 rw |= REQ_META;
1320
1321 /*
1322 * Walk all the vectors issuing IO on them. Set up the initial offset
1323 * into the buffer and the desired IO size before we start -
1324 * _xfs_buf_ioapply_vec() will modify them appropriately for each
1325 * subsequent call.
1326 */
1327 offset = bp->b_offset;
1328 size = BBTOB(bp->b_io_length);
1329 blk_start_plug(&plug);
1330 for (i = 0; i < bp->b_map_count; i++) {
1331 xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
1332 if (bp->b_error)
1333 break;
1334 if (size <= 0)
1335 break; /* all done */
1336 }
1337 blk_finish_plug(&plug);
1da177e4
LT
1338}
1339
0e95f19a 1340void
ce8e922c
NS
1341xfs_buf_iorequest(
1342 xfs_buf_t *bp)
1da177e4 1343{
0b1b213f 1344 trace_xfs_buf_iorequest(bp, _RET_IP_);
1da177e4 1345
43ff2122 1346 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1da177e4 1347
375ec69d 1348 if (bp->b_flags & XBF_WRITE)
ce8e922c 1349 xfs_buf_wait_unpin(bp);
ce8e922c 1350 xfs_buf_hold(bp);
1da177e4
LT
1351
1352 /* Set the count to 1 initially, this will stop an I/O
1353 * completion callout which happens before we have started
ce8e922c 1354 * all the I/O from calling xfs_buf_ioend too early.
1da177e4 1355 */
ce8e922c
NS
1356 atomic_set(&bp->b_io_remaining, 1);
1357 _xfs_buf_ioapply(bp);
1358 _xfs_buf_ioend(bp, 0);
1da177e4 1359
ce8e922c 1360 xfs_buf_rele(bp);
1da177e4
LT
1361}
1362
1363/*
0e95f19a
DC
1364 * Waits for I/O to complete on the buffer supplied. It returns immediately if
1365 * no I/O is pending or there is already a pending error on the buffer. It
1366 * returns the I/O error code, if any, or 0 if there was no error.
1da177e4
LT
1367 */
1368int
ce8e922c
NS
1369xfs_buf_iowait(
1370 xfs_buf_t *bp)
1da177e4 1371{
0b1b213f
CH
1372 trace_xfs_buf_iowait(bp, _RET_IP_);
1373
0e95f19a
DC
1374 if (!bp->b_error)
1375 wait_for_completion(&bp->b_iowait);
0b1b213f
CH
1376
1377 trace_xfs_buf_iowait_done(bp, _RET_IP_);
ce8e922c 1378 return bp->b_error;
1da177e4
LT
1379}
1380
ce8e922c
NS
1381xfs_caddr_t
1382xfs_buf_offset(
1383 xfs_buf_t *bp,
1da177e4
LT
1384 size_t offset)
1385{
1386 struct page *page;
1387
611c9946 1388 if (bp->b_addr)
62926044 1389 return bp->b_addr + offset;
1da177e4 1390
ce8e922c 1391 offset += bp->b_offset;
0e6e847f
DC
1392 page = bp->b_pages[offset >> PAGE_SHIFT];
1393 return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
1da177e4
LT
1394}
1395
1396/*
1da177e4
LT
1397 * Move data into or out of a buffer.
1398 */
1399void
ce8e922c
NS
1400xfs_buf_iomove(
1401 xfs_buf_t *bp, /* buffer to process */
1da177e4
LT
1402 size_t boff, /* starting buffer offset */
1403 size_t bsize, /* length to copy */
b9c48649 1404 void *data, /* data address */
ce8e922c 1405 xfs_buf_rw_t mode) /* read/write/zero flag */
1da177e4 1406{
795cac72 1407 size_t bend;
1da177e4
LT
1408
1409 bend = boff + bsize;
1410 while (boff < bend) {
795cac72
DC
1411 struct page *page;
1412 int page_index, page_offset, csize;
1413
1414 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1415 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1416 page = bp->b_pages[page_index];
1417 csize = min_t(size_t, PAGE_SIZE - page_offset,
1418 BBTOB(bp->b_io_length) - boff);
1da177e4 1419
795cac72 1420 ASSERT((csize + page_offset) <= PAGE_SIZE);
1da177e4
LT
1421
1422 switch (mode) {
ce8e922c 1423 case XBRW_ZERO:
795cac72 1424 memset(page_address(page) + page_offset, 0, csize);
1da177e4 1425 break;
ce8e922c 1426 case XBRW_READ:
795cac72 1427 memcpy(data, page_address(page) + page_offset, csize);
1da177e4 1428 break;
ce8e922c 1429 case XBRW_WRITE:
795cac72 1430 memcpy(page_address(page) + page_offset, data, csize);
1da177e4
LT
1431 }
1432
1433 boff += csize;
1434 data += csize;
1435 }
1436}
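/*
 * Illustrative sketch (not part of the original file): zeroing the start of
 * a buffer and then copying caller data in behind it with the routine above.
 * 'data' and 'len' are assumed to be supplied by the caller.
 *
 *	xfs_buf_iomove(bp, 0, 512, NULL, XBRW_ZERO);
 *	xfs_buf_iomove(bp, 512, len, data, XBRW_WRITE);
 */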
1437
1438/*
ce8e922c 1439 * Handling of buffer targets (buftargs).
1da177e4
LT
1440 */
1441
1442/*
430cbeb8
DC
1443 * Wait for any bufs with callbacks that have been submitted but have not yet
1444 * returned. These buffers will have an elevated hold count, so wait on those
1445 * while freeing all the buffers only held by the LRU.
1da177e4
LT
1446 */
1447void
1448xfs_wait_buftarg(
74f75a0c 1449 struct xfs_buftarg *btp)
1da177e4 1450{
430cbeb8
DC
1451 struct xfs_buf *bp;
1452
1453restart:
1454 spin_lock(&btp->bt_lru_lock);
1455 while (!list_empty(&btp->bt_lru)) {
1456 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1457 if (atomic_read(&bp->b_hold) > 1) {
1458 spin_unlock(&btp->bt_lru_lock);
26af6552 1459 delay(100);
430cbeb8 1460 goto restart;
1da177e4 1461 }
430cbeb8 1462 /*
90802ed9 1463 * clear the LRU reference count so the buffer doesn't get
430cbeb8
DC
1464 * ignored in xfs_buf_rele().
1465 */
1466 atomic_set(&bp->b_lru_ref, 0);
1467 spin_unlock(&btp->bt_lru_lock);
1468 xfs_buf_rele(bp);
1469 spin_lock(&btp->bt_lru_lock);
1da177e4 1470 }
430cbeb8 1471 spin_unlock(&btp->bt_lru_lock);
1da177e4
LT
1472}
1473
ff57ab21
DC
1474int
1475xfs_buftarg_shrink(
1476 struct shrinker *shrink,
1495f230 1477 struct shrink_control *sc)
a6867a68 1478{
ff57ab21
DC
1479 struct xfs_buftarg *btp = container_of(shrink,
1480 struct xfs_buftarg, bt_shrinker);
430cbeb8 1481 struct xfs_buf *bp;
1495f230 1482 int nr_to_scan = sc->nr_to_scan;
430cbeb8
DC
1483 LIST_HEAD(dispose);
1484
1485 if (!nr_to_scan)
1486 return btp->bt_lru_nr;
1487
1488 spin_lock(&btp->bt_lru_lock);
1489 while (!list_empty(&btp->bt_lru)) {
1490 if (nr_to_scan-- <= 0)
1491 break;
1492
1493 bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
1494
1495 /*
1496 * Decrement the b_lru_ref count unless the value is already
1497 * zero. If the value is already zero, we need to reclaim the
1498 * buffer, otherwise it gets another trip through the LRU.
1499 */
1500 if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1501 list_move_tail(&bp->b_lru, &btp->bt_lru);
1502 continue;
1503 }
1504
1505 /*
1506 * remove the buffer from the LRU now to avoid needing another
1507 * lock round trip inside xfs_buf_rele().
1508 */
1509 list_move(&bp->b_lru, &dispose);
1510 btp->bt_lru_nr--;
ff57ab21 1511 }
430cbeb8
DC
1512 spin_unlock(&btp->bt_lru_lock);
1513
1514 while (!list_empty(&dispose)) {
1515 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1516 list_del_init(&bp->b_lru);
1517 xfs_buf_rele(bp);
1518 }
1519
1520 return btp->bt_lru_nr;
a6867a68
DC
1521}
1522
1da177e4
LT
1523void
1524xfs_free_buftarg(
b7963133
CH
1525 struct xfs_mount *mp,
1526 struct xfs_buftarg *btp)
1da177e4 1527{
ff57ab21
DC
1528 unregister_shrinker(&btp->bt_shrinker);
1529
b7963133
CH
1530 if (mp->m_flags & XFS_MOUNT_BARRIER)
1531 xfs_blkdev_issue_flush(btp);
a6867a68 1532
f0e2d93c 1533 kmem_free(btp);
1da177e4
LT
1534}
1535
1da177e4
LT
1536STATIC int
1537xfs_setsize_buftarg_flags(
1538 xfs_buftarg_t *btp,
1539 unsigned int blocksize,
1540 unsigned int sectorsize,
1541 int verbose)
1542{
ce8e922c
NS
1543 btp->bt_bsize = blocksize;
1544 btp->bt_sshift = ffs(sectorsize) - 1;
1545 btp->bt_smask = sectorsize - 1;
1da177e4 1546
ce8e922c 1547 if (set_blocksize(btp->bt_bdev, sectorsize)) {
02b102df
CH
1548 char name[BDEVNAME_SIZE];
1549
1550 bdevname(btp->bt_bdev, name);
1551
4f10700a
DC
1552 xfs_warn(btp->bt_mount,
1553 "Cannot set_blocksize to %u on device %s\n",
02b102df 1554 sectorsize, name);
1da177e4
LT
1555 return EINVAL;
1556 }
1557
1da177e4
LT
1558 return 0;
1559}
1560
1561/*
ce8e922c
NS
1562 * When allocating the initial buffer target we have not yet
1563 * read in the superblock, so don't know what sized sectors
 1564 * are being used at this early stage. Play safe.
1565 */
1da177e4
LT
1566STATIC int
1567xfs_setsize_buftarg_early(
1568 xfs_buftarg_t *btp,
1569 struct block_device *bdev)
1570{
1571 return xfs_setsize_buftarg_flags(btp,
0e6e847f 1572 PAGE_SIZE, bdev_logical_block_size(bdev), 0);
1da177e4
LT
1573}
1574
1575int
1576xfs_setsize_buftarg(
1577 xfs_buftarg_t *btp,
1578 unsigned int blocksize,
1579 unsigned int sectorsize)
1580{
1581 return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
1582}
1583
1da177e4
LT
1584xfs_buftarg_t *
1585xfs_alloc_buftarg(
ebad861b 1586 struct xfs_mount *mp,
1da177e4 1587 struct block_device *bdev,
e2a07812
JE
1588 int external,
1589 const char *fsname)
1da177e4
LT
1590{
1591 xfs_buftarg_t *btp;
1592
1593 btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1594
ebad861b 1595 btp->bt_mount = mp;
ce8e922c
NS
1596 btp->bt_dev = bdev->bd_dev;
1597 btp->bt_bdev = bdev;
0e6e847f
DC
1598 btp->bt_bdi = blk_get_backing_dev_info(bdev);
1599 if (!btp->bt_bdi)
1600 goto error;
1601
430cbeb8
DC
1602 INIT_LIST_HEAD(&btp->bt_lru);
1603 spin_lock_init(&btp->bt_lru_lock);
1da177e4
LT
1604 if (xfs_setsize_buftarg_early(btp, bdev))
1605 goto error;
ff57ab21
DC
1606 btp->bt_shrinker.shrink = xfs_buftarg_shrink;
1607 btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1608 register_shrinker(&btp->bt_shrinker);
1da177e4
LT
1609 return btp;
1610
1611error:
f0e2d93c 1612 kmem_free(btp);
1da177e4
LT
1613 return NULL;
1614}
1615
1da177e4 1616/*
43ff2122
CH
1617 * Add a buffer to the delayed write list.
1618 *
1619 * This queues a buffer for writeout if it hasn't already been. Note that
1620 * neither this routine nor the buffer list submission functions perform
1621 * any internal synchronization. It is expected that the lists are thread-local
1622 * to the callers.
1623 *
1624 * Returns true if we queued up the buffer, or false if it already had
1625 * been on the buffer list.
1da177e4 1626 */
43ff2122 1627bool
ce8e922c 1628xfs_buf_delwri_queue(
43ff2122
CH
1629 struct xfs_buf *bp,
1630 struct list_head *list)
1da177e4 1631{
43ff2122 1632 ASSERT(xfs_buf_islocked(bp));
5a8ee6ba 1633 ASSERT(!(bp->b_flags & XBF_READ));
1da177e4 1634
43ff2122
CH
1635 /*
1636 * If the buffer is already marked delwri it already is queued up
 1637 * by someone else for immediate writeout. Just ignore it in that
1638 * case.
1639 */
1640 if (bp->b_flags & _XBF_DELWRI_Q) {
1641 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
1642 return false;
1da177e4 1643 }
1da177e4 1644
43ff2122 1645 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
d808f617
DC
1646
1647 /*
43ff2122
CH
1648 * If a buffer gets written out synchronously or marked stale while it
1649 * is on a delwri list we lazily remove it. To do this, the other party
1650 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
1651 * It remains referenced and on the list. In a rare corner case it
 1652 * might get re-added to a delwri list after the synchronous writeout, in
 1653 * which case we just need to re-add the flag here.
d808f617 1654 */
43ff2122
CH
1655 bp->b_flags |= _XBF_DELWRI_Q;
1656 if (list_empty(&bp->b_list)) {
1657 atomic_inc(&bp->b_hold);
1658 list_add_tail(&bp->b_list, list);
585e6d88 1659 }
585e6d88 1660
43ff2122 1661 return true;
585e6d88
DC
1662}
1663
089716aa
DC
1664/*
1665 * Compare function is more complex than it needs to be because
1666 * the return value is only 32 bits and we are doing comparisons
1667 * on 64 bit values
1668 */
1669static int
1670xfs_buf_cmp(
1671 void *priv,
1672 struct list_head *a,
1673 struct list_head *b)
1674{
1675 struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
1676 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
1677 xfs_daddr_t diff;
1678
cbb7baab 1679 diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
089716aa
DC
1680 if (diff < 0)
1681 return -1;
1682 if (diff > 0)
1683 return 1;
1684 return 0;
1685}
1686
43ff2122
CH
1687static int
1688__xfs_buf_delwri_submit(
1689 struct list_head *buffer_list,
1690 struct list_head *io_list,
1691 bool wait)
1da177e4 1692{
43ff2122
CH
1693 struct blk_plug plug;
1694 struct xfs_buf *bp, *n;
1695 int pinned = 0;
1696
1697 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1698 if (!wait) {
1699 if (xfs_buf_ispinned(bp)) {
1700 pinned++;
1701 continue;
1702 }
1703 if (!xfs_buf_trylock(bp))
1704 continue;
1705 } else {
1706 xfs_buf_lock(bp);
1707 }
978c7b2f 1708
43ff2122
CH
1709 /*
1710 * Someone else might have written the buffer synchronously or
1711 * marked it stale in the meantime. In that case only the
1712 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
1713 * reference and remove it from the list here.
1714 */
1715 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
1716 list_del_init(&bp->b_list);
1717 xfs_buf_relse(bp);
1718 continue;
1719 }
c9c12971 1720
43ff2122
CH
1721 list_move_tail(&bp->b_list, io_list);
1722 trace_xfs_buf_delwri_split(bp, _RET_IP_);
1723 }
1da177e4 1724
43ff2122 1725 list_sort(NULL, io_list, xfs_buf_cmp);
1da177e4 1726
43ff2122
CH
1727 blk_start_plug(&plug);
1728 list_for_each_entry_safe(bp, n, io_list, b_list) {
1729 bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
1730 bp->b_flags |= XBF_WRITE;
a1b7ea5d 1731
43ff2122
CH
1732 if (!wait) {
1733 bp->b_flags |= XBF_ASYNC;
ce8e922c 1734 list_del_init(&bp->b_list);
1da177e4 1735 }
43ff2122
CH
1736 xfs_bdstrat_cb(bp);
1737 }
1738 blk_finish_plug(&plug);
1da177e4 1739
43ff2122 1740 return pinned;
1da177e4
LT
1741}
1742
1743/*
43ff2122
CH
1744 * Write out a buffer list asynchronously.
1745 *
1746 * This will take the @buffer_list, write all non-locked and non-pinned buffers
1747 * out and not wait for I/O completion on any of the buffers. This interface
1748 * is only safely useable for callers that can track I/O completion by higher
1749 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
1750 * function.
1da177e4
LT
1751 */
1752int
43ff2122
CH
1753xfs_buf_delwri_submit_nowait(
1754 struct list_head *buffer_list)
1da177e4 1755{
43ff2122
CH
1756 LIST_HEAD (io_list);
1757 return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
1758}
1da177e4 1759
43ff2122
CH
1760/*
1761 * Write out a buffer list synchronously.
1762 *
1763 * This will take the @buffer_list, write all buffers out and wait for I/O
1764 * completion on all of the buffers. @buffer_list is consumed by the function,
1765 * so callers must have some other way of tracking buffers if they require such
1766 * functionality.
1767 */
1768int
1769xfs_buf_delwri_submit(
1770 struct list_head *buffer_list)
1771{
1772 LIST_HEAD (io_list);
1773 int error = 0, error2;
1774 struct xfs_buf *bp;
1da177e4 1775
43ff2122 1776 __xfs_buf_delwri_submit(buffer_list, &io_list, true);
1da177e4 1777
43ff2122
CH
1778 /* Wait for IO to complete. */
1779 while (!list_empty(&io_list)) {
1780 bp = list_first_entry(&io_list, struct xfs_buf, b_list);
a1b7ea5d 1781
089716aa 1782 list_del_init(&bp->b_list);
43ff2122
CH
1783 error2 = xfs_buf_iowait(bp);
1784 xfs_buf_relse(bp);
1785 if (!error)
1786 error = error2;
1da177e4
LT
1787 }
1788
43ff2122 1789 return error;
1da177e4
LT
1790}
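/*
 * Illustrative sketch (not part of the original file): building and flushing
 * a thread-local delayed write list with the two interfaces above.
 *
 *	LIST_HEAD(buffer_list);
 *
 *	for each dirty buffer bp to be written:
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */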
1791
04d8b284 1792int __init
ce8e922c 1793xfs_buf_init(void)
1da177e4 1794{
8758280f
NS
1795 xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
1796 KM_ZONE_HWALIGN, NULL);
ce8e922c 1797 if (!xfs_buf_zone)
0b1b213f 1798 goto out;
04d8b284 1799
51749e47 1800 xfslogd_workqueue = alloc_workqueue("xfslogd",
6370a6ad 1801 WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
23ea4032 1802 if (!xfslogd_workqueue)
04d8b284 1803 goto out_free_buf_zone;
1da177e4 1804
23ea4032 1805 return 0;
1da177e4 1806
23ea4032 1807 out_free_buf_zone:
ce8e922c 1808 kmem_zone_destroy(xfs_buf_zone);
0b1b213f 1809 out:
8758280f 1810 return -ENOMEM;
1da177e4
LT
1811}
1812
1da177e4 1813void
ce8e922c 1814xfs_buf_terminate(void)
1da177e4 1815{
04d8b284 1816 destroy_workqueue(xfslogd_workqueue);
ce8e922c 1817 kmem_zone_destroy(xfs_buf_zone);
1da177e4 1818}