/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH        32

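/*
 * Attempt to take a stable reference to an inode found by the AG walk.
 * Returns 0 with an igrab() reference held on success; returns a positive
 * errno (and takes no reference) if the inode should be skipped because
 * the filesystem is shutting down or the inode is new, bad, or already
 * heading into reclaim.
 */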
STATIC int
xfs_inode_ag_walk_grab(
        struct xfs_inode        *ip)
{
        struct inode            *inode = VFS_I(ip);

        /* nothing to sync during shutdown */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return EFSCORRUPTED;

        /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
        if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
                return ENOENT;

        /* If we can't grab the inode, it must be on its way to reclaim. */
        if (!igrab(inode))
                return ENOENT;

        if (is_bad_inode(inode)) {
                IRELE(ip);
                return ENOENT;
        }

        /* inode is valid */
        return 0;
}

STATIC int
xfs_inode_ag_walk(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags)
{
        uint32_t                first_index;
        int                     last_error = 0;
        int                     skipped;
        int                     done;
        int                     nr_found;

restart:
        done = 0;
        skipped = 0;
        first_index = 0;
        nr_found = 0;
        do {
                struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                read_lock(&pag->pag_ici_lock);
                nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH);
                if (!nr_found) {
                        read_unlock(&pag->pag_ici_lock);
                        break;
                }

                /*
                 * Grab the inodes before we drop the lock. if we found
                 * nothing, nr == 0 and the loop will be skipped.
                 */
                for (i = 0; i < nr_found; i++) {
                        struct xfs_inode *ip = batch[i];

                        if (done || xfs_inode_ag_walk_grab(ip))
                                batch[i] = NULL;

                        /*
                         * Update the index for the next lookup. Catch overflows
                         * into the next AG range which can occur if we have inodes
                         * in the last block of the AG and we are currently
                         * pointing to the last inode.
                         */
                        first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                        if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                done = 1;
                }

                /* unlock now we've grabbed the inodes. */
                read_unlock(&pag->pag_ici_lock);

                for (i = 0; i < nr_found; i++) {
                        if (!batch[i])
                                continue;
                        error = execute(batch[i], pag, flags);
                        IRELE(batch[i]);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                /* bail out if the filesystem is corrupted. */
                if (error == EFSCORRUPTED)
                        break;

        } while (nr_found && !done);

        if (skipped) {
                delay(1);
                goto restart;
        }
        return last_error;
}

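/*
 * Call the execute function on every incore inode in the filesystem,
 * walking each AG in turn with xfs_inode_ag_walk(). An EFSCORRUPTED
 * error aborts the walk; otherwise the last error seen is returned.
 */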
int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
        }
        return XFS_ERROR(last_error);
}

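/*
 * Write back an inode's dirty pagecache data. With SYNC_TRYLOCK the iolock
 * is only taken opportunistically; with SYNC_WAIT the flush is synchronous
 * and we also wait for outstanding ioends to complete before returning.
 */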
STATIC int
xfs_sync_inode_data(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        struct inode            *inode = VFS_I(ip);
        struct address_space    *mapping = inode->i_mapping;
        int                     error = 0;

        if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                goto out_wait;

        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
                if (flags & SYNC_TRYLOCK)
                        goto out_wait;
                xfs_ilock(ip, XFS_IOLOCK_SHARED);
        }

        error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
                                                0 : XBF_ASYNC, FI_NONE);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
        if (flags & SYNC_WAIT)
                xfs_ioend_wait(ip);
        return error;
}

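/*
 * Flush an inode's dirty metadata (attributes) out to its backing buffer.
 * Clean inodes are skipped; without SYNC_WAIT an inode whose flush lock is
 * already held is left for a later pass rather than blocking here.
 */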
STATIC int
xfs_sync_inode_attr(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        int                     error = 0;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        if (xfs_inode_clean(ip))
                goto out_unlock;
        if (!xfs_iflock_nowait(ip)) {
                if (!(flags & SYNC_WAIT))
                        goto out_unlock;
                xfs_iflock(ip);
        }

        if (xfs_inode_clean(ip)) {
                xfs_ifunlock(ip);
                goto out_unlock;
        }

        error = xfs_iflush(ip, flags);

 out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
        struct xfs_mount        *mp,
        int                     flags)
{
        int                     error;

        ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

        error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
        if (error)
                return XFS_ERROR(error);

        xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
        return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
        struct xfs_mount        *mp,
        int                     flags)
{
        ASSERT((flags & ~SYNC_WAIT) == 0);

        return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags);
}

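/*
 * Write the in-core superblock out to its buffer on disk. If the buffer is
 * pinned, push the log first so the write does not block waiting for the
 * log to be flushed while we hold the buffer locked.
 */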
STATIC int
xfs_sync_fsdata(
        struct xfs_mount        *mp)
{
        struct xfs_buf          *bp;

        /*
         * If the buffer is pinned then push on the log so we won't get stuck
         * waiting in the write for someone, maybe ourselves, to flush the log.
         *
         * Even though we just pushed the log above, we did not have the
         * superblock buffer locked at that point so it can become pinned in
         * between there and here.
         */
        bp = xfs_getsb(mp, 0);
        if (XFS_BUF_ISPINNED(bp))
                xfs_log_force(mp, 0);

        return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
        struct xfs_mount        *mp)
{
        int                     error, error2 = 0;

        /* push non-blocking */
        xfs_sync_data(mp, 0);
        xfs_qm_sync(mp, SYNC_TRYLOCK);

        /* push and block till complete */
        xfs_sync_data(mp, SYNC_WAIT);
        xfs_qm_sync(mp, SYNC_WAIT);

        /* write superblock and hoover up shutdown errors */
        error = xfs_sync_fsdata(mp);

        /* make sure all delwri buffers are written out */
        xfs_flush_buftarg(mp->m_ddev_targp, 1);

        /* mark the log as covered if needed */
        if (xfs_log_need_covered(mp))
                error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

        /* flush data-only devices */
        if (mp->m_rtdev_targp)
                XFS_bflush(mp->m_rtdev_targp);

        return error ? error : error2;
}

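/*
 * Flush and reclaim all incore inodes and write out dirty metadata buffers,
 * repeating until nothing remains pinned, so that the unmount record can be
 * written safely afterwards.
 */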
STATIC void
xfs_quiesce_fs(
        struct xfs_mount        *mp)
{
        int     count = 0, pincount;

        xfs_reclaim_inodes(mp, 0);
        xfs_flush_buftarg(mp->m_ddev_targp, 0);

        /*
         * This loop must run at least twice. The first instance of the loop
         * will flush most meta data but that will generate more meta data
         * (typically directory updates), which then must be flushed and
         * logged before we can write the unmount record. We also do sync
         * reclaim of inodes to catch any that the above delwri flush skipped.
         */
        do {
                xfs_reclaim_inodes(mp, SYNC_WAIT);
                xfs_sync_attr(mp, SYNC_WAIT);
                pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
                if (!pincount) {
                        delay(50);
                        count++;
                }
        } while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
        struct xfs_mount        *mp)
{
        int     error = 0;

        /* wait for all modifications to complete */
        while (atomic_read(&mp->m_active_trans) > 0)
                delay(100);

        /* flush inodes and push all remaining buffers out to disk */
        xfs_quiesce_fs(mp);

        /*
         * Just warn here till VFS can correctly support
         * read-only remount without racing.
         */
        WARN_ON(atomic_read(&mp->m_active_trans) != 0);

        /* Push the superblock and write an unmount record */
        error = xfs_log_sbcount(mp, 1);
        if (error)
                xfs_fs_cmn_err(CE_WARN, mp,
                                "xfs_attr_quiesce: failed to log sb changes. "
                                "Frozen image may not be consistent.");
        xfs_log_unmount_write(mp);
        xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
        struct xfs_mount *mp,
        void            *data,
        void            (*syncer)(struct xfs_mount *, void *),
        struct completion *completion)
{
        struct xfs_sync_work *work;

        work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
        INIT_LIST_HEAD(&work->w_list);
        work->w_syncer = syncer;
        work->w_data = data;
        work->w_mount = mp;
        work->w_completion = completion;
        spin_lock(&mp->m_sync_lock);
        list_add_tail(&work->w_list, &mp->m_sync_list);
        spin_unlock(&mp->m_sync_lock);
        wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
        struct xfs_mount *mp,
        void            *arg)
{
        struct inode    *inode = arg;
        xfs_sync_data(mp, SYNC_TRYLOCK);
        xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
        iput(inode);
}

void
xfs_flush_inodes(
        xfs_inode_t     *ip)
{
        struct inode    *inode = VFS_I(ip);
        DECLARE_COMPLETION_ONSTACK(completion);

        igrab(inode);
        xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
        wait_for_completion(&completion);
        xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
        struct xfs_mount *mp,
        void            *unused)
{
        int             error;

        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                xfs_log_force(mp, 0);
                xfs_reclaim_inodes(mp, 0);
                /* dgc: errors ignored here */
                error = xfs_qm_sync(mp, SYNC_TRYLOCK);
                if (mp->m_super->s_frozen == SB_UNFROZEN &&
                    xfs_log_need_covered(mp))
                        error = xfs_fs_log_dummy(mp, 0);
        }
        mp->m_sync_seq++;
        wake_up(&mp->m_wait_single_sync_task);
}

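/*
 * Main loop of the per-mount xfssyncd thread. Sleep for the sync period (or
 * until woken), queue the periodic sync work item when the timeout expires,
 * then run every work item on the list, signalling and freeing the items
 * that were queued via xfs_syncd_queue_work().
 */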
STATIC int
xfssyncd(
        void                    *arg)
{
        struct xfs_mount        *mp = arg;
        long                    timeleft;
        xfs_sync_work_t         *work, *n;
        LIST_HEAD               (tmp);

        set_freezable();
        timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
        for (;;) {
                if (list_empty(&mp->m_sync_list))
                        timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
                if (kthread_should_stop() && list_empty(&mp->m_sync_list))
                        break;

                spin_lock(&mp->m_sync_lock);
                /*
                 * We can get woken by laptop mode, to do a sync -
                 * that's the (only!) case where the list would be
                 * empty with time remaining.
                 */
                if (!timeleft || list_empty(&mp->m_sync_list)) {
                        if (!timeleft)
                                timeleft = xfs_syncd_centisecs *
                                                        msecs_to_jiffies(10);
                        INIT_LIST_HEAD(&mp->m_sync_work.w_list);
                        list_add_tail(&mp->m_sync_work.w_list,
                                        &mp->m_sync_list);
                }
                list_splice_init(&mp->m_sync_list, &tmp);
                spin_unlock(&mp->m_sync_lock);

                list_for_each_entry_safe(work, n, &tmp, w_list) {
                        (*work->w_syncer)(mp, work->w_data);
                        list_del(&work->w_list);
                        if (work == &mp->m_sync_work)
                                continue;
                        if (work->w_completion)
                                complete(work->w_completion);
                        kmem_free(work);
                }
        }

        return 0;
}

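/*
 * Start and stop the per-mount xfssyncd thread. The embedded m_sync_work
 * item is initialised here so the periodic sync always has a work item
 * available without needing to allocate one.
 */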
int
xfs_syncd_init(
        struct xfs_mount        *mp)
{
        mp->m_sync_work.w_syncer = xfs_sync_worker;
        mp->m_sync_work.w_mount = mp;
        mp->m_sync_work.w_completion = NULL;
        mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
        if (IS_ERR(mp->m_sync_task))
                return -PTR_ERR(mp->m_sync_task);
        return 0;
}

void
xfs_syncd_stop(
        struct xfs_mount        *mp)
{
        kthread_stop(mp->m_sync_task);
}

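/*
 * Tag an inode as reclaimable in the per-AG inode radix tree and, when it is
 * the first reclaimable inode in the AG, propagate the tag up into the
 * per-mount perag radix tree so AG-level reclaim walks can find it.
 */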
void
__xfs_inode_set_reclaim_tag(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip)
{
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);

        if (!pag->pag_ici_reclaimable) {
                /* propagate the reclaim tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
        pag->pag_ici_reclaimable++;
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        write_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        __xfs_inode_set_reclaim_tag(pag, ip);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        write_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}

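/*
 * Account for an inode leaving reclaim and, if it was the last reclaimable
 * inode in the AG, clear the reclaim tag from the per-mount perag radix
 * tree as well.
 */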
STATIC void
__xfs_inode_clear_reclaim(
        xfs_perag_t     *pag,
        xfs_inode_t     *ip)
{
        pag->pag_ici_reclaimable--;
        if (!pag->pag_ici_reclaimable) {
                /* clear the reclaim tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                XFS_ICI_RECLAIM_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
                                                        -1, _RET_IP_);
        }
}

void
__xfs_inode_clear_reclaim_tag(
        xfs_mount_t     *mp,
        xfs_perag_t     *pag,
        xfs_inode_t     *ip)
{
        radix_tree_tag_clear(&pag->pag_ici_root,
                        XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
        __xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
        struct xfs_inode        *ip,
        int                     flags)
{
        /*
         * Do some unlocked checks first to avoid unnecessary lock traffic.
         * The first is a flush lock check, the second is an already-in-reclaim
         * check. Only do these checks if we are not going to block on locks.
         */
        if ((flags & SYNC_TRYLOCK) &&
            (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
                return 1;
        }

        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
         * with us starting reclaim on the inode.  Once we have the
         * XFS_IRECLAIM flag set it will not touch us.
         */
        spin_lock(&ip->i_flags_lock);
        ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
        if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
                /* ignore as it is already under reclaim */
                spin_unlock(&ip->i_flags_lock);
                return 1;
        }
        __xfs_iflags_set(ip, XFS_IRECLAIM);
        spin_unlock(&ip->i_flags_lock);
        return 0;
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *
 *      inode state             iflush ret      required action
 *      ---------------         ----------      ---------------
 *      bad                     -               reclaim
 *      shutdown                EIO             unpin and reclaim
 *      clean, unpinned         0               reclaim
 *      stale, unpinned         0               reclaim
 *      clean, pinned(*)        0               requeue
 *      stale, pinned           EAGAIN          requeue
 *      dirty, delwri ok        0               requeue
 *      dirty, delwri blocked   EAGAIN          requeue
 *      dirty, sync flush       0               reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *      bad             => reclaim
 *      shutdown        => unpin and reclaim
 *      pinned, delwri  => requeue
 *      pinned, sync    => unpin
 *      stale           => reclaim
 *      clean           => reclaim
 *      dirty, delwri   => flush and requeue
 *      dirty, sync     => flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     sync_mode)
{
        int     error = 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
                xfs_iflock(ip);
        }

        if (is_bad_inode(VFS_I(ip)))
                goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                goto reclaim;
        }
        if (xfs_ipincount(ip)) {
                if (!(sync_mode & SYNC_WAIT)) {
                        xfs_ifunlock(ip);
                        goto out;
                }
                xfs_iunpin_wait(ip);
        }
        if (xfs_iflags_test(ip, XFS_ISTALE))
                goto reclaim;
        if (xfs_inode_clean(ip))
                goto reclaim;

        /* Now we have an inode that needs flushing */
        error = xfs_iflush(ip, sync_mode);
        if (sync_mode & SYNC_WAIT) {
                xfs_iflock(ip);
                goto reclaim;
        }

        /*
         * When we have to flush an inode but don't have SYNC_WAIT set, we
         * flush the inode out using a delwri buffer and wait for the next
         * call into reclaim to find it in a clean state instead of waiting for
         * it now. We also don't return errors here - if the error is transient
         * then the next reclaim pass will flush the inode, and if the error
         * is permanent then the next sync reclaim will reclaim the inode and
         * pass on the error.
         */
        if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_fs_cmn_err(CE_WARN, ip->i_mount,
                        "inode 0x%llx background reclaim flush failed with %d",
                        (long long)ip->i_ino, error);
        }
out:
        xfs_iflags_clear(ip, XFS_IRECLAIM);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        /*
         * We could return EAGAIN here to make reclaim rescan the inode tree in
         * a short while. However, this just burns CPU time scanning the tree
         * waiting for IO to complete and xfssyncd never goes back to the idle
         * state. Instead, return 0 to let the next scheduled background reclaim
         * attempt to reclaim the inode again.
         */
        return 0;

reclaim:
        xfs_ifunlock(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        XFS_STATS_INC(xs_ig_reclaims);
        /*
         * Remove the inode from the per-AG radix tree.
         *
         * Because radix_tree_delete won't complain even if the item was never
         * added to the tree assert that it's been there before to catch
         * problems with the inode life time early on.
         */
        write_lock(&pag->pag_ici_lock);
        if (!radix_tree_delete(&pag->pag_ici_root,
                                XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
                ASSERT(0);
        __xfs_inode_clear_reclaim(pag, ip);
        write_unlock(&pag->pag_ici_lock);

        /*
         * Here we do an (almost) spurious inode lock in order to coordinate
         * with inode cache radix tree lookups. This is because the lookup
         * can reference the inodes in the cache without taking references.
         *
         * We make that OK here by ensuring that we wait until the inode is
         * unlocked after the lookup before we go ahead and free it.  We get
         * both the ilock and the iolock because the code may need to drop the
         * ilock one but will still hold the iolock.
         */
        xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_qm_dqdetach(ip);
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        xfs_inode_free(ip);
        return error;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shut down during filesystem unmount reclaim walk will leak all the
 * unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        write_lock(&pag->pag_ici_lock);
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                write_unlock(&pag->pag_ici_lock);
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. if we found
                         * nothing, nr == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block of
                                 * the AG and we are currently pointing to the
                                 * last inode.
                                 */
                                first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
                                if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now we've grabbed the inodes. */
                        write_unlock(&pag->pag_ici_lock);

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag, flags);
                                if (error && last_error != EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * If we skipped any AG, and we still have scan count remaining, do
         * another pass this time using blocking reclaim semantics (i.e.
         * waiting on the reclaim locks and ignoring the reclaim cursors). This
         * ensures that when we get more reclaimers than AGs we block rather
         * than spin trying to execute reclaim.
         */
        if (trylock && skipped && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return XFS_ERROR(last_error);
}

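/*
 * Reclaim every reclaimable inode in the filesystem. The mode determines
 * whether reclaim blocks on dirty or pinned inodes (SYNC_WAIT) or skips
 * them for a later pass.
 */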
int
xfs_reclaim_inodes(
        xfs_mount_t     *mp,
        int             mode)
{
        int             nr_to_scan = INT_MAX;

        return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Shrinker infrastructure.
 */
static int
xfs_reclaim_inode_shrink(
        struct shrinker *shrink,
        int             nr_to_scan,
        gfp_t           gfp_mask)
{
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
        int             reclaimable;

        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;

                xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK, &nr_to_scan);
                /* terminate if we don't exhaust the scan */
                if (nr_to_scan > 0)
                        return -1;
        }

        reclaimable = 0;
        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}

void
xfs_inode_shrinker_register(
        struct xfs_mount        *mp)
{
        mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
        mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
        register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
        struct xfs_mount        *mp)
{
        unregister_shrinker(&mp->m_inode_shrink);
}