Factor outstanding I/O error handling
fs/gfs2/glops.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

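	/* Open a transaction that reserves no metadata blocks but has
	 * room for one revoke per AIL block; each buffer removed from
	 * the list below gets a revoke written for it. */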
	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
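		/* gfs2_trans_add_revoke() takes the log lock itself and
		 * may allocate, so drop the lock around the call and
		 * retake it before the next list_empty() check. */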
		gfs2_log_unlock(sdp);

		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

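	/* Close the transaction and flush the log so the revokes make
	 * it to disk before the glock is handed over. */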
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 *
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode;

	if (!ip || !S_ISREG(ip->i_inode.i_mode))
		return;
	inode = &ip->i_inode;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

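	/* If the inode was written through a shared writable mapping,
	 * those pages are dirty: keep the glock flagged dirty so the
	 * data is synced before the lock is demoted. */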
	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until the I/O is
 * complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

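	/* The order below matters: flush the log for this glock, start
	 * writeback of the data pages, sync the metadata, then wait for
	 * the data and record any I/O error on the mapping so a later
	 * fsync()/msync() can report it. */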
	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		if (ip)
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			mapping_set_error(mapping, error);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 *
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

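	/* Unless the requesting holder asked us to skip it (GL_SKIP),
	 * read the inode's disk block in now that the lock is held, so
	 * the metadata is already cached for the holder. */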
	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Either another node needs the lock in EXCLUSIVE mode, or the lock has
 * gone unused for too long and is being purged from this node's glock
 * cache; either way, we are dropping it.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

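	/* Only regular files carry page cache data; throw those pages
	 * away and clear the hint that says they might exist. */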
	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

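	/* Demote at once if nothing is cached under this glock;
	 * otherwise demote only after demote_secs of idleness, and
	 * with localcaching (single node) never on a timer at all. */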
	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

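	/* If a truncate was cut short (e.g. by a crash), resume it now
	 * that we hold the inode exclusively. */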
	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 * process
 * @gh: the glock holder
 *
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 * a first holder on this node.
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 * a last holder on this node.
 * @gh: the glock holder
 *
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

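		/* The journal was idled by trans_go_xmote_th(); re-read
		 * its head and check that it was shut down cleanly. */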
		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		else if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the in-core log head from the journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching. Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

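/*
 * The tables below bind the callbacks above to each glock type.
 * Roughly: go_xmote_th/go_drop_th run before a state change to flush
 * whatever this node holds, go_xmote_bh runs after the change,
 * go_inval discards cached data, go_demote_ok says whether an idle
 * glock may be released, and go_lock/go_unlock bracket the first and
 * last holder on this node.
 */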
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};