GFS2: Use range based functions for rgrp sync/invalidation
fs/gfs2/glops.c
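
This version syncs and invalidates resource groups by byte range rather than by whole mapping: rgrp_go_sync writes back and waits on gl->gl_vm.start through gl->gl_vm.end via filemap_fdatawrite_range() and filemap_fdatawait_range(), and rgrp_go_inval drops the cached pages with truncate_inode_pages_range() over the same range, instead of operating on the glock's entire metadata address space.
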
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
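
	/*
	 * Worked example, as a sketch: assuming a 4KiB block size and the
	 * usual on-disk struct sizes (72 bytes for gfs2_log_descriptor,
	 * 24 bytes for gfs2_meta_header), the first revoke block holds
	 * (4096 - 72) / 8 = 503 revokes and each continuation block adds
	 * (4096 - 24) / 8 = 509 more, so the loop above grows max_revokes
	 * in steps of 509 until it covers everything on the AIL list.
	 */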

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until the I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite_range(metamapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(metamapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}

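/*
 * The gl_vm range used above and in rgrp_go_inval() is set up elsewhere,
 * when the rgrp is read in. A minimal sketch of that setup, assuming
 * rd_addr and rd_length describe the rgrp header extent in filesystem
 * blocks (this helper is illustrative only and is not part of this
 * file's real call chain, which lives in rgrp.c):
 */
static inline void example_rgrp_set_vm_range(struct gfs2_rgrpd *rgd,
					     unsigned int bsize)
{
	struct gfs2_glock *gl = rgd->rd_gl;

	/* Byte offset of the first rgrp header block ... */
	gl->gl_vm.start = rgd->rd_addr * bsize;
	/* ... through the last byte of the header extent, inclusive */
	gl->gl_vm.end = gl->gl_vm.start + rgd->rd_length * bsize - 1;
}
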
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		/* Don't demote if more than one holder is queued */
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on-disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

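/*
 * An illustrative scenario (not taken from this file): if another node
 * unlinks an inode we hold open, the incore link count drops to zero
 * here first. A later refresh that reads a stale on-disk dinode claiming
 * nlink == 1 then fails the (i_nlink != 0) test above, so the count
 * stays at zero and the inode can still be deallocated on final close.
 */
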
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock whose inode is to be described
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}

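/*
 * For illustration, a line produced by the dump above might look like
 * (values made up): " I: n:25/131076 t:8 f:0x00 d:0x00000000 s:4096"
 * where n: is formal ino/disk address, t:8 is DT_REG, and s: is the
 * inode size in bytes.
 */
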
/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the log head pointers from the journal head */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		/* Drop the extra ref again if the delete work was already queued */
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};
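
/*
 * A minimal usage sketch (the calling context is an assumption, not part
 * of this file): code that only knows a lock's type number can look up
 * the operations vector here and hand it to the glock core, e.g.
 *
 *	const struct gfs2_glock_operations *glops =
 *		gfs2_glops_list[LM_TYPE_RGRP];
 */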