/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}
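
/*
 * Note on the AIL (active items list): buffers on a glock's gl_ail_list
 * have been written to the journal but not yet written back in place.
 * Before the glock can be granted to another node, each such buffer must
 * be written back and have a revoke issued for it, which is what the
 * helpers below implement.
 */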

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke in this pass
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
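
/*
 * A revoke is a journal record meaning "do not replay earlier journal
 * copies of this block".  __gfs2_ail_flush() walks the glock's AIL list
 * in reverse and queues one revoke per buffer, up to nr_revokes.  In the
 * fsync case some buffers may legitimately still be dirty, locked or
 * pinned and are skipped; otherwise such a buffer indicates an
 * inconsistency and triggers a withdraw via gfs2_ail_error().
 */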

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
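
/*
 * gfs2_ail_empty_gl() open-codes a minimal gfs2_trans_begin(), presumably
 * to avoid the allocation and blocking checks of the full version in this
 * path: it reserves one header block plus however many descriptor blocks
 * gfs2_struct2blk() says are needed for tr_revokes 64-bit block numbers,
 * and points current->journal_info at the on-stack struct gfs2_trans for
 * the duration.
 */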

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
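
/*
 * Sizing note for gfs2_ail_flush(): the first log descriptor block holds
 * (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revoke
 * entries and each continuation block another
 * (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64), so the
 * loop above rounds max_revokes up in whole-block steps until it covers
 * the outstanding gl_ail_count and the transaction reserves only full
 * blocks of revokes.
 */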

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
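
/*
 * Ordering in rgrp_go_sync() matters: the log is flushed first so all
 * journaled metadata for this rgrp is safe in the journal, the rgrp's
 * page range is then written back in place and waited on, and only then
 * is the AIL emptied.  The clone bitmaps are discarded last, since they
 * only describe state accumulated while this node held the lock in EX.
 */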

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = &sdp->sd_aspace;

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
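
/*
 * As with rgrp_go_sync(), the order in inode_go_sync() is journal first,
 * then in-place writeback (the metadata mapping, and the data mapping for
 * regular files), then emptying the AIL.  GLF_DIRTY is cleared a second
 * time at the end because writeback of the data mapping may redirty the
 * glock; smp_mb__before_clear_bit() orders the preceding writeback
 * against that final clear.
 */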

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
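
/*
 * Note the special case in inode_go_inval() for the rindex inode: its
 * contents describe the resource group layout, so invalidating it also
 * flushes the log and clears sd_rindex_uptodate, forcing the in-core
 * rgrp list to be rebuilt before the next allocation.
 */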

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}

/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on-disk information for the first time.
 */

static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}

static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
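
/*
 * gfs2_dinode_in() is the one place an on-disk (big-endian) dinode is
 * decoded into the in-core inode.  atime only ever moves forward here:
 * if the in-core atime is newer than the on-disk value, the on-disk
 * value is ignored, since in-core atime updates may not have been
 * written back yet.
 */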

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
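
/*
 * The GFS2_DIF_TRUNC_IN_PROG case above handles a truncate that was
 * interrupted, e.g. by a node crash.  Once this node holds the glock in
 * EX, the inode is queued on sd_trunc_list and the quota daemon is woken
 * via sd_quota_wait to complete the truncate in the background.
 */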

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 *
 */

static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;
	if (ip == NULL)
		return;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
}

/**
 * trans_go_sync - promote/demote the transaction glock
 * @gl: the glock
 *
 */

static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 * @gh: the holder
 *
 */

static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
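
/*
 * The transaction glock doubles as the freeze mechanism: trans_go_sync()
 * quiesces and shuts down the log when the glock is demoted, and
 * trans_go_xmote_bh() above restarts it on reacquisition, first checking
 * that the journal head carries GFS2_LOG_HEAD_UNMOUNT (i.e. the log was
 * shut down cleanly) before reinitialising the log pointers.
 */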

/**
 * trans_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}
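
/*
 * A remote demote-to-unlocked request on an iopen glock is how another
 * node signals that an inode's link count has hit zero and it should be
 * deallocated.  iopen_go_callback() responds by queueing gl_delete work
 * to try to evict the inode locally; the gl_lockref hold keeps the glock
 * alive until the work runs, and is dropped again if queueing fails.
 */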

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};