[GFS2] Introduce gfs2_remove_from_ail
fs/gfs2/log.c
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/crc32.h>
17 #include <linux/lm_interface.h>
18 #include <linux/delay.h>
19
20 #include "gfs2.h"
21 #include "incore.h"
22 #include "bmap.h"
23 #include "glock.h"
24 #include "log.h"
25 #include "lops.h"
26 #include "meta_io.h"
27 #include "util.h"
28 #include "dir.h"
29
30 #define PULL 1
31
32 /**
33 * gfs2_struct2blk - compute the number of log descriptor blocks needed
34 * @sdp: the filesystem
35 * @nstruct: the number of structures
36 * @ssize: the size of the structures
37 *
38 * Compute the number of log descriptor blocks needed to hold a certain number
39 * of structures of a certain size.
40 *
41 * Returns: the number of blocks needed (minimum is always 1)
42 */
43
44 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
45 unsigned int ssize)
46 {
47 unsigned int blks;
48 unsigned int first, second;
49
50 blks = 1;
51 first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
52
53 if (nstruct > first) {
54 second = (sdp->sd_sb.sb_bsize -
55 sizeof(struct gfs2_meta_header)) / ssize;
56 blks += DIV_ROUND_UP(nstruct - first, second);
57 }
58
59 return blks;
60 }
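/*
 * A rough illustration of the calculation above, using purely hypothetical
 * per-block capacities (the real values depend on the block size and the
 * sizes of struct gfs2_log_descriptor and struct gfs2_meta_header): if the
 * descriptor block had room for first = 500 entries and each continuation
 * block for second = 505 entries, then 1200 entries would need
 *
 *	1 + DIV_ROUND_UP(1200 - 500, 505) = 1 + 2 = 3 blocks.
 */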
61
62 /**
63 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
64 * @mapping: The associated mapping (may be NULL)
65 * @bd: The gfs2_bufdata to remove
66 *
67 * The log lock _must_ be held when calling this function
68 *
69 */
70
71 void gfs2_remove_from_ail(struct address_space *mapping, struct gfs2_bufdata *bd)
72 {
73 bd->bd_ail = NULL;
74 list_del(&bd->bd_ail_st_list);
75 list_del(&bd->bd_ail_gl_list);
76 atomic_dec(&bd->bd_gl->gl_ail_count);
77 if (mapping)
78 gfs2_meta_cache_flush(GFS2_I(mapping->host));
79 brelse(bd->bd_bh);
80 }
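/*
 * A typical caller (such as gfs2_ail2_empty_one() below) walks one of the
 * AIL lists with the log lock held and removes each entry in turn, roughly:
 *
 *	gfs2_log_lock(sdp);
 *	while (!list_empty(head)) {
 *		bd = list_entry(head->prev, struct gfs2_bufdata,
 *				bd_ail_st_list);
 *		gfs2_remove_from_ail(bd->bd_bh->b_page->mapping, bd);
 *	}
 *	gfs2_log_unlock(sdp);
 *
 * Passing a NULL mapping skips the gfs2_meta_cache_flush() step.
 */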
81
82 /**
83 * gfs2_ail1_start_one - Start I/O on a part of the AIL
84 * @sdp: the filesystem
85 * @ai: the AIL entry whose buffers should be started
86 *
87 */
88
89 static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
90 {
91 struct gfs2_bufdata *bd, *s;
92 struct buffer_head *bh;
93 int retry;
94
95 BUG_ON(!spin_is_locked(&sdp->sd_log_lock));
96
97 do {
98 retry = 0;
99
100 list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
101 bd_ail_st_list) {
102 bh = bd->bd_bh;
103
104 gfs2_assert(sdp, bd->bd_ail == ai);
105
106 if (!buffer_busy(bh)) {
107 if (!buffer_uptodate(bh)) {
108 gfs2_log_unlock(sdp);
109 gfs2_io_error_bh(sdp, bh);
110 gfs2_log_lock(sdp);
111 }
112 list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
113 continue;
114 }
115
116 if (!buffer_dirty(bh))
117 continue;
118
119 list_move(&bd->bd_ail_st_list, &ai->ai_ail1_list);
120
121 gfs2_log_unlock(sdp);
122 wait_on_buffer(bh);
123 ll_rw_block(WRITE, 1, &bh);
124 gfs2_log_lock(sdp);
125
126 retry = 1;
127 break;
128 }
129 } while (retry);
130 }
131
132 /**
133 * gfs2_ail1_empty_one - Check whether or not a trans in the AIL has been synced
134 * @sdp: the filesystem
135 * @ai: the AIL entry
136 * @flags: DIO_ALL to keep checking past buffers that are still busy
137 */
138
139 static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai, int flags)
140 {
141 struct gfs2_bufdata *bd, *s;
142 struct buffer_head *bh;
143
144 list_for_each_entry_safe_reverse(bd, s, &ai->ai_ail1_list,
145 bd_ail_st_list) {
146 bh = bd->bd_bh;
147
148 gfs2_assert(sdp, bd->bd_ail == ai);
149
150 if (buffer_busy(bh)) {
151 if (flags & DIO_ALL)
152 continue;
153 else
154 break;
155 }
156
157 if (!buffer_uptodate(bh))
158 gfs2_io_error_bh(sdp, bh);
159
160 list_move(&bd->bd_ail_st_list, &ai->ai_ail2_list);
161 }
162
163 return list_empty(&ai->ai_ail1_list);
164 }
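/*
 * Broadly (as the two helpers above and ail2_empty() below suggest):
 * ai_ail1_list holds buffers that still have to reach disk, while
 * ai_ail2_list holds buffers that have been written and are only waiting for
 * the log tail to move past them, at which point ail2_empty() drops them via
 * gfs2_remove_from_ail().
 */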
165
166 static void gfs2_ail1_start(struct gfs2_sbd *sdp, int flags)
167 {
168 struct list_head *head;
169 u64 sync_gen;
170 struct list_head *first;
171 struct gfs2_ail *first_ai, *ai, *tmp;
172 int done = 0;
173
174 gfs2_log_lock(sdp);
175 head = &sdp->sd_ail1_list;
176 if (list_empty(head)) {
177 gfs2_log_unlock(sdp);
178 return;
179 }
180 sync_gen = sdp->sd_ail_sync_gen++;
181
182 first = head->prev;
183 first_ai = list_entry(first, struct gfs2_ail, ai_list);
184 first_ai->ai_sync_gen = sync_gen;
185 gfs2_ail1_start_one(sdp, first_ai); /* This may drop log lock */
186
187 if (flags & DIO_ALL)
188 first = NULL;
189
190 while(!done) {
191 if (first && (head->prev != first ||
192 gfs2_ail1_empty_one(sdp, first_ai, 0)))
193 break;
194
195 done = 1;
196 list_for_each_entry_safe_reverse(ai, tmp, head, ai_list) {
197 if (ai->ai_sync_gen >= sync_gen)
198 continue;
199 ai->ai_sync_gen = sync_gen;
200 gfs2_ail1_start_one(sdp, ai); /* This may drop log lock */
201 done = 0;
202 break;
203 }
204 }
205
206 gfs2_log_unlock(sdp);
207 }
208
209 int gfs2_ail1_empty(struct gfs2_sbd *sdp, int flags)
210 {
211 struct gfs2_ail *ai, *s;
212 int ret;
213
214 gfs2_log_lock(sdp);
215
216 list_for_each_entry_safe_reverse(ai, s, &sdp->sd_ail1_list, ai_list) {
217 if (gfs2_ail1_empty_one(sdp, ai, flags))
218 list_move(&ai->ai_list, &sdp->sd_ail2_list);
219 else if (!(flags & DIO_ALL))
220 break;
221 }
222
223 ret = list_empty(&sdp->sd_ail1_list);
224
225 gfs2_log_unlock(sdp);
226
227 return ret;
228 }
229
230
231 /**
232 * gfs2_ail2_empty_one - Remove all entries on an AIL2 list from the AIL
233 * @sdp: the filesystem
234 * @ai: the AIL entry
235 *
236 */
237
238 static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
239 {
240 struct list_head *head = &ai->ai_ail2_list;
241 struct gfs2_bufdata *bd;
242
243 while (!list_empty(head)) {
244 bd = list_entry(head->prev, struct gfs2_bufdata,
245 bd_ail_st_list);
246 gfs2_assert(sdp, bd->bd_ail == ai);
247 gfs2_remove_from_ail(bd->bd_bh->b_page->mapping, bd);
248 }
249 }
250
251 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
252 {
253 struct gfs2_ail *ai, *safe;
254 unsigned int old_tail = sdp->sd_log_tail;
255 int wrap = (new_tail < old_tail);
256 int a, b, rm;
257
258 gfs2_log_lock(sdp);
259
260 list_for_each_entry_safe(ai, safe, &sdp->sd_ail2_list, ai_list) {
261 a = (old_tail <= ai->ai_first);
262 b = (ai->ai_first < new_tail);
263 rm = (wrap) ? (a || b) : (a && b);
264 if (!rm)
265 continue;
266
267 gfs2_ail2_empty_one(sdp, ai);
268 list_del(&ai->ai_list);
269 gfs2_assert_warn(sdp, list_empty(&ai->ai_ail1_list));
270 gfs2_assert_warn(sdp, list_empty(&ai->ai_ail2_list));
271 kfree(ai);
272 }
273
274 gfs2_log_unlock(sdp);
275 }
276
277 /**
278 * gfs2_log_reserve - Make a log reservation
279 * @sdp: The GFS2 superblock
280 * @blks: The number of blocks to reserve
281 *
282 * Note that we never give out the last few blocks of the journal. That's
283 * due to the fact that there is a small number of header blocks
284 * associated with each log flush. The exact number can't be known until
285 * flush time, so we ensure that we have just enough free blocks at all
286 * times to avoid running out during a log flush.
287 *
288 * Returns: errno
289 */
290
291 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
292 {
293 unsigned int try = 0;
294 unsigned reserved_blks = 6 * (4096 / sdp->sd_vfs->s_blocksize);
295
296 if (gfs2_assert_warn(sdp, blks) ||
297 gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
298 return -EINVAL;
299
300 mutex_lock(&sdp->sd_log_reserve_mutex);
301 gfs2_log_lock(sdp);
302 while(sdp->sd_log_blks_free <= (blks + reserved_blks)) {
303 gfs2_log_unlock(sdp);
304 gfs2_ail1_empty(sdp, 0);
305 gfs2_log_flush(sdp, NULL);
306
307 if (try++)
308 gfs2_ail1_start(sdp, 0);
309 gfs2_log_lock(sdp);
310 }
311 sdp->sd_log_blks_free -= blks;
312 gfs2_log_unlock(sdp);
313 mutex_unlock(&sdp->sd_log_reserve_mutex);
314
315 down_read(&sdp->sd_log_flush_lock);
316
317 return 0;
318 }
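/*
 * A successful reservation returns with sd_log_flush_lock held for read; the
 * lock is dropped again by gfs2_log_release() or, once the transaction has
 * been committed, by gfs2_log_commit().  A caller is therefore expected to
 * pair the calls, roughly as follows (illustrative only; the real callers
 * live in the transaction code):
 *
 *	error = gfs2_log_reserve(sdp, blks);
 *	if (error)
 *		return error;
 *	... build and commit the transaction ...
 *	gfs2_log_release(sdp, unused_blks);
 */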
319
320 /**
321 * gfs2_log_release - Release a given number of log blocks
322 * @sdp: The GFS2 superblock
323 * @blks: The number of blocks
324 *
325 */
326
327 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
328 {
329
330 gfs2_log_lock(sdp);
331 sdp->sd_log_blks_free += blks;
332 gfs2_assert_withdraw(sdp,
333 sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
334 gfs2_log_unlock(sdp);
335 up_read(&sdp->sd_log_flush_lock);
336 }
337
338 static u64 log_bmap(struct gfs2_sbd *sdp, unsigned int lbn)
339 {
340 struct inode *inode = sdp->sd_jdesc->jd_inode;
341 int error;
342 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
343
344 bh_map.b_size = 1 << inode->i_blkbits;
345 error = gfs2_block_map(inode, lbn, 0, &bh_map);
346 if (error || !bh_map.b_blocknr)
347 printk(KERN_INFO "error=%d, dbn=%llu lbn=%u\n", error,
348 (unsigned long long)bh_map.b_blocknr, lbn);
349 gfs2_assert_withdraw(sdp, !error && bh_map.b_blocknr);
350
351 return bh_map.b_blocknr;
352 }
353
354 /**
355 * log_distance - Compute distance between two journal blocks
356 * @sdp: The GFS2 superblock
357 * @newer: The most recent journal block of the pair
358 * @older: The older journal block of the pair
359 *
360 * Compute the distance (in the journal direction) between two
361 * blocks in the journal
362 *
363 * Returns: the distance in blocks
364 */
365
366 static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
367 unsigned int older)
368 {
369 int dist;
370
371 dist = newer - older;
372 if (dist < 0)
373 dist += sdp->sd_jdesc->jd_blocks;
374
375 return dist;
376 }
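/*
 * For example, with a hypothetical journal of jd_blocks = 8192: the distance
 * from older = 8100 to newer = 10 is 10 - 8100 = -8090, which wraps to
 * -8090 + 8192 = 102 blocks in the journal direction.
 */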
377
378 /**
379 * calc_reserved - Calculate the number of blocks to reserve when
380 * refunding a transaction's unused buffers.
381 * @sdp: The GFS2 superblock
382 *
383 * This is complex. We need to reserve room for all our currently used
384 * metadata buffers (e.g. normal file I/O rewriting file time stamps) and
385 * all our journaled data buffers for journaled files (e.g. files in the
386 * meta_fs like rindex, or files for which chattr +j was done.)
387 * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
388 * will count it as free space (sd_log_blks_free) and corruption will follow.
389 *
390 * We can have metadata bufs and jdata bufs in the same journal. So each
391 * type gets its own log header, for which we need to reserve a block.
392 * In fact, each type has the potential for needing more than one header
393 * in cases where we have more buffers than will fit on a journal page.
394 * Metadata journal entries take up half the space of journaled buffer entries.
395 * Thus, metadata entries have buf_limit (502) and journaled buffers have
396 * databuf_limit (251) before they cause a wrap around.
397 *
398 * Also, we need to reserve blocks for revoke journal entries and one for an
399 * overall header for the lot.
400 *
401 * Returns: the number of blocks reserved
402 */
403 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
404 {
405 unsigned int reserved = 0;
406 unsigned int mbuf_limit, metabufhdrs_needed;
407 unsigned int dbuf_limit, databufhdrs_needed;
408 unsigned int revokes = 0;
409
410 mbuf_limit = buf_limit(sdp);
411 metabufhdrs_needed = (sdp->sd_log_commited_buf +
412 (mbuf_limit - 1)) / mbuf_limit;
413 dbuf_limit = databuf_limit(sdp);
414 databufhdrs_needed = (sdp->sd_log_commited_databuf +
415 (dbuf_limit - 1)) / dbuf_limit;
416
417 if (sdp->sd_log_commited_revoke)
418 revokes = gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
419 sizeof(u64));
420
421 reserved = sdp->sd_log_commited_buf + metabufhdrs_needed +
422 sdp->sd_log_commited_databuf + databufhdrs_needed +
423 revokes;
424 /* One for the overall header */
425 if (reserved)
426 reserved++;
427 return reserved;
428 }
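/*
 * A worked example using the limits quoted above (which assume a 4K block
 * size; the figures are illustrative only): committing 1000 metadata buffers
 * and 300 journaled data buffers with no revokes reserves
 *
 *	1000 + DIV_ROUND_UP(1000, 502)	(= 2 metadata headers)
 *	+ 300 + DIV_ROUND_UP(300, 251)	(= 2 databuf headers)
 *	+ 1				(overall log header)
 *
 * i.e. 1305 blocks in total.
 */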
429
430 static unsigned int current_tail(struct gfs2_sbd *sdp)
431 {
432 struct gfs2_ail *ai;
433 unsigned int tail;
434
435 gfs2_log_lock(sdp);
436
437 if (list_empty(&sdp->sd_ail1_list)) {
438 tail = sdp->sd_log_head;
439 } else {
440 ai = list_entry(sdp->sd_ail1_list.prev, struct gfs2_ail, ai_list);
441 tail = ai->ai_first;
442 }
443
444 gfs2_log_unlock(sdp);
445
446 return tail;
447 }
448
449 static inline void log_incr_head(struct gfs2_sbd *sdp)
450 {
451 if (sdp->sd_log_flush_head == sdp->sd_log_tail)
452 gfs2_assert_withdraw(sdp, sdp->sd_log_flush_head == sdp->sd_log_head);
453
454 if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) {
455 sdp->sd_log_flush_head = 0;
456 sdp->sd_log_flush_wrapped = 1;
457 }
458 }
459
460 /**
461 * gfs2_log_get_buf - Get and initialize a buffer to use for log control data
462 * @sdp: The GFS2 superblock
463 *
464 * Returns: the buffer_head
465 */
466
467 struct buffer_head *gfs2_log_get_buf(struct gfs2_sbd *sdp)
468 {
469 u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
470 struct gfs2_log_buf *lb;
471 struct buffer_head *bh;
472
473 lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
474 list_add(&lb->lb_list, &sdp->sd_log_flush_list);
475
476 bh = lb->lb_bh = sb_getblk(sdp->sd_vfs, blkno);
477 lock_buffer(bh);
478 memset(bh->b_data, 0, bh->b_size);
479 set_buffer_uptodate(bh);
480 clear_buffer_dirty(bh);
481 unlock_buffer(bh);
482
483 log_incr_head(sdp);
484
485 return bh;
486 }
487
488 /**
489 * gfs2_log_fake_buf - Build a fake buffer head to write metadata buffer to log
490 * @sdp: the filesystem
491 * @real: the buffer_head holding the data to be written to the log
492 *
493 * Returns: the fake buffer_head to use for the log write
494 */
495
496 struct buffer_head *gfs2_log_fake_buf(struct gfs2_sbd *sdp,
497 struct buffer_head *real)
498 {
499 u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
500 struct gfs2_log_buf *lb;
501 struct buffer_head *bh;
502
503 lb = kzalloc(sizeof(struct gfs2_log_buf), GFP_NOFS | __GFP_NOFAIL);
504 list_add(&lb->lb_list, &sdp->sd_log_flush_list);
505 lb->lb_real = real;
506
507 bh = lb->lb_bh = alloc_buffer_head(GFP_NOFS | __GFP_NOFAIL);
508 atomic_set(&bh->b_count, 1);
509 bh->b_state = (1 << BH_Mapped) | (1 << BH_Uptodate);
510 set_bh_page(bh, real->b_page, bh_offset(real));
511 bh->b_blocknr = blkno;
512 bh->b_size = sdp->sd_sb.sb_bsize;
513 bh->b_bdev = sdp->sd_vfs->s_bdev;
514
515 log_incr_head(sdp);
516
517 return bh;
518 }
519
520 static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
521 {
522 unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
523
524 ail2_empty(sdp, new_tail);
525
526 gfs2_log_lock(sdp);
527 sdp->sd_log_blks_free += dist;
528 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <= sdp->sd_jdesc->jd_blocks);
529 gfs2_log_unlock(sdp);
530
531 sdp->sd_log_tail = new_tail;
532 }
533
534 /**
535 * log_write_header - Write a log header into the journal
536 * @sdp: The GFS2 superblock
537 * @flags: log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
538 * @pull: non-zero when this header write is expected to pull the log tail
539 */
540
541 static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
542 {
543 u64 blkno = log_bmap(sdp, sdp->sd_log_flush_head);
544 struct buffer_head *bh;
545 struct gfs2_log_header *lh;
546 unsigned int tail;
547 u32 hash;
548
549 bh = sb_getblk(sdp->sd_vfs, blkno);
550 lock_buffer(bh);
551 memset(bh->b_data, 0, bh->b_size);
552 set_buffer_uptodate(bh);
553 clear_buffer_dirty(bh);
554 unlock_buffer(bh);
555
556 gfs2_ail1_empty(sdp, 0);
557 tail = current_tail(sdp);
558
559 lh = (struct gfs2_log_header *)bh->b_data;
560 memset(lh, 0, sizeof(struct gfs2_log_header));
561 lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
562 lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
563 lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
564 lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
565 lh->lh_flags = cpu_to_be32(flags);
566 lh->lh_tail = cpu_to_be32(tail);
567 lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
568 hash = gfs2_disk_hash(bh->b_data, sizeof(struct gfs2_log_header));
569 lh->lh_hash = cpu_to_be32(hash);
570
571 set_buffer_dirty(bh);
572 if (sync_dirty_buffer(bh))
573 gfs2_io_error_bh(sdp, bh);
574 brelse(bh);
575
576 if (sdp->sd_log_tail != tail)
577 log_pull_tail(sdp, tail);
578 else
579 gfs2_assert_withdraw(sdp, !pull);
580
581 sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
582 log_incr_head(sdp);
583 }
584
585 static void log_flush_commit(struct gfs2_sbd *sdp)
586 {
587 struct list_head *head = &sdp->sd_log_flush_list;
588 struct gfs2_log_buf *lb;
589 struct buffer_head *bh;
590 int flushcount = 0;
591
592 while (!list_empty(head)) {
593 lb = list_entry(head->next, struct gfs2_log_buf, lb_list);
594 list_del(&lb->lb_list);
595 bh = lb->lb_bh;
596
597 wait_on_buffer(bh);
598 if (!buffer_uptodate(bh))
599 gfs2_io_error_bh(sdp, bh);
600 if (lb->lb_real) {
601 while (atomic_read(&bh->b_count) != 1) /* Grrrr... */
602 schedule();
603 free_buffer_head(bh);
604 } else
605 brelse(bh);
606 kfree(lb);
607 flushcount++;
608 }
609
610 /* If nothing was journaled, the header is unplanned and unwanted. */
611 if (flushcount) {
612 log_write_header(sdp, 0, 0);
613 } else {
614 unsigned int tail;
615 tail = current_tail(sdp);
616
617 gfs2_ail1_empty(sdp, 0);
618 if (sdp->sd_log_tail != tail)
619 log_pull_tail(sdp, tail);
620 }
621 }
622
623 /**
624 * gfs2_log_flush - flush incore transaction(s)
625 * @sdp: the filesystem
626 * @gl: The glock structure to flush. If NULL, flush the whole incore log
627 *
628 */
629
630 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
631 {
632 struct gfs2_ail *ai;
633
634 down_write(&sdp->sd_log_flush_lock);
635
636 if (gl) {
637 gfs2_log_lock(sdp);
638 if (list_empty(&gl->gl_le.le_list)) {
639 gfs2_log_unlock(sdp);
640 up_write(&sdp->sd_log_flush_lock);
641 return;
642 }
643 gfs2_log_unlock(sdp);
644 }
645
646 ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL);
647 INIT_LIST_HEAD(&ai->ai_ail1_list);
648 INIT_LIST_HEAD(&ai->ai_ail2_list);
649
650 gfs2_assert_withdraw(sdp,
651 sdp->sd_log_num_buf + sdp->sd_log_num_jdata ==
652 sdp->sd_log_commited_buf +
653 sdp->sd_log_commited_databuf);
654 gfs2_assert_withdraw(sdp,
655 sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
656
657 sdp->sd_log_flush_head = sdp->sd_log_head;
658 sdp->sd_log_flush_wrapped = 0;
659 ai->ai_first = sdp->sd_log_flush_head;
660
661 lops_before_commit(sdp);
662 if (!list_empty(&sdp->sd_log_flush_list))
663 log_flush_commit(sdp);
664 else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
665 gfs2_log_lock(sdp);
666 sdp->sd_log_blks_free--; /* Adjust for unreserved buffer */
667 gfs2_log_unlock(sdp);
668 log_write_header(sdp, 0, PULL);
669 }
670 lops_after_commit(sdp, ai);
671
672 gfs2_log_lock(sdp);
673 sdp->sd_log_head = sdp->sd_log_flush_head;
674 sdp->sd_log_blks_reserved = 0;
675 sdp->sd_log_commited_buf = 0;
676 sdp->sd_log_commited_databuf = 0;
677 sdp->sd_log_commited_revoke = 0;
678
679 if (!list_empty(&ai->ai_ail1_list)) {
680 list_add(&ai->ai_list, &sdp->sd_ail1_list);
681 ai = NULL;
682 }
683 gfs2_log_unlock(sdp);
684
685 sdp->sd_vfs->s_dirt = 0;
686 up_write(&sdp->sd_log_flush_lock);
687
688 kfree(ai);
689 }
690
691 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
692 {
693 unsigned int reserved;
694 unsigned int old;
695
696 gfs2_log_lock(sdp);
697
698 sdp->sd_log_commited_buf += tr->tr_num_buf_new - tr->tr_num_buf_rm;
699 sdp->sd_log_commited_databuf += tr->tr_num_databuf_new -
700 tr->tr_num_databuf_rm;
701 gfs2_assert_withdraw(sdp, (((int)sdp->sd_log_commited_buf) >= 0) ||
702 (((int)sdp->sd_log_commited_databuf) >= 0));
703 sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
704 gfs2_assert_withdraw(sdp, ((int)sdp->sd_log_commited_revoke) >= 0);
705 reserved = calc_reserved(sdp);
706 old = sdp->sd_log_blks_free;
707 sdp->sd_log_blks_free += tr->tr_reserved -
708 (reserved - sdp->sd_log_blks_reserved);
709
710 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free >= old);
711 gfs2_assert_withdraw(sdp, sdp->sd_log_blks_free <=
712 sdp->sd_jdesc->jd_blocks);
713
714 sdp->sd_log_blks_reserved = reserved;
715
716 gfs2_log_unlock(sdp);
717 }
718
719 /**
720 * gfs2_log_commit - Commit a transaction to the log
721 * @sdp: the filesystem
722 * @tr: the transaction
723 *
724 * Returns: errno
725 */
726
727 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
728 {
729 log_refund(sdp, tr);
730 lops_incore_commit(sdp, tr);
731
732 sdp->sd_vfs->s_dirt = 1;
733 up_read(&sdp->sd_log_flush_lock);
734
735 gfs2_log_lock(sdp);
736 if (sdp->sd_log_num_buf > gfs2_tune_get(sdp, gt_incore_log_blocks))
737 wake_up_process(sdp->sd_logd_process);
738 gfs2_log_unlock(sdp);
739 }
740
741 /**
742 * gfs2_log_shutdown - write a shutdown header into a journal
743 * @sdp: the filesystem
744 *
745 */
746
747 void gfs2_log_shutdown(struct gfs2_sbd *sdp)
748 {
749 down_write(&sdp->sd_log_flush_lock);
750
751 gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
752 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_gl);
753 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_buf);
754 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_jdata);
755 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
756 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_rg);
757 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_databuf);
758 gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
759
760 sdp->sd_log_flush_head = sdp->sd_log_head;
761 sdp->sd_log_flush_wrapped = 0;
762
763 log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT,
764 (sdp->sd_log_tail == current_tail(sdp)) ? 0 : PULL);
765
766 gfs2_assert_warn(sdp, sdp->sd_log_blks_free == sdp->sd_jdesc->jd_blocks);
767 gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
768 gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
769
770 sdp->sd_log_head = sdp->sd_log_flush_head;
771 sdp->sd_log_tail = sdp->sd_log_head;
772
773 up_write(&sdp->sd_log_flush_lock);
774 }
775
776
777 /**
778 * gfs2_meta_syncfs - sync all the buffers in a filesystem
779 * @sdp: the filesystem
780 *
781 */
782
783 void gfs2_meta_syncfs(struct gfs2_sbd *sdp)
784 {
785 gfs2_log_flush(sdp, NULL);
786 for (;;) {
787 gfs2_ail1_start(sdp, DIO_ALL);
788 if (gfs2_ail1_empty(sdp, DIO_ALL))
789 break;
790 msleep(10);
791 }
792 }
793