reiserfs: fix memset byte count during resize
[deliverable/linux.git] / fs / reiserfs / journal.c
/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                   -- Note, if you call this as an immediate flush
**                      from within kupdate, it will ignore the immediate flag
*/

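/*
 * Illustrative caller sketch (editorial addition, not part of the original
 * file): the usual sequence built from the entry points described above.
 * The exact prototypes live in reiserfs_fs.h; the block count of 1 and the
 * buffer manipulation shown here are only placeholders.
 *
 *	struct reiserfs_transaction_handle th;
 *	int err;
 *
 *	err = journal_begin(&th, sb, 1);
 *	if (err)
 *		return err;
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	... modify bh->b_data under the transaction ...
 *	journal_mark_dirty(&th, sb, bh);
 *	err = journal_end(&th, sb, 1);
 */
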
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))

/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;

#define JOURNAL_TRANS_HALF 1018	/* must be correct to keep the desc and commit
				   structs at 4k */
#define BUFNR 64		/*read ahead */

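/*
 * Editorial note on the value above: with a 4k journal block, the
 * description/commit block holds three 32-bit header fields plus the array
 * of logged block numbers, with 12 bytes left at the end of the block for
 * the magic (compare journal_trans_half() in reiserfs_fs.h), so
 * (4096 - 3*4 - 12) / 4 = 1018 block numbers fit -- hence 1018 here.
 */
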
/* cnode stat bits.  Move these into reiserfs_fs.h */

#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5

/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */

/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */

static int do_journal_end(struct reiserfs_transaction_handle *,
			  struct super_block *, unsigned long nblocks,
			  int flags);
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall);
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall);
static int can_dirty(struct reiserfs_journal_cnode *cn);
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
			       struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl);
static void flush_async_commits(struct work_struct *work);
static void queue_log_writer(struct super_block *s);

/* values for join in do_journal_begin_r */
enum {
	JBEGIN_REG = 0,		/* regular journal begin */
	JBEGIN_JOIN = 1,	/* join the running transaction if at all possible */
	JBEGIN_ABORT = 2,	/* called from cleanup code, ignores aborted flag */
};

static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *p_s_sb,
			      unsigned long nblocks, int join);

static void init_journal_hash(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	memset(journal->j_hash_table, 0,
	       JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
}

/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
{
	if (bh) {
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
	}
	return 0;
}

static void disable_barrier(struct super_block *s)
{
	REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
	printk("reiserfs: disabling flush barriers on %s\n",
	       reiserfs_bdevname(s));
}

static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
							 *p_s_sb)
{
	struct reiserfs_bitmap_node *bn;
	static int id;

	bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
	if (!bn) {
		return NULL;
	}
	bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
	if (!bn->data) {
		kfree(bn);
		return NULL;
	}
	bn->id = id++;
	INIT_LIST_HEAD(&bn->list);
	return bn;
}

static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	struct list_head *entry = journal->j_bitmap_nodes.next;

	journal->j_used_bitmap_nodes++;
      repeat:

	if (entry != &journal->j_bitmap_nodes) {
		bn = list_entry(entry, struct reiserfs_bitmap_node, list);
		list_del(entry);
		memset(bn->data, 0, p_s_sb->s_blocksize);
		journal->j_free_bitmap_nodes--;
		return bn;
	}
	bn = allocate_bitmap_node(p_s_sb);
	if (!bn) {
		yield();
		goto repeat;
	}
	return bn;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
				    struct reiserfs_bitmap_node *bn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	journal->j_used_bitmap_nodes--;
	if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
		kfree(bn->data);
		kfree(bn);
	} else {
		list_add(&bn->list, &journal->j_bitmap_nodes);
		journal->j_free_bitmap_nodes++;
	}
}

static void allocate_bitmap_nodes(struct super_block *p_s_sb)
{
	int i;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_bitmap_node *bn = NULL;
	for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
		bn = allocate_bitmap_node(p_s_sb);
		if (bn) {
			list_add(&bn->list, &journal->j_bitmap_nodes);
			journal->j_free_bitmap_nodes++;
		} else {
			break;	// this is ok, we'll try again when more are needed
		}
	}
}

static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
				  struct reiserfs_list_bitmap *jb)
{
	int bmap_nr = block / (p_s_sb->s_blocksize << 3);
	int bit_nr = block % (p_s_sb->s_blocksize << 3);

	if (!jb->bitmaps[bmap_nr]) {
		jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
	}
	set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
	return 0;
}
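
/*
 * Worked example for the mapping above (editorial note): with a 4k
 * blocksize each on-disk bitmap covers s_blocksize << 3 = 32768 blocks,
 * so block 100000 lands in bmap_nr = 100000 / 32768 = 3 at
 * bit_nr = 100000 % 32768 = 1696 within that bitmap node.
 */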

static void cleanup_bitmap_list(struct super_block *p_s_sb,
				struct reiserfs_list_bitmap *jb)
{
	int i;
	if (jb->bitmaps == NULL)
		return;

	for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
		if (jb->bitmaps[i]) {
			free_bitmap_node(p_s_sb, jb->bitmaps[i]);
			jb->bitmaps[i] = NULL;
		}
	}
}

/*
** only call this on FS unmount.
*/
static int free_list_bitmaps(struct super_block *p_s_sb,
			     struct reiserfs_list_bitmap *jb_array)
{
	int i;
	struct reiserfs_list_bitmap *jb;
	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		cleanup_bitmap_list(p_s_sb, jb);
		vfree(jb->bitmaps);
		jb->bitmaps = NULL;
	}
	return 0;
}

static int free_bitmap_nodes(struct super_block *p_s_sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct list_head *next = journal->j_bitmap_nodes.next;
	struct reiserfs_bitmap_node *bn;

	while (next != &journal->j_bitmap_nodes) {
		bn = list_entry(next, struct reiserfs_bitmap_node, list);
		list_del(next);
		kfree(bn->data);
		kfree(bn);
		next = journal->j_bitmap_nodes.next;
		journal->j_free_bitmap_nodes--;
	}

	return 0;
}

/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
				   struct reiserfs_list_bitmap *jb_array,
				   int bmap_nr)
{
	int i;
	int failed = 0;
	struct reiserfs_list_bitmap *jb;
	int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);

	for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
		jb = jb_array + i;
		jb->journal_list = NULL;
		jb->bitmaps = vmalloc(mem);
		if (!jb->bitmaps) {
			reiserfs_warning(p_s_sb,
					 "clm-2000, unable to allocate bitmaps for journal lists");
			failed = 1;
			break;
		}
		memset(jb->bitmaps, 0, mem);
	}
	if (failed) {
		free_list_bitmaps(p_s_sb, jb_array);
		return -1;
	}
	return 0;
}

/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
						    struct reiserfs_journal_list
						    *jl)
{
	int i, j;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_list_bitmap *jb = NULL;

	for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
		i = journal->j_list_bitmap_index;
		journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
		jb = journal->j_list_bitmap + i;
		if (journal->j_list_bitmap[i].journal_list) {
			flush_commit_list(p_s_sb,
					  journal->j_list_bitmap[i].
					  journal_list, 1);
			if (!journal->j_list_bitmap[i].journal_list) {
				break;
			}
		} else {
			break;
		}
	}
	if (jb->journal_list) {	/* double check to make sure it flushed correctly */
		return NULL;
	}
	jb->journal_list = jl;
	return jb;
}

/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
{
	struct reiserfs_journal_cnode *head;
	int i;
	if (num_cnodes <= 0) {
		return NULL;
	}
	head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
	if (!head) {
		return NULL;
	}
	memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
	head[0].prev = NULL;
	head[0].next = head + 1;
	for (i = 1; i < num_cnodes; i++) {
		head[i].prev = head + (i - 1);
		head[i].next = head + (i + 1);	/* if last one, overwrite it after the if */
	}
	head[num_cnodes - 1].next = NULL;
	return head;
}
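
/*
 * Layout sketch for the array built above (editorial note, assuming
 * num_cnodes == 3):
 *
 *	head[0]: prev = NULL,     next = &head[1]
 *	head[1]: prev = &head[0], next = &head[2]
 *	head[2]: prev = &head[1], next = NULL
 *
 * get_cnode() below pops nodes off the front of this free list and
 * free_cnode() pushes them back on, so both operations stay O(1).
 */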

/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "get_cnode");

	if (journal->j_cnode_free <= 0) {
		return NULL;
	}
	journal->j_cnode_used++;
	journal->j_cnode_free--;
	cn = journal->j_cnode_free_list;
	if (!cn) {
		return cn;
	}
	if (cn->next) {
		cn->next->prev = NULL;
	}
	journal->j_cnode_free_list = cn->next;
	memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
	return cn;
}

/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb,
		       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	reiserfs_check_lock_depth(p_s_sb, "free_cnode");

	journal->j_cnode_used--;
	journal->j_cnode_free++;
	/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
	cn->next = journal->j_cnode_free_list;
	if (journal->j_cnode_free_list) {
		journal->j_cnode_free_list->prev = cn;
	}
	cn->prev = NULL;	/* not needed with the memset, but I might kill the memset, and forget to do this */
	journal->j_cnode_free_list = cn;
}

static void clear_prepared_bits(struct buffer_head *bh)
{
	clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
}

/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
#ifdef CONFIG_SMP
	if (current->lock_depth < 0) {
		reiserfs_panic(sb, "%s called without kernel lock held",
			       caller);
	}
#else
	;
#endif
}

/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
								  super_block
								  *sb,
								  struct
								  reiserfs_journal_cnode
								  **table,
								  long bl)
{
	struct reiserfs_journal_cnode *cn;
	cn = journal_hash(table, sb, bl);
	while (cn) {
		if (cn->blocknr == bl && cn->sb == sb)
			return cn;
		cn = cn->hnext;
	}
	return (struct reiserfs_journal_cnode *)0;
}

/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
**
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
			int bmap_nr, int bit_nr, int search_all,
			b_blocknr_t * next_zero_bit)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_list_bitmap *jb;
	int i;
	unsigned long bl;

	*next_zero_bit = 0;	/* always start this at zero. */

	PROC_INFO_INC(p_s_sb, journal.in_journal);
	/* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
	 ** if we crash before the transaction that freed it commits,  this transaction won't
	 ** have committed either, and the block will never be written
	 */
	if (search_all) {
		for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
			PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
			jb = journal->j_list_bitmap + i;
			if (jb->journal_list && jb->bitmaps[bmap_nr] &&
			    test_bit(bit_nr,
				     (unsigned long *)jb->bitmaps[bmap_nr]->
				     data)) {
				*next_zero_bit =
				    find_next_zero_bit((unsigned long *)
						       (jb->bitmaps[bmap_nr]->
							data),
						       p_s_sb->s_blocksize << 3,
						       bit_nr + 1);
				return 1;
			}
		}
	}

	bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
	/* is it in any old transactions? */
	if (search_all
	    && (cn =
		get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
		return 1;
	}

	/* is it in the current transaction.  This should never happen */
	if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
		BUG();
		return 1;
	}

	PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
	/* safe for reuse */
	return 0;
}

/* insert cn into table
*/
static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
				       struct reiserfs_journal_cnode *cn)
{
	struct reiserfs_journal_cnode *cn_orig;

	cn_orig = journal_hash(table, cn->sb, cn->blocknr);
	cn->hnext = cn_orig;
	cn->hprev = NULL;
	if (cn_orig) {
		cn_orig->hprev = cn;
	}
	journal_hash(table, cn->sb, cn->blocknr) = cn;
}
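
/*
 * Usage sketch for the hash helpers (editorial note): journal_hash() is a
 * macro from reiserfs_fs.h that expands to an lvalue slot in the table, so
 * the same expression serves lookups and inserts.  A typical pairing, as
 * used when a buffer is logged, looks roughly like:
 *
 *	cn = get_cnode(p_s_sb);
 *	cn->bh = bh;
 *	cn->blocknr = bh->b_blocknr;
 *	cn->sb = p_s_sb;
 *	insert_journal_hash(journal->j_hash_table, cn);
 *	...
 *	cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table,
 *				  bh->b_blocknr);
 */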

/* lock the current transaction */
static inline void lock_journal(struct super_block *p_s_sb)
{
	PROC_INFO_INC(p_s_sb, journal.lock_journal);
	down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
static inline void unlock_journal(struct super_block *p_s_sb)
{
	up(&SB_JOURNAL(p_s_sb)->j_lock);
}

static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
	jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	if (jl->j_refcount < 1) {
		reiserfs_panic(s, "trans id %lu, refcount at %d",
			       jl->j_trans_id, jl->j_refcount);
	}
	if (--jl->j_refcount == 0)
		kfree(jl);
}

/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
					   struct reiserfs_journal_list *jl)
{

	struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
	if (jb) {
		cleanup_bitmap_list(p_s_sb, jb);
	}
	jl->j_list_bitmap->journal_list = NULL;
	jl->j_list_bitmap = NULL;
}

static int journal_list_still_alive(struct super_block *s,
				    unsigned long trans_id)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct list_head *entry = &journal->j_journal_list;
	struct reiserfs_journal_list *jl;

	if (!list_empty(entry)) {
		jl = JOURNAL_LIST_ENTRY(entry->next);
		if (jl->j_trans_id <= trans_id) {
			return 1;
		}
	}
	return 0;
}

/*
 * If page->mapping was null, we failed to truncate this page for
 * some reason.  Most likely because it was truncated after being
 * logged via data=journal.
 *
 * This does a check to see if the buffer belongs to one of these
 * lost pages before doing the final put_bh.  If page->mapping was
 * null, it tries to free buffers on the page, which should make the
 * final page_cache_release drop the page from the lru.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	if (!page->mapping && !TestSetPageLocked(page)) {
		page_cache_get(page);
		put_bh(bh);
		if (!page->mapping)
			try_to_free_buffers(page);
		unlock_page(page);
		page_cache_release(page);
	} else {
		put_bh(bh);
	}
}

static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	char b[BDEVNAME_SIZE];

	if (buffer_journaled(bh)) {
		reiserfs_warning(NULL,
				 "clm-2084: pinned buffer %lu:%s sent to disk",
				 bh->b_blocknr, bdevname(bh->b_bdev, b));
	}
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);

	unlock_buffer(bh);
	release_buffer_page(bh);
}

static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);
}

static void submit_logged_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_buffer_io_sync;
	clear_buffer_journal_new(bh);
	clear_buffer_dirty(bh);
	if (!test_clear_buffer_journal_test(bh))
		BUG();
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static void submit_ordered_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	submit_bh(WRITE, bh);
}

static int submit_barrier_buffer(struct buffer_head *bh)
{
	get_bh(bh);
	bh->b_end_io = reiserfs_end_ordered_io;
	clear_buffer_dirty(bh);
	if (!buffer_uptodate(bh))
		BUG();
	return submit_bh(WRITE_BARRIER, bh);
}

static void check_barrier_completion(struct super_block *s,
				     struct buffer_head *bh)
{
	if (buffer_eopnotsupp(bh)) {
		clear_buffer_eopnotsupp(bh);
		disable_barrier(s);
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}
}

#define CHUNK_SIZE 32
struct buffer_chunk {
	struct buffer_head *bh[CHUNK_SIZE];
	int nr;
};

static void write_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_logged_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static void write_ordered_chunk(struct buffer_chunk *chunk)
{
	int i;
	get_fs_excl();
	for (i = 0; i < chunk->nr; i++) {
		submit_ordered_buffer(chunk->bh[i]);
	}
	chunk->nr = 0;
	put_fs_excl();
}

static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
			spinlock_t * lock, void (fn) (struct buffer_chunk *))
{
	int ret = 0;
	BUG_ON(chunk->nr >= CHUNK_SIZE);
	chunk->bh[chunk->nr++] = bh;
	if (chunk->nr >= CHUNK_SIZE) {
		ret = 1;
		if (lock)
			spin_unlock(lock);
		fn(chunk);
		if (lock)
			spin_lock(lock);
	}
	return ret;
}
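
/*
 * Caller pattern for the chunk helpers above (editorial sketch; the
 * next_buffer() loop stands in for the real iteration done by
 * write_ordered_buffers() and write_one_transaction() below):
 *
 *	struct buffer_chunk chunk;
 *	chunk.nr = 0;
 *	while ((bh = next_buffer()) != NULL) {
 *		if (buffer_dirty(bh))
 *			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
 *	}
 *	if (chunk.nr)
 *		write_ordered_chunk(&chunk);
 *
 * Note that add_to_chunk() drops and retakes the spinlock around the flush
 * callback when the chunk fills, so callers must tolerate the lock being
 * released in the middle of their loop.
 */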

static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void)
{
	struct reiserfs_jh *jh;
	while (1) {
		jh = kmalloc(sizeof(*jh), GFP_NOFS);
		if (jh) {
			atomic_inc(&nr_reiserfs_jh);
			return jh;
		}
		yield();
	}
}

/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh)
{
	struct reiserfs_jh *jh;

	jh = bh->b_private;
	if (jh) {
		bh->b_private = NULL;
		jh->bh = NULL;
		list_del_init(&jh->list);
		kfree(jh);
		if (atomic_read(&nr_reiserfs_jh) <= 0)
			BUG();
		atomic_dec(&nr_reiserfs_jh);
		put_bh(bh);
	}
}

static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
			   int tail)
{
	struct reiserfs_jh *jh;

	if (bh->b_private) {
		spin_lock(&j->j_dirty_buffers_lock);
		if (!bh->b_private) {
			spin_unlock(&j->j_dirty_buffers_lock);
			goto no_jh;
		}
		jh = bh->b_private;
		list_del_init(&jh->list);
	} else {
	      no_jh:
		get_bh(bh);
		jh = alloc_jh();
		spin_lock(&j->j_dirty_buffers_lock);
		/* buffer must be locked for __add_jh, should be able to have
		 * two adds at the same time
		 */
		BUG_ON(bh->b_private);
		jh->bh = bh;
		bh->b_private = jh;
	}
	jh->jl = j->j_current_jl;
	if (tail)
		list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
	else {
		list_add_tail(&jh->list, &jh->jl->j_bh_list);
	}
	spin_unlock(&j->j_dirty_buffers_lock);
	return 0;
}

int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
{
	return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}

#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t * lock,
				 struct reiserfs_journal *j,
				 struct reiserfs_journal_list *jl,
				 struct list_head *list)
{
	struct buffer_head *bh;
	struct reiserfs_jh *jh;
	int ret = j->j_errno;
	struct buffer_chunk chunk;
	struct list_head tmp;
	INIT_LIST_HEAD(&tmp);

	chunk.nr = 0;
	spin_lock(lock);
	while (!list_empty(list)) {
		jh = JH_ENTRY(list->next);
		bh = jh->bh;
		get_bh(bh);
		if (test_set_buffer_locked(bh)) {
			if (!buffer_dirty(bh)) {
				list_move(&jh->list, &tmp);
				goto loop_next;
			}
			spin_unlock(lock);
			if (chunk.nr)
				write_ordered_chunk(&chunk);
			wait_on_buffer(bh);
			cond_resched();
			spin_lock(lock);
			goto loop_next;
		}
		/* in theory, dirty non-uptodate buffers should never get here,
		 * but the upper layer io error paths still have a few quirks.
		 * Handle them here as gracefully as we can
		 */
		if (!buffer_uptodate(bh) && buffer_dirty(bh)) {
			clear_buffer_dirty(bh);
			ret = -EIO;
		}
		if (buffer_dirty(bh)) {
			list_move(&jh->list, &tmp);
			add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
		} else {
			reiserfs_free_jh(bh);
			unlock_buffer(bh);
		}
	      loop_next:
		put_bh(bh);
		cond_resched_lock(lock);
	}
	if (chunk.nr) {
		spin_unlock(lock);
		write_ordered_chunk(&chunk);
		spin_lock(lock);
	}
	while (!list_empty(&tmp)) {
		jh = JH_ENTRY(tmp.prev);
		bh = jh->bh;
		get_bh(bh);
		reiserfs_free_jh(bh);

		if (buffer_locked(bh)) {
			spin_unlock(lock);
			wait_on_buffer(bh);
			spin_lock(lock);
		}
		if (!buffer_uptodate(bh)) {
			ret = -EIO;
		}
		/* ugly interaction with invalidatepage here.
		 * reiserfs_invalidate_page will pin any buffer that has a valid
		 * journal head from an older transaction.  If someone else sets
		 * our buffer dirty after we write it in the first loop, and
		 * then someone truncates the page away, nobody will ever write
		 * the buffer.  We're safe if we write the page one last time
		 * after freeing the journal header.
		 */
		if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
			spin_unlock(lock);
			ll_rw_block(WRITE, 1, &bh);
			spin_lock(lock);
		}
		put_bh(bh);
		cond_resched_lock(lock);
	}
	spin_unlock(lock);
	return ret;
}
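
/*
 * Editorial summary of the routine above: the first loop batches dirty
 * ordered buffers into chunks and parks everything it still has to wait for
 * on the private "tmp" list; the second loop waits for those writes, frees
 * the journal heads, and re-submits any buffer that was redirtied on a page
 * whose mapping has since gone away, so truncated data=ordered pages still
 * reach disk.
 */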
1da177e4 929
bd4c625c
LT
930static int flush_older_commits(struct super_block *s,
931 struct reiserfs_journal_list *jl)
932{
933 struct reiserfs_journal *journal = SB_JOURNAL(s);
934 struct reiserfs_journal_list *other_jl;
935 struct reiserfs_journal_list *first_jl;
936 struct list_head *entry;
937 unsigned long trans_id = jl->j_trans_id;
938 unsigned long other_trans_id;
939 unsigned long first_trans_id;
940
941 find_first:
942 /*
943 * first we walk backwards to find the oldest uncommitted transation
944 */
945 first_jl = jl;
946 entry = jl->j_list.prev;
947 while (1) {
948 other_jl = JOURNAL_LIST_ENTRY(entry);
949 if (entry == &journal->j_journal_list ||
950 atomic_read(&other_jl->j_older_commits_done))
951 break;
1da177e4 952
bd4c625c
LT
953 first_jl = other_jl;
954 entry = other_jl->j_list.prev;
955 }
1da177e4 956
bd4c625c
LT
957 /* if we didn't find any older uncommitted transactions, return now */
958 if (first_jl == jl) {
959 return 0;
960 }
1da177e4 961
bd4c625c
LT
962 first_trans_id = first_jl->j_trans_id;
963
964 entry = &first_jl->j_list;
965 while (1) {
966 other_jl = JOURNAL_LIST_ENTRY(entry);
967 other_trans_id = other_jl->j_trans_id;
968
969 if (other_trans_id < trans_id) {
970 if (atomic_read(&other_jl->j_commit_left) != 0) {
971 flush_commit_list(s, other_jl, 0);
972
973 /* list we were called with is gone, return */
974 if (!journal_list_still_alive(s, trans_id))
975 return 1;
976
977 /* the one we just flushed is gone, this means all
978 * older lists are also gone, so first_jl is no longer
979 * valid either. Go back to the beginning.
980 */
981 if (!journal_list_still_alive
982 (s, other_trans_id)) {
983 goto find_first;
984 }
985 }
986 entry = entry->next;
987 if (entry == &journal->j_journal_list)
988 return 0;
989 } else {
990 return 0;
1da177e4 991 }
1da177e4 992 }
bd4c625c 993 return 0;
1da177e4 994}

static int reiserfs_async_progress_wait(struct super_block *s)
{
	DEFINE_WAIT(wait);
	struct reiserfs_journal *j = SB_JOURNAL(s);
	if (atomic_read(&j->j_async_throttle))
		congestion_wait(WRITE, HZ / 10);
	return 0;
}

/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s,
			     struct reiserfs_journal_list *jl, int flushall)
{
	int i;
	int bn;
	struct buffer_head *tbh = NULL;
	unsigned long trans_id = jl->j_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int barrier = 0;
	int retval = 0;
	int write_len;

	reiserfs_check_lock_depth(s, "flush_commit_list");

	if (atomic_read(&jl->j_older_commits_done)) {
		return 0;
	}

	get_fs_excl();

	/* before we can put our commit blocks on disk, we have to make sure everyone older than
	 ** us is on disk too
	 */
	BUG_ON(jl->j_len <= 0);
	BUG_ON(trans_id == journal->j_trans_id);

	get_journal_list(jl);
	if (flushall) {
		if (flush_older_commits(s, jl) == 1) {
			/* list disappeared during flush_older_commits.  return */
			goto put_jl;
		}
	}

	/* make sure nobody is trying to flush this one at the same time */
	down(&jl->j_commit_lock);
	if (!journal_list_still_alive(s, trans_id)) {
		up(&jl->j_commit_lock);
		goto put_jl;
	}
	BUG_ON(jl->j_trans_id == 0);

	/* this commit is done, exit */
	if (atomic_read(&(jl->j_commit_left)) <= 0) {
		if (flushall) {
			atomic_set(&(jl->j_older_commits_done), 1);
		}
		up(&jl->j_commit_lock);
		goto put_jl;
	}

	if (!list_empty(&jl->j_bh_list)) {
		int ret;
		unlock_kernel();
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		lock_kernel();
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk.  Try to write at least 256
	 * log blocks.  later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh))
				ll_rw_block(WRITE, 1, &tbh);
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* We're skipping the commit if there's an error */
	if (retval || reiserfs_is_journal_aborted(journal))
		barrier = 0;

	/* wait on everything written so far before writing the commit
	 * if we are in barrier mode, send the commit down now
	 */
	barrier = reiserfs_barrier_flush(s);
	if (barrier) {
		int ret;
		lock_buffer(jl->j_commit_bh);
		ret = submit_barrier_buffer(jl->j_commit_bh);
		if (ret == -EOPNOTSUPP) {
			set_buffer_uptodate(jl->j_commit_bh);
			disable_barrier(s);
			barrier = 0;
		}
	}
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		wait_on_buffer(tbh);
		// since we're using ll_rw_blk above, it might have skipped over
		// a locked buffer.  Double check here
		//
		if (buffer_dirty(tbh))	/* redundant, sync_dirty_buffer() checks */
			sync_dirty_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601, buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	if (!barrier) {
		/* If there was a write error in the journal - we can't commit
		 * this transaction - it will be invalid and, if successful,
		 * will just end up propagating the write error out to
		 * the file system. */
		if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
			if (buffer_dirty(jl->j_commit_bh))
				BUG();
			mark_buffer_dirty(jl->j_commit_bh) ;
			sync_dirty_buffer(jl->j_commit_bh) ;
		}
	} else
		wait_on_buffer(jl->j_commit_bh);

	check_barrier_completion(s, jl->j_commit_bh);

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615: buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	retval = retval ? retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	up(&jl->j_commit_lock);
      put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __FUNCTION__);
	put_fs_excl();
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(p_s_sb,
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ? 1 : 0,
						 cn->state);
			}
			cn->state = 0;
			remove_journal_hash(p_s_sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(p_s_sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb,
					unsigned long offset,
					unsigned long trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			wait_on_buffer((journal->j_header_bh));
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(p_s_sb,
						 "journal-699: buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		if (reiserfs_barrier_flush(p_s_sb)) {
			int ret;
			lock_buffer(journal->j_header_bh);
			ret = submit_barrier_buffer(journal->j_header_bh);
			if (ret == -EOPNOTSUPP) {
				set_buffer_uptodate(journal->j_header_bh);
				disable_barrier(p_s_sb);
				goto sync;
			}
			wait_on_buffer(journal->j_header_bh);
			check_barrier_completion(p_s_sb, journal->j_header_bh);
		} else {
		      sync:
			set_buffer_dirty(journal->j_header_bh);
			sync_dirty_buffer(journal->j_header_bh);
		}
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(p_s_sb,
					 "journal-837: IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *p_s_sb,
				       unsigned long offset,
				       unsigned long trans_id)
{
	return _update_journal_header_block(p_s_sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
	unsigned long trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
      restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(p_s_sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s,
				 "clm-2048: flush_journal_list called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		down(&journal->j_flush_sem);
	} else if (!down_trylock(&journal->j_flush_sem)) {
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s,
			       "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	get_fs_excl();

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s,
			       "journal-844: panic journal list is flushing, wcount is not 0\n");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.  Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing this
		 ** block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer while
			 ** we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this list
		 ** locked while it works, so we should never see a buffer here that
		 ** is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s,
					 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer transaction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s,
					 "clm-2082: Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __FUNCTION__);
		}
	      free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s,
						 "journal-945: saved_bh->b_count < 0");
			}
		}
	}
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1011: cn->bh is NULL\n");
				}
				wait_on_buffer(cn->bh);
				if (!cn->bh) {
					reiserfs_panic(s,
						       "journal-1012: cn->bh is NULL\n");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s,
							 "journal-949: buffer write failed\n");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit after the up to date
				 ** check, otherwise we race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __FUNCTION__);
      flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure there
	 ** are no allocations going on while we are flushing journal lists.  So,
	 ** we only need to update the journal header block for the last list
	 ** being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __FUNCTION__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		up(&journal->j_flush_sem);
	put_fs_excl();
	return err;
}

static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
	      next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

static int write_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
	      next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned long *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned long orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	down(&journal->j_flush_sem);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_sem held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left)
		    || !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	if (chunk.nr) {
		write_chunk(&chunk);
	}

      done:
	up(&journal->j_flush_sem);
	return ret;
}

/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned long trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}
1871
1872/*
1873** removes any nodes in table with the same block and dev as bh.
1874** only touches the hnext and hprev pointers.
1875*/
1876void remove_journal_hash(struct super_block *sb,
bd4c625c
LT
1877 struct reiserfs_journal_cnode **table,
1878 struct reiserfs_journal_list *jl,
1879 unsigned long block, int remove_freed)
1880{
1881 struct reiserfs_journal_cnode *cur;
1882 struct reiserfs_journal_cnode **head;
1883
1884 head = &(journal_hash(table, sb, block));
1885 if (!head) {
1886 return;
1887 }
1888 cur = *head;
1889 while (cur) {
1890 if (cur->blocknr == block && cur->sb == sb
1891 && (jl == NULL || jl == cur->jlist)
1892 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1893 if (cur->hnext) {
1894 cur->hnext->hprev = cur->hprev;
1895 }
1896 if (cur->hprev) {
1897 cur->hprev->hnext = cur->hnext;
1898 } else {
1899 *head = cur->hnext;
1900 }
1901 cur->blocknr = 0;
1902 cur->sb = NULL;
1903 cur->state = 0;
1904 if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
1905 atomic_dec(&(cur->jlist->j_nonzerolen));
1906 cur->bh = NULL;
1907 cur->jlist = NULL;
1908 }
1909 cur = cur->hnext;
1910 }
1911}
1912
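/*
 * Illustrative sketch (not part of the original source): how the hash
 * chains walked above are organized.  journal_hash() picks a bucket from
 * the block number; each bucket is a list of cnodes chained through
 * hnext/hprev.  find_cnode_sketch is a hypothetical lookup helper.
 */
static struct reiserfs_journal_cnode *find_cnode_sketch(struct super_block *sb,
					struct reiserfs_journal_cnode **table,
					unsigned long block)
{
	struct reiserfs_journal_cnode *cn = journal_hash(table, sb, block);

	while (cn && (cn->blocknr != block || cn->sb != sb))
		cn = cn->hnext;
	return cn;
}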
1913static void free_journal_ram(struct super_block *p_s_sb)
1914{
1915 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
d739b42b 1916 kfree(journal->j_current_jl);
bd4c625c
LT
1917 journal->j_num_lists--;
1918
1919 vfree(journal->j_cnode_free_orig);
1920 free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
1921 free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */
1922 if (journal->j_header_bh) {
1923 brelse(journal->j_header_bh);
1924 }
1925 /* j_header_bh is on the journal dev, make sure not to release the journal
1926 * dev until we brelse j_header_bh
1927 */
1928 release_journal_dev(p_s_sb, journal);
1929 vfree(journal);
1da177e4
LT
1930}
1931
1932/*
1933** call on unmount. Only set error to 1 if you haven't made your way out
1934** of read_super() yet. Any other caller must keep error at 0.
1935*/
bd4c625c
LT
1936static int do_journal_release(struct reiserfs_transaction_handle *th,
1937 struct super_block *p_s_sb, int error)
1938{
1939 struct reiserfs_transaction_handle myth;
1940 int flushed = 0;
1941 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1942
1943 /* we only want to flush out transactions if we were called with error == 0
1944 */
1945 if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1946 /* end the current trans */
1947 BUG_ON(!th->t_trans_id);
1948 do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
1949
1950 /* make sure something gets logged to force our way into the flush code */
1951 if (!journal_join(&myth, p_s_sb, 1)) {
1952 reiserfs_prepare_for_journal(p_s_sb,
1953 SB_BUFFER_WITH_SB(p_s_sb),
1954 1);
1955 journal_mark_dirty(&myth, p_s_sb,
1956 SB_BUFFER_WITH_SB(p_s_sb));
1957 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1958 flushed = 1;
1959 }
1960 }
1961
1962 /* this also catches errors during the do_journal_end above */
1963 if (!error && reiserfs_is_journal_aborted(journal)) {
1964 memset(&myth, 0, sizeof(myth));
1965 if (!journal_join_abort(&myth, p_s_sb, 1)) {
1966 reiserfs_prepare_for_journal(p_s_sb,
1967 SB_BUFFER_WITH_SB(p_s_sb),
1968 1);
1969 journal_mark_dirty(&myth, p_s_sb,
1970 SB_BUFFER_WITH_SB(p_s_sb));
1971 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1972 }
1973 }
1974
1975 reiserfs_mounted_fs_count--;
1976 /* wait for all commits to finish */
1977 cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
1978 flush_workqueue(commit_wq);
1979 if (!reiserfs_mounted_fs_count) {
1980 destroy_workqueue(commit_wq);
1981 commit_wq = NULL;
1982 }
1983
1984 free_journal_ram(p_s_sb);
1985
1986 return 0;
1da177e4
LT
1987}
1988
1989/*
1990** call on unmount. Flushes all journal transactions and releases all allocated RAM.
1991*/
bd4c625c
LT
1992int journal_release(struct reiserfs_transaction_handle *th,
1993 struct super_block *p_s_sb)
1994{
1995 return do_journal_release(th, p_s_sb, 0);
1da177e4 1996}
bd4c625c 1997
1da177e4
LT
1998/*
1999** only call from an error condition inside reiserfs_read_super!
2000*/
bd4c625c
LT
2001int journal_release_error(struct reiserfs_transaction_handle *th,
2002 struct super_block *p_s_sb)
2003{
2004 return do_journal_release(th, p_s_sb, 1);
1da177e4
LT
2005}
2006
2007/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
bd4c625c
LT
2008static int journal_compare_desc_commit(struct super_block *p_s_sb,
2009 struct reiserfs_journal_desc *desc,
2010 struct reiserfs_journal_commit *commit)
2011{
2012 if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
2013 get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
2014 get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
2015 get_commit_trans_len(commit) <= 0) {
2016 return 1;
2017 }
2018 return 0;
1da177e4 2019}
bd4c625c 2020
1da177e4
LT
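/*
 * Illustrative sketch (not part of the original source): how the commit
 * block of a transaction is located from its description block.  The
 * journal is a circular log, so the offset wraps modulo the on-disk
 * journal size.  commit_blocknr_sketch is a hypothetical helper; the
 * same arithmetic appears inline in the functions below.
 */
static unsigned long commit_blocknr_sketch(struct super_block *sb,
					   unsigned long desc_offset,
					   unsigned long trans_len)
{
	/* desc block, then trans_len logged blocks, then the commit block */
	return SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
	    ((desc_offset + trans_len + 1) % SB_ONDISK_JOURNAL_SIZE(sb));
}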
2021/* returns 0 if it did not find a description block
2022** returns -1 if it found a corrupt commit block
2023** returns 1 if both desc and commit were valid
2024*/
bd4c625c
LT
2025static int journal_transaction_is_valid(struct super_block *p_s_sb,
2026 struct buffer_head *d_bh,
2027 unsigned long *oldest_invalid_trans_id,
2028 unsigned long *newest_mount_id)
2029{
2030 struct reiserfs_journal_desc *desc;
2031 struct reiserfs_journal_commit *commit;
2032 struct buffer_head *c_bh;
2033 unsigned long offset;
2034
2035 if (!d_bh)
2036 return 0;
2037
2038 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2039 if (get_desc_trans_len(desc) > 0
2040 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
2041 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
2042 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
2043 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2044 "journal-986: transaction "
2045 "is valid returning because trans_id %d is greater than "
2046 "oldest_invalid %lu",
2047 get_desc_trans_id(desc),
2048 *oldest_invalid_trans_id);
2049 return 0;
2050 }
2051 if (newest_mount_id
2052 && *newest_mount_id > get_desc_mount_id(desc)) {
2053 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2054 "journal-1087: transaction "
2055 "is valid returning because mount_id %d is less than "
2056 "newest_mount_id %lu",
2057 get_desc_mount_id(desc),
2058 *newest_mount_id);
2059 return -1;
2060 }
2061 if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
2062 reiserfs_warning(p_s_sb,
2063 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
2064 get_desc_trans_len(desc));
2065 return -1;
2066 }
2067 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2068
2069		/* ok, we have a journal description block, let's see if the transaction was valid */
2070 c_bh =
2071 journal_bread(p_s_sb,
2072 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2073 ((offset + get_desc_trans_len(desc) +
2074 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2075 if (!c_bh)
2076 return 0;
2077 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2078 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2079 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2080 "journal_transaction_is_valid, commit offset %ld had bad "
2081 "time %d or length %d",
2082 c_bh->b_blocknr -
2083 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2084 get_commit_trans_id(commit),
2085 get_commit_trans_len(commit));
2086 brelse(c_bh);
2087 if (oldest_invalid_trans_id) {
2088 *oldest_invalid_trans_id =
2089 get_desc_trans_id(desc);
2090 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2091 "journal-1004: "
2092 "transaction_is_valid setting oldest invalid trans_id "
2093 "to %d",
2094 get_desc_trans_id(desc));
2095 }
2096 return -1;
2097 }
2098 brelse(c_bh);
2099 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2100 "journal-1006: found valid "
2101 "transaction start offset %llu, len %d id %d",
2102 d_bh->b_blocknr -
2103 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2104 get_desc_trans_len(desc),
2105 get_desc_trans_id(desc));
2106 return 1;
2107 } else {
2108 return 0;
2109 }
2110}
2111
2112static void brelse_array(struct buffer_head **heads, int num)
2113{
2114 int i;
2115 for (i = 0; i < num; i++) {
2116 brelse(heads[i]);
2117 }
1da177e4
LT
2118}
2119
2120/*
2121** given the start block and values for the oldest acceptable transactions,
2122** this either reads in and replays a transaction, or returns because the
2123** transaction is invalid or too old.
2124*/
bd4c625c
LT
2125static int journal_read_transaction(struct super_block *p_s_sb,
2126 unsigned long cur_dblock,
2127 unsigned long oldest_start,
2128 unsigned long oldest_trans_id,
2129 unsigned long newest_mount_id)
2130{
2131 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2132 struct reiserfs_journal_desc *desc;
2133 struct reiserfs_journal_commit *commit;
2134 unsigned long trans_id = 0;
2135 struct buffer_head *c_bh;
2136 struct buffer_head *d_bh;
2137 struct buffer_head **log_blocks = NULL;
2138 struct buffer_head **real_blocks = NULL;
2139 unsigned long trans_offset;
2140 int i;
2141 int trans_half;
2142
2143 d_bh = journal_bread(p_s_sb, cur_dblock);
2144 if (!d_bh)
2145 return 1;
2146 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2147 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2148 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2149 "journal_read_transaction, offset %llu, len %d mount_id %d",
2150 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2151 get_desc_trans_len(desc), get_desc_mount_id(desc));
2152 if (get_desc_trans_id(desc) < oldest_trans_id) {
2153 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2154 "journal_read_trans skipping because %lu is too old",
2155 cur_dblock -
2156 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2157 brelse(d_bh);
2158 return 1;
2159 }
2160 if (get_desc_mount_id(desc) != newest_mount_id) {
2161 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2162 "journal_read_trans skipping because %d is != "
2163 "newest_mount_id %lu", get_desc_mount_id(desc),
2164 newest_mount_id);
2165 brelse(d_bh);
2166 return 1;
2167 }
2168 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2169 ((trans_offset + get_desc_trans_len(desc) + 1) %
2170 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2171 if (!c_bh) {
2172 brelse(d_bh);
2173 return 1;
2174 }
2175 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2176 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2177 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2178 "journal_read_transaction, "
2179 "commit offset %llu had bad time %d or length %d",
2180 c_bh->b_blocknr -
2181 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2182 get_commit_trans_id(commit),
2183 get_commit_trans_len(commit));
2184 brelse(c_bh);
2185 brelse(d_bh);
2186 return 1;
2187 }
2188 trans_id = get_desc_trans_id(desc);
2189 /* now we know we've got a good transaction, and it was inside the valid time ranges */
d739b42b
PE
2190 log_blocks = kmalloc(get_desc_trans_len(desc) *
2191 sizeof(struct buffer_head *), GFP_NOFS);
2192 real_blocks = kmalloc(get_desc_trans_len(desc) *
2193 sizeof(struct buffer_head *), GFP_NOFS);
bd4c625c
LT
2194 if (!log_blocks || !real_blocks) {
2195 brelse(c_bh);
2196 brelse(d_bh);
d739b42b
PE
2197 kfree(log_blocks);
2198 kfree(real_blocks);
bd4c625c
LT
2199 reiserfs_warning(p_s_sb,
2200 "journal-1169: kmalloc failed, unable to mount FS");
2201 return -1;
2202 }
2203 /* get all the buffer heads */
2204 trans_half = journal_trans_half(p_s_sb->s_blocksize);
2205 for (i = 0; i < get_desc_trans_len(desc); i++) {
2206 log_blocks[i] =
2207 journal_getblk(p_s_sb,
2208 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2209 (trans_offset + 1 +
2210 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2211 if (i < trans_half) {
2212 real_blocks[i] =
2213 sb_getblk(p_s_sb,
2214 le32_to_cpu(desc->j_realblock[i]));
2215 } else {
2216 real_blocks[i] =
2217 sb_getblk(p_s_sb,
2218 le32_to_cpu(commit->
2219 j_realblock[i - trans_half]));
2220 }
2221 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2222 reiserfs_warning(p_s_sb,
2223 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2224 goto abort_replay;
2225 }
2226 /* make sure we don't try to replay onto log or reserved area */
2227 if (is_block_in_log_or_reserved_area
2228 (p_s_sb, real_blocks[i]->b_blocknr)) {
2229 reiserfs_warning(p_s_sb,
2230 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2231 abort_replay:
2232 brelse_array(log_blocks, i);
2233 brelse_array(real_blocks, i);
2234 brelse(c_bh);
2235 brelse(d_bh);
d739b42b
PE
2236 kfree(log_blocks);
2237 kfree(real_blocks);
bd4c625c
LT
2238 return -1;
2239 }
2240 }
2241 /* read in the log blocks, memcpy to the corresponding real block */
2242 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2243 for (i = 0; i < get_desc_trans_len(desc); i++) {
2244 wait_on_buffer(log_blocks[i]);
2245 if (!buffer_uptodate(log_blocks[i])) {
2246 reiserfs_warning(p_s_sb,
2247 "journal-1212: REPLAY FAILURE fsck required! buffer write failed");
2248 brelse_array(log_blocks + i,
2249 get_desc_trans_len(desc) - i);
2250 brelse_array(real_blocks, get_desc_trans_len(desc));
2251 brelse(c_bh);
2252 brelse(d_bh);
d739b42b
PE
2253 kfree(log_blocks);
2254 kfree(real_blocks);
bd4c625c
LT
2255 return -1;
2256 }
2257 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2258 real_blocks[i]->b_size);
2259 set_buffer_uptodate(real_blocks[i]);
2260 brelse(log_blocks[i]);
2261 }
2262 /* flush out the real blocks */
2263 for (i = 0; i < get_desc_trans_len(desc); i++) {
2264 set_buffer_dirty(real_blocks[i]);
53778ffd 2265 ll_rw_block(SWRITE, 1, real_blocks + i);
bd4c625c
LT
2266 }
2267 for (i = 0; i < get_desc_trans_len(desc); i++) {
2268 wait_on_buffer(real_blocks[i]);
2269 if (!buffer_uptodate(real_blocks[i])) {
2270 reiserfs_warning(p_s_sb,
2271 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2272 brelse_array(real_blocks + i,
2273 get_desc_trans_len(desc) - i);
2274 brelse(c_bh);
2275 brelse(d_bh);
d739b42b
PE
2276 kfree(log_blocks);
2277 kfree(real_blocks);
bd4c625c
LT
2278 return -1;
2279 }
2280 brelse(real_blocks[i]);
2281 }
2282 cur_dblock =
2283 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2284 ((trans_offset + get_desc_trans_len(desc) +
2285 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2286 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2287 "journal-1095: setting journal " "start to offset %ld",
2288 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2289
2290 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2291 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2292 journal->j_last_flush_trans_id = trans_id;
2293 journal->j_trans_id = trans_id + 1;
a44c94a7
AZ
2294 /* check for trans_id overflow */
2295 if (journal->j_trans_id == 0)
2296 journal->j_trans_id = 10;
bd4c625c
LT
2297 brelse(c_bh);
2298 brelse(d_bh);
d739b42b
PE
2299 kfree(log_blocks);
2300 kfree(real_blocks);
bd4c625c 2301 return 0;
1da177e4
LT
2302}
2303
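/*
 * Illustrative sketch (not part of the original source): where the real
 * (home-location) block number of the i-th logged block lives.  The first
 * trans_half entries are stored in the description block and the rest in
 * the commit block, as in the replay loop above.  replay_blocknr_sketch
 * is a hypothetical helper.
 */
static b_blocknr_t replay_blocknr_sketch(struct reiserfs_journal_desc *desc,
					 struct reiserfs_journal_commit *commit,
					 int i, int trans_half)
{
	if (i < trans_half)
		return le32_to_cpu(desc->j_realblock[i]);
	return le32_to_cpu(commit->j_realblock[i - trans_half]);
}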
2304/* This function reads blocks of size bufsize, starting at block and going
2305   up to max_block (but no more than BUFNR blocks at a time). This proved to
2306   improve mounting speed on self-rebuilding raid5 arrays, at least.
2307   Right now it is only used from journal code, but later we might use it
2308   from other places.
2309 Note: Do not use journal_getblk/sb_getblk functions here! */
bd4c625c
LT
2310static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2311 int bufsize, unsigned int max_block)
1da177e4 2312{
bd4c625c 2313 struct buffer_head *bhlist[BUFNR];
1da177e4 2314 unsigned int blocks = BUFNR;
bd4c625c 2315 struct buffer_head *bh;
1da177e4 2316 int i, j;
bd4c625c
LT
2317
2318 bh = __getblk(dev, block, bufsize);
2319 if (buffer_uptodate(bh))
2320 return (bh);
2321
1da177e4
LT
2322 if (block + BUFNR > max_block) {
2323 blocks = max_block - block;
2324 }
2325 bhlist[0] = bh;
2326 j = 1;
2327 for (i = 1; i < blocks; i++) {
bd4c625c
LT
2328 bh = __getblk(dev, block + i, bufsize);
2329 if (buffer_uptodate(bh)) {
2330 brelse(bh);
1da177e4 2331 break;
bd4c625c
LT
2332 } else
2333 bhlist[j++] = bh;
1da177e4 2334 }
bd4c625c
LT
2335 ll_rw_block(READ, j, bhlist);
2336 for (i = 1; i < j; i++)
2337 brelse(bhlist[i]);
1da177e4 2338 bh = bhlist[0];
bd4c625c
LT
2339 wait_on_buffer(bh);
2340 if (buffer_uptodate(bh))
1da177e4 2341 return bh;
bd4c625c 2342 brelse(bh);
1da177e4
LT
2343 return NULL;
2344}
2345
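/*
 * Illustrative sketch (not part of the original source): how a caller
 * scans the on-disk journal sequentially with reiserfs_breada(), much as
 * journal_read() does below.  scan_log_sketch is hypothetical.
 */
static void scan_log_sketch(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int first = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	unsigned int end = first + SB_ONDISK_JOURNAL_SIZE(sb);
	unsigned int blk;
	struct buffer_head *bh;

	for (blk = first; blk < end; blk++) {
		bh = reiserfs_breada(journal->j_dev_bd, blk,
				     sb->s_blocksize, end);
		if (!bh)
			break;	/* read error */
		/* ... examine the descriptor/commit candidates ... */
		brelse(bh);
	}
}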
2346/*
2347** read and replay the log
2348** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
2349** transaction. This tests for that before scanning all the transactions in the log, which keeps normal mount times fast.
2350**
2351** After a crash, this starts with the next unflushed transaction and replays until it finds one that is too old or invalid.
2352**
2353** On exit, it sets things up so the first transaction will work correctly.
2354*/
bd4c625c
LT
2355static int journal_read(struct super_block *p_s_sb)
2356{
2357 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2358 struct reiserfs_journal_desc *desc;
2359 unsigned long oldest_trans_id = 0;
2360 unsigned long oldest_invalid_trans_id = 0;
2361 time_t start;
2362 unsigned long oldest_start = 0;
2363 unsigned long cur_dblock = 0;
2364 unsigned long newest_mount_id = 9;
2365 struct buffer_head *d_bh;
2366 struct reiserfs_journal_header *jh;
2367 int valid_journal_header = 0;
2368 int replay_count = 0;
2369 int continue_replay = 1;
2370 int ret;
2371 char b[BDEVNAME_SIZE];
2372
2373 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2374 reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2375 bdevname(journal->j_dev_bd, b));
2376 start = get_seconds();
2377
2378 /* step 1, read in the journal header block. Check the transaction it says
2379 ** is the first unflushed, and if that transaction is not valid,
2380 ** replay is done
2381 */
2382 journal->j_header_bh = journal_bread(p_s_sb,
2383 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2384 + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2385 if (!journal->j_header_bh) {
2386 return 1;
2387 }
2388 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
c499ec24 2389 if (le32_to_cpu(jh->j_first_unflushed_offset) <
bd4c625c
LT
2390 SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2391 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2392 oldest_start =
2393 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2394 le32_to_cpu(jh->j_first_unflushed_offset);
2395 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2396 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2397 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2398 "journal-1153: found in "
2399 "header: first_unflushed_offset %d, last_flushed_trans_id "
2400 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2401 le32_to_cpu(jh->j_last_flush_trans_id));
2402 valid_journal_header = 1;
2403
2404 /* now, we try to read the first unflushed offset. If it is not valid,
2405 ** there is nothing more we can do, and it makes no sense to read
2406 ** through the whole log.
2407 */
2408 d_bh =
2409 journal_bread(p_s_sb,
2410 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2411 le32_to_cpu(jh->j_first_unflushed_offset));
2412 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2413 if (!ret) {
2414 continue_replay = 0;
2415 }
2416 brelse(d_bh);
2417 goto start_log_replay;
2418 }
2419
2420 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2421 reiserfs_warning(p_s_sb,
2422 "clm-2076: device is readonly, unable to replay log");
2423 return -1;
2424 }
2425
2426 /* ok, there are transactions that need to be replayed. start with the first log block, find
2427 ** all the valid transactions, and pick out the oldest.
2428 */
2429 while (continue_replay
2430 && cur_dblock <
2431 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2432 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2433		/* Note that the blocksize of the primary fs device and the journal
2434		   device are required to be the same */
2435 d_bh =
2436 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2437 p_s_sb->s_blocksize,
2438 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2439 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2440 ret =
2441 journal_transaction_is_valid(p_s_sb, d_bh,
2442 &oldest_invalid_trans_id,
2443 &newest_mount_id);
2444 if (ret == 1) {
2445 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2446 if (oldest_start == 0) { /* init all oldest_ values */
2447 oldest_trans_id = get_desc_trans_id(desc);
2448 oldest_start = d_bh->b_blocknr;
2449 newest_mount_id = get_desc_mount_id(desc);
2450 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2451 "journal-1179: Setting "
2452 "oldest_start to offset %llu, trans_id %lu",
2453 oldest_start -
2454 SB_ONDISK_JOURNAL_1st_BLOCK
2455 (p_s_sb), oldest_trans_id);
2456 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2457 /* one we just read was older */
2458 oldest_trans_id = get_desc_trans_id(desc);
2459 oldest_start = d_bh->b_blocknr;
2460 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2461 "journal-1180: Resetting "
2462 "oldest_start to offset %lu, trans_id %lu",
2463 oldest_start -
2464 SB_ONDISK_JOURNAL_1st_BLOCK
2465 (p_s_sb), oldest_trans_id);
2466 }
2467 if (newest_mount_id < get_desc_mount_id(desc)) {
2468 newest_mount_id = get_desc_mount_id(desc);
2469 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2470 "journal-1299: Setting "
2471 "newest_mount_id to %d",
2472 get_desc_mount_id(desc));
2473 }
2474 cur_dblock += get_desc_trans_len(desc) + 2;
2475 } else {
2476 cur_dblock++;
2477 }
2478 brelse(d_bh);
2479 }
2480
2481 start_log_replay:
2482 cur_dblock = oldest_start;
2483 if (oldest_trans_id) {
2484 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2485 "journal-1206: Starting replay "
2486 "from offset %llu, trans_id %lu",
2487 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2488 oldest_trans_id);
2489
2490 }
2491 replay_count = 0;
2492 while (continue_replay && oldest_trans_id > 0) {
2493 ret =
2494 journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2495 oldest_trans_id, newest_mount_id);
2496 if (ret < 0) {
2497 return ret;
2498 } else if (ret != 0) {
2499 break;
2500 }
2501 cur_dblock =
2502 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2503 replay_count++;
2504 if (cur_dblock == oldest_start)
2505 break;
2506 }
2507
2508 if (oldest_trans_id == 0) {
2509 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2510 "journal-1225: No valid " "transactions found");
2511 }
2512 /* j_start does not get set correctly if we don't replay any transactions.
2513 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2514 ** copy the trans_id from the header
2515 */
2516 if (valid_journal_header && replay_count == 0) {
2517 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2518 journal->j_trans_id =
2519 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
a44c94a7
AZ
2520 /* check for trans_id overflow */
2521 if (journal->j_trans_id == 0)
2522 journal->j_trans_id = 10;
bd4c625c
LT
2523 journal->j_last_flush_trans_id =
2524 le32_to_cpu(jh->j_last_flush_trans_id);
2525 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2526 } else {
2527 journal->j_mount_id = newest_mount_id + 1;
2528 }
1da177e4 2529 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
bd4c625c
LT
2530 "newest_mount_id to %lu", journal->j_mount_id);
2531 journal->j_first_unflushed_offset = journal->j_start;
2532 if (replay_count > 0) {
2533 reiserfs_info(p_s_sb,
2534 "replayed %d transactions in %lu seconds\n",
2535 replay_count, get_seconds() - start);
2536 }
2537 if (!bdev_read_only(p_s_sb->s_bdev) &&
2538 _update_journal_header_block(p_s_sb, journal->j_start,
2539 journal->j_last_flush_trans_id)) {
2540 /* replay failed, caller must call free_journal_ram and abort
2541 ** the mount
2542 */
2543 return -1;
2544 }
2545 return 0;
1da177e4
LT
2546}
2547
2548static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2549{
bd4c625c 2550 struct reiserfs_journal_list *jl;
8c777cc4
PE
2551 jl = kzalloc(sizeof(struct reiserfs_journal_list),
2552 GFP_NOFS | __GFP_NOFAIL);
bd4c625c
LT
2553 INIT_LIST_HEAD(&jl->j_list);
2554 INIT_LIST_HEAD(&jl->j_working_list);
2555 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2556 INIT_LIST_HEAD(&jl->j_bh_list);
2557 sema_init(&jl->j_commit_lock, 1);
2558 SB_JOURNAL(s)->j_num_lists++;
2559 get_journal_list(jl);
2560 return jl;
2561}
2562
2563static void journal_list_init(struct super_block *p_s_sb)
2564{
2565 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2566}
2567
2568static int release_journal_dev(struct super_block *super,
2569 struct reiserfs_journal *journal)
2570{
2571 int result;
2572
2573 result = 0;
2574
2575 if (journal->j_dev_file != NULL) {
2576 result = filp_close(journal->j_dev_file, NULL);
2577 journal->j_dev_file = NULL;
2578 journal->j_dev_bd = NULL;
2579 } else if (journal->j_dev_bd != NULL) {
2580 result = blkdev_put(journal->j_dev_bd);
2581 journal->j_dev_bd = NULL;
2582 }
2583
2584 if (result != 0) {
2585 reiserfs_warning(super,
2586 "sh-457: release_journal_dev: Cannot release journal device: %i",
2587 result);
2588 }
2589 return result;
2590}
2591
2592static int journal_init_dev(struct super_block *super,
2593 struct reiserfs_journal *journal,
2594 const char *jdev_name)
1da177e4
LT
2595{
2596 int result;
2597 dev_t jdev;
2598 int blkdev_mode = FMODE_READ | FMODE_WRITE;
2599 char b[BDEVNAME_SIZE];
2600
2601 result = 0;
2602
bd4c625c
LT
2603 journal->j_dev_bd = NULL;
2604 journal->j_dev_file = NULL;
2605 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2606 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
1da177e4
LT
2607
2608 if (bdev_read_only(super->s_bdev))
bd4c625c 2609 blkdev_mode = FMODE_READ;
1da177e4
LT
2610
2611 /* there is no "jdev" option and journal is on separate device */
bd4c625c 2612 if ((!jdev_name || !jdev_name[0])) {
1da177e4
LT
2613 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2614 if (IS_ERR(journal->j_dev_bd)) {
2615 result = PTR_ERR(journal->j_dev_bd);
2616 journal->j_dev_bd = NULL;
bd4c625c
LT
2617 reiserfs_warning(super, "sh-458: journal_init_dev: "
2618 "cannot init journal device '%s': %i",
2619 __bdevname(jdev, b), result);
1da177e4
LT
2620 return result;
2621 } else if (jdev != super->s_dev)
2622 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2623 return 0;
2624 }
2625
bd4c625c
LT
2626 journal->j_dev_file = filp_open(jdev_name, 0, 0);
2627 if (!IS_ERR(journal->j_dev_file)) {
1da177e4 2628 struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
bd4c625c 2629 if (!S_ISBLK(jdev_inode->i_mode)) {
74f9f974 2630 reiserfs_warning(super, "journal_init_dev: '%s' is "
bd4c625c 2631 "not a block device", jdev_name);
1da177e4 2632 result = -ENOTBLK;
bd4c625c
LT
2633 release_journal_dev(super, journal);
2634 } else {
1da177e4
LT
2635 /* ok */
2636 journal->j_dev_bd = I_BDEV(jdev_inode);
2637 set_blocksize(journal->j_dev_bd, super->s_blocksize);
bd4c625c
LT
2638 reiserfs_info(super,
2639 "journal_init_dev: journal device: %s\n",
74f9f974 2640 bdevname(journal->j_dev_bd, b));
1da177e4
LT
2641 }
2642 } else {
bd4c625c
LT
2643 result = PTR_ERR(journal->j_dev_file);
2644 journal->j_dev_file = NULL;
2645 reiserfs_warning(super,
2646 "journal_init_dev: Cannot open '%s': %i",
2647 jdev_name, result);
1da177e4 2648 }
1da177e4
LT
2649 return result;
2650}
2651
cf3d0b81
ES
2652/**
2653 * When creating/tuning a file system the user can assign some
2654 * journal params within boundaries which depend on the ratio
2655 * blocksize/standard_blocksize.
2656 *
2657 * For blocks >= standard_blocksize the transaction size should
2658 * be not less than JOURNAL_TRANS_MIN_DEFAULT, and not more
2659 * than JOURNAL_TRANS_MAX_DEFAULT.
2660 *
2661 * For blocks < standard_blocksize these boundaries should be
2662 * decreased proportionally.
2663 */
2664#define REISERFS_STANDARD_BLKSIZE (4096)
2665
2666static int check_advise_trans_params(struct super_block *p_s_sb,
2667 struct reiserfs_journal *journal)
2668{
2669 if (journal->j_trans_max) {
2670 /* Non-default journal params.
2671 Do sanity check for them. */
2672 int ratio = 1;
2673 if (p_s_sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
2674 ratio = REISERFS_STANDARD_BLKSIZE / p_s_sb->s_blocksize;
2675
2676 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
2677 journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
2678 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2679 JOURNAL_MIN_RATIO) {
2680 reiserfs_warning(p_s_sb,
2681 "sh-462: bad transaction max size (%u). FSCK?",
2682 journal->j_trans_max);
2683 return 1;
2684 }
2685 if (journal->j_max_batch != (journal->j_trans_max) *
2686 JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
2687 reiserfs_warning(p_s_sb,
2688 "sh-463: bad transaction max batch (%u). FSCK?",
2689 journal->j_max_batch);
2690 return 1;
2691 }
2692 } else {
2693 /* Default journal params.
2694		   The file system was created by an old version
2695 of mkreiserfs, so some fields contain zeros,
2696 and we need to advise proper values for them */
2697 if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE)
2698 reiserfs_panic(p_s_sb, "sh-464: bad blocksize (%u)",
2699 p_s_sb->s_blocksize);
2700 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2701 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2702 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2703 }
2704 return 0;
2705}
2706
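/*
 * Illustrative sketch (not part of the original source): how the ratio
 * above scales the transaction-size bounds for a hypothetical 1024-byte
 * block filesystem.  trans_max_bounds_sketch only exists to show the
 * arithmetic.
 */
static void trans_max_bounds_sketch(void)
{
	unsigned int blocksize = 1024;	/* hypothetical small-block fs */
	unsigned int ratio = REISERFS_STANDARD_BLKSIZE / blocksize;	/* = 4 */

	/* j_trans_max is only accepted inside these scaled-down bounds */
	unsigned int max_allowed = JOURNAL_TRANS_MAX_DEFAULT / ratio;
	unsigned int min_allowed = JOURNAL_TRANS_MIN_DEFAULT / ratio;

	(void)max_allowed;
	(void)min_allowed;
}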
1da177e4
LT
2707/*
2708** must be called once on fs mount. calls journal_read for you
2709*/
bd4c625c
LT
2710int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2711 int old_format, unsigned int commit_max_age)
2712{
2713 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2714 struct buffer_head *bhjh;
2715 struct reiserfs_super_block *rs;
2716 struct reiserfs_journal_header *jh;
2717 struct reiserfs_journal *journal;
2718 struct reiserfs_journal_list *jl;
2719 char b[BDEVNAME_SIZE];
2720
2721 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2722 if (!journal) {
2723 reiserfs_warning(p_s_sb,
2724 "journal-1256: unable to get memory for journal structure");
2725 return 1;
2726 }
2727 memset(journal, 0, sizeof(struct reiserfs_journal));
2728 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2729 INIT_LIST_HEAD(&journal->j_prealloc_list);
2730 INIT_LIST_HEAD(&journal->j_working_list);
2731 INIT_LIST_HEAD(&journal->j_journal_list);
2732 journal->j_persistent_trans = 0;
2733 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2734 journal->j_list_bitmap,
2735 SB_BMAP_NR(p_s_sb)))
2736 goto free_and_return;
2737 allocate_bitmap_nodes(p_s_sb);
2738
2739 /* reserved for journal area support */
2740 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2741 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2742 / p_s_sb->s_blocksize +
2743 SB_BMAP_NR(p_s_sb) +
2744 1 :
2745 REISERFS_DISK_OFFSET_IN_BYTES /
2746 p_s_sb->s_blocksize + 2);
2747
2748	/* Sanity check to see if the standard journal fits within the first bitmap
2749	   block (relevant for small blocksizes) */
2750 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2751 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2752 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2753 reiserfs_warning(p_s_sb,
2754 "journal-1393: journal does not fit for area "
2755 "addressed by first of bitmap blocks. It starts at "
2756 "%u and its size is %u. Block size %ld",
2757 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2758 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2759 p_s_sb->s_blocksize);
2760 goto free_and_return;
2761 }
2762
2763 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2764 reiserfs_warning(p_s_sb,
2765				 "sh-462: unable to initialize journal device");
2766 goto free_and_return;
2767 }
2768
2769 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2770
2771 /* read journal header */
2772 bhjh = journal_bread(p_s_sb,
2773 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2774 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2775 if (!bhjh) {
2776 reiserfs_warning(p_s_sb,
2777 "sh-459: unable to read journal header");
2778 goto free_and_return;
2779 }
2780 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2781
2782 /* make sure that journal matches to the super block */
2783 if (is_reiserfs_jr(rs)
2784 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2785 sb_jp_journal_magic(rs))) {
2786 reiserfs_warning(p_s_sb,
2787 "sh-460: journal header magic %x "
2788 "(device %s) does not match to magic found in super "
2789 "block %x", jh->jh_journal.jp_journal_magic,
2790 bdevname(journal->j_dev_bd, b),
2791 sb_jp_journal_magic(rs));
2792 brelse(bhjh);
2793 goto free_and_return;
2794 }
2795
2796 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2797 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2798 journal->j_max_commit_age =
2799 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2800 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2801
cf3d0b81
ES
2802 if (check_advise_trans_params(p_s_sb, journal) != 0)
2803 goto free_and_return;
bd4c625c
LT
2804 journal->j_default_max_commit_age = journal->j_max_commit_age;
2805
2806 if (commit_max_age != 0) {
2807 journal->j_max_commit_age = commit_max_age;
2808 journal->j_max_trans_age = commit_max_age;
2809 }
2810
2811 reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2812 "journal first block %u, max trans len %u, max batch %u, "
2813 "max commit age %u, max trans age %u\n",
2814 bdevname(journal->j_dev_bd, b),
2815 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2816 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2817 journal->j_trans_max,
2818 journal->j_max_batch,
2819 journal->j_max_commit_age, journal->j_max_trans_age);
2820
2821 brelse(bhjh);
2822
2823 journal->j_list_bitmap_index = 0;
2824 journal_list_init(p_s_sb);
2825
2826 memset(journal->j_list_hash_table, 0,
2827 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2828
2829 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2830 spin_lock_init(&journal->j_dirty_buffers_lock);
2831
2832 journal->j_start = 0;
2833 journal->j_len = 0;
2834 journal->j_len_alloc = 0;
2835 atomic_set(&(journal->j_wcount), 0);
2836 atomic_set(&(journal->j_async_throttle), 0);
2837 journal->j_bcount = 0;
2838 journal->j_trans_start_time = 0;
2839 journal->j_last = NULL;
2840 journal->j_first = NULL;
2841 init_waitqueue_head(&(journal->j_join_wait));
2842 sema_init(&journal->j_lock, 1);
2843 sema_init(&journal->j_flush_sem, 1);
2844
2845 journal->j_trans_id = 10;
2846 journal->j_mount_id = 10;
2847 journal->j_state = 0;
2848 atomic_set(&(journal->j_jlock), 0);
2849 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2850 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2851 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2852 journal->j_cnode_used = 0;
2853 journal->j_must_wait = 0;
2854
576f6d79
JM
2855 if (journal->j_cnode_free == 0) {
2856 reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
2857 "allocation failed (%ld bytes). Journal is "
2858 "too large for available memory. Usually "
2859 "this is due to a journal that is too large.",
2860 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2861 goto free_and_return;
2862 }
2863
bd4c625c
LT
2864 init_journal_hash(p_s_sb);
2865 jl = journal->j_current_jl;
2866 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2867 if (!jl->j_list_bitmap) {
2868 reiserfs_warning(p_s_sb,
2869 "journal-2005, get_list_bitmap failed for journal list 0");
2870 goto free_and_return;
2871 }
2872 if (journal_read(p_s_sb) < 0) {
2873 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2874 goto free_and_return;
2875 }
2876
2877 reiserfs_mounted_fs_count++;
2878 if (reiserfs_mounted_fs_count <= 1)
2879 commit_wq = create_workqueue("reiserfs");
2880
c4028958
DH
2881 INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
2882 journal->j_work_sb = p_s_sb;
bd4c625c
LT
2883 return 0;
2884 free_and_return:
2885 free_journal_ram(p_s_sb);
2886 return 1;
1da177e4
LT
2887}
2888
2889/*
2890** test for a polite end of the current transaction. Used by file_write, and should
2891** be used by delete to make sure they don't write more than can fit inside a single
2892** transaction
2893*/
bd4c625c
LT
2894int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2895 int new_alloc)
2896{
2897 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2898 time_t now = get_seconds();
2899 /* cannot restart while nested */
2900 BUG_ON(!th->t_trans_id);
2901 if (th->t_refcount > 1)
2902 return 0;
2903 if (journal->j_must_wait > 0 ||
2904 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2905 atomic_read(&(journal->j_jlock)) ||
2906 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2907 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2908 return 1;
2909 }
6ae1ea44
CM
2910 /* protected by the BKL here */
2911 journal->j_len_alloc += new_alloc;
2912 th->t_blocks_allocated += new_alloc ;
bd4c625c 2913 return 0;
1da177e4
LT
2914}
2915
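/*
 * Illustrative sketch (not part of the original source): the typical
 * caller pattern for journal_transaction_should_end().  A long write
 * loop checks it periodically and, when it returns nonzero, ends the
 * current transaction and starts a fresh one before continuing.
 * restart_write_sketch and blocks_per_step are hypothetical.
 */
static int restart_write_sketch(struct reiserfs_transaction_handle *th,
				struct super_block *sb, int blocks_per_step)
{
	int err;

	if (!journal_transaction_should_end(th, blocks_per_step))
		return 0;	/* enough room left, keep going */

	err = journal_end(th, sb, th->t_blocks_allocated);
	if (err)
		return err;
	return journal_begin(th, sb, blocks_per_step);
}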
2916/* this must be called inside a transaction, and requires the
2917** kernel_lock to be held
2918*/
bd4c625c
LT
2919void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2920{
2921 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2922 BUG_ON(!th->t_trans_id);
2923 journal->j_must_wait = 1;
2924 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2925 return;
1da177e4
LT
2926}
2927
2928/* this must be called without a transaction started, and does not
2929** require BKL
2930*/
bd4c625c
LT
2931void reiserfs_allow_writes(struct super_block *s)
2932{
2933 struct reiserfs_journal *journal = SB_JOURNAL(s);
2934 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2935 wake_up(&journal->j_join_wait);
1da177e4
LT
2936}
2937
2938/* this must be called without a transaction started, and does not
2939** require BKL
2940*/
bd4c625c
LT
2941void reiserfs_wait_on_write_block(struct super_block *s)
2942{
2943 struct reiserfs_journal *journal = SB_JOURNAL(s);
2944 wait_event(journal->j_join_wait,
2945 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2946}
2947
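/*
 * Illustrative sketch (not part of the original source): how the three
 * routines above are meant to pair up to quiesce writers around an
 * operation that must not race with new transactions.  quiesce_sketch
 * and the surrounding locking are hypothetical.
 */
static void quiesce_sketch(struct reiserfs_transaction_handle *th,
			   struct super_block *s)
{
	reiserfs_block_writes(th);	/* new writers park in reiserfs_wait_on_write_block */
	/* ... work that must not see new transactions ... */
	reiserfs_allow_writes(s);	/* clear the flag and wake j_join_wait */
}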
2948static void queue_log_writer(struct super_block *s)
2949{
2950 wait_queue_t wait;
2951 struct reiserfs_journal *journal = SB_JOURNAL(s);
2952 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2953
2954 /*
2955 * we don't want to use wait_event here because
2956 * we only want to wait once.
2957 */
2958 init_waitqueue_entry(&wait, current);
2959 add_wait_queue(&journal->j_join_wait, &wait);
1da177e4 2960 set_current_state(TASK_UNINTERRUPTIBLE);
bd4c625c
LT
2961 if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2962 schedule();
5ab2f7e0 2963 __set_current_state(TASK_RUNNING);
bd4c625c
LT
2964 remove_wait_queue(&journal->j_join_wait, &wait);
2965}
2966
2967static void wake_queued_writers(struct super_block *s)
2968{
2969 struct reiserfs_journal *journal = SB_JOURNAL(s);
2970 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2971 wake_up(&journal->j_join_wait);
2972}
2973
2974static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2975{
2976 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2977 unsigned long bcount = journal->j_bcount;
2978 while (1) {
041e0e3b 2979 schedule_timeout_uninterruptible(1);
bd4c625c
LT
2980 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2981 while ((atomic_read(&journal->j_wcount) > 0 ||
2982 atomic_read(&journal->j_jlock)) &&
2983 journal->j_trans_id == trans_id) {
2984 queue_log_writer(sb);
2985 }
2986 if (journal->j_trans_id != trans_id)
2987 break;
2988 if (bcount == journal->j_bcount)
2989 break;
2990 bcount = journal->j_bcount;
1da177e4 2991 }
1da177e4
LT
2992}
2993
2994/* join == true if you must join an existing transaction.
2995** join == false if you can deal with waiting for others to finish
2996**
2997** this will block until the transaction is joinable. send the number of blocks you
2998** expect to use in nblocks.
2999*/
bd4c625c
LT
3000static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
3001 struct super_block *p_s_sb, unsigned long nblocks,
3002 int join)
3003{
3004 time_t now = get_seconds();
3005 int old_trans_id;
3006 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3007 struct reiserfs_transaction_handle myth;
3008 int sched_count = 0;
3009 int retval;
3010
3011 reiserfs_check_lock_depth(p_s_sb, "journal_begin");
14a61442 3012 BUG_ON(nblocks > journal->j_trans_max);
bd4c625c
LT
3013
3014 PROC_INFO_INC(p_s_sb, journal.journal_being);
3015 /* set here for journal_join */
3016 th->t_refcount = 1;
3017 th->t_super = p_s_sb;
3018
3019 relock:
3020 lock_journal(p_s_sb);
3021 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
3022 unlock_journal(p_s_sb);
3023 retval = journal->j_errno;
3024 goto out_fail;
3025 }
3026 journal->j_bcount++;
3027
3028 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
3029 unlock_journal(p_s_sb);
3030 reiserfs_wait_on_write_block(p_s_sb);
3031 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
3032 goto relock;
3033 }
3034 now = get_seconds();
3035
3036 /* if there is no room in the journal OR
3037 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
3038 ** we don't sleep if there aren't other writers
3039 */
3040
3041 if ((!join && journal->j_must_wait > 0) ||
3042 (!join
3043 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
3044 || (!join && atomic_read(&journal->j_wcount) > 0
3045 && journal->j_trans_start_time > 0
3046 && (now - journal->j_trans_start_time) >
3047 journal->j_max_trans_age) || (!join
3048 && atomic_read(&journal->j_jlock))
3049 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
3050
3051 old_trans_id = journal->j_trans_id;
3052 unlock_journal(p_s_sb); /* allow others to finish this transaction */
3053
3054 if (!join && (journal->j_len_alloc + nblocks + 2) >=
3055 journal->j_max_batch &&
3056 ((journal->j_len + nblocks + 2) * 100) <
3057 (journal->j_len_alloc * 75)) {
3058 if (atomic_read(&journal->j_wcount) > 10) {
3059 sched_count++;
3060 queue_log_writer(p_s_sb);
3061 goto relock;
3062 }
3063 }
3064 /* don't mess with joining the transaction if all we have to do is
3065 * wait for someone else to do a commit
3066 */
3067 if (atomic_read(&journal->j_jlock)) {
3068 while (journal->j_trans_id == old_trans_id &&
3069 atomic_read(&journal->j_jlock)) {
3070 queue_log_writer(p_s_sb);
3071 }
3072 goto relock;
3073 }
3074 retval = journal_join(&myth, p_s_sb, 1);
3075 if (retval)
3076 goto out_fail;
3077
3078 /* someone might have ended the transaction while we joined */
3079 if (old_trans_id != journal->j_trans_id) {
3080 retval = do_journal_end(&myth, p_s_sb, 1, 0);
3081 } else {
3082 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
3083 }
3084
3085 if (retval)
3086 goto out_fail;
3087
3088 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
3089 goto relock;
3090 }
3091 /* we are the first writer, set trans_id */
3092 if (journal->j_trans_start_time == 0) {
3093 journal->j_trans_start_time = get_seconds();
3094 }
3095 atomic_inc(&(journal->j_wcount));
3096 journal->j_len_alloc += nblocks;
3097 th->t_blocks_logged = 0;
3098 th->t_blocks_allocated = nblocks;
3099 th->t_trans_id = journal->j_trans_id;
3100 unlock_journal(p_s_sb);
3101 INIT_LIST_HEAD(&th->t_list);
3102 get_fs_excl();
3103 return 0;
3104
3105 out_fail:
3106 memset(th, 0, sizeof(*th));
3107 /* Re-set th->t_super, so we can properly keep track of how many
3108 * persistent transactions there are. We need to do this so if this
3109 * call is part of a failed restart_transaction, we can free it later */
3110 th->t_super = p_s_sb;
3111 return retval;
3112}
3113
3114struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3115 super_block
3116 *s,
3117 int nblocks)
3118{
3119 int ret;
3120 struct reiserfs_transaction_handle *th;
3121
3122	/* if we're nesting into an existing transaction, it will be
3123 ** persistent on its own
3124 */
3125 if (reiserfs_transaction_running(s)) {
3126 th = current->journal_info;
3127 th->t_refcount++;
14a61442
ES
3128 BUG_ON(th->t_refcount < 2);
3129
bd4c625c
LT
3130 return th;
3131 }
d739b42b 3132 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
bd4c625c
LT
3133 if (!th)
3134 return NULL;
3135 ret = journal_begin(th, s, nblocks);
3136 if (ret) {
d739b42b 3137 kfree(th);
bd4c625c
LT
3138 return NULL;
3139 }
3140
3141 SB_JOURNAL(s)->j_persistent_trans++;
3142 return th;
3143}
3144
3145int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3146{
3147 struct super_block *s = th->t_super;
3148 int ret = 0;
3149 if (th->t_trans_id)
3150 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3151 else
3152 ret = -EIO;
3153 if (th->t_refcount == 0) {
3154 SB_JOURNAL(s)->j_persistent_trans--;
d739b42b 3155 kfree(th);
bd4c625c
LT
3156 }
3157 return ret;
3158}
3159
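/*
 * Illustrative sketch (not part of the original source): how a caller
 * that may or may not already be inside a transaction uses the
 * persistent-transaction pair above.  persistent_user_sketch and the
 * reserved block count are hypothetical.
 */
static int persistent_user_sketch(struct super_block *s)
{
	struct reiserfs_transaction_handle *th;

	th = reiserfs_persistent_transaction(s, 10);	/* reserve 10 blocks */
	if (!th)
		return -ENOMEM;

	/* ... prepare and journal_mark_dirty() the buffers being changed ... */

	return reiserfs_end_persistent_transaction(th);
}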
3160static int journal_join(struct reiserfs_transaction_handle *th,
3161 struct super_block *p_s_sb, unsigned long nblocks)
3162{
3163 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3164
3165 /* this keeps do_journal_end from NULLing out the current->journal_info
3166 ** pointer
3167 */
3168 th->t_handle_save = cur_th;
14a61442 3169 BUG_ON(cur_th && cur_th->t_refcount > 1);
bd4c625c
LT
3170 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3171}
3172
3173int journal_join_abort(struct reiserfs_transaction_handle *th,
3174 struct super_block *p_s_sb, unsigned long nblocks)
3175{
3176 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3177
3178 /* this keeps do_journal_end from NULLing out the current->journal_info
3179 ** pointer
3180 */
3181 th->t_handle_save = cur_th;
14a61442 3182 BUG_ON(cur_th && cur_th->t_refcount > 1);
bd4c625c
LT
3183 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3184}
3185
3186int journal_begin(struct reiserfs_transaction_handle *th,
3187 struct super_block *p_s_sb, unsigned long nblocks)
3188{
3189 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3190 int ret;
3191
3192 th->t_handle_save = NULL;
3193 if (cur_th) {
3194 /* we are nesting into the current transaction */
3195 if (cur_th->t_super == p_s_sb) {
3196 BUG_ON(!cur_th->t_refcount);
3197 cur_th->t_refcount++;
3198 memcpy(th, cur_th, sizeof(*th));
3199 if (th->t_refcount <= 1)
3200 reiserfs_warning(p_s_sb,
3201 "BAD: refcount <= 1, but journal_info != 0");
3202 return 0;
3203 } else {
3204 /* we've ended up with a handle from a different filesystem.
3205 ** save it and restore on journal_end. This should never
3206 ** really happen...
3207 */
3208 reiserfs_warning(p_s_sb,
3209					 "clm-2100: nesting into a different FS");
3210 th->t_handle_save = current->journal_info;
3211 current->journal_info = th;
3212 }
1da177e4 3213 } else {
bd4c625c
LT
3214 current->journal_info = th;
3215 }
3216 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
14a61442 3217 BUG_ON(current->journal_info != th);
1da177e4 3218
bd4c625c
LT
3219 /* I guess this boils down to being the reciprocal of clm-2100 above.
3220 * If do_journal_begin_r fails, we need to put it back, since journal_end
3221 * won't be called to do it. */
3222 if (ret)
3223 current->journal_info = th->t_handle_save;
3224 else
3225 BUG_ON(!th->t_refcount);
1da177e4 3226
bd4c625c 3227 return ret;
1da177e4
LT
3228}
3229
3230/*
3231** puts bh into the current transaction. If it was already there, it removes the
3232** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
3233**
3234** if it was dirty, it cleans it and files it onto the clean list. It can't be allowed to be dirty again until the
3235** transaction is committed.
3236**
3237** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
3238*/
bd4c625c
LT
3239int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3240 struct super_block *p_s_sb, struct buffer_head *bh)
3241{
3242 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3243 struct reiserfs_journal_cnode *cn = NULL;
3244 int count_already_incd = 0;
3245 int prepared = 0;
3246 BUG_ON(!th->t_trans_id);
3247
3248 PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3249 if (th->t_trans_id != journal->j_trans_id) {
3250 reiserfs_panic(th->t_super,
3251 "journal-1577: handle trans id %ld != current trans id %ld\n",
3252 th->t_trans_id, journal->j_trans_id);
3253 }
3254
3255 p_s_sb->s_dirt = 1;
3256
3257 prepared = test_clear_buffer_journal_prepared(bh);
3258 clear_buffer_journal_restore_dirty(bh);
3259 /* already in this transaction, we are done */
3260 if (buffer_journaled(bh)) {
3261 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3262 return 0;
3263 }
3264
3265 /* this must be turned into a panic instead of a warning. We can't allow
3266 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3267 ** could get to disk too early. NOT GOOD.
3268 */
3269 if (!prepared || buffer_dirty(bh)) {
3270 reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3271 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3272 (unsigned long long)bh->b_blocknr,
3273 prepared ? ' ' : '!',
3274 buffer_locked(bh) ? ' ' : '!',
3275 buffer_dirty(bh) ? ' ' : '!',
3276 buffer_journal_dirty(bh) ? ' ' : '!');
3277 }
3278
3279 if (atomic_read(&(journal->j_wcount)) <= 0) {
3280 reiserfs_warning(p_s_sb,
3281 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3282 atomic_read(&(journal->j_wcount)));
3283 return 1;
3284 }
3285 /* this error means I've screwed up, and we've overflowed the transaction.
3286 ** Nothing can be done here, except make the FS readonly or panic.
3287 */
3288 if (journal->j_len >= journal->j_trans_max) {
3289 reiserfs_panic(th->t_super,
3290 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3291 journal->j_len);
3292 }
3293
3294 if (buffer_journal_dirty(bh)) {
3295 count_already_incd = 1;
3296 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3297 clear_buffer_journal_dirty(bh);
3298 }
3299
3300 if (journal->j_len > journal->j_len_alloc) {
3301 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3302 }
3303
3304 set_buffer_journaled(bh);
3305
3306 /* now put this guy on the end */
3307 if (!cn) {
3308 cn = get_cnode(p_s_sb);
3309 if (!cn) {
3310 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3311 }
3312
3313 if (th->t_blocks_logged == th->t_blocks_allocated) {
3314 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3315 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3316 }
3317 th->t_blocks_logged++;
3318 journal->j_len++;
3319
3320 cn->bh = bh;
3321 cn->blocknr = bh->b_blocknr;
3322 cn->sb = p_s_sb;
3323 cn->jlist = NULL;
3324 insert_journal_hash(journal->j_hash_table, cn);
3325 if (!count_already_incd) {
3326 get_bh(bh);
3327 }
3328 }
3329 cn->next = NULL;
3330 cn->prev = journal->j_last;
3331 cn->bh = bh;
3332 if (journal->j_last) {
3333 journal->j_last->next = cn;
3334 journal->j_last = cn;
3335 } else {
3336 journal->j_first = cn;
3337 journal->j_last = cn;
3338 }
3339 return 0;
3340}
3341
3342int journal_end(struct reiserfs_transaction_handle *th,
3343 struct super_block *p_s_sb, unsigned long nblocks)
3344{
3345 if (!current->journal_info && th->t_refcount > 1)
3346 reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3347 th->t_refcount);
3348
3349 if (!th->t_trans_id) {
3350 WARN_ON(1);
3351 return -EIO;
3352 }
3353
3354 th->t_refcount--;
3355 if (th->t_refcount > 0) {
3356 struct reiserfs_transaction_handle *cur_th =
3357 current->journal_info;
3358
3359 /* we aren't allowed to close a nested transaction on a different
3360 ** filesystem from the one in the task struct
3361 */
14a61442 3362 BUG_ON(cur_th->t_super != th->t_super);
bd4c625c
LT
3363
3364 if (th != cur_th) {
3365 memcpy(current->journal_info, th, sizeof(*th));
3366 th->t_trans_id = 0;
3367 }
3368 return 0;
3369 } else {
3370 return do_journal_end(th, p_s_sb, nblocks, 0);
3371 }
1da177e4
LT
3372}
3373
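/*
 * Illustrative sketch (not part of the original source): the basic
 * begin / prepare / mark dirty / end sequence used throughout this file
 * (do_journal_release above and reiserfs_flush_old_commits below follow
 * the same pattern).  touch_super_sketch is a hypothetical helper.
 */
static int touch_super_sketch(struct super_block *sb)
{
	struct reiserfs_transaction_handle th;
	int err;

	err = journal_begin(&th, sb, 1);
	if (err)
		return err;
	reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
	journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
	return journal_end(&th, sb, 1);
}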
3374/* removes a buffer from the current transaction, brelse'ing it and decrementing any counters.
3375** also files the removed buffer directly onto the clean list
3376**
3377** called by journal_mark_freed when a block has been deleted
3378**
3379** returns 1 if it cleaned and released the buffer, 0 otherwise
3380*/
bd4c625c
LT
3381static int remove_from_transaction(struct super_block *p_s_sb,
3382 b_blocknr_t blocknr, int already_cleaned)
3383{
3384 struct buffer_head *bh;
3385 struct reiserfs_journal_cnode *cn;
3386 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3387 int ret = 0;
3388
3389 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3390 if (!cn || !cn->bh) {
3391 return ret;
3392 }
3393 bh = cn->bh;
3394 if (cn->prev) {
3395 cn->prev->next = cn->next;
3396 }
3397 if (cn->next) {
3398 cn->next->prev = cn->prev;
3399 }
3400 if (cn == journal->j_first) {
3401 journal->j_first = cn->next;
3402 }
3403 if (cn == journal->j_last) {
3404 journal->j_last = cn->prev;
3405 }
3406 if (bh)
3407 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3408 bh->b_blocknr, 0);
3409 clear_buffer_journaled(bh); /* don't log this one */
3410
3411 if (!already_cleaned) {
3412 clear_buffer_journal_dirty(bh);
3413 clear_buffer_dirty(bh);
3414 clear_buffer_journal_test(bh);
3415 put_bh(bh);
3416 if (atomic_read(&(bh->b_count)) < 0) {
3417 reiserfs_warning(p_s_sb,
3418 "journal-1752: remove from trans, b_count < 0");
3419 }
3420 ret = 1;
3421 }
3422 journal->j_len--;
3423 journal->j_len_alloc--;
3424 free_cnode(p_s_sb, cn);
3425 return ret;
1da177e4
LT
3426}
3427
3428/*
3429** for any cnode in a journal list, it can only be dirtied if all the
3430** transactions that include it are committed to disk.
1da177e4
LT
3431** this checks through each transaction, and returns 1 if you are allowed to dirty,
3432** and 0 if you aren't
3433**
3434** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3435** blocks for a given transaction on disk
3436**
3437*/
bd4c625c
LT
3438static int can_dirty(struct reiserfs_journal_cnode *cn)
3439{
3440 struct super_block *sb = cn->sb;
3441 b_blocknr_t blocknr = cn->blocknr;
3442 struct reiserfs_journal_cnode *cur = cn->hprev;
3443 int can_dirty = 1;
3444
3445 /* first test hprev. These are all newer than cn, so any node here
3446 ** with the same block number and dev means this node can't be sent
3447 ** to disk right now.
3448 */
3449 while (cur && can_dirty) {
3450 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3451 cur->blocknr == blocknr) {
3452 can_dirty = 0;
3453 }
3454 cur = cur->hprev;
3455 }
3456 /* then test hnext. These are all older than cn. As long as they
3457 ** are committed to the log, it is safe to write cn to disk
3458 */
3459 cur = cn->hnext;
3460 while (cur && can_dirty) {
3461 if (cur->jlist && cur->jlist->j_len > 0 &&
3462 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3463 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3464 can_dirty = 0;
3465 }
3466 cur = cur->hnext;
3467 }
3468 return can_dirty;
1da177e4
LT
3469}
3470
3471/* syncs the commit blocks, but does not force the real buffers to disk
0779bf2d 3472** will wait until the current transaction is done/committed before returning
1da177e4 3473*/
bd4c625c
LT
3474int journal_end_sync(struct reiserfs_transaction_handle *th,
3475 struct super_block *p_s_sb, unsigned long nblocks)
3476{
3477 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1da177e4 3478
bd4c625c
LT
3479 BUG_ON(!th->t_trans_id);
3480 /* you can sync while nested, very, very bad */
14a61442 3481 BUG_ON(th->t_refcount > 1);
bd4c625c
LT
3482 if (journal->j_len == 0) {
3483 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3484 1);
3485 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3486 }
3487 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
1da177e4
LT
3488}
3489
3490/*
3491** writeback the pending async commits to disk
3492*/
c4028958 3493static void flush_async_commits(struct work_struct *work)
bd4c625c 3494{
c4028958
DH
3495 struct reiserfs_journal *journal =
3496 container_of(work, struct reiserfs_journal, j_work.work);
3497 struct super_block *p_s_sb = journal->j_work_sb;
bd4c625c
LT
3498 struct reiserfs_journal_list *jl;
3499 struct list_head *entry;
3500
3501 lock_kernel();
3502 if (!list_empty(&journal->j_journal_list)) {
3503 /* last entry is the youngest, commit it and you get everything */
3504 entry = journal->j_journal_list.prev;
3505 jl = JOURNAL_LIST_ENTRY(entry);
3506 flush_commit_list(p_s_sb, jl, 1);
3507 }
3508 unlock_kernel();
1da177e4
LT
3509}
3510
3511/*
3512** flushes any old transactions to disk
3513** ends the current transaction if it is too old
3514*/
bd4c625c
LT
3515int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3516{
3517 time_t now;
3518 struct reiserfs_transaction_handle th;
3519 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3520
3521 now = get_seconds();
3522 /* safety check so we don't flush while we are replaying the log during
3523 * mount
3524 */
3525 if (list_empty(&journal->j_journal_list)) {
3526 return 0;
3527 }
3528
3529 /* check the current transaction. If there are no writers, and it is
3530 * too old, finish it, and force the commit blocks to disk
3531 */
3532 if (atomic_read(&journal->j_wcount) <= 0 &&
3533 journal->j_trans_start_time > 0 &&
3534 journal->j_len > 0 &&
3535 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3536 if (!journal_join(&th, p_s_sb, 1)) {
3537 reiserfs_prepare_for_journal(p_s_sb,
3538 SB_BUFFER_WITH_SB(p_s_sb),
3539 1);
3540 journal_mark_dirty(&th, p_s_sb,
3541 SB_BUFFER_WITH_SB(p_s_sb));
3542
 3543 /* we're only being called from kreiserfsd, so it makes no sense to do
 3544 ** an async commit just so kreiserfsd can pick it up later
3545 */
3546 do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3547 }
3548 }
3549 return p_s_sb->s_dirt;
1da177e4
LT
3550}
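/* Illustrative sketch, not part of the original source: a simplified view of
** the kind of periodic caller (kreiserfsd / write_super) expected to drive
** reiserfs_flush_old_commits().  example_periodic_flush is a hypothetical
** name, and whether the real superblock path looks exactly like this is an
** assumption.
*/
#if 0
static void example_periodic_flush(struct super_block *sb)
{
	/* only bother when the superblock has been marked dirty */
	if (sb->s_dirt)
		reiserfs_flush_old_commits(sb);
}
#endif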
3551
3552/*
3553** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3554**
3555** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3556** the writers are done. By the time it wakes up, the transaction it was called with has already ended, so it just
3557** flushes the commit list and returns 0.
3558**
3559** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
3560**
3561** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3562*/
bd4c625c
LT
3563static int check_journal_end(struct reiserfs_transaction_handle *th,
3564 struct super_block *p_s_sb, unsigned long nblocks,
3565 int flags)
3566{
3567
3568 time_t now;
3569 int flush = flags & FLUSH_ALL;
3570 int commit_now = flags & COMMIT_NOW;
3571 int wait_on_commit = flags & WAIT;
3572 struct reiserfs_journal_list *jl;
3573 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3574
3575 BUG_ON(!th->t_trans_id);
3576
3577 if (th->t_trans_id != journal->j_trans_id) {
3578 reiserfs_panic(th->t_super,
3579 "journal-1577: handle trans id %ld != current trans id %ld\n",
3580 th->t_trans_id, journal->j_trans_id);
3581 }
3582
3583 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3584 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
3585 atomic_dec(&(journal->j_wcount));
3586 }
3587
 3588 /* BUG: deal with the case where j_len is 0, but blocks that were previously freed still need to be released.
 3589 ** This will be dealt with by the next transaction that actually writes something, but it should be taken
 3590 ** care of in this transaction.
3591 */
14a61442
ES
3592 BUG_ON(journal->j_len == 0);
3593
bd4c625c
LT
 3594 /* if wcount > 0, and we are called with flush or commit_now,
3595 ** we wait on j_join_wait. We will wake up when the last writer has
3596 ** finished the transaction, and started it on its way to the disk.
3597 ** Then, we flush the commit or journal list, and just return 0
3598 ** because the rest of journal end was already done for this transaction.
3599 */
3600 if (atomic_read(&(journal->j_wcount)) > 0) {
3601 if (flush || commit_now) {
3602 unsigned trans_id;
3603
3604 jl = journal->j_current_jl;
3605 trans_id = jl->j_trans_id;
3606 if (wait_on_commit)
3607 jl->j_state |= LIST_COMMIT_PENDING;
3608 atomic_set(&(journal->j_jlock), 1);
3609 if (flush) {
3610 journal->j_next_full_flush = 1;
3611 }
3612 unlock_journal(p_s_sb);
3613
3614 /* sleep while the current transaction is still j_jlocked */
3615 while (journal->j_trans_id == trans_id) {
3616 if (atomic_read(&journal->j_jlock)) {
3617 queue_log_writer(p_s_sb);
3618 } else {
3619 lock_journal(p_s_sb);
3620 if (journal->j_trans_id == trans_id) {
3621 atomic_set(&(journal->j_jlock),
3622 1);
3623 }
3624 unlock_journal(p_s_sb);
3625 }
3626 }
14a61442
ES
3627 BUG_ON(journal->j_trans_id == trans_id);
3628
bd4c625c
LT
3629 if (commit_now
3630 && journal_list_still_alive(p_s_sb, trans_id)
3631 && wait_on_commit) {
3632 flush_commit_list(p_s_sb, jl, 1);
3633 }
3634 return 0;
3635 }
3636 unlock_journal(p_s_sb);
3637 return 0;
3638 }
3639
3640 /* deal with old transactions where we are the last writers */
3641 now = get_seconds();
3642 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3643 commit_now = 1;
3644 journal->j_next_async_flush = 1;
3645 }
3646 /* don't batch when someone is waiting on j_join_wait */
3647 /* don't batch when syncing the commit or flushing the whole trans */
3648 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3649 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3650 && journal->j_len_alloc < journal->j_max_batch
3651 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3652 journal->j_bcount++;
3653 unlock_journal(p_s_sb);
3654 return 0;
3655 }
3656
3657 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3658 reiserfs_panic(p_s_sb,
3659 "journal-003: journal_end: j_start (%ld) is too high\n",
3660 journal->j_start);
3661 }
3662 return 1;
1da177e4
LT
3663}
3664
3665/*
3666** Does all the work that makes deleting blocks safe.
3667** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3668**
3669** otherwise:
3670** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3671** before this transaction has finished.
3672**
3673** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
3674** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
3675** the block can't be reallocated yet.
3676**
3677** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3678*/
bd4c625c
LT
3679int journal_mark_freed(struct reiserfs_transaction_handle *th,
3680 struct super_block *p_s_sb, b_blocknr_t blocknr)
3681{
3682 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3683 struct reiserfs_journal_cnode *cn = NULL;
3684 struct buffer_head *bh = NULL;
3685 struct reiserfs_list_bitmap *jb = NULL;
3686 int cleaned = 0;
3687 BUG_ON(!th->t_trans_id);
3688
3689 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3690 if (cn && cn->bh) {
3691 bh = cn->bh;
3692 get_bh(bh);
3693 }
3694 /* if it is journal new, we just remove it from this transaction */
3695 if (bh && buffer_journal_new(bh)) {
3696 clear_buffer_journal_new(bh);
3697 clear_prepared_bits(bh);
3698 reiserfs_clean_and_file_buffer(bh);
3699 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3700 } else {
3701 /* set the bit for this block in the journal bitmap for this transaction */
3702 jb = journal->j_current_jl->j_list_bitmap;
3703 if (!jb) {
3704 reiserfs_panic(p_s_sb,
3705 "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3706 }
3707 set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3708
3709 /* Note, the entire while loop is not allowed to schedule. */
3710
3711 if (bh) {
3712 clear_prepared_bits(bh);
3713 reiserfs_clean_and_file_buffer(bh);
3714 }
3715 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3716
3717 /* find all older transactions with this block, make sure they don't try to write it out */
3718 cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3719 blocknr);
3720 while (cn) {
3721 if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3722 set_bit(BLOCK_FREED, &cn->state);
3723 if (cn->bh) {
3724 if (!cleaned) {
3725 /* remove_from_transaction will brelse the buffer if it was
3726 ** in the current trans
3727 */
3728 clear_buffer_journal_dirty(cn->
3729 bh);
3730 clear_buffer_dirty(cn->bh);
3731 clear_buffer_journal_test(cn->
3732 bh);
3733 cleaned = 1;
3734 put_bh(cn->bh);
3735 if (atomic_read
3736 (&(cn->bh->b_count)) < 0) {
3737 reiserfs_warning(p_s_sb,
3738 "journal-2138: cn->bh->b_count < 0");
3739 }
3740 }
3741 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
3742 atomic_dec(&
3743 (cn->jlist->
3744 j_nonzerolen));
3745 }
3746 cn->bh = NULL;
3747 }
3748 }
3749 cn = cn->hnext;
3750 }
3751 }
3752
398c95bd
CM
3753 if (bh)
3754 release_buffer_page(bh); /* get_hash grabs the buffer */
bd4c625c
LT
3755 return 0;
3756}
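/* Illustrative sketch, not part of the original source: the expected calling
** pattern when a running transaction frees a metadata block.  The helper
** name example_free_metadata_block is hypothetical, and the bookkeeping of a
** real free path is omitted.
*/
#if 0
static int example_free_metadata_block(struct reiserfs_transaction_handle *th,
				       struct super_block *sb,
				       b_blocknr_t blocknr)
{
	BUG_ON(!th->t_trans_id);
	/* keep older transactions from flushing the stale copy and keep the
	 * block out of the allocator until this transaction commits */
	return journal_mark_freed(th, sb, blocknr);
}
#endif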
3757
3758void reiserfs_update_inode_transaction(struct inode *inode)
3759{
3760 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3761 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3762 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
1da177e4
LT
3763}
3764
3765/*
3766 * returns -1 on error, 0 if no commits/barriers were done and 1
3767 * if a transaction was actually committed and the barrier was done
3768 */
3769static int __commit_trans_jl(struct inode *inode, unsigned long id,
bd4c625c 3770 struct reiserfs_journal_list *jl)
1da177e4 3771{
bd4c625c
LT
3772 struct reiserfs_transaction_handle th;
3773 struct super_block *sb = inode->i_sb;
3774 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3775 int ret = 0;
3776
3777 /* is it from the current transaction, or from an unknown transaction? */
3778 if (id == journal->j_trans_id) {
3779 jl = journal->j_current_jl;
3780 /* try to let other writers come in and grow this transaction */
3781 let_transaction_grow(sb, id);
3782 if (journal->j_trans_id != id) {
3783 goto flush_commit_only;
3784 }
1da177e4 3785
bd4c625c
LT
3786 ret = journal_begin(&th, sb, 1);
3787 if (ret)
3788 return ret;
3789
3790 /* someone might have ended this transaction while we joined */
3791 if (journal->j_trans_id != id) {
3792 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3793 1);
3794 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3795 ret = journal_end(&th, sb, 1);
3796 goto flush_commit_only;
3797 }
1da177e4 3798
bd4c625c
LT
3799 ret = journal_end_sync(&th, sb, 1);
3800 if (!ret)
3801 ret = 1;
1da177e4 3802
bd4c625c
LT
3803 } else {
3804 /* this gets tricky, we have to make sure the journal list in
3805 * the inode still exists. We know the list is still around
3806 * if we've got a larger transaction id than the oldest list
3807 */
3808 flush_commit_only:
3809 if (journal_list_still_alive(inode->i_sb, id)) {
3810 /*
3811 * we only set ret to 1 when we know for sure
3812 * the barrier hasn't been started yet on the commit
3813 * block.
3814 */
3815 if (atomic_read(&jl->j_commit_left) > 1)
3816 ret = 1;
3817 flush_commit_list(sb, jl, 1);
3818 if (journal->j_errno)
3819 ret = journal->j_errno;
3820 }
1da177e4 3821 }
bd4c625c
LT
3822 /* otherwise the list is gone, and long since committed */
3823 return ret;
3824}
1da177e4 3825
bd4c625c
LT
3826int reiserfs_commit_for_inode(struct inode *inode)
3827{
3828 unsigned long id = REISERFS_I(inode)->i_trans_id;
3829 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
1da177e4 3830
bd4c625c
LT
3831 /* for the whole inode, assume unset id means it was
 3832 * changed in the current transaction. This is more conservative.
1da177e4 3833 */
bd4c625c
LT
3834 if (!id || !jl) {
3835 reiserfs_update_inode_transaction(inode);
3836 id = REISERFS_I(inode)->i_trans_id;
3837 /* jl will be updated in __commit_trans_jl */
3838 }
3839
3840 return __commit_trans_jl(inode, id, jl);
3841}
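/* Illustrative sketch, not part of the original source: roughly how an
** fsync-style path could use reiserfs_commit_for_inode().  Return values
** follow the convention documented above __commit_trans_jl(): negative on
** error, 0 if nothing needed committing, 1 if a commit (and barrier) was
** actually issued.  example_sync_inode_metadata is a hypothetical name.
*/
#if 0
static int example_sync_inode_metadata(struct inode *inode)
{
	int ret = reiserfs_commit_for_inode(inode);

	/* callers usually only care about errors, not the 0/1 distinction */
	return (ret < 0) ? ret : 0;
}
#endif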
3842
3843void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
3844 struct buffer_head *bh)
3845{
3846 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3847 PROC_INFO_INC(p_s_sb, journal.restore_prepared);
3848 if (!bh) {
3849 return;
3850 }
3851 if (test_clear_buffer_journal_restore_dirty(bh) &&
3852 buffer_journal_dirty(bh)) {
3853 struct reiserfs_journal_cnode *cn;
3854 cn = get_journal_hash_dev(p_s_sb,
3855 journal->j_list_hash_table,
3856 bh->b_blocknr);
3857 if (cn && can_dirty(cn)) {
3858 set_buffer_journal_test(bh);
3859 mark_buffer_dirty(bh);
3860 }
3861 }
3862 clear_buffer_journal_prepared(bh);
3863}
3864
3865extern struct tree_balance *cur_tb;
1da177e4
LT
3866/*
3867** before we can change a metadata block, we have to make sure it won't
3868** be written to disk while we are altering it. So, we must:
3869** clean it
3870** wait on it.
3871**
3872*/
3873int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
bd4c625c
LT
3874 struct buffer_head *bh, int wait)
3875{
3876 PROC_INFO_INC(p_s_sb, journal.prepare);
3877
3878 if (test_set_buffer_locked(bh)) {
3879 if (!wait)
3880 return 0;
3881 lock_buffer(bh);
3882 }
3883 set_buffer_journal_prepared(bh);
3884 if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3885 clear_buffer_journal_test(bh);
3886 set_buffer_journal_restore_dirty(bh);
3887 }
3888 unlock_buffer(bh);
3889 return 1;
3890}
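/* Illustrative sketch, not part of the original source: the usual
** prepare-then-log sequence for modifying a metadata buffer inside an open
** transaction, paired with reiserfs_restore_prepared_buffer() on any path
** that decides not to log after all.  example_log_buffer is a hypothetical
** name and the memset stands in for the real modification.
*/
#if 0
static void example_log_buffer(struct reiserfs_transaction_handle *th,
			       struct super_block *sb,
			       struct buffer_head *bh)
{
	/* clean the buffer and wait so it can't hit disk mid-change */
	reiserfs_prepare_for_journal(sb, bh, 1);
	/* the actual modification of the metadata block goes here */
	memset(bh->b_data, 0, bh->b_size);
	/* add the buffer to the current transaction */
	journal_mark_dirty(th, sb, bh);
}
#endif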
3891
3892static void flush_old_journal_lists(struct super_block *s)
3893{
3894 struct reiserfs_journal *journal = SB_JOURNAL(s);
3895 struct reiserfs_journal_list *jl;
3896 struct list_head *entry;
3897 time_t now = get_seconds();
3898
3899 while (!list_empty(&journal->j_journal_list)) {
3900 entry = journal->j_journal_list.next;
3901 jl = JOURNAL_LIST_ENTRY(entry);
3902 /* this check should always be run, to send old lists to disk */
a3172027
CM
3903 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
3904 atomic_read(&jl->j_commit_left) == 0 &&
3905 test_transaction(s, jl)) {
bd4c625c
LT
3906 flush_used_journal_lists(s, jl);
3907 } else {
3908 break;
3909 }
1da177e4 3910 }
1da177e4
LT
3911}
3912
3913/*
3914** long and ugly. If flush, will not return until all commit
3915** blocks and all real buffers in the trans are on disk.
3916** If no_async, won't return until all commit blocks are on disk.
3917**
3918** keep reading, there are comments as you go along
3919**
3920** If the journal is aborted, we just clean up. Things like flushing
3921** journal lists, etc just won't happen.
3922*/
bd4c625c
LT
3923static int do_journal_end(struct reiserfs_transaction_handle *th,
3924 struct super_block *p_s_sb, unsigned long nblocks,
3925 int flags)
3926{
3927 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3928 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3929 struct reiserfs_journal_cnode *last_cn = NULL;
3930 struct reiserfs_journal_desc *desc;
3931 struct reiserfs_journal_commit *commit;
3932 struct buffer_head *c_bh; /* commit bh */
3933 struct buffer_head *d_bh; /* desc bh */
3934 int cur_write_start = 0; /* start index of current log write */
3935 int old_start;
3936 int i;
a44c94a7
AZ
3937 int flush;
3938 int wait_on_commit;
bd4c625c
LT
3939 struct reiserfs_journal_list *jl, *temp_jl;
3940 struct list_head *entry, *safe;
3941 unsigned long jindex;
3942 unsigned long commit_trans_id;
3943 int trans_half;
3944
3945 BUG_ON(th->t_refcount > 1);
3946 BUG_ON(!th->t_trans_id);
3947
a44c94a7
AZ
 3948 /* protect flush_older_commits from making mistakes if the
 3949 transaction ID counter overflows. */
3950 if (th->t_trans_id == ~0UL)
3951 flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
3952 flush = flags & FLUSH_ALL;
3953 wait_on_commit = flags & WAIT;
3954
bd4c625c
LT
3955 put_fs_excl();
3956 current->journal_info = th->t_handle_save;
3957 reiserfs_check_lock_depth(p_s_sb, "journal end");
3958 if (journal->j_len == 0) {
3959 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3960 1);
3961 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3962 }
1da177e4 3963
bd4c625c
LT
3964 lock_journal(p_s_sb);
3965 if (journal->j_next_full_flush) {
3966 flags |= FLUSH_ALL;
3967 flush = 1;
3968 }
3969 if (journal->j_next_async_flush) {
3970 flags |= COMMIT_NOW | WAIT;
3971 wait_on_commit = 1;
3972 }
3973
 3974 /* check_journal_end locks the journal and unlocks it if it does not return 1.
 3975 ** It tells us whether we should continue with the journal_end, or just return.
3976 */
3977 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
3978 p_s_sb->s_dirt = 1;
3979 wake_queued_writers(p_s_sb);
3980 reiserfs_async_progress_wait(p_s_sb);
3981 goto out;
3982 }
3983
3984 /* check_journal_end might set these, check again */
3985 if (journal->j_next_full_flush) {
3986 flush = 1;
3987 }
3988
3989 /*
 3990 ** j_must_wait means we have to flush the log blocks, and the real blocks for
3991 ** this transaction
3992 */
3993 if (journal->j_must_wait > 0) {
3994 flush = 1;
3995 }
1da177e4 3996#ifdef REISERFS_PREALLOCATE
ef43bc4f
JK
3997 /* quota ops might need to nest, setup the journal_info pointer for them
3998 * and raise the refcount so that it is > 0. */
bd4c625c 3999 current->journal_info = th;
ef43bc4f 4000 th->t_refcount++;
bd4c625c
LT
4001 reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
4002 * the transaction */
ef43bc4f 4003 th->t_refcount--;
bd4c625c 4004 current->journal_info = th->t_handle_save;
1da177e4 4005#endif
bd4c625c
LT
4006
4007 /* setup description block */
4008 d_bh =
4009 journal_getblk(p_s_sb,
4010 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4011 journal->j_start);
4012 set_buffer_uptodate(d_bh);
4013 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
4014 memset(d_bh->b_data, 0, d_bh->b_size);
4015 memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
4016 set_desc_trans_id(desc, journal->j_trans_id);
4017
 4018 /* set up the commit block. Don't write it (and keep it clean, too) until after everyone else is written */
4019 c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4020 ((journal->j_start + journal->j_len +
4021 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4022 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
4023 memset(c_bh->b_data, 0, c_bh->b_size);
4024 set_commit_trans_id(commit, journal->j_trans_id);
4025 set_buffer_uptodate(c_bh);
4026
4027 /* init this journal list */
4028 jl = journal->j_current_jl;
4029
4030 /* we lock the commit before doing anything because
4031 * we want to make sure nobody tries to run flush_commit_list until
4032 * the new transaction is fully setup, and we've already flushed the
4033 * ordered bh list
4034 */
4035 down(&jl->j_commit_lock);
4036
4037 /* save the transaction id in case we need to commit it later */
4038 commit_trans_id = jl->j_trans_id;
4039
4040 atomic_set(&jl->j_older_commits_done, 0);
4041 jl->j_trans_id = journal->j_trans_id;
4042 jl->j_timestamp = journal->j_trans_start_time;
4043 jl->j_commit_bh = c_bh;
4044 jl->j_start = journal->j_start;
4045 jl->j_len = journal->j_len;
4046 atomic_set(&jl->j_nonzerolen, journal->j_len);
4047 atomic_set(&jl->j_commit_left, journal->j_len + 2);
4048 jl->j_realblock = NULL;
4049
4050 /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
4051 ** for each real block, add it to the journal list hash,
4052 ** copy into real block index array in the commit or desc block
4053 */
4054 trans_half = journal_trans_half(p_s_sb->s_blocksize);
4055 for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
4056 if (buffer_journaled(cn->bh)) {
4057 jl_cn = get_cnode(p_s_sb);
4058 if (!jl_cn) {
4059 reiserfs_panic(p_s_sb,
4060 "journal-1676, get_cnode returned NULL\n");
4061 }
4062 if (i == 0) {
4063 jl->j_realblock = jl_cn;
4064 }
4065 jl_cn->prev = last_cn;
4066 jl_cn->next = NULL;
4067 if (last_cn) {
4068 last_cn->next = jl_cn;
4069 }
4070 last_cn = jl_cn;
4071 /* make sure the block we are trying to log is not a block
4072 of journal or reserved area */
4073
4074 if (is_block_in_log_or_reserved_area
4075 (p_s_sb, cn->bh->b_blocknr)) {
4076 reiserfs_panic(p_s_sb,
4077 "journal-2332: Trying to log block %lu, which is a log block\n",
4078 cn->bh->b_blocknr);
4079 }
4080 jl_cn->blocknr = cn->bh->b_blocknr;
4081 jl_cn->state = 0;
4082 jl_cn->sb = p_s_sb;
4083 jl_cn->bh = cn->bh;
4084 jl_cn->jlist = jl;
4085 insert_journal_hash(journal->j_list_hash_table, jl_cn);
4086 if (i < trans_half) {
4087 desc->j_realblock[i] =
4088 cpu_to_le32(cn->bh->b_blocknr);
4089 } else {
4090 commit->j_realblock[i - trans_half] =
4091 cpu_to_le32(cn->bh->b_blocknr);
4092 }
4093 } else {
4094 i--;
4095 }
4096 }
4097 set_desc_trans_len(desc, journal->j_len);
4098 set_desc_mount_id(desc, journal->j_mount_id);
4099 set_desc_trans_id(desc, journal->j_trans_id);
4100 set_commit_trans_len(commit, journal->j_len);
4101
4102 /* special check in case all buffers in the journal were marked for not logging */
14a61442 4103 BUG_ON(journal->j_len == 0);
bd4c625c
LT
4104
4105 /* we're about to dirty all the log blocks, mark the description block
4106 * dirty now too. Don't mark the commit block dirty until all the
4107 * others are on disk
4108 */
4109 mark_buffer_dirty(d_bh);
4110
4111 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
4112 cur_write_start = journal->j_start;
4113 cn = journal->j_first;
4114 jindex = 1; /* start at one so we don't get the desc again */
4115 while (cn) {
4116 clear_buffer_journal_new(cn->bh);
4117 /* copy all the real blocks into log area. dirty log blocks */
4118 if (buffer_journaled(cn->bh)) {
4119 struct buffer_head *tmp_bh;
4120 char *addr;
4121 struct page *page;
4122 tmp_bh =
4123 journal_getblk(p_s_sb,
4124 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4125 ((cur_write_start +
4126 jindex) %
4127 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4128 set_buffer_uptodate(tmp_bh);
4129 page = cn->bh->b_page;
4130 addr = kmap(page);
4131 memcpy(tmp_bh->b_data,
4132 addr + offset_in_page(cn->bh->b_data),
4133 cn->bh->b_size);
4134 kunmap(page);
4135 mark_buffer_dirty(tmp_bh);
4136 jindex++;
4137 set_buffer_journal_dirty(cn->bh);
4138 clear_buffer_journaled(cn->bh);
4139 } else {
4140 /* JDirty cleared sometime during transaction. don't log this one */
4141 reiserfs_warning(p_s_sb,
4142 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
4143 brelse(cn->bh);
4144 }
4145 next = cn->next;
4146 free_cnode(p_s_sb, cn);
4147 cn = next;
4148 cond_resched();
4149 }
4150
4151 /* we are done with both the c_bh and d_bh, but
4152 ** c_bh must be written after all other commit blocks,
4153 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
4154 */
4155
4156 journal->j_current_jl = alloc_journal_list(p_s_sb);
4157
4158 /* now it is safe to insert this transaction on the main list */
4159 list_add_tail(&jl->j_list, &journal->j_journal_list);
4160 list_add_tail(&jl->j_working_list, &journal->j_working_list);
4161 journal->j_num_work_lists++;
4162
4163 /* reset journal values for the next transaction */
4164 old_start = journal->j_start;
4165 journal->j_start =
4166 (journal->j_start + journal->j_len +
4167 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
4168 atomic_set(&(journal->j_wcount), 0);
4169 journal->j_bcount = 0;
4170 journal->j_last = NULL;
4171 journal->j_first = NULL;
4172 journal->j_len = 0;
4173 journal->j_trans_start_time = 0;
a44c94a7
AZ
4174 /* check for trans_id overflow */
4175 if (++journal->j_trans_id == 0)
4176 journal->j_trans_id = 10;
bd4c625c
LT
4177 journal->j_current_jl->j_trans_id = journal->j_trans_id;
4178 journal->j_must_wait = 0;
4179 journal->j_len_alloc = 0;
4180 journal->j_next_full_flush = 0;
4181 journal->j_next_async_flush = 0;
4182 init_journal_hash(p_s_sb);
4183
4184 // make sure reiserfs_add_jh sees the new current_jl before we
4185 // write out the tails
4186 smp_mb();
4187
4188 /* tail conversion targets have to hit the disk before we end the
4189 * transaction. Otherwise a later transaction might repack the tail
4190 * before this transaction commits, leaving the data block unflushed and
 4191 * clean; if we crash before the later transaction commits, the data block
4192 * is lost.
4193 */
4194 if (!list_empty(&jl->j_tail_bh_list)) {
4195 unlock_kernel();
4196 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4197 journal, jl, &jl->j_tail_bh_list);
4198 lock_kernel();
4199 }
14a61442 4200 BUG_ON(!list_empty(&jl->j_tail_bh_list));
bd4c625c
LT
4201 up(&jl->j_commit_lock);
4202
 4203 /* honor the flush wishes from the caller. Simple commits can
 4204 ** be done outside the journal lock; they are done below.
4205 **
4206 ** if we don't flush the commit list right now, we put it into
4207 ** the work queue so the people waiting on the async progress work
4208 ** queue don't wait for this proc to flush journal lists and such.
4209 */
4210 if (flush) {
4211 flush_commit_list(p_s_sb, jl, 1);
4212 flush_journal_list(p_s_sb, jl, 1);
4213 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
4214 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4215
4216 /* if the next transaction has any chance of wrapping, flush
4217 ** transactions that might get overwritten. If any journal lists are very
 4218 ** old, flush them as well.
4219 */
4220 first_jl:
4221 list_for_each_safe(entry, safe, &journal->j_journal_list) {
4222 temp_jl = JOURNAL_LIST_ENTRY(entry);
4223 if (journal->j_start <= temp_jl->j_start) {
4224 if ((journal->j_start + journal->j_trans_max + 1) >=
4225 temp_jl->j_start) {
4226 flush_used_journal_lists(p_s_sb, temp_jl);
4227 goto first_jl;
4228 } else if ((journal->j_start +
4229 journal->j_trans_max + 1) <
4230 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4231 /* if we don't cross into the next transaction and we don't
 4232 * wrap, there is no way we can overlap any later transactions;
4233 * break now
4234 */
4235 break;
4236 }
4237 } else if ((journal->j_start +
4238 journal->j_trans_max + 1) >
4239 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4240 if (((journal->j_start + journal->j_trans_max + 1) %
4241 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
4242 temp_jl->j_start) {
4243 flush_used_journal_lists(p_s_sb, temp_jl);
4244 goto first_jl;
4245 } else {
 4246 /* we don't overlap anything from our start to the end of the
4247 * log, and our wrapped portion doesn't overlap anything at
4248 * the start of the log. We can break
4249 */
4250 break;
4251 }
4252 }
4253 }
4254 flush_old_journal_lists(p_s_sb);
4255
4256 journal->j_current_jl->j_list_bitmap =
4257 get_list_bitmap(p_s_sb, journal->j_current_jl);
4258
4259 if (!(journal->j_current_jl->j_list_bitmap)) {
4260 reiserfs_panic(p_s_sb,
4261 "journal-1996: do_journal_end, could not get a list bitmap\n");
4262 }
4263
4264 atomic_set(&(journal->j_jlock), 0);
4265 unlock_journal(p_s_sb);
 4266 /* wake up anybody waiting to join. */
4267 clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4268 wake_up(&(journal->j_join_wait));
4269
4270 if (!flush && wait_on_commit &&
4271 journal_list_still_alive(p_s_sb, commit_trans_id)) {
4272 flush_commit_list(p_s_sb, jl, 1);
4273 }
4274 out:
4275 reiserfs_check_lock_depth(p_s_sb, "journal end2");
4276
4277 memset(th, 0, sizeof(*th));
4278 /* Re-set th->t_super, so we can properly keep track of how many
4279 * persistent transactions there are. We need to do this so if this
4280 * call is part of a failed restart_transaction, we can free it later */
4281 th->t_super = p_s_sb;
4282
4283 return journal->j_errno;
4284}
4285
4286static void __reiserfs_journal_abort_hard(struct super_block *sb)
4287{
4288 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4289 if (test_bit(J_ABORTED, &journal->j_state))
4290 return;
4291
4292 printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
4293 reiserfs_bdevname(sb));
4294
4295 sb->s_flags |= MS_RDONLY;
4296 set_bit(J_ABORTED, &journal->j_state);
1da177e4
LT
4297
4298#ifdef CONFIG_REISERFS_CHECK
bd4c625c 4299 dump_stack();
1da177e4
LT
4300#endif
4301}
4302
bd4c625c 4303static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
1da177e4 4304{
bd4c625c
LT
4305 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4306 if (test_bit(J_ABORTED, &journal->j_state))
4307 return;
1da177e4 4308
bd4c625c
LT
4309 if (!journal->j_errno)
4310 journal->j_errno = errno;
1da177e4 4311
bd4c625c 4312 __reiserfs_journal_abort_hard(sb);
1da177e4
LT
4313}
4314
bd4c625c 4315void reiserfs_journal_abort(struct super_block *sb, int errno)
1da177e4 4316{
bd4c625c 4317 return __reiserfs_journal_abort_soft(sb, errno);
1da177e4 4318}