[PATCH] reiserfs: remove kmalloc wrapper
[deliverable/linux.git] / fs / reiserfs / journal.c
1/*
2** Write ahead logging implementation copyright Chris Mason 2000
3**
4** The background commits make this code very interrelated, and
5** overly complex. I need to rethink things a bit....The major players:
6**
7** journal_begin -- call with the number of blocks you expect to log.
8** If the current transaction is too
9** old, it will block until the current transaction is
10** finished, and then start a new one.
11** Usually, your transaction will get joined in with
12** previous ones for speed.
13**
14** journal_join -- same as journal_begin, but won't block on the current
15** transaction regardless of age. Don't ever call
16** this. Ever. There are only two places it should be
17** called from, and they are both inside this file.
18**
19** journal_mark_dirty -- adds blocks into this transaction. clears any flags
20** that might make them get sent to disk
21** and then marks them BH_JDirty. Puts the buffer head
22** into the current transaction hash.
23**
24** journal_end -- if the current transaction is batchable, it does nothing
25** otherwise, it could do an async/synchronous commit, or
26** a full flush of all log and real blocks in the
27** transaction.
28**
29** flush_old_commits -- if the current transaction is too old, it is ended and
30** commit blocks are sent to disk. Forces commit blocks
31** to disk for all backgrounded commits that have been
32** around too long.
33** -- Note, if you call this as an immediate flush from
34** within kupdate, it will ignore the immediate flag
35*/
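/* Illustrative only -- a rough caller-side sketch of the API described above,
** assuming the usual reiserfs helpers declared in reiserfs_fs.h
** (journal_begin, journal_mark_dirty, journal_end, reiserfs_prepare_for_journal):
**
**	struct reiserfs_transaction_handle th;
**	int err;
**
**	err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT);	   reserve log blocks
**	if (err)
**		return err;
**	reiserfs_prepare_for_journal(sb, bh, 1);	   lock bh for logging
**	... modify bh->b_data ...
**	journal_mark_dirty(&th, sb, bh);		   add bh to the transaction
**	err = journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT);	   batched or committed
*/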
36
37#include <linux/config.h>
38#include <asm/uaccess.h>
39#include <asm/system.h>
40
41#include <linux/time.h>
42#include <asm/semaphore.h>
43
44#include <linux/vmalloc.h>
45#include <linux/reiserfs_fs.h>
46
47#include <linux/kernel.h>
48#include <linux/errno.h>
49#include <linux/fcntl.h>
50#include <linux/stat.h>
51#include <linux/string.h>
52#include <linux/smp_lock.h>
53#include <linux/buffer_head.h>
54#include <linux/workqueue.h>
55#include <linux/writeback.h>
56#include <linux/blkdev.h>
57
58/* gets a struct reiserfs_journal_list * from a list head */
59#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
60 j_list))
61#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
62 j_working_list))
63
64/* the number of mounted filesystems. This is used to decide when to
65** start and kill the commit workqueue
66*/
67static int reiserfs_mounted_fs_count;
68
69static struct workqueue_struct *commit_wq;
70
71#define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit
72 structs at 4k */
73#define BUFNR 64 /*read ahead */
74
75/* cnode stat bits. Move these into reiserfs_fs.h */
76
77#define BLOCK_FREED 2 /* this block was freed, and can't be written. */
78#define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */
79
80#define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */
81#define BLOCK_DIRTIED 5
82
83/* journal list state bits */
84#define LIST_TOUCHED 1
85#define LIST_DIRTY 2
86#define LIST_COMMIT_PENDING 4 /* someone will commit this list */
87
88/* flags for do_journal_end */
89#define FLUSH_ALL 1 /* flush commit and real blocks */
90#define COMMIT_NOW 2 /* end and commit this transaction */
91#define WAIT 4 /* wait for the log blocks to hit the disk */
92
93static int do_journal_end(struct reiserfs_transaction_handle *,
94 struct super_block *, unsigned long nblocks,
95 int flags);
96static int flush_journal_list(struct super_block *s,
97 struct reiserfs_journal_list *jl, int flushall);
98static int flush_commit_list(struct super_block *s,
99 struct reiserfs_journal_list *jl, int flushall);
100static int can_dirty(struct reiserfs_journal_cnode *cn);
101static int journal_join(struct reiserfs_transaction_handle *th,
102 struct super_block *p_s_sb, unsigned long nblocks);
103static int release_journal_dev(struct super_block *super,
104 struct reiserfs_journal *journal);
105static int dirty_one_transaction(struct super_block *s,
106 struct reiserfs_journal_list *jl);
107static void flush_async_commits(void *p);
108static void queue_log_writer(struct super_block *s);
109
110/* values for join in do_journal_begin_r */
111enum {
112 JBEGIN_REG = 0, /* regular journal begin */
113 JBEGIN_JOIN = 1, /* join the running transaction if at all possible */
114 JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
115};
116
117static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
118 struct super_block *p_s_sb,
119 unsigned long nblocks, int join);
120
121static void init_journal_hash(struct super_block *p_s_sb)
122{
123 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
124 memset(journal->j_hash_table, 0,
125 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
126}
127
128/*
129** clears BH_Dirty and sticks the buffer on the clean list. Called because I can't allow refile_buffer to
130** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for
131** more details.
132*/
133static int reiserfs_clean_and_file_buffer(struct buffer_head *bh)
134{
135 if (bh) {
136 clear_buffer_dirty(bh);
137 clear_buffer_journal_test(bh);
138 }
139 return 0;
140}
141
142static void disable_barrier(struct super_block *s)
143{
144 REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
145 printk("reiserfs: disabling flush barriers on %s\n",
146 reiserfs_bdevname(s));
147}
148
149static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
150 *p_s_sb)
151{
152 struct reiserfs_bitmap_node *bn;
153 static int id;
154
155 bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS);
156 if (!bn) {
157 return NULL;
158 }
159 bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
160 if (!bn->data) {
161 kfree(bn);
162 return NULL;
163 }
164 bn->id = id++;
165 INIT_LIST_HEAD(&bn->list);
166 return bn;
167}
168
169static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
170{
171 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
172 struct reiserfs_bitmap_node *bn = NULL;
173 struct list_head *entry = journal->j_bitmap_nodes.next;
174
175 journal->j_used_bitmap_nodes++;
176 repeat:
177
178 if (entry != &journal->j_bitmap_nodes) {
179 bn = list_entry(entry, struct reiserfs_bitmap_node, list);
180 list_del(entry);
181 memset(bn->data, 0, p_s_sb->s_blocksize);
182 journal->j_free_bitmap_nodes--;
183 return bn;
184 }
185 bn = allocate_bitmap_node(p_s_sb);
186 if (!bn) {
187 yield();
188 goto repeat;
189 }
190 return bn;
191}
192static inline void free_bitmap_node(struct super_block *p_s_sb,
193 struct reiserfs_bitmap_node *bn)
194{
195 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
196 journal->j_used_bitmap_nodes--;
197 if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
198 kfree(bn->data);
199 kfree(bn);
200 } else {
201 list_add(&bn->list, &journal->j_bitmap_nodes);
202 journal->j_free_bitmap_nodes++;
203 }
204}
205
206static void allocate_bitmap_nodes(struct super_block *p_s_sb)
207{
208 int i;
209 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
210 struct reiserfs_bitmap_node *bn = NULL;
211 for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
212 bn = allocate_bitmap_node(p_s_sb);
213 if (bn) {
214 list_add(&bn->list, &journal->j_bitmap_nodes);
215 journal->j_free_bitmap_nodes++;
216 } else {
217 break; // this is ok, we'll try again when more are needed
218 }
219 }
220}
221
222static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
223 struct reiserfs_list_bitmap *jb)
224{
225 int bmap_nr = block / (p_s_sb->s_blocksize << 3);
226 int bit_nr = block % (p_s_sb->s_blocksize << 3);
227
228 if (!jb->bitmaps[bmap_nr]) {
229 jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
230 }
231 set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
232 return 0;
233}
234
235static void cleanup_bitmap_list(struct super_block *p_s_sb,
236 struct reiserfs_list_bitmap *jb)
237{
238 int i;
239 if (jb->bitmaps == NULL)
240 return;
241
242 for (i = 0; i < SB_BMAP_NR(p_s_sb); i++) {
243 if (jb->bitmaps[i]) {
244 free_bitmap_node(p_s_sb, jb->bitmaps[i]);
245 jb->bitmaps[i] = NULL;
246 }
247 }
248}
249
250/*
251** only call this on FS unmount.
252*/
253static int free_list_bitmaps(struct super_block *p_s_sb,
254 struct reiserfs_list_bitmap *jb_array)
255{
256 int i;
257 struct reiserfs_list_bitmap *jb;
258 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
259 jb = jb_array + i;
260 jb->journal_list = NULL;
261 cleanup_bitmap_list(p_s_sb, jb);
262 vfree(jb->bitmaps);
263 jb->bitmaps = NULL;
264 }
265 return 0;
266}
267
268static int free_bitmap_nodes(struct super_block *p_s_sb)
269{
270 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
271 struct list_head *next = journal->j_bitmap_nodes.next;
272 struct reiserfs_bitmap_node *bn;
273
274 while (next != &journal->j_bitmap_nodes) {
275 bn = list_entry(next, struct reiserfs_bitmap_node, list);
276 list_del(next);
277 kfree(bn->data);
278 kfree(bn);
279 next = journal->j_bitmap_nodes.next;
280 journal->j_free_bitmap_nodes--;
281 }
282
283 return 0;
284}
285
286/*
287** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
288** jb_array is the array to be filled in.
289*/
290int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
291 struct reiserfs_list_bitmap *jb_array,
292 int bmap_nr)
293{
294 int i;
295 int failed = 0;
296 struct reiserfs_list_bitmap *jb;
297 int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *);
298
299 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
300 jb = jb_array + i;
301 jb->journal_list = NULL;
302 jb->bitmaps = vmalloc(mem);
303 if (!jb->bitmaps) {
304 reiserfs_warning(p_s_sb,
305 "clm-2000, unable to allocate bitmaps for journal lists");
306 failed = 1;
307 break;
308 }
309 memset(jb->bitmaps, 0, mem);
310 }
311 if (failed) {
312 free_list_bitmaps(p_s_sb, jb_array);
313 return -1;
314 }
315 return 0;
316}
317
318/*
319** find an available list bitmap. If you can't find one, flush a commit list
320** and try again
321*/
322static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
323 struct reiserfs_journal_list
324 *jl)
325{
326 int i, j;
327 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
328 struct reiserfs_list_bitmap *jb = NULL;
329
330 for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
331 i = journal->j_list_bitmap_index;
332 journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
333 jb = journal->j_list_bitmap + i;
334 if (journal->j_list_bitmap[i].journal_list) {
335 flush_commit_list(p_s_sb,
336 journal->j_list_bitmap[i].
337 journal_list, 1);
338 if (!journal->j_list_bitmap[i].journal_list) {
339 break;
340 }
341 } else {
342 break;
343 }
344 }
345 if (jb->journal_list) { /* double check to make sure if flushed correctly */
346 return NULL;
347 }
348 jb->journal_list = jl;
349 return jb;
350}
351
352/*
353** allocates a new chunk of X nodes, and links them all together as a list.
354** Uses the cnode->next and cnode->prev pointers
355** returns NULL on failure
356*/
357static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes)
358{
359 struct reiserfs_journal_cnode *head;
360 int i;
361 if (num_cnodes <= 0) {
362 return NULL;
363 }
364 head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode));
365 if (!head) {
366 return NULL;
367 }
368 memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode));
369 head[0].prev = NULL;
370 head[0].next = head + 1;
371 for (i = 1; i < num_cnodes; i++) {
372 head[i].prev = head + (i - 1);
373 head[i].next = head + (i + 1); /* if last one, overwrite it after the if */
374 }
375 head[num_cnodes - 1].next = NULL;
376 return head;
377}
378
379/*
380** pulls a cnode off the free list, or returns NULL on failure
381*/
382static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
383{
384 struct reiserfs_journal_cnode *cn;
385 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
386
387 reiserfs_check_lock_depth(p_s_sb, "get_cnode");
388
389 if (journal->j_cnode_free <= 0) {
390 return NULL;
391 }
392 journal->j_cnode_used++;
393 journal->j_cnode_free--;
394 cn = journal->j_cnode_free_list;
395 if (!cn) {
396 return cn;
397 }
398 if (cn->next) {
399 cn->next->prev = NULL;
400 }
401 journal->j_cnode_free_list = cn->next;
402 memset(cn, 0, sizeof(struct reiserfs_journal_cnode));
403 return cn;
404}
405
406/*
407** returns a cnode to the free list
408*/
409static void free_cnode(struct super_block *p_s_sb,
410 struct reiserfs_journal_cnode *cn)
411{
412 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
413
414 reiserfs_check_lock_depth(p_s_sb, "free_cnode");
415
416 journal->j_cnode_used--;
417 journal->j_cnode_free++;
418 /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
419 cn->next = journal->j_cnode_free_list;
420 if (journal->j_cnode_free_list) {
421 journal->j_cnode_free_list->prev = cn;
422 }
423 cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */
424 journal->j_cnode_free_list = cn;
425}
426
427static void clear_prepared_bits(struct buffer_head *bh)
428{
429 clear_buffer_journal_prepared(bh);
430 clear_buffer_journal_restore_dirty(bh);
431}
432
433/* utility function to force a BUG if it is called without the big
434** kernel lock held. caller is the string printed just before calling BUG()
435*/
436void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
437{
438#ifdef CONFIG_SMP
439 if (current->lock_depth < 0) {
440 reiserfs_panic(sb, "%s called without kernel lock held",
441 caller);
442 }
443#else
444 ;
445#endif
446}
447
448/* return a cnode with same dev, block number and size in table, or null if not found */
449static inline struct reiserfs_journal_cnode *get_journal_hash_dev(struct
450 super_block
451 *sb,
452 struct
453 reiserfs_journal_cnode
454 **table,
455 long bl)
456{
457 struct reiserfs_journal_cnode *cn;
458 cn = journal_hash(table, sb, bl);
459 while (cn) {
460 if (cn->blocknr == bl && cn->sb == sb)
461 return cn;
462 cn = cn->hnext;
463 }
464 return (struct reiserfs_journal_cnode *)0;
465}
466
467/*
468** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
469** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
470** being overwritten by a replay after crashing.
471**
472** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting
473** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make
474** sure you never write the block without logging it.
475**
476** next_zero_bit is a suggestion about the next block to try for find_forward.
477** when bl is rejected because it is set in a journal list bitmap, we search
478** for the next zero bit in the bitmap that rejected bl. Then, we return that
479** through next_zero_bit for find_forward to try.
480**
481** Just because we return something in next_zero_bit does not mean we won't
482** reject it on the next call to reiserfs_in_journal
483**
484*/
485int reiserfs_in_journal(struct super_block *p_s_sb,
486 int bmap_nr, int bit_nr, int search_all,
487 b_blocknr_t * next_zero_bit)
488{
489 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
490 struct reiserfs_journal_cnode *cn;
491 struct reiserfs_list_bitmap *jb;
492 int i;
493 unsigned long bl;
494
495 *next_zero_bit = 0; /* always start this at zero. */
496
497 PROC_INFO_INC(p_s_sb, journal.in_journal);
498 /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
499 ** if we crash before the transaction that freed it commits, this transaction won't
500 ** have committed either, and the block will never be written
501 */
502 if (search_all) {
503 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
504 PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
505 jb = journal->j_list_bitmap + i;
506 if (jb->journal_list && jb->bitmaps[bmap_nr] &&
507 test_bit(bit_nr,
508 (unsigned long *)jb->bitmaps[bmap_nr]->
509 data)) {
510 *next_zero_bit =
511 find_next_zero_bit((unsigned long *)
512 (jb->bitmaps[bmap_nr]->
513 data),
514 p_s_sb->s_blocksize << 3,
515 bit_nr + 1);
516 return 1;
517 }
518 }
519 }
520
521 bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
522 /* is it in any old transactions? */
523 if (search_all
524 && (cn =
525 get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
526 return 1;
527 }
528
529 /* is it in the current transaction. This should never happen */
530 if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
531 BUG();
532 return 1;
533 }
534
535 PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
536 /* safe for reuse */
537 return 0;
538}
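/* Illustrative only -- a hypothetical sketch of how the block-allocator side
** might consult reiserfs_in_journal() before reusing a bit from the on-disk
** bitmap (the real callers live in the block allocator code):
**
**	b_blocknr_t next_zero;
**	if (reiserfs_in_journal(s, bmap_nr, bit_nr, 1, &next_zero)) {
**		still pinned by the journal; retry the search at next_zero
**	} else {
**		safe to reuse this block
**	}
*/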
539
540/* insert cn into table
541*/
542static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
543 struct reiserfs_journal_cnode *cn)
544{
545 struct reiserfs_journal_cnode *cn_orig;
546
547 cn_orig = journal_hash(table, cn->sb, cn->blocknr);
548 cn->hnext = cn_orig;
549 cn->hprev = NULL;
550 if (cn_orig) {
551 cn_orig->hprev = cn;
552 }
553 journal_hash(table, cn->sb, cn->blocknr) = cn;
554}
555
556/* lock the current transaction */
557static inline void lock_journal(struct super_block *p_s_sb)
558{
559 PROC_INFO_INC(p_s_sb, journal.lock_journal);
560 down(&SB_JOURNAL(p_s_sb)->j_lock);
561}
562
563/* unlock the current transaction */
564static inline void unlock_journal(struct super_block *p_s_sb)
565{
566 up(&SB_JOURNAL(p_s_sb)->j_lock);
567}
568
569static inline void get_journal_list(struct reiserfs_journal_list *jl)
570{
571 jl->j_refcount++;
572}
573
574static inline void put_journal_list(struct super_block *s,
575 struct reiserfs_journal_list *jl)
576{
577 if (jl->j_refcount < 1) {
578 reiserfs_panic(s, "trans id %lu, refcount at %d",
579 jl->j_trans_id, jl->j_refcount);
580 }
581 if (--jl->j_refcount == 0)
582 kfree(jl);
583}
584
585/*
586** this used to be much more involved, and I'm keeping it just in case things get ugly again.
587** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
588** transaction.
589*/
590static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
591 struct reiserfs_journal_list *jl)
592{
593
594 struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
595 if (jb) {
596 cleanup_bitmap_list(p_s_sb, jb);
597 }
598 jl->j_list_bitmap->journal_list = NULL;
599 jl->j_list_bitmap = NULL;
600}
601
602static int journal_list_still_alive(struct super_block *s,
603 unsigned long trans_id)
604{
605 struct reiserfs_journal *journal = SB_JOURNAL(s);
606 struct list_head *entry = &journal->j_journal_list;
607 struct reiserfs_journal_list *jl;
608
609 if (!list_empty(entry)) {
610 jl = JOURNAL_LIST_ENTRY(entry->next);
611 if (jl->j_trans_id <= trans_id) {
612 return 1;
613 }
614 }
615 return 0;
616}
617
618static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
619{
620 char b[BDEVNAME_SIZE];
621
622 if (buffer_journaled(bh)) {
623 reiserfs_warning(NULL,
624 "clm-2084: pinned buffer %lu:%s sent to disk",
625 bh->b_blocknr, bdevname(bh->b_bdev, b));
626 }
627 if (uptodate)
628 set_buffer_uptodate(bh);
629 else
630 clear_buffer_uptodate(bh);
631 unlock_buffer(bh);
632 put_bh(bh);
633}
634
635static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate)
636{
637 if (uptodate)
638 set_buffer_uptodate(bh);
639 else
640 clear_buffer_uptodate(bh);
641 unlock_buffer(bh);
642 put_bh(bh);
643}
644
645static void submit_logged_buffer(struct buffer_head *bh)
646{
647 get_bh(bh);
648 bh->b_end_io = reiserfs_end_buffer_io_sync;
649 clear_buffer_journal_new(bh);
650 clear_buffer_dirty(bh);
651 if (!test_clear_buffer_journal_test(bh))
652 BUG();
653 if (!buffer_uptodate(bh))
654 BUG();
655 submit_bh(WRITE, bh);
656}
657
658static void submit_ordered_buffer(struct buffer_head *bh)
659{
660 get_bh(bh);
661 bh->b_end_io = reiserfs_end_ordered_io;
662 clear_buffer_dirty(bh);
663 if (!buffer_uptodate(bh))
664 BUG();
665 submit_bh(WRITE, bh);
666}
667
668static int submit_barrier_buffer(struct buffer_head *bh)
669{
670 get_bh(bh);
671 bh->b_end_io = reiserfs_end_ordered_io;
672 clear_buffer_dirty(bh);
673 if (!buffer_uptodate(bh))
674 BUG();
675 return submit_bh(WRITE_BARRIER, bh);
676}
677
678static void check_barrier_completion(struct super_block *s,
679 struct buffer_head *bh)
680{
681 if (buffer_eopnotsupp(bh)) {
682 clear_buffer_eopnotsupp(bh);
683 disable_barrier(s);
684 set_buffer_uptodate(bh);
685 set_buffer_dirty(bh);
686 sync_dirty_buffer(bh);
687 }
688}
689
690#define CHUNK_SIZE 32
691struct buffer_chunk {
692 struct buffer_head *bh[CHUNK_SIZE];
693 int nr;
694};
695
696static void write_chunk(struct buffer_chunk *chunk)
697{
698 int i;
699 get_fs_excl();
700 for (i = 0; i < chunk->nr; i++) {
701 submit_logged_buffer(chunk->bh[i]);
702 }
703 chunk->nr = 0;
704 put_fs_excl();
705}
706
707static void write_ordered_chunk(struct buffer_chunk *chunk)
708{
709 int i;
710 get_fs_excl();
711 for (i = 0; i < chunk->nr; i++) {
712 submit_ordered_buffer(chunk->bh[i]);
713 }
714 chunk->nr = 0;
715 put_fs_excl();
716}
717
718static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
719 spinlock_t * lock, void (fn) (struct buffer_chunk *))
720{
721 int ret = 0;
722 if (chunk->nr >= CHUNK_SIZE)
723 BUG();
724 chunk->bh[chunk->nr++] = bh;
725 if (chunk->nr >= CHUNK_SIZE) {
726 ret = 1;
727 if (lock)
728 spin_unlock(lock);
729 fn(chunk);
730 if (lock)
731 spin_lock(lock);
732 }
733 return ret;
734}
735
736static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
737static struct reiserfs_jh *alloc_jh(void)
738{
739 struct reiserfs_jh *jh;
740 while (1) {
741 jh = kmalloc(sizeof(*jh), GFP_NOFS);
742 if (jh) {
743 atomic_inc(&nr_reiserfs_jh);
744 return jh;
745 }
746 yield();
747 }
748}
749
750/*
751 * we want to free the jh when the buffer has been written
752 * and waited on
753 */
754void reiserfs_free_jh(struct buffer_head *bh)
755{
756 struct reiserfs_jh *jh;
757
758 jh = bh->b_private;
759 if (jh) {
760 bh->b_private = NULL;
761 jh->bh = NULL;
762 list_del_init(&jh->list);
763 kfree(jh);
764 if (atomic_read(&nr_reiserfs_jh) <= 0)
765 BUG();
766 atomic_dec(&nr_reiserfs_jh);
767 put_bh(bh);
768 }
769}
770
771static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
772 int tail)
773{
774 struct reiserfs_jh *jh;
775
776 if (bh->b_private) {
777 spin_lock(&j->j_dirty_buffers_lock);
778 if (!bh->b_private) {
779 spin_unlock(&j->j_dirty_buffers_lock);
780 goto no_jh;
781 }
782 jh = bh->b_private;
783 list_del_init(&jh->list);
784 } else {
785 no_jh:
786 get_bh(bh);
787 jh = alloc_jh();
788 spin_lock(&j->j_dirty_buffers_lock);
789 /* buffer must be locked for __add_jh, should be able to have
790 * two adds at the same time
791 */
792 if (bh->b_private)
793 BUG();
794 jh->bh = bh;
795 bh->b_private = jh;
796 }
797 jh->jl = j->j_current_jl;
798 if (tail)
799 list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
800 else {
801 list_add_tail(&jh->list, &jh->jl->j_bh_list);
802 }
803 spin_unlock(&j->j_dirty_buffers_lock);
804 return 0;
805}
806
807int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh)
808{
809 return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
810}
811int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh)
812{
813 return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
814}
815
816#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
817static int write_ordered_buffers(spinlock_t * lock,
818 struct reiserfs_journal *j,
819 struct reiserfs_journal_list *jl,
820 struct list_head *list)
821{
822 struct buffer_head *bh;
823 struct reiserfs_jh *jh;
824 int ret = j->j_errno;
825 struct buffer_chunk chunk;
826 struct list_head tmp;
827 INIT_LIST_HEAD(&tmp);
828
829 chunk.nr = 0;
830 spin_lock(lock);
831 while (!list_empty(list)) {
832 jh = JH_ENTRY(list->next);
833 bh = jh->bh;
834 get_bh(bh);
835 if (test_set_buffer_locked(bh)) {
836 if (!buffer_dirty(bh)) {
837 list_del_init(&jh->list);
838 list_add(&jh->list, &tmp);
839 goto loop_next;
840 }
841 spin_unlock(lock);
842 if (chunk.nr)
843 write_ordered_chunk(&chunk);
844 wait_on_buffer(bh);
845 cond_resched();
846 spin_lock(lock);
847 goto loop_next;
848 }
849 if (buffer_dirty(bh)) {
850 list_del_init(&jh->list);
851 list_add(&jh->list, &tmp);
852 add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
853 } else {
854 reiserfs_free_jh(bh);
855 unlock_buffer(bh);
856 }
857 loop_next:
858 put_bh(bh);
859 cond_resched_lock(lock);
860 }
861 if (chunk.nr) {
862 spin_unlock(lock);
863 write_ordered_chunk(&chunk);
864 spin_lock(lock);
865 }
866 while (!list_empty(&tmp)) {
867 jh = JH_ENTRY(tmp.prev);
868 bh = jh->bh;
869 get_bh(bh);
870 reiserfs_free_jh(bh);
871
872 if (buffer_locked(bh)) {
873 spin_unlock(lock);
874 wait_on_buffer(bh);
875 spin_lock(lock);
876 }
877 if (!buffer_uptodate(bh)) {
878 ret = -EIO;
879 }
880 put_bh(bh);
881 cond_resched_lock(lock);
882 }
883 spin_unlock(lock);
884 return ret;
885}
886
887static int flush_older_commits(struct super_block *s,
888 struct reiserfs_journal_list *jl)
889{
890 struct reiserfs_journal *journal = SB_JOURNAL(s);
891 struct reiserfs_journal_list *other_jl;
892 struct reiserfs_journal_list *first_jl;
893 struct list_head *entry;
894 unsigned long trans_id = jl->j_trans_id;
895 unsigned long other_trans_id;
896 unsigned long first_trans_id;
897
898 find_first:
899 /*
900 * first we walk backwards to find the oldest uncommitted transaction
901 */
902 first_jl = jl;
903 entry = jl->j_list.prev;
904 while (1) {
905 other_jl = JOURNAL_LIST_ENTRY(entry);
906 if (entry == &journal->j_journal_list ||
907 atomic_read(&other_jl->j_older_commits_done))
908 break;
909
910 first_jl = other_jl;
911 entry = other_jl->j_list.prev;
912 }
913
914 /* if we didn't find any older uncommitted transactions, return now */
915 if (first_jl == jl) {
916 return 0;
917 }
918
919 first_trans_id = first_jl->j_trans_id;
920
921 entry = &first_jl->j_list;
922 while (1) {
923 other_jl = JOURNAL_LIST_ENTRY(entry);
924 other_trans_id = other_jl->j_trans_id;
925
926 if (other_trans_id < trans_id) {
927 if (atomic_read(&other_jl->j_commit_left) != 0) {
928 flush_commit_list(s, other_jl, 0);
929
930 /* list we were called with is gone, return */
931 if (!journal_list_still_alive(s, trans_id))
932 return 1;
933
934 /* the one we just flushed is gone, this means all
935 * older lists are also gone, so first_jl is no longer
936 * valid either. Go back to the beginning.
937 */
938 if (!journal_list_still_alive
939 (s, other_trans_id)) {
940 goto find_first;
941 }
942 }
943 entry = entry->next;
944 if (entry == &journal->j_journal_list)
945 return 0;
946 } else {
947 return 0;
948 }
949 }
950 return 0;
951}
952int reiserfs_async_progress_wait(struct super_block *s)
953{
954 DEFINE_WAIT(wait);
955 struct reiserfs_journal *j = SB_JOURNAL(s);
956 if (atomic_read(&j->j_async_throttle))
957 blk_congestion_wait(WRITE, HZ / 10);
958 return 0;
959}
960
961/*
962** if this journal list still has commit blocks unflushed, send them to disk.
963**
964** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
965** Before the commit block can be written, every other log block must be safely on disk
966**
967*/
968static int flush_commit_list(struct super_block *s,
969 struct reiserfs_journal_list *jl, int flushall)
970{
971 int i;
972 int bn;
973 struct buffer_head *tbh = NULL;
974 unsigned long trans_id = jl->j_trans_id;
975 struct reiserfs_journal *journal = SB_JOURNAL(s);
976 int barrier = 0;
977 int retval = 0;
978
979 reiserfs_check_lock_depth(s, "flush_commit_list");
980
981 if (atomic_read(&jl->j_older_commits_done)) {
982 return 0;
983 }
984
985 get_fs_excl();
986
987 /* before we can put our commit blocks on disk, we have to make sure everyone older than
988 ** us is on disk too
989 */
990 BUG_ON(jl->j_len <= 0);
991 BUG_ON(trans_id == journal->j_trans_id);
992
993 get_journal_list(jl);
994 if (flushall) {
995 if (flush_older_commits(s, jl) == 1) {
996 /* list disappeared during flush_older_commits. return */
997 goto put_jl;
998 }
999 }
1000
1001 /* make sure nobody is trying to flush this one at the same time */
1002 down(&jl->j_commit_lock);
1003 if (!journal_list_still_alive(s, trans_id)) {
1004 up(&jl->j_commit_lock);
1005 goto put_jl;
1006 }
1007 BUG_ON(jl->j_trans_id == 0);
1008
1009 /* this commit is done, exit */
1010 if (atomic_read(&(jl->j_commit_left)) <= 0) {
1011 if (flushall) {
1012 atomic_set(&(jl->j_older_commits_done), 1);
1013 }
1014 up(&jl->j_commit_lock);
1015 goto put_jl;
1016 }
1017
1018 if (!list_empty(&jl->j_bh_list)) {
1019 unlock_kernel();
1020 write_ordered_buffers(&journal->j_dirty_buffers_lock,
1021 journal, jl, &jl->j_bh_list);
1022 lock_kernel();
1023 }
1024 BUG_ON(!list_empty(&jl->j_bh_list));
1025 /*
1026 * for the description block and all the log blocks, submit any buffers
1027 * that haven't already reached the disk
1028 */
1029 atomic_inc(&journal->j_async_throttle);
1030 for (i = 0; i < (jl->j_len + 1); i++) {
1031 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
1032 SB_ONDISK_JOURNAL_SIZE(s);
1033 tbh = journal_find_get_block(s, bn);
1034 if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
1035 ll_rw_block(SWRITE, 1, &tbh);
1036 put_bh(tbh);
1037 }
1038 atomic_dec(&journal->j_async_throttle);
1039
1040 /* We're skipping the commit if there's an error */
1041 if (retval || reiserfs_is_journal_aborted(journal))
1042 barrier = 0;
1043
1044 /* wait on everything written so far before writing the commit
1045 * if we are in barrier mode, send the commit down now
1046 */
1047 barrier = reiserfs_barrier_flush(s);
1048 if (barrier) {
1049 int ret;
1050 lock_buffer(jl->j_commit_bh);
1051 ret = submit_barrier_buffer(jl->j_commit_bh);
1052 if (ret == -EOPNOTSUPP) {
1053 set_buffer_uptodate(jl->j_commit_bh);
1054 disable_barrier(s);
1055 barrier = 0;
1056 }
1057 }
1058 for (i = 0; i < (jl->j_len + 1); i++) {
1059 bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
1060 (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
1061 tbh = journal_find_get_block(s, bn);
1062 wait_on_buffer(tbh);
1063 // since we're using ll_rw_blk above, it might have skipped over
1064 // a locked buffer. Double check here
1065 //
1066 if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
1067 sync_dirty_buffer(tbh);
1068 if (unlikely(!buffer_uptodate(tbh))) {
1069#ifdef CONFIG_REISERFS_CHECK
1070 reiserfs_warning(s, "journal-601, buffer write failed");
1071#endif
1072 retval = -EIO;
1073 }
1074 put_bh(tbh); /* once for journal_find_get_block */
1075 put_bh(tbh); /* once due to original getblk in do_journal_end */
1076 atomic_dec(&(jl->j_commit_left));
1077 }
1078
1079 BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);
1080
1081 if (!barrier) {
1082 /* If there was a write error in the journal - we can't commit
1083 * this transaction - it will be invalid and, if successful,
1084 * will just end up propagating the write error out to
1085 * the file system. */
1086 if (likely(!retval && !reiserfs_is_journal_aborted (journal))) {
1087 if (buffer_dirty(jl->j_commit_bh))
1088 BUG();
1089 mark_buffer_dirty(jl->j_commit_bh) ;
1090 sync_dirty_buffer(jl->j_commit_bh) ;
1091 }
1092 } else
1093 wait_on_buffer(jl->j_commit_bh);
1094
1095 check_barrier_completion(s, jl->j_commit_bh);
1096
1097 /* If there was a write error in the journal - we can't commit this
1098 * transaction - it will be invalid and, if successful, will just end
1099 * up propagating the write error out to the filesystem. */
1100 if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
1101#ifdef CONFIG_REISERFS_CHECK
1102 reiserfs_warning(s, "journal-615: buffer write failed");
1103#endif
1104 retval = -EIO;
1105 }
1106 bforget(jl->j_commit_bh);
1107 if (journal->j_last_commit_id != 0 &&
1108 (jl->j_trans_id - journal->j_last_commit_id) != 1) {
1109 reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
1110 journal->j_last_commit_id, jl->j_trans_id);
1111 }
1112 journal->j_last_commit_id = jl->j_trans_id;
1113
1114 /* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
1115 cleanup_freed_for_journal_list(s, jl);
1116
1117 retval = retval ? retval : journal->j_errno;
1118
1119 /* mark the metadata dirty */
1120 if (!retval)
1121 dirty_one_transaction(s, jl);
1122 atomic_dec(&(jl->j_commit_left));
1123
1124 if (flushall) {
1125 atomic_set(&(jl->j_older_commits_done), 1);
1126 }
1127 up(&jl->j_commit_lock);
1128 put_jl:
1129 put_journal_list(s, jl);
1130
1131 if (retval)
1132 reiserfs_abort(s, retval, "Journal write error in %s",
1133 __FUNCTION__);
1134 put_fs_excl();
1135 return retval;
1136}
1137
1138/*
1139** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
1140** returns NULL if it can't find anything
1141*/
1142static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
1143 reiserfs_journal_cnode
1144 *cn)
1145{
1146 struct super_block *sb = cn->sb;
1147 b_blocknr_t blocknr = cn->blocknr;
1148
1149 cn = cn->hprev;
1150 while (cn) {
1151 if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
1152 return cn->jlist;
1153 }
1154 cn = cn->hprev;
1155 }
1156 return NULL;
1157}
1158
1159static void remove_journal_hash(struct super_block *,
1160 struct reiserfs_journal_cnode **,
1161 struct reiserfs_journal_list *, unsigned long,
1162 int);
1163
1164/*
1165** once all the real blocks have been flushed, it is safe to remove them from the
1166** journal list for this transaction. Aside from freeing the cnode, this also allows the
1167** block to be reallocated for data blocks if it had been deleted.
1168*/
1169static void remove_all_from_journal_list(struct super_block *p_s_sb,
1170 struct reiserfs_journal_list *jl,
1171 int debug)
1172{
1173 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1174 struct reiserfs_journal_cnode *cn, *last;
1175 cn = jl->j_realblock;
1176
1177 /* which is better, to lock once around the whole loop, or
1178 ** to lock for each call to remove_journal_hash?
1179 */
1180 while (cn) {
1181 if (cn->blocknr != 0) {
1182 if (debug) {
1183 reiserfs_warning(p_s_sb,
1184 "block %u, bh is %d, state %ld",
1185 cn->blocknr, cn->bh ? 1 : 0,
1186 cn->state);
1187 }
1188 cn->state = 0;
1189 remove_journal_hash(p_s_sb, journal->j_list_hash_table,
1190 jl, cn->blocknr, 1);
1191 }
1192 last = cn;
1193 cn = cn->next;
1194 free_cnode(p_s_sb, last);
1195 }
1196 jl->j_realblock = NULL;
1197}
1198
1199/*
1200** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
1201** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
1202** releasing blocks in this transaction for reuse as data blocks.
1203** called by flush_journal_list, before it calls remove_all_from_journal_list
1204**
1205*/
1206static int _update_journal_header_block(struct super_block *p_s_sb,
1207 unsigned long offset,
1208 unsigned long trans_id)
1209{
1210 struct reiserfs_journal_header *jh;
1211 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1212
1213 if (reiserfs_is_journal_aborted(journal))
1214 return -EIO;
1215
1216 if (trans_id >= journal->j_last_flush_trans_id) {
1217 if (buffer_locked((journal->j_header_bh))) {
1218 wait_on_buffer((journal->j_header_bh));
1219 if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
1220#ifdef CONFIG_REISERFS_CHECK
1221 reiserfs_warning(p_s_sb,
1222 "journal-699: buffer write failed");
1223#endif
1224 return -EIO;
1225 }
1226 }
1227 journal->j_last_flush_trans_id = trans_id;
1228 journal->j_first_unflushed_offset = offset;
1229 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
1230 b_data);
1231 jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
1232 jh->j_first_unflushed_offset = cpu_to_le32(offset);
1233 jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
1234
1235 if (reiserfs_barrier_flush(p_s_sb)) {
1236 int ret;
1237 lock_buffer(journal->j_header_bh);
1238 ret = submit_barrier_buffer(journal->j_header_bh);
1239 if (ret == -EOPNOTSUPP) {
1240 set_buffer_uptodate(journal->j_header_bh);
1241 disable_barrier(p_s_sb);
1242 goto sync;
1243 }
1244 wait_on_buffer(journal->j_header_bh);
1245 check_barrier_completion(p_s_sb, journal->j_header_bh);
1246 } else {
1247 sync:
1248 set_buffer_dirty(journal->j_header_bh);
1249 sync_dirty_buffer(journal->j_header_bh);
1250 }
1251 if (!buffer_uptodate(journal->j_header_bh)) {
1252 reiserfs_warning(p_s_sb,
1253 "journal-837: IO error during journal replay");
1254 return -EIO;
1255 }
1256 }
1257 return 0;
1258}
1259
1260static int update_journal_header_block(struct super_block *p_s_sb,
1261 unsigned long offset,
1262 unsigned long trans_id)
1263{
1264 return _update_journal_header_block(p_s_sb, offset, trans_id);
1265}
1266
1267/*
1268** flush any and all journal lists older than you are
1269** can only be called from flush_journal_list
1270*/
1271static int flush_older_journal_lists(struct super_block *p_s_sb,
1272 struct reiserfs_journal_list *jl)
1273{
1274 struct list_head *entry;
1275 struct reiserfs_journal_list *other_jl;
1276 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1277 unsigned long trans_id = jl->j_trans_id;
1278
1279 /* we know we are the only ones flushing things, no extra race
1280 * protection is required.
1281 */
1282 restart:
1283 entry = journal->j_journal_list.next;
1284 /* Did we wrap? */
1285 if (entry == &journal->j_journal_list)
1286 return 0;
1287 other_jl = JOURNAL_LIST_ENTRY(entry);
1288 if (other_jl->j_trans_id < trans_id) {
1289 BUG_ON(other_jl->j_refcount <= 0);
1290 /* do not flush all */
1291 flush_journal_list(p_s_sb, other_jl, 0);
1292
1293 /* other_jl is now deleted from the list */
1294 goto restart;
1295 }
1296 return 0;
1297}
1298
1299static void del_from_work_list(struct super_block *s,
1300 struct reiserfs_journal_list *jl)
1301{
1302 struct reiserfs_journal *journal = SB_JOURNAL(s);
1303 if (!list_empty(&jl->j_working_list)) {
1304 list_del_init(&jl->j_working_list);
1305 journal->j_num_work_lists--;
1306 }
1307}
1308
1309/* flush a journal list, both commit and real blocks
1310**
1311** always set flushall to 1, unless you are calling from inside
1312** flush_journal_list
1313**
1314** IMPORTANT. This can only be called while there are no journal writers,
1315** and the journal is locked. That means it can only be called from
1316** do_journal_end, or by journal_release
1317*/
1318static int flush_journal_list(struct super_block *s,
1319 struct reiserfs_journal_list *jl, int flushall)
1320{
1321 struct reiserfs_journal_list *pjl;
1322 struct reiserfs_journal_cnode *cn, *last;
1323 int count;
1324 int was_jwait = 0;
1325 int was_dirty = 0;
1326 struct buffer_head *saved_bh;
1327 unsigned long j_len_saved = jl->j_len;
1328 struct reiserfs_journal *journal = SB_JOURNAL(s);
1329 int err = 0;
1330
1331 BUG_ON(j_len_saved <= 0);
1332
1333 if (atomic_read(&journal->j_wcount) != 0) {
1334 reiserfs_warning(s,
1335 "clm-2048: flush_journal_list called with wcount %d",
1336 atomic_read(&journal->j_wcount));
1337 }
1338 BUG_ON(jl->j_trans_id == 0);
1339
1340 /* if flushall == 0, the lock is already held */
1341 if (flushall) {
1342 down(&journal->j_flush_sem);
1343 } else if (!down_trylock(&journal->j_flush_sem)) {
1344 BUG();
1345 }
1346
1347 count = 0;
1348 if (j_len_saved > journal->j_trans_max) {
1349 reiserfs_panic(s,
1350 "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
1351 j_len_saved, jl->j_trans_id);
1352 return 0;
1353 }
1354
1355 get_fs_excl();
1356
1357 /* if all the work is already done, get out of here */
1358 if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1359 atomic_read(&(jl->j_commit_left)) <= 0) {
1360 goto flush_older_and_return;
1361 }
1362
1363 /* start by putting the commit list on disk. This will also flush
1364 ** the commit lists of any older transactions
1365 */
1366 flush_commit_list(s, jl, 1);
1367
1368 if (!(jl->j_state & LIST_DIRTY)
1369 && !reiserfs_is_journal_aborted(journal))
1370 BUG();
1371
1372 /* are we done now? */
1373 if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
1374 atomic_read(&(jl->j_commit_left)) <= 0) {
1375 goto flush_older_and_return;
1376 }
1377
1378 /* loop through each cnode, see if we need to write it,
1379 ** or wait on a more recent transaction, or just ignore it
1380 */
1381 if (atomic_read(&(journal->j_wcount)) != 0) {
1382 reiserfs_panic(s,
1383 "journal-844: panic journal list is flushing, wcount is not 0\n");
1384 }
1385 cn = jl->j_realblock;
1386 while (cn) {
1387 was_jwait = 0;
1388 was_dirty = 0;
1389 saved_bh = NULL;
1390 /* blocknr of 0 is no longer in the hash, ignore it */
1391 if (cn->blocknr == 0) {
1392 goto free_cnode;
1393 }
1394
1395 /* This transaction failed commit. Don't write out to the disk */
1396 if (!(jl->j_state & LIST_DIRTY))
1397 goto free_cnode;
1398
1399 pjl = find_newer_jl_for_cn(cn);
1400 /* the order is important here. We check pjl to make sure we
1401 ** don't clear BH_JDirty_wait if we aren't the one writing this
1402 ** block to disk
1403 */
1404 if (!pjl && cn->bh) {
1405 saved_bh = cn->bh;
1406
1407 /* we do this to make sure nobody releases the buffer while
1408 ** we are working with it
1409 */
1410 get_bh(saved_bh);
1411
1412 if (buffer_journal_dirty(saved_bh)) {
1413 BUG_ON(!can_dirty(cn));
1414 was_jwait = 1;
1415 was_dirty = 1;
1416 } else if (can_dirty(cn)) {
1417 /* everything with !pjl && jwait should be writable */
1418 BUG();
1419 }
1420 }
1421
1422 /* if someone has this block in a newer transaction, just make
1423 ** sure they are committed, and don't try writing it to disk
1424 */
1425 if (pjl) {
1426 if (atomic_read(&pjl->j_commit_left))
1427 flush_commit_list(s, pjl, 1);
1428 goto free_cnode;
1429 }
1430
1431 /* bh == NULL when the block got to disk on its own, OR,
1432 ** the block got freed in a future transaction
1433 */
1434 if (saved_bh == NULL) {
1435 goto free_cnode;
1436 }
1437
1438 /* this should never happen. kupdate_one_transaction has this list
1439 ** locked while it works, so we should never see a buffer here that
1440 ** is not marked JDirty_wait
1441 */
1442 if ((!was_jwait) && !buffer_locked(saved_bh)) {
1443 reiserfs_warning(s,
1444 "journal-813: BAD! buffer %llu %cdirty %cjwait, "
1445 "not in a newer transaction",
1446 (unsigned long long)saved_bh->
1447 b_blocknr, was_dirty ? ' ' : '!',
1448 was_jwait ? ' ' : '!');
1449 }
1450 if (was_dirty) {
1451 /* we inc again because saved_bh gets decremented at free_cnode */
1452 get_bh(saved_bh);
1453 set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
1454 lock_buffer(saved_bh);
1455 BUG_ON(cn->blocknr != saved_bh->b_blocknr);
1456 if (buffer_dirty(saved_bh))
1457 submit_logged_buffer(saved_bh);
1458 else
1459 unlock_buffer(saved_bh);
1460 count++;
1461 } else {
1462 reiserfs_warning(s,
1463 "clm-2082: Unable to flush buffer %llu in %s",
1464 (unsigned long long)saved_bh->
1465 b_blocknr, __FUNCTION__);
1466 }
1467 free_cnode:
1468 last = cn;
1469 cn = cn->next;
1470 if (saved_bh) {
1471 /* we incremented this to keep others from taking the buffer head away */
1472 put_bh(saved_bh);
1473 if (atomic_read(&(saved_bh->b_count)) < 0) {
1474 reiserfs_warning(s,
1475 "journal-945: saved_bh->b_count < 0");
1476 }
1477 }
1478 }
1479 if (count > 0) {
1480 cn = jl->j_realblock;
1481 while (cn) {
1482 if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
1483 if (!cn->bh) {
1484 reiserfs_panic(s,
1485 "journal-1011: cn->bh is NULL\n");
1486 }
1487 wait_on_buffer(cn->bh);
1488 if (!cn->bh) {
1489 reiserfs_panic(s,
1490 "journal-1012: cn->bh is NULL\n");
1491 }
1492 if (unlikely(!buffer_uptodate(cn->bh))) {
1493#ifdef CONFIG_REISERFS_CHECK
1494 reiserfs_warning(s,
1495 "journal-949: buffer write failed\n");
1496#endif
1497 err = -EIO;
1498 }
1499 /* note, we must clear the JDirty_wait bit after the up to date
1500 ** check, otherwise we race against our flushpage routine
1501 */
1502 BUG_ON(!test_clear_buffer_journal_dirty
1503 (cn->bh));
1504
1505 /* undo the inc from journal_mark_dirty */
1506 put_bh(cn->bh);
1507 brelse(cn->bh);
1508 }
1509 cn = cn->next;
1510 }
1511 }
1512
1513 if (err)
1514 reiserfs_abort(s, -EIO,
1515 "Write error while pushing transaction to disk in %s",
1516 __FUNCTION__);
1517 flush_older_and_return:
1518
1519 /* before we can update the journal header block, we _must_ flush all
1520 ** real blocks from all older transactions to disk. This is because
1521 ** once the header block is updated, this transaction will not be
1522 ** replayed after a crash
1523 */
1524 if (flushall) {
1525 flush_older_journal_lists(s, jl);
1526 }
1527
1528 err = journal->j_errno;
1529 /* before we can remove everything from the hash tables for this
1530 ** transaction, we must make sure it can never be replayed
1531 **
1532 ** since we are only called from do_journal_end, we know for sure there
1533 ** are no allocations going on while we are flushing journal lists. So,
1534 ** we only need to update the journal header block for the last list
1535 ** being flushed
1536 */
1537 if (!err && flushall) {
1538 err =
1539 update_journal_header_block(s,
1540 (jl->j_start + jl->j_len +
1541 2) % SB_ONDISK_JOURNAL_SIZE(s),
1542 jl->j_trans_id);
1543 if (err)
1544 reiserfs_abort(s, -EIO,
1545 "Write error while updating journal header in %s",
1546 __FUNCTION__);
1547 }
1548 remove_all_from_journal_list(s, jl, 0);
1549 list_del_init(&jl->j_list);
1550 journal->j_num_lists--;
1551 del_from_work_list(s, jl);
1552
1553 if (journal->j_last_flush_id != 0 &&
1554 (jl->j_trans_id - journal->j_last_flush_id) != 1) {
1555 reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
1556 journal->j_last_flush_id, jl->j_trans_id);
1557 }
1558 journal->j_last_flush_id = jl->j_trans_id;
1559
1560 /* not strictly required since we are freeing the list, but it should
1561 * help find code using dead lists later on
1562 */
1563 jl->j_len = 0;
1564 atomic_set(&(jl->j_nonzerolen), 0);
1565 jl->j_start = 0;
1566 jl->j_realblock = NULL;
1567 jl->j_commit_bh = NULL;
1568 jl->j_trans_id = 0;
1569 jl->j_state = 0;
1570 put_journal_list(s, jl);
1571 if (flushall)
1572 up(&journal->j_flush_sem);
1573 put_fs_excl();
1574 return err;
1575}
1576
1577static int write_one_transaction(struct super_block *s,
1578 struct reiserfs_journal_list *jl,
1579 struct buffer_chunk *chunk)
1580{
1581 struct reiserfs_journal_cnode *cn;
1582 int ret = 0;
1583
1584 jl->j_state |= LIST_TOUCHED;
1585 del_from_work_list(s, jl);
1586 if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
1587 return 0;
1588 }
1589
1590 cn = jl->j_realblock;
1591 while (cn) {
1592 /* if the blocknr == 0, this has been cleared from the hash,
1593 ** skip it
1594 */
1595 if (cn->blocknr == 0) {
1596 goto next;
1597 }
1598 if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
1599 struct buffer_head *tmp_bh;
1600 /* we can race against journal_mark_freed when we try
1601 * to lock_buffer(cn->bh), so we have to inc the buffer
1602 * count, and recheck things after locking
1603 */
1604 tmp_bh = cn->bh;
1605 get_bh(tmp_bh);
1606 lock_buffer(tmp_bh);
1607 if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
1608 if (!buffer_journal_dirty(tmp_bh) ||
1609 buffer_journal_prepared(tmp_bh))
1610 BUG();
1611 add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
1612 ret++;
1613 } else {
1614 /* note, cn->bh might be null now */
1615 unlock_buffer(tmp_bh);
1616 }
1617 put_bh(tmp_bh);
1618 }
1619 next:
1620 cn = cn->next;
1621 cond_resched();
1622 }
1623 return ret;
1624}
1625
1626/* used by flush_commit_list */
1627static int dirty_one_transaction(struct super_block *s,
1628 struct reiserfs_journal_list *jl)
1629{
1630 struct reiserfs_journal_cnode *cn;
1631 struct reiserfs_journal_list *pjl;
1632 int ret = 0;
1633
1634 jl->j_state |= LIST_DIRTY;
1635 cn = jl->j_realblock;
1636 while (cn) {
1637 /* look for a more recent transaction that logged this
1638 ** buffer. Only the most recent transaction with a buffer in
1639 ** it is allowed to send that buffer to disk
1640 */
1641 pjl = find_newer_jl_for_cn(cn);
1642 if (!pjl && cn->blocknr && cn->bh
1643 && buffer_journal_dirty(cn->bh)) {
1644 BUG_ON(!can_dirty(cn));
1645 /* if the buffer is prepared, it will either be logged
1646 * or restored. If restored, we need to make sure
1647 * it actually gets marked dirty
1648 */
1649 clear_buffer_journal_new(cn->bh);
1650 if (buffer_journal_prepared(cn->bh)) {
1651 set_buffer_journal_restore_dirty(cn->bh);
1652 } else {
1653 set_buffer_journal_test(cn->bh);
1654 mark_buffer_dirty(cn->bh);
1655 }
1656 }
1657 cn = cn->next;
1658 }
1659 return ret;
1660}
1661
1662static int kupdate_transactions(struct super_block *s,
1663 struct reiserfs_journal_list *jl,
1664 struct reiserfs_journal_list **next_jl,
1665 unsigned long *next_trans_id,
1666 int num_blocks, int num_trans)
1667{
1668 int ret = 0;
1669 int written = 0;
1670 int transactions_flushed = 0;
1671 unsigned long orig_trans_id = jl->j_trans_id;
1672 struct buffer_chunk chunk;
1673 struct list_head *entry;
1674 struct reiserfs_journal *journal = SB_JOURNAL(s);
1675 chunk.nr = 0;
1676
1677 down(&journal->j_flush_sem);
1678 if (!journal_list_still_alive(s, orig_trans_id)) {
1679 goto done;
1680 }
1681
1682 /* we've got j_flush_sem held, nobody is going to delete any
1683 * of these lists out from underneath us
1684 */
1685 while ((num_trans && transactions_flushed < num_trans) ||
1686 (!num_trans && written < num_blocks)) {
1687
1688 if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
1689 atomic_read(&jl->j_commit_left)
1690 || !(jl->j_state & LIST_DIRTY)) {
1691 del_from_work_list(s, jl);
1692 break;
1693 }
1694 ret = write_one_transaction(s, jl, &chunk);
1695
1696 if (ret < 0)
1697 goto done;
1698 transactions_flushed++;
1699 written += ret;
1700 entry = jl->j_list.next;
1701
1702 /* did we wrap? */
1703 if (entry == &journal->j_journal_list) {
1704 break;
1705 }
1706 jl = JOURNAL_LIST_ENTRY(entry);
1707
1708 /* don't bother with older transactions */
1709 if (jl->j_trans_id <= orig_trans_id)
1710 break;
1711 }
1712 if (chunk.nr) {
1713 write_chunk(&chunk);
1714 }
1715
1716 done:
1717 up(&journal->j_flush_sem);
1718 return ret;
1719}
1720
1721/* for o_sync and fsync heavy applications, they tend to use
1722** all the journal list slots with tiny transactions. These
1723** trigger lots and lots of calls to update the header block, which
1724** adds seeks and slows things down.
1725**
1726** This function tries to clear out a large chunk of the journal lists
1727** at once, which makes everything faster since only the newest journal
1728** list updates the header block
1729*/
1730static int flush_used_journal_lists(struct super_block *s,
1731 struct reiserfs_journal_list *jl)
1732{
1733 unsigned long len = 0;
1734 unsigned long cur_len;
1735 int ret;
1736 int i;
1737 int limit = 256;
1738 struct reiserfs_journal_list *tjl;
1739 struct reiserfs_journal_list *flush_jl;
1740 unsigned long trans_id;
1741 struct reiserfs_journal *journal = SB_JOURNAL(s);
1742
1743 flush_jl = tjl = jl;
1744
1745 /* in data logging mode, try harder to flush a lot of blocks */
1746 if (reiserfs_data_log(s))
1747 limit = 1024;
1748 /* flush for 256 transactions or limit blocks, whichever comes first */
1749 for (i = 0; i < 256 && len < limit; i++) {
1750 if (atomic_read(&tjl->j_commit_left) ||
1751 tjl->j_trans_id < jl->j_trans_id) {
1752 break;
1753 }
1754 cur_len = atomic_read(&tjl->j_nonzerolen);
1755 if (cur_len > 0) {
1756 tjl->j_state &= ~LIST_TOUCHED;
1757 }
1758 len += cur_len;
1759 flush_jl = tjl;
1760 if (tjl->j_list.next == &journal->j_journal_list)
1761 break;
1762 tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
1763 }
1764 /* try to find a group of blocks we can flush across all the
1765 ** transactions, but only bother if we've actually spanned
1766 ** across multiple lists
1767 */
1768 if (flush_jl != jl) {
1769 ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
1770 }
1771 flush_journal_list(s, flush_jl, 1);
1772 return 0;
1773}
1774
1775/*
1776** removes any nodes in table with name block and dev as bh.
1777** only touches the hnext and hprev pointers.
1778*/
1779void remove_journal_hash(struct super_block *sb,
1780 struct reiserfs_journal_cnode **table,
1781 struct reiserfs_journal_list *jl,
1782 unsigned long block, int remove_freed)
1783{
1784 struct reiserfs_journal_cnode *cur;
1785 struct reiserfs_journal_cnode **head;
1786
1787 head = &(journal_hash(table, sb, block));
1788 if (!head) {
1789 return;
1790 }
1791 cur = *head;
1792 while (cur) {
1793 if (cur->blocknr == block && cur->sb == sb
1794 && (jl == NULL || jl == cur->jlist)
1795 && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
1796 if (cur->hnext) {
1797 cur->hnext->hprev = cur->hprev;
1798 }
1799 if (cur->hprev) {
1800 cur->hprev->hnext = cur->hnext;
1801 } else {
1802 *head = cur->hnext;
1803 }
1804 cur->blocknr = 0;
1805 cur->sb = NULL;
1806 cur->state = 0;
1807 if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
1808 atomic_dec(&(cur->jlist->j_nonzerolen));
1809 cur->bh = NULL;
1810 cur->jlist = NULL;
1811 }
1812 cur = cur->hnext;
1813 }
1814}
1815
1816static void free_journal_ram(struct super_block *p_s_sb)
1817{
1818 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1819 kfree(journal->j_current_jl);
1820 journal->j_num_lists--;
1821
1822 vfree(journal->j_cnode_free_orig);
1823 free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
1824 free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */
1825 if (journal->j_header_bh) {
1826 brelse(journal->j_header_bh);
1827 }
1828 /* j_header_bh is on the journal dev, make sure not to release the journal
1829 * dev until we brelse j_header_bh
1830 */
1831 release_journal_dev(p_s_sb, journal);
1832 vfree(journal);
1833}
1834
1835/*
1836** call on unmount. Only set error to 1 if you haven't made your way out
1837** of read_super() yet. Any other caller must keep error at 0.
1838*/
1839static int do_journal_release(struct reiserfs_transaction_handle *th,
1840 struct super_block *p_s_sb, int error)
1841{
1842 struct reiserfs_transaction_handle myth;
1843 int flushed = 0;
1844 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
1845
1846 /* we only want to flush out transactions if we were called with error == 0
1847 */
1848 if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
1849 /* end the current trans */
1850 BUG_ON(!th->t_trans_id);
1851 do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
1852
1853 /* make sure something gets logged to force our way into the flush code */
1854 if (!journal_join(&myth, p_s_sb, 1)) {
1855 reiserfs_prepare_for_journal(p_s_sb,
1856 SB_BUFFER_WITH_SB(p_s_sb),
1857 1);
1858 journal_mark_dirty(&myth, p_s_sb,
1859 SB_BUFFER_WITH_SB(p_s_sb));
1860 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1861 flushed = 1;
1862 }
1863 }
1864
1865 /* this also catches errors during the do_journal_end above */
1866 if (!error && reiserfs_is_journal_aborted(journal)) {
1867 memset(&myth, 0, sizeof(myth));
1868 if (!journal_join_abort(&myth, p_s_sb, 1)) {
1869 reiserfs_prepare_for_journal(p_s_sb,
1870 SB_BUFFER_WITH_SB(p_s_sb),
1871 1);
1872 journal_mark_dirty(&myth, p_s_sb,
1873 SB_BUFFER_WITH_SB(p_s_sb));
1874 do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
1875 }
1876 }
1877
1878 reiserfs_mounted_fs_count--;
1879 /* wait for all commits to finish */
1880 cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
1881 flush_workqueue(commit_wq);
1882 if (!reiserfs_mounted_fs_count) {
1883 destroy_workqueue(commit_wq);
1884 commit_wq = NULL;
1885 }
1886
1887 free_journal_ram(p_s_sb);
1888
1889 return 0;
1890}
1891
1892/*
1893** call on unmount. flush all journal trans, release all alloc'd ram
1894*/
1895int journal_release(struct reiserfs_transaction_handle *th,
1896 struct super_block *p_s_sb)
1897{
1898 return do_journal_release(th, p_s_sb, 0);
1899}
1900
1901/*
1902** only call from an error condition inside reiserfs_read_super!
1903*/
1904int journal_release_error(struct reiserfs_transaction_handle *th,
1905 struct super_block *p_s_sb)
1906{
1907 return do_journal_release(th, p_s_sb, 1);
1908}
1909
1910/* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
1911static int journal_compare_desc_commit(struct super_block *p_s_sb,
1912 struct reiserfs_journal_desc *desc,
1913 struct reiserfs_journal_commit *commit)
1914{
1915 if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
1916 get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
1917 get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
1918 get_commit_trans_len(commit) <= 0) {
1919 return 1;
1920 }
1921 return 0;
1922}
1923
1924/* returns 0 if it did not find a description block
1925** returns -1 if it found a corrupt commit block
1926** returns 1 if both desc and commit were valid
1927*/
1928static int journal_transaction_is_valid(struct super_block *p_s_sb,
1929 struct buffer_head *d_bh,
1930 unsigned long *oldest_invalid_trans_id,
1931 unsigned long *newest_mount_id)
1932{
1933 struct reiserfs_journal_desc *desc;
1934 struct reiserfs_journal_commit *commit;
1935 struct buffer_head *c_bh;
1936 unsigned long offset;
1937
1938 if (!d_bh)
1939 return 0;
1940
1941 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
1942 if (get_desc_trans_len(desc) > 0
1943 && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
1944 if (oldest_invalid_trans_id && *oldest_invalid_trans_id
1945 && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
1946 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1947 "journal-986: transaction "
1948 "is valid returning because trans_id %d is greater than "
1949 "oldest_invalid %lu",
1950 get_desc_trans_id(desc),
1951 *oldest_invalid_trans_id);
1952 return 0;
1953 }
1954 if (newest_mount_id
1955 && *newest_mount_id > get_desc_mount_id(desc)) {
1956 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1957 "journal-1087: transaction "
1958 "is valid returning because mount_id %d is less than "
1959 "newest_mount_id %lu",
1960 get_desc_mount_id(desc),
1961 *newest_mount_id);
1962 return -1;
1963 }
1964 if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
1965 reiserfs_warning(p_s_sb,
1966 "journal-2018: Bad transaction length %d encountered, ignoring transaction",
1967 get_desc_trans_len(desc));
1968 return -1;
1969 }
1970 offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
1971
1972 /* ok, we have a journal description block, let's see if the transaction was valid */
1973 c_bh =
1974 journal_bread(p_s_sb,
1975 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
1976 ((offset + get_desc_trans_len(desc) +
1977 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
1978 if (!c_bh)
1979 return 0;
1980 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
1981 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
1982 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1983 "journal_transaction_is_valid, commit offset %ld had bad "
1984 "time %d or length %d",
1985 c_bh->b_blocknr -
1986 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
1987 get_commit_trans_id(commit),
1988 get_commit_trans_len(commit));
1989 brelse(c_bh);
1990 if (oldest_invalid_trans_id) {
1991 *oldest_invalid_trans_id =
1992 get_desc_trans_id(desc);
1993 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
1994 "journal-1004: "
1995 "transaction_is_valid setting oldest invalid trans_id "
1996 "to %d",
1997 get_desc_trans_id(desc));
1998 }
1999 return -1;
2000 }
2001 brelse(c_bh);
2002 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2003 "journal-1006: found valid "
2004 "transaction start offset %llu, len %d id %d",
2005 d_bh->b_blocknr -
2006 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2007 get_desc_trans_len(desc),
2008 get_desc_trans_id(desc));
2009 return 1;
2010 } else {
2011 return 0;
2012 }
2013}
2014
2015static void brelse_array(struct buffer_head **heads, int num)
2016{
2017 int i;
2018 for (i = 0; i < num; i++) {
2019 brelse(heads[i]);
2020 }
2021}
2022
2023/*
2024** given the start block, and values for the oldest acceptable transactions,
2025** this either reads in and replays a transaction, or returns because the transaction
2026** is invalid or too old.
2027*/
2028static int journal_read_transaction(struct super_block *p_s_sb,
2029 unsigned long cur_dblock,
2030 unsigned long oldest_start,
2031 unsigned long oldest_trans_id,
2032 unsigned long newest_mount_id)
2033{
2034 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2035 struct reiserfs_journal_desc *desc;
2036 struct reiserfs_journal_commit *commit;
2037 unsigned long trans_id = 0;
2038 struct buffer_head *c_bh;
2039 struct buffer_head *d_bh;
2040 struct buffer_head **log_blocks = NULL;
2041 struct buffer_head **real_blocks = NULL;
2042 unsigned long trans_offset;
2043 int i;
2044 int trans_half;
2045
2046 d_bh = journal_bread(p_s_sb, cur_dblock);
2047 if (!d_bh)
2048 return 1;
2049 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2050 trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2051 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
2052 "journal_read_transaction, offset %llu, len %d mount_id %d",
2053 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2054 get_desc_trans_len(desc), get_desc_mount_id(desc));
2055 if (get_desc_trans_id(desc) < oldest_trans_id) {
2056 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
2057 "journal_read_trans skipping because %lu is too old",
2058 cur_dblock -
2059 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2060 brelse(d_bh);
2061 return 1;
2062 }
2063 if (get_desc_mount_id(desc) != newest_mount_id) {
2064 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
2065 "journal_read_trans skipping because %d is != "
2066 "newest_mount_id %lu", get_desc_mount_id(desc),
2067 newest_mount_id);
2068 brelse(d_bh);
2069 return 1;
2070 }
2071 c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2072 ((trans_offset + get_desc_trans_len(desc) + 1) %
2073 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
2074 if (!c_bh) {
2075 brelse(d_bh);
2076 return 1;
2077 }
2078 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
2079 if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
2080 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2081 "journal_read_transaction, "
2082 "commit offset %llu had bad time %d or length %d",
2083 c_bh->b_blocknr -
2084 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2085 get_commit_trans_id(commit),
2086 get_commit_trans_len(commit));
2087 brelse(c_bh);
2088 brelse(d_bh);
2089 return 1;
2090 }
2091 trans_id = get_desc_trans_id(desc);
2092 /* now we know we've got a good transaction, and it was inside the valid time ranges */
2093 log_blocks = kmalloc(get_desc_trans_len(desc) *
2094 sizeof(struct buffer_head *), GFP_NOFS);
2095 real_blocks = kmalloc(get_desc_trans_len(desc) *
2096 sizeof(struct buffer_head *), GFP_NOFS);
2097 if (!log_blocks || !real_blocks) {
2098 brelse(c_bh);
2099 brelse(d_bh);
2100 kfree(log_blocks);
2101 kfree(real_blocks);
2102 reiserfs_warning(p_s_sb,
2103 "journal-1169: kmalloc failed, unable to mount FS");
2104 return -1;
2105 }
2106 /* get all the buffer heads */
2107 trans_half = journal_trans_half(p_s_sb->s_blocksize);
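/* the desc block lists the first trans_half real block numbers; the rest
   spill over into j_realblock[] of the commit block, as read below */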
2108 for (i = 0; i < get_desc_trans_len(desc); i++) {
2109 log_blocks[i] =
2110 journal_getblk(p_s_sb,
2111 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2112 (trans_offset + 1 +
2113 i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2114 if (i < trans_half) {
2115 real_blocks[i] =
2116 sb_getblk(p_s_sb,
2117 le32_to_cpu(desc->j_realblock[i]));
2118 } else {
2119 real_blocks[i] =
2120 sb_getblk(p_s_sb,
2121 le32_to_cpu(commit->
2122 j_realblock[i - trans_half]));
2123 }
2124 if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
2125 reiserfs_warning(p_s_sb,
2126 "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
2127 goto abort_replay;
2128 }
2129 /* make sure we don't try to replay onto log or reserved area */
2130 if (is_block_in_log_or_reserved_area
2131 (p_s_sb, real_blocks[i]->b_blocknr)) {
2132 reiserfs_warning(p_s_sb,
2133 "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
2134 abort_replay:
2135 brelse_array(log_blocks, i);
2136 brelse_array(real_blocks, i);
2137 brelse(c_bh);
2138 brelse(d_bh);
2139 kfree(log_blocks);
2140 kfree(real_blocks);
2141 return -1;
2142 }
2143 }
2144 /* read in the log blocks, memcpy to the corresponding real block */
2145 ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
2146 for (i = 0; i < get_desc_trans_len(desc); i++) {
2147 wait_on_buffer(log_blocks[i]);
2148 if (!buffer_uptodate(log_blocks[i])) {
2149 reiserfs_warning(p_s_sb,
2150 "journal-1212: REPLAY FAILURE fsck required! buffer write failed");
2151 brelse_array(log_blocks + i,
2152 get_desc_trans_len(desc) - i);
2153 brelse_array(real_blocks, get_desc_trans_len(desc));
2154 brelse(c_bh);
2155 brelse(d_bh);
2156 kfree(log_blocks);
2157 kfree(real_blocks);
2158 return -1;
2159 }
2160 memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data,
2161 real_blocks[i]->b_size);
2162 set_buffer_uptodate(real_blocks[i]);
2163 brelse(log_blocks[i]);
2164 }
2165 /* flush out the real blocks */
2166 for (i = 0; i < get_desc_trans_len(desc); i++) {
2167 set_buffer_dirty(real_blocks[i]);
2168 ll_rw_block(SWRITE, 1, real_blocks + i);
2169 }
2170 for (i = 0; i < get_desc_trans_len(desc); i++) {
2171 wait_on_buffer(real_blocks[i]);
2172 if (!buffer_uptodate(real_blocks[i])) {
2173 reiserfs_warning(p_s_sb,
2174 "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
2175 brelse_array(real_blocks + i,
2176 get_desc_trans_len(desc) - i);
2177 brelse(c_bh);
2178 brelse(d_bh);
2179 kfree(log_blocks);
2180 kfree(real_blocks);
2181 return -1;
2182 }
2183 brelse(real_blocks[i]);
2184 }
2185 cur_dblock =
2186 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2187 ((trans_offset + get_desc_trans_len(desc) +
2188 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2189 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2190 "journal-1095: setting journal " "start to offset %ld",
2191 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
2192
2193 /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
2194 journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2195 journal->j_last_flush_trans_id = trans_id;
2196 journal->j_trans_id = trans_id + 1;
2197 brelse(c_bh);
2198 brelse(d_bh);
2199 kfree(log_blocks);
2200 kfree(real_blocks);
2201 return 0;
2202}
2203
2204/* This function reads blocks starting at block, up to max_block, of bufsize
2205 size (but no more than BUFNR blocks at a time). This proved to improve
2206 mounting speed on self-rebuilding raid5 arrays at least.
2207 Right now it is only used from journal code. But later we might use it
2208 from other places.
2209 Note: Do not use journal_getblk/sb_getblk functions here! */
2210static struct buffer_head *reiserfs_breada(struct block_device *dev, int block,
2211 int bufsize, unsigned int max_block)
2212{
2213 struct buffer_head *bhlist[BUFNR];
2214 unsigned int blocks = BUFNR;
2215 struct buffer_head *bh;
2216 int i, j;
2217
2218 bh = __getblk(dev, block, bufsize);
2219 if (buffer_uptodate(bh))
2220 return (bh);
2221
2222 if (block + BUFNR > max_block) {
2223 blocks = max_block - block;
2224 }
2225 bhlist[0] = bh;
2226 j = 1;
2227 for (i = 1; i < blocks; i++) {
2228 bh = __getblk(dev, block + i, bufsize);
2229 if (buffer_uptodate(bh)) {
2230 brelse(bh);
2231 break;
2232 } else
2233 bhlist[j++] = bh;
2234 }
2235 ll_rw_block(READ, j, bhlist);
2236 for (i = 1; i < j; i++)
2237 brelse(bhlist[i]);
2238 bh = bhlist[0];
2239 wait_on_buffer(bh);
2240 if (buffer_uptodate(bh))
2241 return bh;
2242 brelse(bh);
2243 return NULL;
2244}
2245
2246/*
2247** read and replay the log
2248** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
2249** transaction. This is checked before scanning the whole log for transactions, which keeps normal mount times fast.
2250**
2251** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
2252**
2253** On exit, it sets things up so the first transaction will work correctly.
2254*/
2255static int journal_read(struct super_block *p_s_sb)
2256{
2257 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2258 struct reiserfs_journal_desc *desc;
2259 unsigned long oldest_trans_id = 0;
2260 unsigned long oldest_invalid_trans_id = 0;
2261 time_t start;
2262 unsigned long oldest_start = 0;
2263 unsigned long cur_dblock = 0;
2264 unsigned long newest_mount_id = 9;
2265 struct buffer_head *d_bh;
2266 struct reiserfs_journal_header *jh;
2267 int valid_journal_header = 0;
2268 int replay_count = 0;
2269 int continue_replay = 1;
2270 int ret;
2271 char b[BDEVNAME_SIZE];
2272
2273 cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
2274 reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
2275 bdevname(journal->j_dev_bd, b));
2276 start = get_seconds();
2277
2278 /* step 1, read in the journal header block. Check the transaction it says
2279 ** is the first unflushed, and if that transaction is not valid,
2280 ** replay is done
2281 */
2282 journal->j_header_bh = journal_bread(p_s_sb,
2283 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
2284 + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2285 if (!journal->j_header_bh) {
2286 return 1;
2287 }
2288 jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
2289 if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
2290 le32_to_cpu(jh->j_first_unflushed_offset) <
2291 SB_ONDISK_JOURNAL_SIZE(p_s_sb)
2292 && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
2293 oldest_start =
2294 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2295 le32_to_cpu(jh->j_first_unflushed_offset);
2296 oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2297 newest_mount_id = le32_to_cpu(jh->j_mount_id);
2298 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2299 "journal-1153: found in "
2300 "header: first_unflushed_offset %d, last_flushed_trans_id "
2301 "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
2302 le32_to_cpu(jh->j_last_flush_trans_id));
2303 valid_journal_header = 1;
2304
2305 /* now, we try to read the first unflushed offset. If it is not valid,
2306 ** there is nothing more we can do, and it makes no sense to read
2307 ** through the whole log.
2308 */
2309 d_bh =
2310 journal_bread(p_s_sb,
2311 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2312 le32_to_cpu(jh->j_first_unflushed_offset));
2313 ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
2314 if (!ret) {
2315 continue_replay = 0;
2316 }
2317 brelse(d_bh);
2318 goto start_log_replay;
2319 }
2320
2321 if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
2322 reiserfs_warning(p_s_sb,
2323 "clm-2076: device is readonly, unable to replay log");
2324 return -1;
2325 }
2326
2327 /* ok, there are transactions that need to be replayed. start with the first log block, find
2328 ** all the valid transactions, and pick out the oldest.
2329 */
2330 while (continue_replay
2331 && cur_dblock <
2332 (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2333 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
2334 /* Note that it is required for blocksize of primary fs device and journal
2335 device to be the same */
2336 d_bh =
2337 reiserfs_breada(journal->j_dev_bd, cur_dblock,
2338 p_s_sb->s_blocksize,
2339 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2340 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2341 ret =
2342 journal_transaction_is_valid(p_s_sb, d_bh,
2343 &oldest_invalid_trans_id,
2344 &newest_mount_id);
2345 if (ret == 1) {
2346 desc = (struct reiserfs_journal_desc *)d_bh->b_data;
2347 if (oldest_start == 0) { /* init all oldest_ values */
2348 oldest_trans_id = get_desc_trans_id(desc);
2349 oldest_start = d_bh->b_blocknr;
2350 newest_mount_id = get_desc_mount_id(desc);
2351 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2352 "journal-1179: Setting "
2353 "oldest_start to offset %llu, trans_id %lu",
2354 oldest_start -
2355 SB_ONDISK_JOURNAL_1st_BLOCK
2356 (p_s_sb), oldest_trans_id);
2357 } else if (oldest_trans_id > get_desc_trans_id(desc)) {
2358 /* one we just read was older */
2359 oldest_trans_id = get_desc_trans_id(desc);
2360 oldest_start = d_bh->b_blocknr;
2361 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2362 "journal-1180: Resetting "
2363 "oldest_start to offset %lu, trans_id %lu",
2364 oldest_start -
2365 SB_ONDISK_JOURNAL_1st_BLOCK
2366 (p_s_sb), oldest_trans_id);
2367 }
2368 if (newest_mount_id < get_desc_mount_id(desc)) {
2369 newest_mount_id = get_desc_mount_id(desc);
2370 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2371 "journal-1299: Setting "
2372 "newest_mount_id to %d",
2373 get_desc_mount_id(desc));
2374 }
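/* advance past this transaction's desc block, logged blocks, and commit
   block before looking for the next one */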
2375 cur_dblock += get_desc_trans_len(desc) + 2;
2376 } else {
2377 cur_dblock++;
2378 }
2379 brelse(d_bh);
2380 }
2381
2382 start_log_replay:
2383 cur_dblock = oldest_start;
2384 if (oldest_trans_id) {
2385 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2386 "journal-1206: Starting replay "
2387 "from offset %llu, trans_id %lu",
2388 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2389 oldest_trans_id);
2390
2391 }
2392 replay_count = 0;
2393 while (continue_replay && oldest_trans_id > 0) {
2394 ret =
2395 journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
2396 oldest_trans_id, newest_mount_id);
2397 if (ret < 0) {
2398 return ret;
2399 } else if (ret != 0) {
2400 break;
2401 }
2402 cur_dblock =
2403 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
2404 replay_count++;
2405 if (cur_dblock == oldest_start)
2406 break;
2407 }
2408
2409 if (oldest_trans_id == 0) {
2410 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
2411 "journal-1225: No valid " "transactions found");
2412 }
2413 /* j_start does not get set correctly if we don't replay any transactions.
2414 ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
2415 ** copy the trans_id from the header
2416 */
2417 if (valid_journal_header && replay_count == 0) {
2418 journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset);
2419 journal->j_trans_id =
2420 le32_to_cpu(jh->j_last_flush_trans_id) + 1;
2421 journal->j_last_flush_trans_id =
2422 le32_to_cpu(jh->j_last_flush_trans_id);
2423 journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
2424 } else {
2425 journal->j_mount_id = newest_mount_id + 1;
2426 }
2427 reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
2428 "newest_mount_id to %lu", journal->j_mount_id);
2429 journal->j_first_unflushed_offset = journal->j_start;
2430 if (replay_count > 0) {
2431 reiserfs_info(p_s_sb,
2432 "replayed %d transactions in %lu seconds\n",
2433 replay_count, get_seconds() - start);
2434 }
2435 if (!bdev_read_only(p_s_sb->s_bdev) &&
2436 _update_journal_header_block(p_s_sb, journal->j_start,
2437 journal->j_last_flush_trans_id)) {
2438 /* replay failed, caller must call free_journal_ram and abort
2439 ** the mount
2440 */
2441 return -1;
2442 }
2443 return 0;
2444}
2445
2446static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
2447{
2448 struct reiserfs_journal_list *jl;
2449 retry:
2450 jl = kzalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS);
2451 if (!jl) {
2452 yield();
2453 goto retry;
2454 }
2455 INIT_LIST_HEAD(&jl->j_list);
2456 INIT_LIST_HEAD(&jl->j_working_list);
2457 INIT_LIST_HEAD(&jl->j_tail_bh_list);
2458 INIT_LIST_HEAD(&jl->j_bh_list);
2459 sema_init(&jl->j_commit_lock, 1);
2460 SB_JOURNAL(s)->j_num_lists++;
2461 get_journal_list(jl);
2462 return jl;
2463}
2464
2465static void journal_list_init(struct super_block *p_s_sb)
2466{
2467 SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
2468}
2469
2470static int release_journal_dev(struct super_block *super,
2471 struct reiserfs_journal *journal)
2472{
2473 int result;
2474
2475 result = 0;
2476
2477 if (journal->j_dev_file != NULL) {
2478 result = filp_close(journal->j_dev_file, NULL);
2479 journal->j_dev_file = NULL;
2480 journal->j_dev_bd = NULL;
2481 } else if (journal->j_dev_bd != NULL) {
2482 result = blkdev_put(journal->j_dev_bd);
2483 journal->j_dev_bd = NULL;
2484 }
2485
2486 if (result != 0) {
2487 reiserfs_warning(super,
2488 "sh-457: release_journal_dev: Cannot release journal device: %i",
2489 result);
2490 }
2491 return result;
2492}
2493
2494static int journal_init_dev(struct super_block *super,
2495 struct reiserfs_journal *journal,
2496 const char *jdev_name)
2497{
2498 int result;
2499 dev_t jdev;
2500 int blkdev_mode = FMODE_READ | FMODE_WRITE;
2501 char b[BDEVNAME_SIZE];
2502
2503 result = 0;
2504
2505 journal->j_dev_bd = NULL;
2506 journal->j_dev_file = NULL;
2507 jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
2508 new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
2509
2510 if (bdev_read_only(super->s_bdev))
2511 blkdev_mode = FMODE_READ;
2512
2513 /* there is no "jdev" option and journal is on separate device */
2514 if ((!jdev_name || !jdev_name[0])) {
2515 journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
2516 if (IS_ERR(journal->j_dev_bd)) {
2517 result = PTR_ERR(journal->j_dev_bd);
2518 journal->j_dev_bd = NULL;
2519 reiserfs_warning(super, "sh-458: journal_init_dev: "
2520 "cannot init journal device '%s': %i",
2521 __bdevname(jdev, b), result);
2522 return result;
2523 } else if (jdev != super->s_dev)
2524 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2525 return 0;
2526 }
2527
2528 journal->j_dev_file = filp_open(jdev_name, 0, 0);
2529 if (!IS_ERR(journal->j_dev_file)) {
2530 struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
2531 if (!S_ISBLK(jdev_inode->i_mode)) {
2532 reiserfs_warning(super, "journal_init_dev: '%s' is "
2533 "not a block device", jdev_name);
2534 result = -ENOTBLK;
2535 release_journal_dev(super, journal);
2536 } else {
1da177e4
LT
2537 /* ok */
2538 journal->j_dev_bd = I_BDEV(jdev_inode);
2539 set_blocksize(journal->j_dev_bd, super->s_blocksize);
2540 reiserfs_info(super,
2541 "journal_init_dev: journal device: %s\n",
2542 bdevname(journal->j_dev_bd, b));
2543 }
2544 } else {
2545 result = PTR_ERR(journal->j_dev_file);
2546 journal->j_dev_file = NULL;
2547 reiserfs_warning(super,
2548 "journal_init_dev: Cannot open '%s': %i",
2549 jdev_name, result);
2550 }
2551 return result;
2552}
2553
2554/*
2555** must be called once on fs mount. calls journal_read for you
2556*/
2557int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
2558 int old_format, unsigned int commit_max_age)
2559{
2560 int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
2561 struct buffer_head *bhjh;
2562 struct reiserfs_super_block *rs;
2563 struct reiserfs_journal_header *jh;
2564 struct reiserfs_journal *journal;
2565 struct reiserfs_journal_list *jl;
2566 char b[BDEVNAME_SIZE];
2567
2568 journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
2569 if (!journal) {
2570 reiserfs_warning(p_s_sb,
2571 "journal-1256: unable to get memory for journal structure");
2572 return 1;
2573 }
2574 memset(journal, 0, sizeof(struct reiserfs_journal));
2575 INIT_LIST_HEAD(&journal->j_bitmap_nodes);
2576 INIT_LIST_HEAD(&journal->j_prealloc_list);
2577 INIT_LIST_HEAD(&journal->j_working_list);
2578 INIT_LIST_HEAD(&journal->j_journal_list);
2579 journal->j_persistent_trans = 0;
2580 if (reiserfs_allocate_list_bitmaps(p_s_sb,
2581 journal->j_list_bitmap,
2582 SB_BMAP_NR(p_s_sb)))
2583 goto free_and_return;
2584 allocate_bitmap_nodes(p_s_sb);
2585
2586 /* reserved for journal area support */
2587 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
2588 REISERFS_OLD_DISK_OFFSET_IN_BYTES
2589 / p_s_sb->s_blocksize +
2590 SB_BMAP_NR(p_s_sb) +
2591 1 :
2592 REISERFS_DISK_OFFSET_IN_BYTES /
2593 p_s_sb->s_blocksize + 2);
2594
2595 /* Sanity check to see whether the standard journal fits within the first bitmap
2596 block (relevant for small blocksizes) */
2597 if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
2598 (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
2599 SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
2600 reiserfs_warning(p_s_sb,
2601 "journal-1393: journal does not fit for area "
2602 "addressed by first of bitmap blocks. It starts at "
2603 "%u and its size is %u. Block size %ld",
2604 SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
2605 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2606 p_s_sb->s_blocksize);
2607 goto free_and_return;
2608 }
2609
2610 if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
2611 reiserfs_warning(p_s_sb,
2612 "sh-462: unable to initialize jornal device");
2613 goto free_and_return;
2614 }
2615
2616 rs = SB_DISK_SUPER_BLOCK(p_s_sb);
2617
2618 /* read journal header */
2619 bhjh = journal_bread(p_s_sb,
2620 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
2621 SB_ONDISK_JOURNAL_SIZE(p_s_sb));
2622 if (!bhjh) {
2623 reiserfs_warning(p_s_sb,
2624 "sh-459: unable to read journal header");
2625 goto free_and_return;
2626 }
2627 jh = (struct reiserfs_journal_header *)(bhjh->b_data);
2628
2629 /* make sure that the journal matches the super block */
2630 if (is_reiserfs_jr(rs)
2631 && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
2632 sb_jp_journal_magic(rs))) {
2633 reiserfs_warning(p_s_sb,
2634 "sh-460: journal header magic %x "
2635 "(device %s) does not match to magic found in super "
2636 "block %x", jh->jh_journal.jp_journal_magic,
2637 bdevname(journal->j_dev_bd, b),
2638 sb_jp_journal_magic(rs));
2639 brelse(bhjh);
2640 goto free_and_return;
2641 }
2642
2643 journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max);
2644 journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch);
2645 journal->j_max_commit_age =
2646 le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
2647 journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
2648
2649 if (journal->j_trans_max) {
2650 /* make sure these parameters are sane, and clamp them if they are not */
2651 __u32 initial = journal->j_trans_max;
2652 __u32 ratio = 1;
2653
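/* the default limits below are expressed for 4k blocks; scale them down
   proportionally for smaller block sizes */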
2654 if (p_s_sb->s_blocksize < 4096)
2655 ratio = 4096 / p_s_sb->s_blocksize;
2656
2657 if (SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
2658 JOURNAL_MIN_RATIO)
2659 journal->j_trans_max =
2660 SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
2661 if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
2662 journal->j_trans_max =
2663 JOURNAL_TRANS_MAX_DEFAULT / ratio;
2664 if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
2665 journal->j_trans_max =
2666 JOURNAL_TRANS_MIN_DEFAULT / ratio;
2667
2668 if (journal->j_trans_max != initial)
2669 reiserfs_warning(p_s_sb,
2670 "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
2671 initial, journal->j_trans_max);
2672
2673 journal->j_max_batch = journal->j_trans_max *
2674 JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT;
2675 }
2676
2677 if (!journal->j_trans_max) {
2678 /* the file system was created by an old version of mkreiserfs,
2679 so this field contains a zero value */
2680 journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
2681 journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
2682 journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
2683
2684 /* for blocksize >= 4096 - max transaction size is 1024. For block size < 4096
2685 trans max size is decreased proportionally */
2686 if (p_s_sb->s_blocksize < 4096) {
2687 journal->j_trans_max /= (4096 / p_s_sb->s_blocksize);
2688 journal->j_max_batch = (journal->j_trans_max) * 9 / 10;
2689 }
2690 }
2691
2692 journal->j_default_max_commit_age = journal->j_max_commit_age;
2693
2694 if (commit_max_age != 0) {
2695 journal->j_max_commit_age = commit_max_age;
2696 journal->j_max_trans_age = commit_max_age;
2697 }
2698
2699 reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
2700 "journal first block %u, max trans len %u, max batch %u, "
2701 "max commit age %u, max trans age %u\n",
2702 bdevname(journal->j_dev_bd, b),
2703 SB_ONDISK_JOURNAL_SIZE(p_s_sb),
2704 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
2705 journal->j_trans_max,
2706 journal->j_max_batch,
2707 journal->j_max_commit_age, journal->j_max_trans_age);
2708
2709 brelse(bhjh);
2710
2711 journal->j_list_bitmap_index = 0;
2712 journal_list_init(p_s_sb);
2713
2714 memset(journal->j_list_hash_table, 0,
2715 JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
2716
2717 INIT_LIST_HEAD(&journal->j_dirty_buffers);
2718 spin_lock_init(&journal->j_dirty_buffers_lock);
2719
2720 journal->j_start = 0;
2721 journal->j_len = 0;
2722 journal->j_len_alloc = 0;
2723 atomic_set(&(journal->j_wcount), 0);
2724 atomic_set(&(journal->j_async_throttle), 0);
2725 journal->j_bcount = 0;
2726 journal->j_trans_start_time = 0;
2727 journal->j_last = NULL;
2728 journal->j_first = NULL;
2729 init_waitqueue_head(&(journal->j_join_wait));
2730 sema_init(&journal->j_lock, 1);
2731 sema_init(&journal->j_flush_sem, 1);
2732
2733 journal->j_trans_id = 10;
2734 journal->j_mount_id = 10;
2735 journal->j_state = 0;
2736 atomic_set(&(journal->j_jlock), 0);
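/* num_cnodes was sized above to twice the on-disk journal size */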
2737 journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
2738 journal->j_cnode_free_orig = journal->j_cnode_free_list;
2739 journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
2740 journal->j_cnode_used = 0;
2741 journal->j_must_wait = 0;
2742
2743 if (journal->j_cnode_free == 0) {
2744 reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
2745 "allocation failed (%ld bytes). Journal is "
2746 "too large for available memory. Usually "
2747 "this is due to a journal that is too large.",
2748 sizeof (struct reiserfs_journal_cnode) * num_cnodes);
2749 goto free_and_return;
2750 }
2751
2752 init_journal_hash(p_s_sb);
2753 jl = journal->j_current_jl;
2754 jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
2755 if (!jl->j_list_bitmap) {
2756 reiserfs_warning(p_s_sb,
2757 "journal-2005, get_list_bitmap failed for journal list 0");
2758 goto free_and_return;
2759 }
2760 if (journal_read(p_s_sb) < 0) {
2761 reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
2762 goto free_and_return;
2763 }
2764
2765 reiserfs_mounted_fs_count++;
2766 if (reiserfs_mounted_fs_count <= 1)
2767 commit_wq = create_workqueue("reiserfs");
2768
2769 INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
2770 return 0;
2771 free_and_return:
2772 free_journal_ram(p_s_sb);
2773 return 1;
2774}
2775
2776/*
2777** test for a polite end of the current transaction. Used by file_write, and should
2778** be used by delete to make sure they don't write more than can fit inside a single
2779** transaction
2780*/
2781int journal_transaction_should_end(struct reiserfs_transaction_handle *th,
2782 int new_alloc)
2783{
2784 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2785 time_t now = get_seconds();
2786 /* cannot restart while nested */
2787 BUG_ON(!th->t_trans_id);
2788 if (th->t_refcount > 1)
2789 return 0;
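/* ask the caller to end the handle if the journal is locked, the transaction
   is too full or too old, or free cnodes are running low */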
2790 if (journal->j_must_wait > 0 ||
2791 (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
2792 atomic_read(&(journal->j_jlock)) ||
2793 (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
2794 journal->j_cnode_free < (journal->j_trans_max * 3)) {
2795 return 1;
2796 }
2797 return 0;
2798}
2799
2800/* this must be called inside a transaction, and requires the
2801** kernel_lock to be held
2802*/
2803void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
2804{
2805 struct reiserfs_journal *journal = SB_JOURNAL(th->t_super);
2806 BUG_ON(!th->t_trans_id);
2807 journal->j_must_wait = 1;
2808 set_bit(J_WRITERS_BLOCKED, &journal->j_state);
2809 return;
2810}
2811
2812/* this must be called without a transaction started, and does not
2813** require BKL
2814*/
2815void reiserfs_allow_writes(struct super_block *s)
2816{
2817 struct reiserfs_journal *journal = SB_JOURNAL(s);
2818 clear_bit(J_WRITERS_BLOCKED, &journal->j_state);
2819 wake_up(&journal->j_join_wait);
2820}
2821
2822/* this must be called without a transaction started, and does not
2823** require BKL
2824*/
2825void reiserfs_wait_on_write_block(struct super_block *s)
2826{
2827 struct reiserfs_journal *journal = SB_JOURNAL(s);
2828 wait_event(journal->j_join_wait,
2829 !test_bit(J_WRITERS_BLOCKED, &journal->j_state));
2830}
2831
2832static void queue_log_writer(struct super_block *s)
2833{
2834 wait_queue_t wait;
2835 struct reiserfs_journal *journal = SB_JOURNAL(s);
2836 set_bit(J_WRITERS_QUEUED, &journal->j_state);
2837
2838 /*
2839 * we don't want to use wait_event here because
2840 * we only want to wait once.
2841 */
2842 init_waitqueue_entry(&wait, current);
2843 add_wait_queue(&journal->j_join_wait, &wait);
2844 set_current_state(TASK_UNINTERRUPTIBLE);
2845 if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
2846 schedule();
2847 current->state = TASK_RUNNING;
2848 remove_wait_queue(&journal->j_join_wait, &wait);
2849}
2850
2851static void wake_queued_writers(struct super_block *s)
2852{
2853 struct reiserfs_journal *journal = SB_JOURNAL(s);
2854 if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
2855 wake_up(&journal->j_join_wait);
2856}
2857
2858static void let_transaction_grow(struct super_block *sb, unsigned long trans_id)
2859{
2860 struct reiserfs_journal *journal = SB_JOURNAL(sb);
2861 unsigned long bcount = journal->j_bcount;
2862 while (1) {
2863 schedule_timeout_uninterruptible(1);
2864 journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
2865 while ((atomic_read(&journal->j_wcount) > 0 ||
2866 atomic_read(&journal->j_jlock)) &&
2867 journal->j_trans_id == trans_id) {
2868 queue_log_writer(sb);
2869 }
2870 if (journal->j_trans_id != trans_id)
2871 break;
2872 if (bcount == journal->j_bcount)
2873 break;
2874 bcount = journal->j_bcount;
2875 }
2876}
2877
2878/* join == true if you must join an existing transaction.
2879** join == false if you can deal with waiting for others to finish
2880**
2881** this will block until the transaction is joinable. send the number of blocks you
2882** expect to use in nblocks.
2883*/
2884static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
2885 struct super_block *p_s_sb, unsigned long nblocks,
2886 int join)
2887{
2888 time_t now = get_seconds();
2889 int old_trans_id;
2890 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
2891 struct reiserfs_transaction_handle myth;
2892 int sched_count = 0;
2893 int retval;
2894
2895 reiserfs_check_lock_depth(p_s_sb, "journal_begin");
2896 if (nblocks > journal->j_trans_max)
2897 BUG();
2898
2899 PROC_INFO_INC(p_s_sb, journal.journal_being);
2900 /* set here for journal_join */
2901 th->t_refcount = 1;
2902 th->t_super = p_s_sb;
2903
2904 relock:
2905 lock_journal(p_s_sb);
2906 if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
2907 unlock_journal(p_s_sb);
2908 retval = journal->j_errno;
2909 goto out_fail;
2910 }
2911 journal->j_bcount++;
2912
2913 if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
2914 unlock_journal(p_s_sb);
2915 reiserfs_wait_on_write_block(p_s_sb);
2916 PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
2917 goto relock;
2918 }
2919 now = get_seconds();
2920
2921 /* if there is no room in the journal OR
2922 ** if this transaction is too old and we weren't called joinable, wait for it to finish before beginning.
2923 ** We don't sleep if there aren't other writers.
2924 */
2925
2926 if ((!join && journal->j_must_wait > 0) ||
2927 (!join
2928 && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
2929 || (!join && atomic_read(&journal->j_wcount) > 0
2930 && journal->j_trans_start_time > 0
2931 && (now - journal->j_trans_start_time) >
2932 journal->j_max_trans_age) || (!join
2933 && atomic_read(&journal->j_jlock))
2934 || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
2935
2936 old_trans_id = journal->j_trans_id;
2937 unlock_journal(p_s_sb); /* allow others to finish this transaction */
2938
2939 if (!join && (journal->j_len_alloc + nblocks + 2) >=
2940 journal->j_max_batch &&
2941 ((journal->j_len + nblocks + 2) * 100) <
2942 (journal->j_len_alloc * 75)) {
2943 if (atomic_read(&journal->j_wcount) > 10) {
2944 sched_count++;
2945 queue_log_writer(p_s_sb);
2946 goto relock;
2947 }
2948 }
2949 /* don't mess with joining the transaction if all we have to do is
2950 * wait for someone else to do a commit
2951 */
2952 if (atomic_read(&journal->j_jlock)) {
2953 while (journal->j_trans_id == old_trans_id &&
2954 atomic_read(&journal->j_jlock)) {
2955 queue_log_writer(p_s_sb);
2956 }
2957 goto relock;
2958 }
2959 retval = journal_join(&myth, p_s_sb, 1);
2960 if (retval)
2961 goto out_fail;
2962
2963 /* someone might have ended the transaction while we joined */
2964 if (old_trans_id != journal->j_trans_id) {
2965 retval = do_journal_end(&myth, p_s_sb, 1, 0);
2966 } else {
2967 retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
2968 }
2969
2970 if (retval)
2971 goto out_fail;
2972
2973 PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
2974 goto relock;
2975 }
2976 /* we are the first writer, set trans_id */
2977 if (journal->j_trans_start_time == 0) {
2978 journal->j_trans_start_time = get_seconds();
2979 }
2980 atomic_inc(&(journal->j_wcount));
2981 journal->j_len_alloc += nblocks;
2982 th->t_blocks_logged = 0;
2983 th->t_blocks_allocated = nblocks;
2984 th->t_trans_id = journal->j_trans_id;
2985 unlock_journal(p_s_sb);
2986 INIT_LIST_HEAD(&th->t_list);
2987 get_fs_excl();
2988 return 0;
2989
2990 out_fail:
2991 memset(th, 0, sizeof(*th));
2992 /* Re-set th->t_super, so we can properly keep track of how many
2993 * persistent transactions there are. We need to do this so if this
2994 * call is part of a failed restart_transaction, we can free it later */
2995 th->t_super = p_s_sb;
2996 return retval;
2997}
2998
2999struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
3000 super_block
3001 *s,
3002 int nblocks)
3003{
3004 int ret;
3005 struct reiserfs_transaction_handle *th;
3006
3007 /* if we're nesting into an existing transaction, it will be
3008 ** persistent on its own
3009 */
3010 if (reiserfs_transaction_running(s)) {
3011 th = current->journal_info;
3012 th->t_refcount++;
3013 if (th->t_refcount < 2) {
3014 BUG();
3015 }
3016 return th;
3017 }
3018 th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
3019 if (!th)
3020 return NULL;
3021 ret = journal_begin(th, s, nblocks);
3022 if (ret) {
3023 kfree(th);
3024 return NULL;
3025 }
3026
3027 SB_JOURNAL(s)->j_persistent_trans++;
3028 return th;
3029}
3030
3031int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
3032{
3033 struct super_block *s = th->t_super;
3034 int ret = 0;
3035 if (th->t_trans_id)
3036 ret = journal_end(th, th->t_super, th->t_blocks_allocated);
3037 else
3038 ret = -EIO;
3039 if (th->t_refcount == 0) {
3040 SB_JOURNAL(s)->j_persistent_trans--;
3041 kfree(th);
3042 }
3043 return ret;
3044}
3045
3046static int journal_join(struct reiserfs_transaction_handle *th,
3047 struct super_block *p_s_sb, unsigned long nblocks)
3048{
3049 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3050
3051 /* this keeps do_journal_end from NULLing out the current->journal_info
3052 ** pointer
3053 */
3054 th->t_handle_save = cur_th;
3055 if (cur_th && cur_th->t_refcount > 1) {
3056 BUG();
3057 }
3058 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
3059}
3060
3061int journal_join_abort(struct reiserfs_transaction_handle *th,
3062 struct super_block *p_s_sb, unsigned long nblocks)
3063{
3064 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3065
3066 /* this keeps do_journal_end from NULLing out the current->journal_info
3067 ** pointer
3068 */
3069 th->t_handle_save = cur_th;
3070 if (cur_th && cur_th->t_refcount > 1) {
3071 BUG();
3072 }
3073 return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
3074}
3075
3076int journal_begin(struct reiserfs_transaction_handle *th,
3077 struct super_block *p_s_sb, unsigned long nblocks)
3078{
3079 struct reiserfs_transaction_handle *cur_th = current->journal_info;
3080 int ret;
3081
3082 th->t_handle_save = NULL;
3083 if (cur_th) {
3084 /* we are nesting into the current transaction */
3085 if (cur_th->t_super == p_s_sb) {
3086 BUG_ON(!cur_th->t_refcount);
3087 cur_th->t_refcount++;
3088 memcpy(th, cur_th, sizeof(*th));
3089 if (th->t_refcount <= 1)
3090 reiserfs_warning(p_s_sb,
3091 "BAD: refcount <= 1, but journal_info != 0");
3092 return 0;
3093 } else {
3094 /* we've ended up with a handle from a different filesystem.
3095 ** save it and restore on journal_end. This should never
3096 ** really happen...
3097 */
3098 reiserfs_warning(p_s_sb,
3099 "clm-2100: nesting info a different FS");
3100 th->t_handle_save = current->journal_info;
3101 current->journal_info = th;
3102 }
3103 } else {
3104 current->journal_info = th;
3105 }
3106 ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
3107 if (current->journal_info != th)
3108 BUG();
3109
3110 /* I guess this boils down to being the reciprocal of clm-2100 above.
3111 * If do_journal_begin_r fails, we need to put it back, since journal_end
3112 * won't be called to do it. */
3113 if (ret)
3114 current->journal_info = th->t_handle_save;
3115 else
3116 BUG_ON(!th->t_refcount);
3117
3118 return ret;
3119}
3120
3121/*
3122** puts bh into the current transaction. If it was already there, it removes the
3123** old pointers from the hash, and puts new ones in (to make sure replay happens in the right order).
3124**
3125** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
3126** transaction is committed.
3127**
3128** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to j_len + JOURNAL_PER_BALANCE_CNT.
3129*/
3130int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3131 struct super_block *p_s_sb, struct buffer_head *bh)
3132{
3133 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3134 struct reiserfs_journal_cnode *cn = NULL;
3135 int count_already_incd = 0;
3136 int prepared = 0;
3137 BUG_ON(!th->t_trans_id);
3138
3139 PROC_INFO_INC(p_s_sb, journal.mark_dirty);
3140 if (th->t_trans_id != journal->j_trans_id) {
3141 reiserfs_panic(th->t_super,
3142 "journal-1577: handle trans id %ld != current trans id %ld\n",
3143 th->t_trans_id, journal->j_trans_id);
3144 }
3145
3146 p_s_sb->s_dirt = 1;
3147
3148 prepared = test_clear_buffer_journal_prepared(bh);
3149 clear_buffer_journal_restore_dirty(bh);
3150 /* already in this transaction, we are done */
3151 if (buffer_journaled(bh)) {
3152 PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
3153 return 0;
3154 }
3155
3156 /* this must be turned into a panic instead of a warning. We can't allow
3157 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
3158 ** could get to disk too early. NOT GOOD.
3159 */
3160 if (!prepared || buffer_dirty(bh)) {
3161 reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
3162 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
3163 (unsigned long long)bh->b_blocknr,
3164 prepared ? ' ' : '!',
3165 buffer_locked(bh) ? ' ' : '!',
3166 buffer_dirty(bh) ? ' ' : '!',
3167 buffer_journal_dirty(bh) ? ' ' : '!');
3168 }
3169
3170 if (atomic_read(&(journal->j_wcount)) <= 0) {
3171 reiserfs_warning(p_s_sb,
3172 "journal-1409: journal_mark_dirty returning because j_wcount was %d",
3173 atomic_read(&(journal->j_wcount)));
3174 return 1;
3175 }
3176 /* this error means I've screwed up, and we've overflowed the transaction.
3177 ** Nothing can be done here, except make the FS readonly or panic.
3178 */
3179 if (journal->j_len >= journal->j_trans_max) {
3180 reiserfs_panic(th->t_super,
3181 "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
3182 journal->j_len);
3183 }
3184
3185 if (buffer_journal_dirty(bh)) {
3186 count_already_incd = 1;
3187 PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
3188 clear_buffer_journal_dirty(bh);
3189 }
3190
3191 if (journal->j_len > journal->j_len_alloc) {
3192 journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
3193 }
3194
3195 set_buffer_journaled(bh);
3196
3197 /* now put this guy on the end */
3198 if (!cn) {
3199 cn = get_cnode(p_s_sb);
3200 if (!cn) {
3201 reiserfs_panic(p_s_sb, "get_cnode failed!\n");
3202 }
3203
3204 if (th->t_blocks_logged == th->t_blocks_allocated) {
3205 th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
3206 journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
3207 }
3208 th->t_blocks_logged++;
3209 journal->j_len++;
3210
3211 cn->bh = bh;
3212 cn->blocknr = bh->b_blocknr;
3213 cn->sb = p_s_sb;
3214 cn->jlist = NULL;
3215 insert_journal_hash(journal->j_hash_table, cn);
3216 if (!count_already_incd) {
3217 get_bh(bh);
3218 }
3219 }
3220 cn->next = NULL;
3221 cn->prev = journal->j_last;
3222 cn->bh = bh;
3223 if (journal->j_last) {
3224 journal->j_last->next = cn;
3225 journal->j_last = cn;
3226 } else {
3227 journal->j_first = cn;
3228 journal->j_last = cn;
3229 }
3230 return 0;
3231}
3232
3233int journal_end(struct reiserfs_transaction_handle *th,
3234 struct super_block *p_s_sb, unsigned long nblocks)
3235{
3236 if (!current->journal_info && th->t_refcount > 1)
3237 reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
3238 th->t_refcount);
3239
3240 if (!th->t_trans_id) {
3241 WARN_ON(1);
3242 return -EIO;
3243 }
3244
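/* a nested handle just drops its reference; only the outermost handle
   actually ends the transaction via do_journal_end below */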
3245 th->t_refcount--;
3246 if (th->t_refcount > 0) {
3247 struct reiserfs_transaction_handle *cur_th =
3248 current->journal_info;
3249
3250 /* we aren't allowed to close a nested transaction on a different
3251 ** filesystem from the one in the task struct
3252 */
3253 if (cur_th->t_super != th->t_super)
3254 BUG();
3255
3256 if (th != cur_th) {
3257 memcpy(current->journal_info, th, sizeof(*th));
3258 th->t_trans_id = 0;
3259 }
3260 return 0;
3261 } else {
3262 return do_journal_end(th, p_s_sb, nblocks, 0);
3263 }
3264}
3265
3266/* removes a block from the current transaction, releasing and decrementing any counters.
3267** also files the removed buffer directly onto the clean list
3268**
3269** called by journal_mark_freed when a block has been deleted
3270**
3271** returns 1 if it cleaned and released the buffer. 0 otherwise
3272*/
3273static int remove_from_transaction(struct super_block *p_s_sb,
3274 b_blocknr_t blocknr, int already_cleaned)
3275{
3276 struct buffer_head *bh;
3277 struct reiserfs_journal_cnode *cn;
3278 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3279 int ret = 0;
3280
3281 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3282 if (!cn || !cn->bh) {
3283 return ret;
3284 }
3285 bh = cn->bh;
3286 if (cn->prev) {
3287 cn->prev->next = cn->next;
3288 }
3289 if (cn->next) {
3290 cn->next->prev = cn->prev;
3291 }
3292 if (cn == journal->j_first) {
3293 journal->j_first = cn->next;
3294 }
3295 if (cn == journal->j_last) {
3296 journal->j_last = cn->prev;
3297 }
3298 if (bh)
3299 remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
3300 bh->b_blocknr, 0);
3301 clear_buffer_journaled(bh); /* don't log this one */
3302
3303 if (!already_cleaned) {
3304 clear_buffer_journal_dirty(bh);
3305 clear_buffer_dirty(bh);
3306 clear_buffer_journal_test(bh);
3307 put_bh(bh);
3308 if (atomic_read(&(bh->b_count)) < 0) {
3309 reiserfs_warning(p_s_sb,
3310 "journal-1752: remove from trans, b_count < 0");
3311 }
3312 ret = 1;
3313 }
3314 journal->j_len--;
3315 journal->j_len_alloc--;
3316 free_cnode(p_s_sb, cn);
3317 return ret;
3318}
3319
3320/*
3321** for any cnode in a journal list, it can only be dirtied if all the
3322** transactions that include it are committed to disk.
3323** this checks through each transaction, and returns 1 if you are allowed to dirty,
3324** and 0 if you aren't
3325**
3326** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
3327** blocks for a given transaction on disk
3328**
3329*/
3330static int can_dirty(struct reiserfs_journal_cnode *cn)
3331{
3332 struct super_block *sb = cn->sb;
3333 b_blocknr_t blocknr = cn->blocknr;
3334 struct reiserfs_journal_cnode *cur = cn->hprev;
3335 int can_dirty = 1;
3336
3337 /* first test hprev. These are all newer than cn, so any node here
3338 ** with the same block number and dev means this node can't be sent
3339 ** to disk right now.
3340 */
3341 while (cur && can_dirty) {
3342 if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
3343 cur->blocknr == blocknr) {
3344 can_dirty = 0;
3345 }
3346 cur = cur->hprev;
3347 }
3348 /* then test hnext. These are all older than cn. As long as they
3349 ** are committed to the log, it is safe to write cn to disk
3350 */
3351 cur = cn->hnext;
3352 while (cur && can_dirty) {
3353 if (cur->jlist && cur->jlist->j_len > 0 &&
3354 atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
3355 cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
3356 can_dirty = 0;
3357 }
3358 cur = cur->hnext;
3359 }
3360 return can_dirty;
3361}
3362
3363/* syncs the commit blocks, but does not force the real buffers to disk.
3364** will wait until the current transaction is done/committed before returning
3365*/
3366int journal_end_sync(struct reiserfs_transaction_handle *th,
3367 struct super_block *p_s_sb, unsigned long nblocks)
3368{
3369 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3370
3371 BUG_ON(!th->t_trans_id);
3372 /* you can sync while nested, very, very bad */
3373 if (th->t_refcount > 1) {
3374 BUG();
3375 }
3376 if (journal->j_len == 0) {
3377 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3378 1);
3379 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3380 }
3381 return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
3382}
3383
3384/*
3385** writeback the pending async commits to disk
3386*/
3387static void flush_async_commits(void *p)
3388{
3389 struct super_block *p_s_sb = p;
3390 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3391 struct reiserfs_journal_list *jl;
3392 struct list_head *entry;
3393
3394 lock_kernel();
3395 if (!list_empty(&journal->j_journal_list)) {
3396 /* last entry is the youngest, commit it and you get everything */
3397 entry = journal->j_journal_list.prev;
3398 jl = JOURNAL_LIST_ENTRY(entry);
3399 flush_commit_list(p_s_sb, jl, 1);
3400 }
3401 unlock_kernel();
3402 /*
3403 * this is a little racy, but there's no harm in missing
3404 * the filemap_fdatawrite
3405 */
3406 if (!atomic_read(&journal->j_async_throttle)
3407 && !reiserfs_is_journal_aborted(journal)) {
3408 atomic_inc(&journal->j_async_throttle);
3409 filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
3410 atomic_dec(&journal->j_async_throttle);
3411 }
3412}
3413
3414/*
3415** flushes any old transactions to disk
3416** ends the current transaction if it is too old
3417*/
3418int reiserfs_flush_old_commits(struct super_block *p_s_sb)
3419{
3420 time_t now;
3421 struct reiserfs_transaction_handle th;
3422 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3423
3424 now = get_seconds();
3425 /* safety check so we don't flush while we are replaying the log during
3426 * mount
3427 */
3428 if (list_empty(&journal->j_journal_list)) {
3429 return 0;
3430 }
3431
3432 /* check the current transaction. If there are no writers, and it is
3433 * too old, finish it, and force the commit blocks to disk
3434 */
3435 if (atomic_read(&journal->j_wcount) <= 0 &&
3436 journal->j_trans_start_time > 0 &&
3437 journal->j_len > 0 &&
3438 (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3439 if (!journal_join(&th, p_s_sb, 1)) {
3440 reiserfs_prepare_for_journal(p_s_sb,
3441 SB_BUFFER_WITH_SB(p_s_sb),
3442 1);
3443 journal_mark_dirty(&th, p_s_sb,
3444 SB_BUFFER_WITH_SB(p_s_sb));
3445
3446 /* we're only being called from kreiserfsd, it makes no sense to do
3447 ** an async commit so that kreiserfsd can do it later
3448 */
3449 do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
3450 }
3451 }
3452 return p_s_sb->s_dirt;
3453}
3454
3455/*
3456** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
3457**
3458** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
3459** the writers are done. By the time it wakes up, the transaction it was called with has already ended, so it just
3460** flushes the commit list and returns 0.
3461**
3462** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
3463**
3464** Note, we can't allow the journal_end to proceed while there are still writers in the log.
3465*/
3466static int check_journal_end(struct reiserfs_transaction_handle *th,
3467 struct super_block *p_s_sb, unsigned long nblocks,
3468 int flags)
3469{
3470
3471 time_t now;
3472 int flush = flags & FLUSH_ALL;
3473 int commit_now = flags & COMMIT_NOW;
3474 int wait_on_commit = flags & WAIT;
3475 struct reiserfs_journal_list *jl;
3476 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3477
3478 BUG_ON(!th->t_trans_id);
3479
3480 if (th->t_trans_id != journal->j_trans_id) {
3481 reiserfs_panic(th->t_super,
3482 "journal-1577: handle trans id %ld != current trans id %ld\n",
3483 th->t_trans_id, journal->j_trans_id);
3484 }
3485
3486 journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
3487 if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
3488 atomic_dec(&(journal->j_wcount));
3489 }
3490
3491 /* BUG, deal with the case where j_len is 0, but blocks previously freed still need to be released.
3492 ** This will be dealt with by the next transaction that actually writes something, but should be taken
3493 ** care of in this trans
3494 */
3495 if (journal->j_len == 0) {
3496 BUG();
3497 }
3498 /* if wcount > 0, and we are called to with flush or commit_now,
3499 ** we wait on j_join_wait. We will wake up when the last writer has
3500 ** finished the transaction, and started it on its way to the disk.
3501 ** Then, we flush the commit or journal list, and just return 0
3502 ** because the rest of journal end was already done for this transaction.
3503 */
3504 if (atomic_read(&(journal->j_wcount)) > 0) {
3505 if (flush || commit_now) {
3506 unsigned trans_id;
3507
3508 jl = journal->j_current_jl;
3509 trans_id = jl->j_trans_id;
3510 if (wait_on_commit)
3511 jl->j_state |= LIST_COMMIT_PENDING;
3512 atomic_set(&(journal->j_jlock), 1);
3513 if (flush) {
3514 journal->j_next_full_flush = 1;
3515 }
3516 unlock_journal(p_s_sb);
3517
3518 /* sleep while the current transaction is still j_jlocked */
3519 while (journal->j_trans_id == trans_id) {
3520 if (atomic_read(&journal->j_jlock)) {
3521 queue_log_writer(p_s_sb);
3522 } else {
3523 lock_journal(p_s_sb);
3524 if (journal->j_trans_id == trans_id) {
3525 atomic_set(&(journal->j_jlock),
3526 1);
3527 }
3528 unlock_journal(p_s_sb);
3529 }
3530 }
3531 if (journal->j_trans_id == trans_id) {
3532 BUG();
3533 }
3534 if (commit_now
3535 && journal_list_still_alive(p_s_sb, trans_id)
3536 && wait_on_commit) {
3537 flush_commit_list(p_s_sb, jl, 1);
3538 }
3539 return 0;
3540 }
3541 unlock_journal(p_s_sb);
3542 return 0;
3543 }
3544
3545 /* deal with old transactions where we are the last writers */
3546 now = get_seconds();
3547 if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
3548 commit_now = 1;
3549 journal->j_next_async_flush = 1;
3550 }
3551 /* don't batch when someone is waiting on j_join_wait */
3552 /* don't batch when syncing the commit or flushing the whole trans */
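	/* batching heuristic: only batch (return 0 and let this transaction keep
	** growing) when nobody has to wait (j_must_wait), nobody holds or waits on
	** the join lock (j_jlock), no flush or commit_now was requested, the
	** transaction is under j_max_batch in both logged and allocated length,
	** and more than three transactions' worth of free cnodes remain.
	*/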
3553 if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
3554 && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
3555 && journal->j_len_alloc < journal->j_max_batch
3556 && journal->j_cnode_free > (journal->j_trans_max * 3)) {
3557 journal->j_bcount++;
3558 unlock_journal(p_s_sb);
3559 return 0;
3560 }
3561
3562 if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
3563 reiserfs_panic(p_s_sb,
3564 "journal-003: journal_end: j_start (%ld) is too high\n",
3565 journal->j_start);
3566 }
3567 return 1;
3568}
3569
3570/*
3571** Does all the work that makes deleting blocks safe.
3572** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
3573**
3574** otherwise:
3575** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
3576** before this transaction has finished.
3577**
3578** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
3579** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
3580** the block can't be reallocated yet.
3581**
3582** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
3583*/
3584int journal_mark_freed(struct reiserfs_transaction_handle *th,
3585 struct super_block *p_s_sb, b_blocknr_t blocknr)
3586{
3587 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3588 struct reiserfs_journal_cnode *cn = NULL;
3589 struct buffer_head *bh = NULL;
3590 struct reiserfs_list_bitmap *jb = NULL;
3591 int cleaned = 0;
3592 BUG_ON(!th->t_trans_id);
3593
3594 cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
3595 if (cn && cn->bh) {
3596 bh = cn->bh;
3597 get_bh(bh);
3598 }
3599 /* if it is journal new, we just remove it from this transaction */
3600 if (bh && buffer_journal_new(bh)) {
3601 clear_buffer_journal_new(bh);
3602 clear_prepared_bits(bh);
3603 reiserfs_clean_and_file_buffer(bh);
3604 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3605 } else {
3606 /* set the bit for this block in the journal bitmap for this transaction */
3607 jb = journal->j_current_jl->j_list_bitmap;
3608 if (!jb) {
3609 reiserfs_panic(p_s_sb,
3610 "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
3611 }
3612 set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
3613
3614 /* Note, the entire while loop is not allowed to schedule. */
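		/* (presumably because the j_list_hash_table walk below is only
		 * protected by the coarse journal/kernel locks held by our caller;
		 * sleeping could let cnodes and their bh pointers change underneath us)
		 */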
3615
3616 if (bh) {
3617 clear_prepared_bits(bh);
3618 reiserfs_clean_and_file_buffer(bh);
3619 }
3620 cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
3621
3622 /* find all older transactions with this block, make sure they don't try to write it out */
3623 cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
3624 blocknr);
3625 while (cn) {
3626 if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
3627 set_bit(BLOCK_FREED, &cn->state);
3628 if (cn->bh) {
3629 if (!cleaned) {
3630 /* remove_from_transaction will brelse the buffer if it was
3631 ** in the current trans
3632 */
3633 clear_buffer_journal_dirty(cn->
3634 bh);
3635 clear_buffer_dirty(cn->bh);
3636 clear_buffer_journal_test(cn->
3637 bh);
3638 cleaned = 1;
3639 put_bh(cn->bh);
3640 if (atomic_read
3641 (&(cn->bh->b_count)) < 0) {
3642 reiserfs_warning(p_s_sb,
3643 "journal-2138: cn->bh->b_count < 0");
3644 }
3645 }
3646 if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
3647 atomic_dec(&
3648 (cn->jlist->
3649 j_nonzerolen));
3650 }
3651 cn->bh = NULL;
3652 }
3653 }
3654 cn = cn->hnext;
3655 }
3656 }
3657
3658 if (bh) {
3659 put_bh(bh); /* get_hash grabs the buffer */
3660 if (atomic_read(&(bh->b_count)) < 0) {
3661 reiserfs_warning(p_s_sb,
3662 "journal-2165: bh->b_count < 0");
3663 }
3664 }
3665 return 0;
3666}
3667
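/*
 * Remember which transaction last touched this inode, so that a later
 * reiserfs_commit_for_inode() can find and commit exactly that transaction
 * (see __commit_trans_jl below).
 */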
3668void reiserfs_update_inode_transaction(struct inode *inode)
3669{
3670 struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
3671 REISERFS_I(inode)->i_jl = journal->j_current_jl;
3672 REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
3673}
3674
3675/*
3676 * returns -1 on error, 0 if no commits/barriers were done and 1
3677 * if a transaction was actually committed and the barrier was done
3678 */
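/*
 * Two paths below: if the id still refers to the running transaction we join
 * it, let it grow a little, and end it synchronously; if it refers to an older
 * transaction we only flush that list's commit block, provided the list is
 * still alive.
 */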
3679static int __commit_trans_jl(struct inode *inode, unsigned long id,
3680			     struct reiserfs_journal_list *jl)
3681 {
3682 struct reiserfs_transaction_handle th;
3683 struct super_block *sb = inode->i_sb;
3684 struct reiserfs_journal *journal = SB_JOURNAL(sb);
3685 int ret = 0;
3686
3687 /* is it from the current transaction, or from an unknown transaction? */
3688 if (id == journal->j_trans_id) {
3689 jl = journal->j_current_jl;
3690 /* try to let other writers come in and grow this transaction */
3691 let_transaction_grow(sb, id);
3692 if (journal->j_trans_id != id) {
3693 goto flush_commit_only;
3694 }
3695
3696 ret = journal_begin(&th, sb, 1);
3697 if (ret)
3698 return ret;
3699
3700 /* someone might have ended this transaction while we joined */
3701 if (journal->j_trans_id != id) {
3702 reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
3703 1);
3704 journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
3705 ret = journal_end(&th, sb, 1);
3706 goto flush_commit_only;
3707 }
3708
3709 ret = journal_end_sync(&th, sb, 1);
3710 if (!ret)
3711 ret = 1;
3712
3713 } else {
3714 /* this gets tricky, we have to make sure the journal list in
3715 * the inode still exists. We know the list is still around
3716 * if we've got a larger transaction id than the oldest list
3717 */
3718 flush_commit_only:
3719 if (journal_list_still_alive(inode->i_sb, id)) {
3720 /*
3721 * we only set ret to 1 when we know for sure
3722 * the barrier hasn't been started yet on the commit
3723 * block.
3724 */
3725 if (atomic_read(&jl->j_commit_left) > 1)
3726 ret = 1;
3727 flush_commit_list(sb, jl, 1);
3728 if (journal->j_errno)
3729 ret = journal->j_errno;
3730 }
3731	}
3732 /* otherwise the list is gone, and long since committed */
3733 return ret;
3734}
3735
3736int reiserfs_commit_for_inode(struct inode *inode)
3737{
3738 unsigned long id = REISERFS_I(inode)->i_trans_id;
3739 struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
3740
3741	/* for the whole inode, an unset id is assumed to mean it was
3742	 * changed in the current transaction, which is the more conservative choice
3743	 */
3744 if (!id || !jl) {
3745 reiserfs_update_inode_transaction(inode);
3746 id = REISERFS_I(inode)->i_trans_id;
3747 /* jl will be updated in __commit_trans_jl */
3748 }
3749
3750 return __commit_trans_jl(inode, id, jl);
3751}
3752
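/*
 * Undo reiserfs_prepare_for_journal(): if the buffer was journal dirty when it
 * was prepared and it is still safe to dirty it, re-dirty it now, then clear
 * the prepared bit.
 */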
3753void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
3754 struct buffer_head *bh)
3755{
3756 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3757 PROC_INFO_INC(p_s_sb, journal.restore_prepared);
3758 if (!bh) {
3759 return;
3760 }
3761 if (test_clear_buffer_journal_restore_dirty(bh) &&
3762 buffer_journal_dirty(bh)) {
3763 struct reiserfs_journal_cnode *cn;
3764 cn = get_journal_hash_dev(p_s_sb,
3765 journal->j_list_hash_table,
3766 bh->b_blocknr);
3767 if (cn && can_dirty(cn)) {
3768 set_buffer_journal_test(bh);
3769 mark_buffer_dirty(bh);
3770 }
3771 }
3772 clear_buffer_journal_prepared(bh);
3773}
3774
3775extern struct tree_balance *cur_tb;
3776/*
3777** before we can change a metadata block, we have to make sure it won't
3778** be written to disk while we are altering it. So, we must:
3779** clean it
3780** wait on it.
3781**
3782*/
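/*
 * A minimal sketch of the usual caller pattern inside a transaction, as this
 * file itself does it (see flush_old_commits and __commit_trans_jl above):
 *
 *	struct reiserfs_transaction_handle th;
 *
 *	journal_begin(&th, sb, 1);
 *	reiserfs_prepare_for_journal(sb, bh, 1);
 *	... modify bh ...
 *	journal_mark_dirty(&th, sb, bh);
 *	journal_end(&th, sb, 1);
 */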
3783int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
3784 struct buffer_head *bh, int wait)
3785{
3786 PROC_INFO_INC(p_s_sb, journal.prepare);
3787
3788 if (test_set_buffer_locked(bh)) {
3789 if (!wait)
3790 return 0;
3791 lock_buffer(bh);
3792 }
3793 set_buffer_journal_prepared(bh);
3794 if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
3795 clear_buffer_journal_test(bh);
3796 set_buffer_journal_restore_dirty(bh);
3797 }
3798 unlock_buffer(bh);
3799 return 1;
3800}
3801
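/*
 * Called at the end of do_journal_end: send any journal lists that have been
 * sitting around for more than four times the maximum transaction age out to
 * disk, oldest first, stopping at the first list that is still young enough.
 */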
3802static void flush_old_journal_lists(struct super_block *s)
3803{
3804 struct reiserfs_journal *journal = SB_JOURNAL(s);
3805 struct reiserfs_journal_list *jl;
3806 struct list_head *entry;
3807 time_t now = get_seconds();
3808
3809 while (!list_empty(&journal->j_journal_list)) {
3810 entry = journal->j_journal_list.next;
3811 jl = JOURNAL_LIST_ENTRY(entry);
3812 /* this check should always be run, to send old lists to disk */
3813 if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
3814 flush_used_journal_lists(s, jl);
3815 } else {
3816 break;
3817 }
3818	}
3819}
3820
3821/*
3822** long and ugly. If flush, will not return until all commit
3823** blocks and all real buffers in the trans are on disk.
3824** If no_async, won't return until all commit blocks are on disk.
3825**
3826** keep reading, there are comments as you go along
3827**
3828** If the journal is aborted, we just clean up. Things like flushing
3829** journal lists, etc just won't happen.
3830*/
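/*
 * Rough structure of do_journal_end:
 *  1) decide via check_journal_end whether to batch, wait, or really commit
 *  2) build the description and commit blocks and the journal list cnode array
 *  3) copy every journaled buffer into the log area and mark the copies dirty
 *  4) install a fresh current journal list and reset the per-transaction state
 *  5) write any ordered tail-conversion buffers
 *  6) flush the commit (and optionally the whole list) now, or queue it
 */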
3831static int do_journal_end(struct reiserfs_transaction_handle *th,
3832 struct super_block *p_s_sb, unsigned long nblocks,
3833 int flags)
3834{
3835 struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
3836 struct reiserfs_journal_cnode *cn, *next, *jl_cn;
3837 struct reiserfs_journal_cnode *last_cn = NULL;
3838 struct reiserfs_journal_desc *desc;
3839 struct reiserfs_journal_commit *commit;
3840 struct buffer_head *c_bh; /* commit bh */
3841 struct buffer_head *d_bh; /* desc bh */
3842 int cur_write_start = 0; /* start index of current log write */
3843 int old_start;
3844 int i;
3845 int flush = flags & FLUSH_ALL;
3846 int wait_on_commit = flags & WAIT;
3847 struct reiserfs_journal_list *jl, *temp_jl;
3848 struct list_head *entry, *safe;
3849 unsigned long jindex;
3850 unsigned long commit_trans_id;
3851 int trans_half;
3852
3853 BUG_ON(th->t_refcount > 1);
3854 BUG_ON(!th->t_trans_id);
3855
3856 put_fs_excl();
3857 current->journal_info = th->t_handle_save;
3858 reiserfs_check_lock_depth(p_s_sb, "journal end");
3859 if (journal->j_len == 0) {
3860 reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
3861 1);
3862 journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
3863 }
3864
3865 lock_journal(p_s_sb);
3866 if (journal->j_next_full_flush) {
3867 flags |= FLUSH_ALL;
3868 flush = 1;
3869 }
3870 if (journal->j_next_async_flush) {
3871 flags |= COMMIT_NOW | WAIT;
3872 wait_on_commit = 1;
3873 }
3874
3875	/* check_journal_end is called with the journal locked (we took the lock above);
3876	** it unlocks the journal unless it returns 1, which tells us to finish the journal_end ourselves
3877 */
3878 if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
3879 p_s_sb->s_dirt = 1;
3880 wake_queued_writers(p_s_sb);
3881 reiserfs_async_progress_wait(p_s_sb);
3882 goto out;
3883 }
3884
3885 /* check_journal_end might set these, check again */
3886 if (journal->j_next_full_flush) {
3887 flush = 1;
3888 }
3889
3890 /*
3891	** j_must_wait means we have to flush the log blocks, and the real blocks, for
3892 ** this transaction
3893 */
3894 if (journal->j_must_wait > 0) {
3895 flush = 1;
3896 }
3897#ifdef REISERFS_PREALLOCATE
3898 /* quota ops might need to nest, setup the journal_info pointer for them
3899 * and raise the refcount so that it is > 0. */
3900	current->journal_info = th;
3901	th->t_refcount++;
3902	reiserfs_discard_all_prealloc(th);	/* it should not add new blocks to
3903						 * the transaction */
3904	th->t_refcount--;
3905	current->journal_info = th->t_handle_save;
3906#endif
3907
3908 /* setup description block */
3909 d_bh =
3910 journal_getblk(p_s_sb,
3911 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3912 journal->j_start);
3913 set_buffer_uptodate(d_bh);
3914 desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
3915 memset(d_bh->b_data, 0, d_bh->b_size);
3916 memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
3917 set_desc_trans_id(desc, journal->j_trans_id);
3918
3919 /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
3920 c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
3921 ((journal->j_start + journal->j_len +
3922 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
3923 commit = (struct reiserfs_journal_commit *)c_bh->b_data;
3924 memset(c_bh->b_data, 0, c_bh->b_size);
3925 set_commit_trans_id(commit, journal->j_trans_id);
3926 set_buffer_uptodate(c_bh);
3927
3928 /* init this journal list */
3929 jl = journal->j_current_jl;
3930
3931 /* we lock the commit before doing anything because
3932 * we want to make sure nobody tries to run flush_commit_list until
3933 * the new transaction is fully setup, and we've already flushed the
3934 * ordered bh list
3935 */
3936 down(&jl->j_commit_lock);
3937
3938 /* save the transaction id in case we need to commit it later */
3939 commit_trans_id = jl->j_trans_id;
3940
3941 atomic_set(&jl->j_older_commits_done, 0);
3942 jl->j_trans_id = journal->j_trans_id;
3943 jl->j_timestamp = journal->j_trans_start_time;
3944 jl->j_commit_bh = c_bh;
3945 jl->j_start = journal->j_start;
3946 jl->j_len = journal->j_len;
3947 atomic_set(&jl->j_nonzerolen, journal->j_len);
3948 atomic_set(&jl->j_commit_left, journal->j_len + 2);
3949 jl->j_realblock = NULL;
3950
3951 /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
3952 ** for each real block, add it to the journal list hash,
3953 ** copy into real block index array in the commit or desc block
3954 */
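	/* the description block holds the first trans_half real-block numbers and
	 * the commit block holds the rest; journal_trans_half() is, roughly, how
	 * many 32-bit block numbers fit in one block alongside the desc/commit header
	 */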
3955 trans_half = journal_trans_half(p_s_sb->s_blocksize);
3956 for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
3957 if (buffer_journaled(cn->bh)) {
3958 jl_cn = get_cnode(p_s_sb);
3959 if (!jl_cn) {
3960 reiserfs_panic(p_s_sb,
3961 "journal-1676, get_cnode returned NULL\n");
3962 }
3963 if (i == 0) {
3964 jl->j_realblock = jl_cn;
3965 }
3966 jl_cn->prev = last_cn;
3967 jl_cn->next = NULL;
3968 if (last_cn) {
3969 last_cn->next = jl_cn;
3970 }
3971 last_cn = jl_cn;
3972			/* make sure the block we are trying to log is not a block
3973			   of the journal or reserved area */
3974
3975 if (is_block_in_log_or_reserved_area
3976 (p_s_sb, cn->bh->b_blocknr)) {
3977 reiserfs_panic(p_s_sb,
3978 "journal-2332: Trying to log block %lu, which is a log block\n",
3979 cn->bh->b_blocknr);
3980 }
3981 jl_cn->blocknr = cn->bh->b_blocknr;
3982 jl_cn->state = 0;
3983 jl_cn->sb = p_s_sb;
3984 jl_cn->bh = cn->bh;
3985 jl_cn->jlist = jl;
3986 insert_journal_hash(journal->j_list_hash_table, jl_cn);
3987 if (i < trans_half) {
3988 desc->j_realblock[i] =
3989 cpu_to_le32(cn->bh->b_blocknr);
3990 } else {
3991 commit->j_realblock[i - trans_half] =
3992 cpu_to_le32(cn->bh->b_blocknr);
3993 }
3994 } else {
3995 i--;
3996 }
3997 }
3998 set_desc_trans_len(desc, journal->j_len);
3999 set_desc_mount_id(desc, journal->j_mount_id);
4000 set_desc_trans_id(desc, journal->j_trans_id);
4001 set_commit_trans_len(commit, journal->j_len);
4002
4003 /* special check in case all buffers in the journal were marked for not logging */
4004 if (journal->j_len == 0) {
4005 BUG();
4006 }
4007
4008 /* we're about to dirty all the log blocks, mark the description block
4009 * dirty now too. Don't mark the commit block dirty until all the
4010 * others are on disk
4011 */
4012 mark_buffer_dirty(d_bh);
4013
4014 /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
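	/* each journaled buffer is copied to log block
	 *	SB_ONDISK_JOURNAL_1st_BLOCK + ((j_start + jindex) % journal size)
	 * so, for example, with an 8192-block log, j_start == 8190 and jindex == 3,
	 * the copy wraps around and lands at offset 1 from the start of the log
	 */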
4015 cur_write_start = journal->j_start;
4016 cn = journal->j_first;
4017 jindex = 1; /* start at one so we don't get the desc again */
4018 while (cn) {
4019 clear_buffer_journal_new(cn->bh);
4020 /* copy all the real blocks into log area. dirty log blocks */
4021 if (buffer_journaled(cn->bh)) {
4022 struct buffer_head *tmp_bh;
4023 char *addr;
4024 struct page *page;
4025 tmp_bh =
4026 journal_getblk(p_s_sb,
4027 SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
4028 ((cur_write_start +
4029 jindex) %
4030 SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
4031 set_buffer_uptodate(tmp_bh);
4032 page = cn->bh->b_page;
4033 addr = kmap(page);
4034 memcpy(tmp_bh->b_data,
4035 addr + offset_in_page(cn->bh->b_data),
4036 cn->bh->b_size);
4037 kunmap(page);
4038 mark_buffer_dirty(tmp_bh);
4039 jindex++;
4040 set_buffer_journal_dirty(cn->bh);
4041 clear_buffer_journaled(cn->bh);
4042 } else {
4043 /* JDirty cleared sometime during transaction. don't log this one */
4044 reiserfs_warning(p_s_sb,
4045 "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
4046 brelse(cn->bh);
4047 }
4048 next = cn->next;
4049 free_cnode(p_s_sb, cn);
4050 cn = next;
4051 cond_resched();
4052 }
4053
4054 /* we are done with both the c_bh and d_bh, but
4055 ** c_bh must be written after all other commit blocks,
4056	** so we dirty/brelse c_bh in flush_commit_list, with commit_left <= 1.
4057 */
4058
4059 journal->j_current_jl = alloc_journal_list(p_s_sb);
4060
4061 /* now it is safe to insert this transaction on the main list */
4062 list_add_tail(&jl->j_list, &journal->j_journal_list);
4063 list_add_tail(&jl->j_working_list, &journal->j_working_list);
4064 journal->j_num_work_lists++;
4065
4066 /* reset journal values for the next transaction */
4067 old_start = journal->j_start;
4068 journal->j_start =
4069 (journal->j_start + journal->j_len +
4070 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
4071 atomic_set(&(journal->j_wcount), 0);
4072 journal->j_bcount = 0;
4073 journal->j_last = NULL;
4074 journal->j_first = NULL;
4075 journal->j_len = 0;
4076 journal->j_trans_start_time = 0;
4077 journal->j_trans_id++;
4078 journal->j_current_jl->j_trans_id = journal->j_trans_id;
4079 journal->j_must_wait = 0;
4080 journal->j_len_alloc = 0;
4081 journal->j_next_full_flush = 0;
4082 journal->j_next_async_flush = 0;
4083 init_journal_hash(p_s_sb);
4084
4085 // make sure reiserfs_add_jh sees the new current_jl before we
4086 // write out the tails
4087 smp_mb();
4088
4089 /* tail conversion targets have to hit the disk before we end the
4090 * transaction. Otherwise a later transaction might repack the tail
4091 * before this transaction commits, leaving the data block unflushed and
4092 * clean, if we crash before the later transaction commits, the data block
4093 * is lost.
4094 */
4095 if (!list_empty(&jl->j_tail_bh_list)) {
4096 unlock_kernel();
4097 write_ordered_buffers(&journal->j_dirty_buffers_lock,
4098 journal, jl, &jl->j_tail_bh_list);
4099 lock_kernel();
4100 }
4101 if (!list_empty(&jl->j_tail_bh_list))
4102 BUG();
4103 up(&jl->j_commit_lock);
4104
4105	/* honor the flush wishes from the caller; simple commits can
4106	** be done outside the journal lock, and those are done below
4107	**
4108	** if we don't flush the commit list right now, we put it on
4109	** the work queue, so processes waiting on async progress don't
4110	** have to wait for this proc to flush journal lists and such
4111 */
4112 if (flush) {
4113 flush_commit_list(p_s_sb, jl, 1);
4114 flush_journal_list(p_s_sb, jl, 1);
4115 } else if (!(jl->j_state & LIST_COMMIT_PENDING))
4116 queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
4117
4118 /* if the next transaction has any chance of wrapping, flush
4119 ** transactions that might get overwritten. If any journal lists are very
4120	** old, flush them as well.
4121 */
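	/* two cases below: lists that start at or ahead of the new j_start and
	 * would be reached by the next transaction (at most j_trans_max + 1 blocks),
	 * and, when that next transaction would wrap past the end of the log, lists
	 * near the start of the log that the wrapped portion would reach
	 */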
4122 first_jl:
4123 list_for_each_safe(entry, safe, &journal->j_journal_list) {
4124 temp_jl = JOURNAL_LIST_ENTRY(entry);
4125 if (journal->j_start <= temp_jl->j_start) {
4126 if ((journal->j_start + journal->j_trans_max + 1) >=
4127 temp_jl->j_start) {
4128 flush_used_journal_lists(p_s_sb, temp_jl);
4129 goto first_jl;
4130 } else if ((journal->j_start +
4131 journal->j_trans_max + 1) <
4132 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4133 /* if we don't cross into the next transaction and we don't
4134				 * wrap, there is no way we can overlap any later transactions;
4135				 * break now
4136 */
4137 break;
4138 }
4139 } else if ((journal->j_start +
4140 journal->j_trans_max + 1) >
4141 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
4142 if (((journal->j_start + journal->j_trans_max + 1) %
4143 SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
4144 temp_jl->j_start) {
4145 flush_used_journal_lists(p_s_sb, temp_jl);
4146 goto first_jl;
4147 } else {
4148				/* we don't overlap anything from our start to the end of the
4149 * log, and our wrapped portion doesn't overlap anything at
4150 * the start of the log. We can break
4151 */
4152 break;
4153 }
4154 }
4155 }
4156 flush_old_journal_lists(p_s_sb);
4157
4158 journal->j_current_jl->j_list_bitmap =
4159 get_list_bitmap(p_s_sb, journal->j_current_jl);
4160
4161 if (!(journal->j_current_jl->j_list_bitmap)) {
4162 reiserfs_panic(p_s_sb,
4163 "journal-1996: do_journal_end, could not get a list bitmap\n");
4164 }
4165
4166 atomic_set(&(journal->j_jlock), 0);
4167 unlock_journal(p_s_sb);
4168	/* wake up anybody waiting to join. */
4169 clear_bit(J_WRITERS_QUEUED, &journal->j_state);
4170 wake_up(&(journal->j_join_wait));
4171
4172 if (!flush && wait_on_commit &&
4173 journal_list_still_alive(p_s_sb, commit_trans_id)) {
4174 flush_commit_list(p_s_sb, jl, 1);
4175 }
4176 out:
4177 reiserfs_check_lock_depth(p_s_sb, "journal end2");
4178
4179 memset(th, 0, sizeof(*th));
4180 /* Re-set th->t_super, so we can properly keep track of how many
4181 * persistent transactions there are. We need to do this so if this
4182 * call is part of a failed restart_transaction, we can free it later */
4183 th->t_super = p_s_sb;
4184
4185 return journal->j_errno;
4186}
4187
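/*
 * Hard abort: mark the journal aborted and force the filesystem read-only.
 * The soft variant below records the first errno as well; both are no-ops if
 * the journal has already been aborted.
 */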
4188static void __reiserfs_journal_abort_hard(struct super_block *sb)
4189{
4190 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4191 if (test_bit(J_ABORTED, &journal->j_state))
4192 return;
4193
4194 printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
4195 reiserfs_bdevname(sb));
4196
4197 sb->s_flags |= MS_RDONLY;
4198 set_bit(J_ABORTED, &journal->j_state);
4199
4200#ifdef CONFIG_REISERFS_CHECK
4201	dump_stack();
4202#endif
4203}
4204
4205 static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
4206 {
4207 struct reiserfs_journal *journal = SB_JOURNAL(sb);
4208 if (test_bit(J_ABORTED, &journal->j_state))
4209 return;
4210
4211 if (!journal->j_errno)
4212 journal->j_errno = errno;
4213
4214	__reiserfs_journal_abort_hard(sb);
4215}
4216
4217 void reiserfs_journal_abort(struct super_block *sb, int errno)
4218 {
4219	__reiserfs_journal_abort_soft(sb, errno);
4220 }