/*
 * segment.c - NILFS segment constructor.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "page.h"
#include "segment.h"
#include "sufile.h"
#include "cpfile.h"
#include "ifile.h"
#include "segbuf.h"
/*
 * Segment constructor
 */
#define SC_N_INODEVEC	16   /* Size of locally allocated inode vector */

#define SC_MAX_SEGDELTA 64   /* Upper limit of the number of segments
				appended in collection retry loop */
/* Construction mode */
enum {
	SC_LSEG_SR = 1,	/* Make a logical segment having a super root */
	SC_LSEG_DSYNC,	/* Flush data blocks of a given file and make
			   a logical segment without a super root */
	SC_FLUSH_FILE,	/* Flush data files, leads to segment writes without
			   creating a checkpoint */
	SC_FLUSH_DAT,	/* Flush DAT file. This also creates segments without
			   creating a checkpoint */
};
/* Stage numbers of dirty block collection */
enum {
	NILFS_ST_INIT = 0,
	NILFS_ST_GC,		/* Collecting dirty blocks for GC */
	NILFS_ST_FILE,
	NILFS_ST_IFILE,
	NILFS_ST_CPFILE,
	NILFS_ST_SUFILE,
	NILFS_ST_DAT,
	NILFS_ST_SR,		/* Super root */
	NILFS_ST_DSYNC,		/* Data sync blocks */
	NILFS_ST_DONE,
};
/* State flags of collection */
#define NILFS_CF_NODE		0x0001	/* Collecting node blocks */
#define NILFS_CF_IFILE_STARTED	0x0002	/* IFILE stage has started */
#define NILFS_CF_SUFREED	0x0004	/* segment usages have been freed */
#define NILFS_CF_HISTORY_MASK	(NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
/* Operations depending on the construction mode and file type */
struct nilfs_sc_operations {
	int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
			    struct inode *);
	void (*write_data_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
	void (*write_node_binfo)(struct nilfs_sc_info *,
				 struct nilfs_segsum_pointer *,
				 union nilfs_binfo *);
};
/*
 * Other definitions
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
static void nilfs_dispose_list(struct nilfs_sb_info *, struct list_head *,
			       int);
#define nilfs_cnt32_gt(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(b) - (__s32)(a) < 0))
#define nilfs_cnt32_ge(a, b)   \
	(typecheck(__u32, a) && typecheck(__u32, b) &&		\
	 ((__s32)(a) - (__s32)(b) >= 0))
#define nilfs_cnt32_lt(a, b)  nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b)  nilfs_cnt32_ge(b, a)
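/*
 * Illustrative sketch (not part of the original file): the nilfs_cnt32_*
 * macros above compare 32-bit sequence counters modulo 2^32, so ordering
 * stays correct after the counter wraps around.  The hypothetical test
 * below shows the idea; it is an example only.
 */
#if 0 /* example only */
static void example_cnt32_wraparound(void)
{
	__u32 old_seq = 0xfffffffe;	/* just before wraparound */
	__u32 new_seq = old_seq + 3;	/* wraps to 0x00000001 */

	/* A plain unsigned "new_seq > old_seq" is false here ... */
	BUG_ON(new_seq > old_seq);
	/* ... but the modular comparison still orders them correctly. */
	BUG_ON(!nilfs_cnt32_gt(new_seq, old_seq));
}
#endif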
static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;
	void *save = NULL;

	if (cur_ti) {
		if (cur_ti->ti_magic == NILFS_TI_MAGIC)
			return ++cur_ti->ti_count;
		else {
			/*
			 * If journal_info field is occupied by other FS,
			 * it is saved and will be restored on
			 * nilfs_transaction_commit().
			 */
			printk(KERN_WARNING
			       "NILFS warning: journal info from a different "
			       "FS\n");
			save = current->journal_info;
		}
	}
	if (!ti) {
		ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
		if (!ti)
			return -ENOMEM;
		ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
	} else {
		ti->ti_flags = 0;
	}
	ti->ti_count = 0;
	ti->ti_save = save;
	ti->ti_magic = NILFS_TI_MAGIC;
	current->journal_info = ti;
	return 0;
}
/**
 * nilfs_transaction_begin - start indivisible file operations.
 * @sb: super block
 * @ti: nilfs_transaction_info
 * @vacancy_check: flags for vacancy rate checks
 *
 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
 * the segment semaphore, to make a segment construction and write tasks
 * exclusive.  The function is used with nilfs_transaction_commit() in pairs.
 * The region enclosed by these two functions can be nested.  To avoid a
 * deadlock, the semaphore is only acquired or released in the outermost call.
 *
 * This function allocates a nilfs_transaction_info struct to keep context
 * information on it.  It is initialized and hooked onto the current task in
 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
 * instead; otherwise a new struct is assigned from a slab.
 *
 * When @vacancy_check flag is set, this function will check the amount of
 * free space, and will wait for the GC to reclaim disk space if low capacity.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-ENOSPC - No space left on device
 */
int nilfs_transaction_begin(struct super_block *sb,
			    struct nilfs_transaction_info *ti,
			    int vacancy_check)
{
	struct nilfs_sb_info *sbi;
	struct the_nilfs *nilfs;
	int ret = nilfs_prepare_segment_lock(ti);

	if (unlikely(ret < 0))
		return ret;
	if (ret > 0)
		return 0;

	vfs_check_frozen(sb, SB_FREEZE_WRITE);

	sbi = NILFS_SB(sb);
	nilfs = sbi->s_nilfs;
	down_read(&nilfs->ns_segctor_sem);
	if (vacancy_check && nilfs_near_disk_full(nilfs)) {
		up_read(&nilfs->ns_segctor_sem);
		ret = -ENOSPC;
		goto failed;
	}
	return 0;

 failed:
	ti = current->journal_info;
	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return ret;
}
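/*
 * Illustrative usage sketch (not part of the original file): callers
 * bracket metadata updates with nilfs_transaction_begin() and
 * nilfs_transaction_commit(), or nilfs_transaction_abort() on failure.
 * The helper below is hypothetical; only the begin/commit/abort calls
 * reflect the API defined in this file.
 */
#if 0 /* example only */
static int example_update(struct super_block *sb, struct inode *inode)
{
	struct nilfs_transaction_info ti;
	int err;

	err = nilfs_transaction_begin(sb, &ti, 1); /* with vacancy check */
	if (err)
		return err;

	err = example_modify_blocks(inode);	/* hypothetical helper */
	if (err) {
		nilfs_transaction_abort(sb);
		return err;
	}
	return nilfs_transaction_commit(sb);
}
#endif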
/**
 * nilfs_transaction_commit - commit indivisible file operations.
 * @sb: super block
 *
 * nilfs_transaction_commit() releases the read semaphore which is
 * acquired by nilfs_transaction_begin().  This is only performed
 * in outermost call of this function.  If a commit flag is set,
 * nilfs_transaction_commit() sets a timer to start the segment
 * constructor.  If a sync flag is set, it starts construction
 * directly.
 */
int nilfs_transaction_commit(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_sb_info *sbi;
	struct nilfs_sc_info *sci;
	int err = 0;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	ti->ti_flags |= NILFS_TI_COMMIT;
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return 0;
	}
	sbi = NILFS_SB(sb);
	sci = NILFS_SC(sbi);
	if (sci != NULL) {
		if (ti->ti_flags & NILFS_TI_COMMIT)
			nilfs_segctor_start_timer(sci);
		if (atomic_read(&sbi->s_nilfs->ns_ndirtyblks) >
		    sci->sc_watermark)
			nilfs_segctor_do_flush(sci, 0);
	}
	up_read(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;

	if (ti->ti_flags & NILFS_TI_SYNC)
		err = nilfs_construct_segment(sb);
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
	return err;
}
void nilfs_transaction_abort(struct super_block *sb)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	if (ti->ti_count > 0) {
		ti->ti_count--;
		return;
	}
	up_read(&NILFS_SB(sb)->s_nilfs->ns_segctor_sem);

	current->journal_info = ti->ti_save;
	if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
		kmem_cache_free(nilfs_transaction_cachep, ti);
}
void nilfs_relax_pressure_in_lock(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;

	if (!sci || !sci->sc_flush_request)
		return;

	set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
	up_read(&nilfs->ns_segctor_sem);

	down_write(&nilfs->ns_segctor_sem);
	if (sci->sc_flush_request &&
	    test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
		struct nilfs_transaction_info *ti = current->journal_info;

		ti->ti_flags |= NILFS_TI_WRITER;
		nilfs_segctor_do_immediate_flush(sci);
		ti->ti_flags &= ~NILFS_TI_WRITER;
	}
	downgrade_write(&nilfs->ns_segctor_sem);
}
static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
				   struct nilfs_transaction_info *ti,
				   int gcflag)
{
	struct nilfs_transaction_info *cur_ti = current->journal_info;

	WARN_ON(cur_ti);
	ti->ti_flags = NILFS_TI_WRITER;
	ti->ti_count = 0;
	ti->ti_save = cur_ti;
	ti->ti_magic = NILFS_TI_MAGIC;
	INIT_LIST_HEAD(&ti->ti_garbage);
	current->journal_info = ti;

	for (;;) {
		down_write(&sbi->s_nilfs->ns_segctor_sem);
		if (!test_bit(NILFS_SC_PRIOR_FLUSH, &NILFS_SC(sbi)->sc_flags))
			break;

		nilfs_segctor_do_immediate_flush(NILFS_SC(sbi));

		up_write(&sbi->s_nilfs->ns_segctor_sem);
		yield();
	}
	if (gcflag)
		ti->ti_flags |= NILFS_TI_GC;
}
static void nilfs_transaction_unlock(struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;

	BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
	BUG_ON(ti->ti_count > 0);

	up_write(&sbi->s_nilfs->ns_segctor_sem);
	current->journal_info = ti->ti_save;
	if (!list_empty(&ti->ti_garbage))
		nilfs_dispose_list(sbi, &ti->ti_garbage, 0);
}
static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
					    struct nilfs_segsum_pointer *ssp,
					    unsigned bytes)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	unsigned blocksize = sci->sc_super->s_blocksize;
	void *p;

	if (unlikely(ssp->offset + bytes > blocksize)) {
		ssp->offset = 0;
		BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
					       &segbuf->sb_segsum_buffers));
		ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
	}
	p = ssp->bh->b_data + ssp->offset;
	ssp->offset += bytes;
	return p;
}
/**
 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
 * @sci: nilfs_sc_info
 */
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	struct buffer_head *sumbh;
	unsigned sumbytes;
	unsigned flags = 0;
	int err;

	if (nilfs_doing_gc())
		flags = NILFS_SS_GC;
	err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
	if (unlikely(err))
		return err;

	sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	sumbytes = segbuf->sb_sum.sumbytes;
	sci->sc_finfo_ptr.bh = sumbh;  sci->sc_finfo_ptr.offset = sumbytes;
	sci->sc_binfo_ptr.bh = sumbh;  sci->sc_binfo_ptr.offset = sumbytes;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
	return 0;
}
static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
	if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
		return -E2BIG; /* The current segment is filled up
				  (internal code) */
	sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
	return nilfs_segctor_reset_segment_buffer(sci);
}
static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
	int err;

	if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		segbuf = sci->sc_curseg;
	}
	err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
	if (likely(!err))
		segbuf->sb_sum.flags |= NILFS_SS_SR;
	return err;
}
/*
 * Functions for making segment summary and payloads
 */
static int nilfs_segctor_segsum_block_required(
	struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
	unsigned binfo_size)
{
	unsigned blocksize = sci->sc_super->s_blocksize;
	/* Size of finfo and binfo is enough small against blocksize */

	return ssp->offset + binfo_size +
		(!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
		blocksize;
}
static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
				      struct inode *inode)
{
	sci->sc_curseg->sb_sum.nfinfo++;
	sci->sc_binfo_ptr = sci->sc_finfo_ptr;
	nilfs_segctor_map_segsum_entry(
		sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));

	if (NILFS_I(inode)->i_root &&
	    !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
}
static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
				    struct inode *inode)
{
	struct nilfs_finfo *finfo;
	struct nilfs_inode_info *ii;
	struct nilfs_segment_buffer *segbuf;
	__u64 cno;

	if (sci->sc_blk_cnt == 0)
		return;

	ii = NILFS_I(inode);

	if (test_bit(NILFS_I_GCINODE, &ii->i_state))
		cno = ii->i_cno;
	else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
		cno = 0;
	else
		cno = sci->sc_cno;

	finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
					       sizeof(*finfo));
	finfo->fi_ino = cpu_to_le64(inode->i_ino);
	finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
	finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
	finfo->fi_cno = cpu_to_le64(cno);

	segbuf = sci->sc_curseg;
	segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
		sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
	sci->sc_finfo_ptr = sci->sc_binfo_ptr;
	sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
}
static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
					struct buffer_head *bh,
					struct inode *inode,
					unsigned binfo_size)
{
	struct nilfs_segment_buffer *segbuf;
	int required, err = 0;

 retry:
	segbuf = sci->sc_curseg;
	required = nilfs_segctor_segsum_block_required(
		sci, &sci->sc_binfo_ptr, binfo_size);
	if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
		nilfs_segctor_end_finfo(sci, inode);
		err = nilfs_segctor_feed_segment(sci);
		if (err)
			return err;
		goto retry;
	}
	if (unlikely(required)) {
		err = nilfs_segbuf_extend_segsum(segbuf);
		if (unlikely(err))
			goto failed;
	}
	if (sci->sc_blk_cnt == 0)
		nilfs_segctor_begin_finfo(sci, inode);

	nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
	/* Substitution to vblocknr is delayed until update_blocknr() */
	nilfs_segbuf_add_file_buffer(segbuf, bh);
	sci->sc_blk_cnt++;
 failed:
	return err;
}
/*
 * Callback functions that enumerate, mark, and collect dirty blocks
 */
static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
				   struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode,
					   sizeof(struct nilfs_binfo_v));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}
static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
}

static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
				   struct buffer_head *bh,
				   struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
}
static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*binfo_v));
	*binfo_v = binfo->bi_v;
}

static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
					struct nilfs_segsum_pointer *ssp,
					union nilfs_binfo *binfo)
{
	__le64 *vblocknr = nilfs_segctor_map_segsum_entry(
		sci, ssp, sizeof(*vblocknr));
	*vblocknr = binfo->bi_v.bi_vblocknr;
}
static struct nilfs_sc_operations nilfs_sc_file_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_file_bmap,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = nilfs_write_file_node_binfo,
};
static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	int err;

	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
	if (err < 0)
		return err;

	err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
	if (!err)
		sci->sc_datablk_cnt++;
	return err;
}

static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
				  struct buffer_head *bh, struct inode *inode)
{
	WARN_ON(!buffer_dirty(bh));
	return nilfs_segctor_add_file_block(sci, bh, inode,
					    sizeof(struct nilfs_binfo_dat));
}
static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	__le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
							sizeof(*blkoff));
	*blkoff = binfo->bi_dat.bi_blkoff;
}

static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
				       struct nilfs_segsum_pointer *ssp,
				       union nilfs_binfo *binfo)
{
	struct nilfs_binfo_dat *binfo_dat =
		nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
	*binfo_dat = binfo->bi_dat;
}
static struct nilfs_sc_operations nilfs_sc_dat_ops = {
	.collect_data = nilfs_collect_dat_data,
	.collect_node = nilfs_collect_file_node,
	.collect_bmap = nilfs_collect_dat_bmap,
	.write_data_binfo = nilfs_write_dat_data_binfo,
	.write_node_binfo = nilfs_write_dat_node_binfo,
};

static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
	.collect_data = nilfs_collect_file_data,
	.collect_node = NULL,
	.collect_bmap = NULL,
	.write_data_binfo = nilfs_write_file_data_binfo,
	.write_node_binfo = NULL,
};
static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
					      struct list_head *listp,
					      size_t nlimit,
					      loff_t start, loff_t end)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0, last = ULONG_MAX;
	size_t ndirties = 0;
	int i;

	if (unlikely(start != 0 || end != LLONG_MAX)) {
		/*
		 * A valid range is given for sync-ing data pages. The
		 * range is rounded to per-page; extra dirty buffers
		 * may be included if blocksize < pagesize.
		 */
		index = start >> PAGE_SHIFT;
		last = end >> PAGE_SHIFT;
	}
	pagevec_init(&pvec, 0);
 repeat:
	if (unlikely(index > last) ||
	    !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				min_t(pgoff_t, last - index,
				      PAGEVEC_SIZE - 1) + 1))
		return ndirties;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct buffer_head *bh, *head;
		struct page *page = pvec.pages[i];

		if (unlikely(page->index > last))
			break;

		lock_page(page);
		if (!page_has_buffers(page))
			create_empty_buffers(page,
					     1 << inode->i_blkbits, 0);
		unlock_page(page);

		bh = head = page_buffers(page);
		do {
			if (!buffer_dirty(bh))
				continue;
			get_bh(bh);
			list_add_tail(&bh->b_assoc_buffers, listp);
			ndirties++;
			if (unlikely(ndirties >= nlimit)) {
				pagevec_release(&pvec);
				cond_resched();
				return ndirties;
			}
		} while (bh = bh->b_this_page, bh != head);
	}
	pagevec_release(&pvec);
	cond_resched();
	goto repeat;
}
static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
					    struct list_head *listp)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct address_space *mapping = &ii->i_btnode_cache;
	struct pagevec pvec;
	struct buffer_head *bh, *head;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			bh = head = page_buffers(pvec.pages[i]);
			do {
				if (buffer_dirty(bh)) {
					get_bh(bh);
					list_add_tail(&bh->b_assoc_buffers,
						      listp);
				}
				bh = bh->b_this_page;
			} while (bh != head);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
static void nilfs_dispose_list(struct nilfs_sb_info *sbi,
			       struct list_head *head, int force)
{
	struct nilfs_inode_info *ii, *n;
	struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
	unsigned nv = 0;

	while (!list_empty(head)) {
		spin_lock(&sbi->s_inode_lock);
		list_for_each_entry_safe(ii, n, head, i_dirty) {
			list_del_init(&ii->i_dirty);
			if (force) {
				if (unlikely(ii->i_bh)) {
					brelse(ii->i_bh);
					ii->i_bh = NULL;
				}
			} else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
				set_bit(NILFS_I_QUEUED, &ii->i_state);
				list_add_tail(&ii->i_dirty,
					      &sbi->s_dirty_files);
				continue;
			}
			ivec[nv++] = ii;
			if (nv == SC_N_INODEVEC)
				break;
		}
		spin_unlock(&sbi->s_inode_lock);

		for (pii = ivec; nv > 0; pii++, nv--)
			iput(&(*pii)->vfs_inode);
	}
}
static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
				     struct nilfs_root *root)
{
	int ret = 0;

	if (nilfs_mdt_fetch_dirty(root->ifile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
		ret++;
	if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
		ret++;
	if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
		ret++;
	return ret;
}

static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
{
	return list_empty(&sci->sc_dirty_files) &&
		!test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
		sci->sc_nfreesegs == 0 &&
		(!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
}
static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int ret = 0;

	if (nilfs_test_metadata_dirty(sbi->s_nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	spin_lock(&sbi->s_inode_lock);
	if (list_empty(&sbi->s_dirty_files) && nilfs_segctor_clean(sci))
		ret++;

	spin_unlock(&sbi->s_inode_lock);
	return ret;
}
static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;

	nilfs_mdt_clear_dirty(sci->sc_root->ifile);
	nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
	nilfs_mdt_clear_dirty(nilfs->ns_sufile);
	nilfs_mdt_clear_dirty(nilfs->ns_dat);
}
static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
{
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	/* XXX: this interface will be changed */
	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
					  &raw_cp, &bh_cp);
	if (likely(!err)) {
		/* The following code is duplicated with cpfile.  But, it is
		   needed to collect the checkpoint even if it was not newly
		   created */
		nilfs_mdt_mark_buffer_dirty(bh_cp);
		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
		nilfs_cpfile_put_checkpoint(
			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	} else
		WARN_ON(err == -EINVAL || err == -ENOENT);

	return err;
}
static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct buffer_head *bh_cp;
	struct nilfs_checkpoint *raw_cp;
	int err;

	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
					  &raw_cp, &bh_cp);
	if (unlikely(err)) {
		WARN_ON(err == -EINVAL || err == -ENOENT);
		goto failed_ibh;
	}
	raw_cp->cp_snapshot_list.ssl_next = 0;
	raw_cp->cp_snapshot_list.ssl_prev = 0;
	raw_cp->cp_inodes_count =
		cpu_to_le64(atomic_read(&sci->sc_root->inodes_count));
	raw_cp->cp_blocks_count =
		cpu_to_le64(atomic_read(&sci->sc_root->blocks_count));
	raw_cp->cp_nblk_inc =
		cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
	raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
	raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);

	if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
		nilfs_checkpoint_clear_minor(raw_cp);
	else
		nilfs_checkpoint_set_minor(raw_cp);

	nilfs_write_inode_common(sci->sc_root->ifile,
				 &raw_cp->cp_ifile_inode, 1);
	nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
	return 0;

 failed_ibh:
	return err;
}
static void nilfs_fill_in_file_bmap(struct inode *ifile,
				    struct nilfs_inode_info *ii)

{
	struct buffer_head *ibh;
	struct nilfs_inode *raw_inode;

	if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
		ibh = ii->i_bh;
		BUG_ON(!ibh);
		raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
						  ibh);
		nilfs_bmap_write(ii->i_bmap, raw_inode);
		nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
	}
}

static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
		nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
		set_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}
static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *raw_sr;
	unsigned isz = nilfs->ns_inode_size;

	bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
	raw_sr = (struct nilfs_super_root *)bh_sr->b_data;

	raw_sr->sr_bytes = cpu_to_le16(NILFS_SR_BYTES);
	raw_sr->sr_nongc_ctime
		= cpu_to_le64(nilfs_doing_gc() ?
			      nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
	raw_sr->sr_flags = 0;

	nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
				 NILFS_SR_DAT_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
				 NILFS_SR_CPFILE_OFFSET(isz), 1);
	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
				 NILFS_SR_SUFILE_OFFSET(isz), 1);
}
static void nilfs_redirty_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
			clear_bit(NILFS_I_COLLECTED, &ii->i_state);
	}
}

static void nilfs_drop_collected_inodes(struct list_head *head)
{
	struct nilfs_inode_info *ii;

	list_for_each_entry(ii, head, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
			continue;

		clear_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
		set_bit(NILFS_I_UPDATED, &ii->i_state);
	}
}
static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
				       struct inode *inode,
				       struct list_head *listp,
				       int (*collect)(struct nilfs_sc_info *,
						      struct buffer_head *,
						      struct inode *))
{
	struct buffer_head *bh, *n;
	int err = 0;

	if (collect) {
		list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
			list_del_init(&bh->b_assoc_buffers);
			err = collect(sci, bh, inode);
			brelse(bh);
			if (unlikely(err))
				goto dispose_buffers;
		}
		return 0;
	}

 dispose_buffers:
	while (!list_empty(listp)) {
		bh = list_entry(listp->next, struct buffer_head,
				b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
	return err;
}

static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
{
	/* Remaining number of blocks within segment buffer */
	return sci->sc_segbuf_nblocks -
		(sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
}
static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
				   struct inode *inode,
				   struct nilfs_sc_operations *sc_ops)
{
	LIST_HEAD(data_buffers);
	LIST_HEAD(node_buffers);
	int err;

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		size_t n, rest = nilfs_segctor_buffer_rest(sci);

		n = nilfs_lookup_dirty_data_buffers(
			inode, &data_buffers, rest + 1, 0, LLONG_MAX);
		if (n > rest) {
			err = nilfs_segctor_apply_buffers(
				sci, inode, &data_buffers,
				sc_ops->collect_data);
			BUG_ON(!err); /* always receive -E2BIG or true error */
			goto break_or_fail;
		}
	}
	nilfs_lookup_dirty_node_buffers(inode, &node_buffers);

	if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
		err = nilfs_segctor_apply_buffers(
			sci, inode, &data_buffers, sc_ops->collect_data);
		if (unlikely(err)) {
			/* dispose node list */
			nilfs_segctor_apply_buffers(
				sci, inode, &node_buffers, NULL);
			goto break_or_fail;
		}
		sci->sc_stage.flags |= NILFS_CF_NODE;
	}
	/* Collect node */
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_node);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
	err = nilfs_segctor_apply_buffers(
		sci, inode, &node_buffers, sc_ops->collect_bmap);
	if (unlikely(err))
		goto break_or_fail;

	nilfs_segctor_end_finfo(sci, inode);
	sci->sc_stage.flags &= ~NILFS_CF_NODE;

 break_or_fail:
	return err;
}
static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
					 struct inode *inode)
{
	LIST_HEAD(data_buffers);
	size_t n, rest = nilfs_segctor_buffer_rest(sci);
	int err;

	n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
					    sci->sc_dsync_start,
					    sci->sc_dsync_end);

	err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
					  nilfs_collect_file_data);
	if (!err) {
		nilfs_segctor_end_finfo(sci, inode);
		WARN_ON(n > rest);
		/* always receive -E2BIG or true error if n > rest */
	}
	return err;
}
static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct list_head *head;
	struct nilfs_inode_info *ii;
	size_t ndone;
	int err = 0;

	switch (sci->sc_stage.scnt) {
	case NILFS_ST_INIT:
		/* Pre-processes */
		sci->sc_stage.flags = 0;

		if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
			sci->sc_nblk_inc = 0;
			sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
			if (mode == SC_LSEG_DSYNC) {
				sci->sc_stage.scnt = NILFS_ST_DSYNC;
				goto dsync_mode;
			}
		}

		sci->sc_stage.dirty_file_ptr = NULL;
		sci->sc_stage.gc_inode_ptr = NULL;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DAT;
			goto dat_stage;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_GC:
		if (nilfs_doing_gc()) {
			head = &sci->sc_gc_inodes;
			ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
						head, i_dirty);
			list_for_each_entry_continue(ii, head, i_dirty) {
				err = nilfs_segctor_scan_file(
					sci, &ii->vfs_inode,
					&nilfs_sc_file_ops);
				if (unlikely(err)) {
					sci->sc_stage.gc_inode_ptr = list_entry(
						ii->i_dirty.prev,
						struct nilfs_inode_info,
						i_dirty);
					goto break_or_fail;
				}
				set_bit(NILFS_I_COLLECTED, &ii->i_state);
			}
			sci->sc_stage.gc_inode_ptr = NULL;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_FILE:
		head = &sci->sc_dirty_files;
		ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
					i_dirty);
		list_for_each_entry_continue(ii, head, i_dirty) {
			clear_bit(NILFS_I_DIRTY, &ii->i_state);

			err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
						      &nilfs_sc_file_ops);
			if (unlikely(err)) {
				sci->sc_stage.dirty_file_ptr =
					list_entry(ii->i_dirty.prev,
						   struct nilfs_inode_info,
						   i_dirty);
				goto break_or_fail;
			}
			/* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
			/* XXX: required ? */
		}
		sci->sc_stage.dirty_file_ptr = NULL;
		if (mode == SC_FLUSH_FILE) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;
		sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
		/* Fall through */
	case NILFS_ST_IFILE:
		err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;
		/* Creating a checkpoint */
		err = nilfs_segctor_create_checkpoint(sci);
		if (unlikely(err))
			break;
		/* Fall through */
	case NILFS_ST_CPFILE:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SUFILE:
		err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
					 sci->sc_nfreesegs, &ndone);
		if (unlikely(err)) {
			nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						  sci->sc_freesegs, ndone,
						  NULL);
			break;
		}
		sci->sc_stage.flags |= NILFS_CF_SUFREED;

		err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
					      &nilfs_sc_file_ops);
		if (unlikely(err))
			break;
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_DAT:
 dat_stage:
		err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
					      &nilfs_sc_dat_ops);
		if (unlikely(err))
			break;
		if (mode == SC_FLUSH_DAT) {
			sci->sc_stage.scnt = NILFS_ST_DONE;
			return 0;
		}
		sci->sc_stage.scnt++;  /* Fall through */
	case NILFS_ST_SR:
		if (mode == SC_LSEG_SR) {
			/* Appending a super root */
			err = nilfs_segctor_add_super_root(sci);
			if (unlikely(err))
				break;
		}
		/* End of a logical segment */
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DSYNC:
 dsync_mode:
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
		ii = sci->sc_dsync_inode;
		if (!test_bit(NILFS_I_BUSY, &ii->i_state))
			break;

		err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
		if (unlikely(err))
			break;
		sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
		sci->sc_stage.scnt = NILFS_ST_DONE;
		return 0;
	case NILFS_ST_DONE:
		return 0;
	default:
		BUG();
	}

 break_or_fail:
	return err;
}
/**
 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
 * @sci: nilfs_sc_info
 * @nilfs: nilfs object
 */
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
					    struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	__u64 nextnum;
	int err, alloc = 0;

	segbuf = nilfs_segbuf_new(sci->sc_super);
	if (unlikely(!segbuf))
		return -ENOMEM;

	if (list_empty(&sci->sc_write_logs)) {
		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
				 nilfs->ns_pseg_offset, nilfs);
		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_shift_to_next_segment(nilfs);
			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
		}

		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
		nextnum = nilfs->ns_nextnum;

		if (nilfs->ns_segnum == nilfs->ns_nextnum)
			/* Start from the head of a new full segment */
			alloc++;
	} else {
		/* Continue logs */
		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
		nilfs_segbuf_map_cont(segbuf, prev);
		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
		nextnum = prev->sb_nextnum;

		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
			segbuf->sb_sum.seg_seq++;
			alloc++;
		}
	}

	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
	if (err)
		goto failed;

	if (alloc) {
		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
		if (err)
			goto failed;
	}
	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);

	BUG_ON(!list_empty(&sci->sc_segbufs));
	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
	return 0;

 failed:
	nilfs_segbuf_free(segbuf);
	return err;
}
static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
					 struct the_nilfs *nilfs, int nadd)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 nextnextnum;
	LIST_HEAD(list);
	int err, ret, i;

	prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
	/*
	 * Since the segment specified with nextnum might be allocated during
	 * the previous construction, the buffer including its segusage may
	 * not be dirty.  The following call ensures that the buffer is dirty
	 * and will pin the buffer on memory until the sufile is written.
	 */
	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
	if (unlikely(err))
		return err;

	for (i = 0; i < nadd; i++) {
		/* extend segment info */
		err = -ENOMEM;
		segbuf = nilfs_segbuf_new(sci->sc_super);
		if (unlikely(!segbuf))
			goto failed;

		/* map this buffer to region of segment on-disk */
		nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
		sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;

		/* allocate the next next full segment */
		err = nilfs_sufile_alloc(sufile, &nextnextnum);
		if (unlikely(err))
			goto failed_segbuf;

		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
		nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);

		list_add_tail(&segbuf->sb_list, &list);
		prev = segbuf;
	}
	list_splice_tail(&list, &sci->sc_segbufs);
	return 0;

 failed_segbuf:
	nilfs_segbuf_free(segbuf);
 failed:
	list_for_each_entry(segbuf, &list, sb_list) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	nilfs_destroy_logs(&list);
	return err;
}
static void nilfs_free_incomplete_logs(struct list_head *logs,
				       struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf, *prev;
	struct inode *sufile = nilfs->ns_sufile;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret); /* never fails */
	}
	if (atomic_read(&segbuf->sb_err)) {
		/* Case 1: The first segment failed */
		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
			/* Case 1a:  Partial segment appended into an existing
			   segment */
			nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
						segbuf->sb_fseg_end);
		else /* Case 1b:  New full segment */
			set_nilfs_discontinued(nilfs);
	}

	prev = segbuf;
	list_for_each_entry_continue(segbuf, logs, sb_list) {
		if (prev->sb_nextnum != segbuf->sb_nextnum) {
			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
			WARN_ON(ret); /* never fails */
		}
		if (atomic_read(&segbuf->sb_err) &&
		    segbuf->sb_segnum != nilfs->ns_nextnum)
			/* Case 2: extended segment (!= next) failed */
			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
		prev = segbuf;
	}
}
static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
					  struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	unsigned long live_blocks;
	int ret;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		live_blocks = segbuf->sb_sum.nblocks +
			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     live_blocks,
						     sci->sc_seg_ctime);
		WARN_ON(ret); /* always succeed because the segusage is dirty */
	}
}

static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf;
	int ret;

	segbuf = NILFS_FIRST_SEGBUF(logs);
	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
					     segbuf->sb_pseg_start -
					     segbuf->sb_fseg_start, 0);
	WARN_ON(ret); /* always succeed because the segusage is dirty */

	list_for_each_entry_continue(segbuf, logs, sb_list) {
		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
						     0, 0);
		WARN_ON(ret); /* always succeed */
	}
}
static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
					    struct nilfs_segment_buffer *last,
					    struct inode *sufile)
{
	struct nilfs_segment_buffer *segbuf = last;
	int ret;

	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
		WARN_ON(ret);
	}
	nilfs_truncate_logs(&sci->sc_segbufs, last);
}
static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
				 struct the_nilfs *nilfs, int mode)
{
	struct nilfs_cstage prev_stage = sci->sc_stage;
	int err, nadd = 1;

	/* Collection retry loop */
	for (;;) {
		sci->sc_nblk_this_inc = 0;
		sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);

		err = nilfs_segctor_reset_segment_buffer(sci);
		if (unlikely(err))
			goto failed;

		err = nilfs_segctor_collect_blocks(sci, mode);
		sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
		if (!err)
			break;

		if (unlikely(err != -E2BIG))
			goto failed;

		/* The current segment is filled up */
		if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
			break;

		nilfs_clear_logs(&sci->sc_segbufs);

		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
		if (unlikely(err))
			return err;

		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
			err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
							sci->sc_freesegs,
							sci->sc_nfreesegs,
							NULL);
			WARN_ON(err); /* do not happen */
		}
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

 failed:
	return err;
}
static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
				      struct buffer_head *new_bh)
{
	BUG_ON(!list_empty(&new_bh->b_assoc_buffers));

	list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
	/* The caller must release old_bh */
}
static int
nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
				     struct nilfs_segment_buffer *segbuf,
				     int mode)
{
	struct inode *inode = NULL;
	sector_t blocknr;
	unsigned long nfinfo = segbuf->sb_sum.nfinfo;
	unsigned long nblocks = 0, ndatablk = 0;
	struct nilfs_sc_operations *sc_op = NULL;
	struct nilfs_segsum_pointer ssp;
	struct nilfs_finfo *finfo = NULL;
	union nilfs_binfo binfo;
	struct buffer_head *bh, *bh_org;
	ino_t ino = 0;
	int err = 0;

	if (!nfinfo)
		goto out;

	blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
	ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
	ssp.offset = sizeof(struct nilfs_segment_summary);

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		if (bh == segbuf->sb_super_root)
			break;
		if (!finfo) {
			finfo =	nilfs_segctor_map_segsum_entry(
				sci, &ssp, sizeof(*finfo));
			ino = le64_to_cpu(finfo->fi_ino);
			nblocks = le32_to_cpu(finfo->fi_nblocks);
			ndatablk = le32_to_cpu(finfo->fi_ndatablk);

			if (buffer_nilfs_node(bh))
				inode = NILFS_BTNC_I(bh->b_page->mapping);
			else
				inode = NILFS_AS_I(bh->b_page->mapping);

			if (mode == SC_LSEG_DSYNC)
				sc_op = &nilfs_sc_dsync_ops;
			else if (ino == NILFS_DAT_INO)
				sc_op = &nilfs_sc_dat_ops;
			else /* file blocks */
				sc_op = &nilfs_sc_file_ops;
		}
		bh_org = bh;
		get_bh(bh_org);
		err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
					&binfo);
		if (bh != bh_org)
			nilfs_list_replace_buffer(bh_org, bh);
		brelse(bh_org);
		if (unlikely(err))
			goto failed_bmap;

		if (ndatablk > 0)
			sc_op->write_data_binfo(sci, &ssp, &binfo);
		else
			sc_op->write_node_binfo(sci, &ssp, &binfo);

		blocknr++;
		if (--nblocks == 0) {
			finfo = NULL;
			if (--nfinfo == 0)
				break;
		} else if (ndatablk > 0)
			ndatablk--;
	}
 out:
	return 0;

 failed_bmap:
	return err;
}
static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_segment_buffer *segbuf;
	int err;

	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
		if (unlikely(err))
			return err;
		nilfs_segbuf_fill_in_segsum(segbuf);
	}
	return 0;
}
static int
nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out)
{
	struct page *clone_page;
	struct buffer_head *bh, *head, *bh2;
	void *kaddr;

	bh = head = page_buffers(page);

	clone_page = nilfs_alloc_private_page(bh->b_bdev, bh->b_size, 0);
	if (unlikely(!clone_page))
		return -ENOMEM;

	bh2 = page_buffers(clone_page);
	kaddr = kmap_atomic(page, KM_USER0);
	do {
		if (list_empty(&bh->b_assoc_buffers))
			continue;
		get_bh(bh2);
		page_cache_get(clone_page); /* for each bh */
		memcpy(bh2->b_data, kaddr + bh_offset(bh), bh2->b_size);
		bh2->b_blocknr = bh->b_blocknr;
		list_replace(&bh->b_assoc_buffers, &bh2->b_assoc_buffers);
		list_add_tail(&bh->b_assoc_buffers, out);
	} while (bh = bh->b_this_page, bh2 = bh2->b_this_page, bh != head);
	kunmap_atomic(kaddr, KM_USER0);

	if (!TestSetPageWriteback(clone_page))
		account_page_writeback(clone_page);
	unlock_page(clone_page);

	return 0;
}
static int nilfs_test_page_to_be_frozen(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping || !mapping->host || S_ISDIR(mapping->host->i_mode))
		return 0;

	if (page_mapped(page)) {
		ClearPageChecked(page);
		return 1;
	}
	return PageChecked(page);
}
static int nilfs_begin_page_io(struct page *page, struct list_head *out)
{
	if (!page || PageWriteback(page))
		/* For split b-tree node pages, this function may be called
		   twice.  We ignore the 2nd or later calls by this check. */
		return 0;

	lock_page(page);
	clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);

	if (nilfs_test_page_to_be_frozen(page)) {
		int err = nilfs_copy_replace_page_buffers(page, out);
		if (unlikely(err))
			return err;
	}
	return 0;
}
static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
				       struct page **failed_page)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct list_head *list = &sci->sc_copied_buffers;
	int err;

	*failed_page = NULL;
	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
				}
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					lock_page(bd_page);
					clear_page_dirty_for_io(bd_page);
					set_page_writeback(bd_page);
					unlock_page(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				err = nilfs_begin_page_io(fs_page, list);
				if (unlikely(err)) {
					*failed_page = fs_page;
					goto out;
				}
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page) {
		lock_page(bd_page);
		clear_page_dirty_for_io(bd_page);
		set_page_writeback(bd_page);
		unlock_page(bd_page);
	}
	err = nilfs_begin_page_io(fs_page, list);
	if (unlikely(err))
		*failed_page = fs_page;
 out:
	return err;
}
static int nilfs_segctor_write(struct nilfs_sc_info *sci,
			       struct the_nilfs *nilfs)
{
	int ret;

	ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
	return ret;
}
static void __nilfs_end_page_io(struct page *page, int err)
{
	if (!err) {
		if (!nilfs_page_buffers_clean(page))
			__set_page_dirty_nobuffers(page);
		ClearPageError(page);
	} else {
		__set_page_dirty_nobuffers(page);
		SetPageError(page);
	}

	if (buffer_nilfs_allocated(page_buffers(page))) {
		if (TestClearPageWriteback(page))
			dec_zone_page_state(page, NR_WRITEBACK);
	} else
		end_page_writeback(page);
}

static void nilfs_end_page_io(struct page *page, int err)
{
	if (!page)
		return;

	if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
		/*
		 * For b-tree node pages, this function may be called twice
		 * or more because they might be split in a segment.
		 */
		if (PageDirty(page)) {
			/*
			 * For pages holding split b-tree node buffers, dirty
			 * flag on the buffers may be cleared discretely.
			 * In that case, the page is once redirtied for
			 * remaining buffers, and it must be cancelled if
			 * all the buffers get cleaned later.
			 */
			lock_page(page);
			if (nilfs_page_buffers_clean(page))
				__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		return;
	}

	__nilfs_end_page_io(page, err);
}
static void nilfs_clear_copied_buffers(struct list_head *list, int err)
{
	struct buffer_head *bh, *head;
	struct page *page;

	while (!list_empty(list)) {
		bh = list_entry(list->next, struct buffer_head,
				b_assoc_buffers);
		page = bh->b_page;
		page_cache_get(page);
		head = bh = page_buffers(page);
		do {
			if (!list_empty(&bh->b_assoc_buffers)) {
				list_del_init(&bh->b_assoc_buffers);
				if (!err) {
					set_buffer_uptodate(bh);
					clear_buffer_dirty(bh);
					clear_buffer_delay(bh);
					clear_buffer_nilfs_volatile(bh);
				}
				brelse(bh); /* for b_assoc_buffers */
			}
		} while ((bh = bh->b_this_page) != head);

		__nilfs_end_page_io(page, err);
		page_cache_release(page);
	}
}
static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
			     int err)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct buffer_head *bh;

	if (list_empty(logs))
		return;

	list_for_each_entry(segbuf, logs, sb_list) {
		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}

		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, err);
				if (fs_page && fs_page == failed_page)
					return;
				fs_page = bh->b_page;
			}
		}
	}
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, err);
}
static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
					     struct the_nilfs *nilfs, int err)
{
	LIST_HEAD(logs);
	int ret;

	list_splice_tail_init(&sci->sc_write_logs, &logs);
	ret = nilfs_wait_on_logs(&logs);
	nilfs_abort_logs(&logs, NULL, ret ? : err);

	list_splice_tail_init(&sci->sc_segbufs, &logs);
	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
	nilfs_free_incomplete_logs(&logs, nilfs);
	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);

	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
						sci->sc_freesegs,
						sci->sc_nfreesegs,
						NULL);
		WARN_ON(ret); /* do not happen */
	}

	nilfs_destroy_logs(&logs);
}
static void nilfs_set_next_segment(struct the_nilfs *nilfs,
				   struct nilfs_segment_buffer *segbuf)
{
	nilfs->ns_segnum = segbuf->sb_segnum;
	nilfs->ns_nextnum = segbuf->sb_nextnum;
	nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
		+ segbuf->sb_sum.nblocks;
	nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
	nilfs->ns_ctime = segbuf->sb_sum.ctime;
}
static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
{
	struct nilfs_segment_buffer *segbuf;
	struct page *bd_page = NULL, *fs_page = NULL;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int update_sr = false;

	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
		struct buffer_head *bh;

		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			if (bh->b_page != bd_page) {
				if (bd_page)
					end_page_writeback(bd_page);
				bd_page = bh->b_page;
			}
		}
		/*
		 * We assume that the buffers which belong to the same page
		 * continue over the buffer list.
		 * Under this assumption, the last BHs of pages is
		 * identifiable by the discontinuity of bh->b_page
		 * (page != fs_page).
		 *
		 * For B-tree node blocks, however, this assumption is not
		 * guaranteed.  The cleanup code of B-tree node pages needs
		 * special care.
		 */
		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
				    b_assoc_buffers) {
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
			clear_buffer_delay(bh);
			clear_buffer_nilfs_volatile(bh);
			clear_buffer_nilfs_redirected(bh);
			if (bh == segbuf->sb_super_root) {
				if (bh->b_page != bd_page) {
					end_page_writeback(bd_page);
					bd_page = bh->b_page;
				}
				update_sr = true;
				break;
			}
			if (bh->b_page != fs_page) {
				nilfs_end_page_io(fs_page, 0);
				fs_page = bh->b_page;
			}
		}

		if (!nilfs_segbuf_simplex(segbuf)) {
			if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
				set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
				sci->sc_lseg_stime = jiffies;
			}
			if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
				clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
		}
	}
	/*
	 * Since pages may continue over multiple segment buffers,
	 * end of the last page must be checked outside of the loop.
	 */
	if (bd_page)
		end_page_writeback(bd_page);

	nilfs_end_page_io(fs_page, 0);

	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, 0);

	nilfs_drop_collected_inodes(&sci->sc_dirty_files);

	if (nilfs_doing_gc())
		nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
	else
		nilfs->ns_nongc_ctime = sci->sc_seg_ctime;

	sci->sc_nblk_inc += sci->sc_nblk_this_inc;

	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
	nilfs_set_next_segment(nilfs, segbuf);

	if (update_sr) {
		nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
				       segbuf->sb_sum.seg_seq, nilfs->ns_cno++);

		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
		nilfs_segctor_clear_metadata_dirty(sci);
	} else
		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
}
static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
{
	int ret;

	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
	if (!ret) {
		nilfs_segctor_complete_write(sci);
		nilfs_destroy_logs(&sci->sc_write_logs);
	}
	return ret;
}
static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
					struct nilfs_sb_info *sbi)
{
	struct nilfs_inode_info *ii, *n;
	struct inode *ifile = sci->sc_root->ifile;

	spin_lock(&sbi->s_inode_lock);
 retry:
	list_for_each_entry_safe(ii, n, &sbi->s_dirty_files, i_dirty) {
		if (!ii->i_bh) {
			struct buffer_head *ibh;
			int err;

			spin_unlock(&sbi->s_inode_lock);
			err = nilfs_ifile_get_inode_block(
				ifile, ii->vfs_inode.i_ino, &ibh);
			if (unlikely(err)) {
				nilfs_warning(sbi->s_super, __func__,
					      "failed to get inode block.\n");
				return err;
			}
			nilfs_mdt_mark_buffer_dirty(ibh);
			nilfs_mdt_mark_dirty(ifile);
			spin_lock(&sbi->s_inode_lock);
			if (likely(!ii->i_bh))
				ii->i_bh = ibh;
			else
				brelse(ibh);
			goto retry;
		}

		clear_bit(NILFS_I_QUEUED, &ii->i_state);
		set_bit(NILFS_I_BUSY, &ii->i_state);
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sci->sc_dirty_files);
	}
	spin_unlock(&sbi->s_inode_lock);

	return 0;
}
static void nilfs_segctor_check_out_files(struct nilfs_sc_info *sci,
					  struct nilfs_sb_info *sbi)
{
	struct nilfs_transaction_info *ti = current->journal_info;
	struct nilfs_inode_info *ii, *n;

	spin_lock(&sbi->s_inode_lock);
	list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
		if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
		    test_bit(NILFS_I_DIRTY, &ii->i_state))
			continue;

		clear_bit(NILFS_I_BUSY, &ii->i_state);
		brelse(ii->i_bh);
		ii->i_bh = NULL;
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &ti->ti_garbage);
	}
	spin_unlock(&sbi->s_inode_lock);
}
/*
 * Main procedure of segment constructor
 */
static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct page *failed_page;
	int err;

	sci->sc_stage.scnt = NILFS_ST_INIT;
	sci->sc_cno = nilfs->ns_cno;

	err = nilfs_segctor_check_in_files(sci, sbi);
	if (unlikely(err))
		goto out;

	if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
		set_bit(NILFS_SC_DIRTY, &sci->sc_flags);

	if (nilfs_segctor_clean(sci))
		goto out;

	do {
		sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;

		err = nilfs_segctor_begin_construction(sci, nilfs);
		if (unlikely(err))
			goto out;

		/* Update time stamp */
		sci->sc_seg_ctime = get_seconds();

		err = nilfs_segctor_collect(sci, nilfs, mode);
		if (unlikely(err))
			goto failed;

		/* Avoid empty segment */
		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
		    nilfs_segbuf_empty(sci->sc_curseg)) {
			nilfs_segctor_abort_construction(sci, nilfs, 1);
			goto out;
		}

		err = nilfs_segctor_assign(sci, mode);
		if (unlikely(err))
			goto failed;

		if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
			nilfs_segctor_fill_in_file_bmap(sci);

		if (mode == SC_LSEG_SR &&
		    sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
			err = nilfs_segctor_fill_in_checkpoint(sci);
			if (unlikely(err))
				goto failed_to_write;

			nilfs_segctor_fill_in_super_root(sci, nilfs);
		}
		nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);

		/* Write partial segments */
		err = nilfs_segctor_prepare_write(sci, &failed_page);
		if (err) {
			nilfs_abort_logs(&sci->sc_segbufs, failed_page, err);
			goto failed_to_write;
		}

		nilfs_add_checksums_on_logs(&sci->sc_segbufs,
					    nilfs->ns_crc_seed);

		err = nilfs_segctor_write(sci, nilfs);
		if (unlikely(err))
			goto failed_to_write;

		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
			/*
			 * At this point, we avoid double buffering
			 * for blocksize < pagesize because page dirty
			 * flag is turned off during write and dirty
			 * buffers are not properly collected for
			 * pages crossing over segments.
			 */
			err = nilfs_segctor_wait(sci);
			if (err)
				goto failed_to_write;
		}
	} while (sci->sc_stage.scnt != NILFS_ST_DONE);

 out:
	nilfs_segctor_check_out_files(sci, sbi);
	return err;

 failed_to_write:
	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
		nilfs_redirty_inodes(&sci->sc_dirty_files);

 failed:
	if (nilfs_doing_gc())
		nilfs_redirty_inodes(&sci->sc_gc_inodes);
	nilfs_segctor_abort_construction(sci, nilfs, err);
	goto out;
}
/**
 * nilfs_segctor_start_timer - set timer of background write
 * @sci: nilfs_sc_info
 *
 * If the timer has already been set, it ignores the new request.
 * This function MUST be called within a section locking the segment
 * semaphore.
 */
static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
		sci->sc_timer.expires = jiffies + sci->sc_interval;
		add_timer(&sci->sc_timer);
		sci->sc_state |= NILFS_SEGCTOR_COMMIT;
	}
	spin_unlock(&sci->sc_state_lock);
}

static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
	spin_lock(&sci->sc_state_lock);
	if (!(sci->sc_flush_request & (1 << bn))) {
		unsigned long prev_req = sci->sc_flush_request;

		sci->sc_flush_request |= (1 << bn);
		if (!prev_req)
			wake_up(&sci->sc_wait_daemon);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_flush_segment - trigger a segment construction for resource control
 * @sb: super block
 * @ino: inode number of the file to be flushed out.
 */
void nilfs_flush_segment(struct super_block *sb, ino_t ino)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);

	if (!sci || nilfs_doing_construction())
		return;
	nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
					/* assign bit 0 to data files */
}
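/*
 * Illustrative sketch (not part of the original file): how the flush
 * request bits map to files.  Metadata files get a per-inode bit (e.g.
 * bit NILFS_DAT_INO for the DAT), while all regular data files share
 * bit 0, matching FLUSH_FILE_BIT/FLUSH_DAT_BIT defined further below.
 * The helper is an example only.
 */
#if 0 /* example only */
static void example_flush(struct super_block *sb, struct inode *inode)
{
	/* For a regular file this sets flush-request bit 0; for a
	   metadata file such as the DAT it sets bit inode->i_ino. */
	nilfs_flush_segment(sb, inode->i_ino);
}
#endif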
struct nilfs_segctor_wait_request {
	wait_queue_t	wq;
	__u32		seq;
	int		err;
	atomic_t	done;
};

static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
{
	struct nilfs_segctor_wait_request wait_req;
	int err = 0;

	spin_lock(&sci->sc_state_lock);
	init_wait(&wait_req.wq);
	wait_req.err = 0;
	atomic_set(&wait_req.done, 0);
	wait_req.seq = ++sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);

	init_waitqueue_entry(&wait_req.wq, current);
	add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
	set_current_state(TASK_INTERRUPTIBLE);
	wake_up(&sci->sc_wait_daemon);

	for (;;) {
		if (atomic_read(&wait_req.done)) {
			err = wait_req.err;
			break;
		}
		if (!signal_pending(current)) {
			schedule();
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(&sci->sc_wait_request, &wait_req.wq);
	return err;
}
static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
{
	struct nilfs_segctor_wait_request *wrq, *n;
	unsigned long flags;

	spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
	list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
				 wq.task_list) {
		if (!atomic_read(&wrq->done) &&
		    nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
			wrq->err = err;
			atomic_set(&wrq->done, 1);
		}
		if (atomic_read(&wrq->done)) {
			wrq->wq.func(&wrq->wq,
				     TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
				     0, NULL);
		}
	}
	spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
}
/**
 * nilfs_construct_segment - construct a logical segment
 * @sb: super block
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_segment(struct super_block *sb)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_transaction_info *ti;
	int err;

	if (!sci)
		return -EROFS;

	/* A call inside transactions causes a deadlock. */
	BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);

	err = nilfs_segctor_sync(sci);
	return err;
}
/**
 * nilfs_construct_dsync_segment - construct a data-only logical segment
 * @sb: super block
 * @inode: inode whose data blocks should be written out
 * @start: start byte offset
 * @end: end byte offset (inclusive)
 *
 * Return Value: On success, 0 is returned.  On errors, one of the following
 * negative error codes is returned.
 *
 * %-EROFS - Read only filesystem.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
				  loff_t start, loff_t end)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct nilfs_inode_info *ii;
	struct nilfs_transaction_info ti;
	int err = 0;

	if (!sci)
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 0);

	ii = NILFS_I(inode);
	if (test_bit(NILFS_I_INODE_DIRTY, &ii->i_state) ||
	    nilfs_test_opt(sbi, STRICT_ORDER) ||
	    test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    nilfs_discontinued(sbi->s_nilfs)) {
		nilfs_transaction_unlock(sbi);
		err = nilfs_segctor_sync(sci);
		return err;
	}

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		spin_unlock(&sbi->s_inode_lock);
		nilfs_transaction_unlock(sbi);
		return 0;
	}
	spin_unlock(&sbi->s_inode_lock);
	sci->sc_dsync_inode = ii;
	sci->sc_dsync_start = start;
	sci->sc_dsync_end = end;

	err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);

	nilfs_transaction_unlock(sbi);
	return err;
}
#define FLUSH_FILE_BIT	(0x1) /* data file only */
#define FLUSH_DAT_BIT	(1 << NILFS_DAT_INO) /* DAT only */
/**
 * nilfs_segctor_accept - record accepted sequence count of log-write requests
 * @sci: segment constructor object
 */
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
	spin_lock(&sci->sc_state_lock);
	sci->sc_seq_accepted = sci->sc_seq_request;
	spin_unlock(&sci->sc_state_lock);
	del_timer_sync(&sci->sc_timer);
}
/**
 * nilfs_segctor_notify - notify the result of request to caller threads
 * @sci: segment constructor object
 * @mode: mode of log forming
 * @err: error code to be notified
 */
static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
{
	/* Clear requests (even when the construction failed) */
	spin_lock(&sci->sc_state_lock);

	if (mode == SC_LSEG_SR) {
		sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
		sci->sc_seq_done = sci->sc_seq_accepted;
		nilfs_segctor_wakeup(sci, err);
		sci->sc_flush_request = 0;
	} else {
		if (mode == SC_FLUSH_FILE)
			sci->sc_flush_request &= ~FLUSH_FILE_BIT;
		else if (mode == SC_FLUSH_DAT)
			sci->sc_flush_request &= ~FLUSH_DAT_BIT;

		/* re-enable timer if checkpoint creation was not done */
		if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
		    time_before(jiffies, sci->sc_timer.expires))
			add_timer(&sci->sc_timer);
	}
	spin_unlock(&sci->sc_state_lock);
}
/**
 * nilfs_segctor_construct - form logs and write them to disk
 * @sci: segment constructor object
 * @mode: mode of log forming
 */
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_super_block **sbp;
	int err = 0;

	nilfs_segctor_accept(sci);

	if (nilfs_discontinued(nilfs))
		mode = SC_LSEG_SR;
	if (!nilfs_segctor_confirm(sci))
		err = nilfs_segctor_do_construct(sci, mode);

	if (likely(!err)) {
		if (mode != SC_FLUSH_DAT)
			atomic_set(&nilfs->ns_ndirtyblks, 0);
		if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
		    nilfs_discontinued(nilfs)) {
			down_write(&nilfs->ns_sem);
			err = -EIO;
			sbp = nilfs_prepare_super(sbi,
						  nilfs_sb_will_flip(nilfs));
			if (likely(sbp)) {
				nilfs_set_log_cursor(sbp[0], nilfs);
				err = nilfs_commit_super(sbi, NILFS_SB_COMMIT);
			}
			up_write(&nilfs->ns_sem);
		}
	}

	nilfs_segctor_notify(sci, mode, err);
	return err;
}
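/*
 * Sequencing sketch (commentary added for clarity): every construction
 * pass brackets the actual log writing with the two helpers above.
 * nilfs_segctor_accept() snapshots sc_seq_request into sc_seq_accepted
 * and stops the flush timer; nilfs_segctor_notify() publishes the
 * result and wakes any thread sleeping in nilfs_segctor_sync():
 *
 *	nilfs_segctor_accept(sci);
 *	err = nilfs_segctor_do_construct(sci, mode);	write logs
 *	nilfs_segctor_notify(sci, mode, err);		wake waiters
 */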
static void nilfs_construction_timeout(unsigned long data)
{
	struct task_struct *p = (struct task_struct *)data;

	wake_up_process(p);
}
static void
nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
{
	struct nilfs_inode_info *ii, *n;

	list_for_each_entry_safe(ii, n, head, i_dirty) {
		if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
			continue;
		list_del_init(&ii->i_dirty);
		iput(&ii->vfs_inode);
	}
}
int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
			 void **kbufs)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct nilfs_sc_info *sci = NILFS_SC(sbi);
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_transaction_info ti;
	int err;

	if (unlikely(!sci))
		return -EROFS;

	nilfs_transaction_lock(sbi, &ti, 1);

	err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
	if (unlikely(err))
		goto out_unlock;

	err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
	if (unlikely(err)) {
		nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
		goto out_unlock;
	}

	sci->sc_freesegs = kbufs[4];
	sci->sc_nfreesegs = argv[4].v_nmembs;
	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);

	for (;;) {
		err = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);

		if (likely(!err))
			break;

		nilfs_warning(sb, __func__,
			      "segment construction failed. (err=%d)", err);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(sci->sc_interval);
	}
	if (nilfs_test_opt(sbi, DISCARD)) {
		int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
						 sci->sc_nfreesegs);
		if (ret) {
			printk(KERN_WARNING
			       "NILFS warning: error %d on discard request, "
			       "turning discards off for the device\n", ret);
			nilfs_clear_opt(sbi, DISCARD);
		}
	}

 out_unlock:
	sci->sc_freesegs = NULL;
	sci->sc_nfreesegs = 0;
	nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
	nilfs_transaction_unlock(sbi);
	return err;
}
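/*
 * Note (added for clarity): the retry loop above keeps re-running a
 * full SC_LSEG_SR construction at sc_interval pacing until a pass
 * succeeds, since a failed pass during garbage collection would leave
 * the relocated blocks unwritten; inodes whose blocks did reach disk
 * are dropped from sc_gc_inodes after each pass.
 */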
static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	struct nilfs_transaction_info ti;

	nilfs_transaction_lock(sbi, &ti, 0);
	nilfs_segctor_construct(sci, mode);

	/*
	 * Unclosed segment should be retried.  We do this using sc_timer.
	 * Timeout of sc_timer will invoke complete construction which leads
	 * to close the current logical segment.
	 */
	if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
		nilfs_segctor_start_timer(sci);

	nilfs_transaction_unlock(sbi);
}
static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
{
	int mode = 0;
	int err;

	spin_lock(&sci->sc_state_lock);
	mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
		SC_FLUSH_DAT : SC_FLUSH_FILE;
	spin_unlock(&sci->sc_state_lock);

	if (mode) {
		err = nilfs_segctor_do_construct(sci, mode);

		spin_lock(&sci->sc_state_lock);
		sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
			~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
		spin_unlock(&sci->sc_state_lock);
	}
	clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
}
static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
{
	if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
	    time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
		if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
			return SC_FLUSH_FILE;
		else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
			return SC_FLUSH_DAT;
	}
	return SC_LSEG_SR;
}
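/*
 * Decision summary (commentary, not original source text): a cheap
 * flush without a checkpoint is only chosen while the current segment
 * may stay unclosed, i.e. within sc_mjcp_freq of the last log start.
 * Pending requests limited to data files yield SC_FLUSH_FILE, requests
 * limited to the DAT yield SC_FLUSH_DAT, and anything else escalates
 * to a full SC_LSEG_SR construction with a super root.
 */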
/**
 * nilfs_segctor_thread - main loop of the segment constructor thread.
 * @arg: pointer to a struct nilfs_sc_info.
 *
 * nilfs_segctor_thread() initializes a timer and serves as a daemon
 * to execute segment constructions.
 */
static int nilfs_segctor_thread(void *arg)
{
	struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
	struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs;
	int timeout = 0;

	sci->sc_timer.data = (unsigned long)current;
	sci->sc_timer.function = nilfs_construction_timeout;

	/* start sync. */
	sci->sc_task = current;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
	printk(KERN_NOTICE
	       "segctord starting. Construction interval = %lu seconds, "
	       "CP frequency < %lu seconds\n",
	       sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);

	spin_lock(&sci->sc_state_lock);
 loop:
	for (;;) {
		int mode;

		if (sci->sc_state & NILFS_SEGCTOR_QUIT)
			goto end_thread;

		if (timeout || sci->sc_seq_request != sci->sc_seq_done)
			mode = SC_LSEG_SR;
		else if (!sci->sc_flush_request)
			break;
		else
			mode = nilfs_segctor_flush_mode(sci);

		spin_unlock(&sci->sc_state_lock);
		nilfs_segctor_thread_construct(sci, mode);
		spin_lock(&sci->sc_state_lock);
		timeout = 0;
	}

	if (freezing(current)) {
		spin_unlock(&sci->sc_state_lock);
		refrigerator();
		spin_lock(&sci->sc_state_lock);
	} else {
		DEFINE_WAIT(wait);
		int should_sleep = 1;

		prepare_to_wait(&sci->sc_wait_daemon, &wait,
				TASK_INTERRUPTIBLE);

		if (sci->sc_seq_request != sci->sc_seq_done)
			should_sleep = 0;
		else if (sci->sc_flush_request)
			should_sleep = 0;
		else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
			should_sleep = time_before(jiffies,
					sci->sc_timer.expires);

		if (should_sleep) {
			spin_unlock(&sci->sc_state_lock);
			schedule();
			spin_lock(&sci->sc_state_lock);
		}
		finish_wait(&sci->sc_wait_daemon, &wait);
		timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
			   time_after_eq(jiffies, sci->sc_timer.expires));

		if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
			set_nilfs_discontinued(nilfs);
	}
	goto loop;

 end_thread:
	spin_unlock(&sci->sc_state_lock);

	/* end sync. */
	sci->sc_task = NULL;
	wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
	return 0;
}
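/*
 * Loop structure at a glance (commentary added for readability): the
 * daemon alternates between a construction phase, draining sync
 * requests (SC_LSEG_SR) and flush requests (SC_FLUSH_FILE/SC_FLUSH_DAT)
 * with sc_state_lock dropped around each construction, and a sleep
 * phase on sc_wait_daemon that ends when a new request arrives or
 * sc_timer expires; an expired timer forces a timeout construction
 * that closes any still-open logical segment.
 */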
static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
{
	struct task_struct *t;

	t = kthread_run(nilfs_segctor_thread, sci, "segctord");
	if (IS_ERR(t)) {
		int err = PTR_ERR(t);

		printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
		       err);
		return err;
	}
	wait_event(sci->sc_wait_task, sci->sc_task != NULL);
	return 0;
}
static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
	__acquires(&sci->sc_state_lock)
	__releases(&sci->sc_state_lock)
{
	sci->sc_state |= NILFS_SEGCTOR_QUIT;

	while (sci->sc_task) {
		wake_up(&sci->sc_wait_daemon);
		spin_unlock(&sci->sc_state_lock);
		wait_event(sci->sc_wait_task, sci->sc_task == NULL);
		spin_lock(&sci->sc_state_lock);
	}
}
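/*
 * Note (added commentary): the caller holds sc_state_lock on entry and
 * regains it on exit, matching the sparse annotations above; the lock
 * must be dropped around wait_event() because the exiting daemon takes
 * sc_state_lock itself before clearing sc_task.
 */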
/*
 * Setup & clean-up functions
 */
static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi,
					       struct nilfs_root *root)
{
	struct nilfs_sc_info *sci;

	sci = kzalloc(sizeof(*sci), GFP_KERNEL);
	if (!sci)
		return NULL;

	sci->sc_sbi = sbi;
	sci->sc_super = sbi->s_super;

	nilfs_get_root(root);
	sci->sc_root = root;

	init_waitqueue_head(&sci->sc_wait_request);
	init_waitqueue_head(&sci->sc_wait_daemon);
	init_waitqueue_head(&sci->sc_wait_task);
	spin_lock_init(&sci->sc_state_lock);
	INIT_LIST_HEAD(&sci->sc_dirty_files);
	INIT_LIST_HEAD(&sci->sc_segbufs);
	INIT_LIST_HEAD(&sci->sc_write_logs);
	INIT_LIST_HEAD(&sci->sc_gc_inodes);
	INIT_LIST_HEAD(&sci->sc_copied_buffers);
	init_timer(&sci->sc_timer);

	sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
	sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
	sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;

	if (sbi->s_interval)
		sci->sc_interval = sbi->s_interval;
	if (sbi->s_watermark)
		sci->sc_watermark = sbi->s_watermark;
	return sci;
}
static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
{
	int ret, retrycount = NILFS_SC_CLEANUP_RETRY;

	/* The segctord thread was stopped and its timer was removed.
	   But some tasks remain. */
	do {
		struct nilfs_sb_info *sbi = sci->sc_sbi;
		struct nilfs_transaction_info ti;

		nilfs_transaction_lock(sbi, &ti, 0);
		ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
		nilfs_transaction_unlock(sbi);

	} while (ret && retrycount-- > 0);
}
/**
 * nilfs_segctor_destroy - destroy the segment constructor.
 * @sci: nilfs_sc_info
 *
 * nilfs_segctor_destroy() kills the segctord thread and frees
 * the nilfs_sc_info struct.
 * Caller must hold the segment semaphore.
 */
static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
{
	struct nilfs_sb_info *sbi = sci->sc_sbi;
	int flag;

	up_write(&sbi->s_nilfs->ns_segctor_sem);

	spin_lock(&sci->sc_state_lock);
	nilfs_segctor_kill_thread(sci);
	flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
		|| sci->sc_seq_request != sci->sc_seq_done);
	spin_unlock(&sci->sc_state_lock);

	if (flag || !nilfs_segctor_confirm(sci))
		nilfs_segctor_write_out(sci);

	WARN_ON(!list_empty(&sci->sc_copied_buffers));

	if (!list_empty(&sci->sc_dirty_files)) {
		nilfs_warning(sbi->s_super, __func__,
			      "dirty file(s) after the final construction\n");
		nilfs_dispose_list(sbi, &sci->sc_dirty_files, 1);
	}

	WARN_ON(!list_empty(&sci->sc_segbufs));
	WARN_ON(!list_empty(&sci->sc_write_logs));

	nilfs_put_root(sci->sc_root);

	down_write(&sbi->s_nilfs->ns_segctor_sem);

	del_timer_sync(&sci->sc_timer);
	kfree(sci);
}
/**
 * nilfs_attach_segment_constructor - attach a segment constructor
 * @sbi: nilfs_sb_info
 * @root: root object of the current filesystem tree
 *
 * nilfs_attach_segment_constructor() allocates a struct nilfs_sc_info,
 * initializes it, and starts the segment constructor.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_attach_segment_constructor(struct nilfs_sb_info *sbi,
				     struct nilfs_root *root)
{
	int err;

	if (NILFS_SC(sbi)) {
		/*
		 * This happens if the filesystem was remounted
		 * read/write after nilfs_error degenerated it into a
		 * read-only mount.
		 */
		nilfs_detach_segment_constructor(sbi);
	}

	sbi->s_sc_info = nilfs_segctor_new(sbi, root);
	if (!sbi->s_sc_info)
		return -ENOMEM;

	err = nilfs_segctor_start_thread(NILFS_SC(sbi));
	if (err) {
		kfree(sbi->s_sc_info);
		sbi->s_sc_info = NULL;
	}
	return err;
}
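/*
 * Usage sketch (illustrative; the real call sites live in the mount
 * code, not in this file): attach when the filesystem goes writable
 * and detach on unmount or read-only remount, e.g.:
 *
 *	err = nilfs_attach_segment_constructor(sbi, root);
 *	...
 *	nilfs_detach_segment_constructor(sbi);
 */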
/**
 * nilfs_detach_segment_constructor - destroy the segment constructor
 * @sbi: nilfs_sb_info
 *
 * nilfs_detach_segment_constructor() kills the segment constructor daemon,
 * frees the struct nilfs_sc_info, and destroys the dirty file list.
 */
void nilfs_detach_segment_constructor(struct nilfs_sb_info *sbi)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	LIST_HEAD(garbage_list);

	down_write(&nilfs->ns_segctor_sem);
	if (NILFS_SC(sbi)) {
		nilfs_segctor_destroy(NILFS_SC(sbi));
		sbi->s_sc_info = NULL;
	}

	/* Force to free the list of dirty files */
	spin_lock(&sbi->s_inode_lock);
	if (!list_empty(&sbi->s_dirty_files)) {
		list_splice_init(&sbi->s_dirty_files, &garbage_list);
		nilfs_warning(sbi->s_super, __func__,
			      "Non empty dirty list after the last "
			      "segment construction\n");
	}
	spin_unlock(&sbi->s_inode_lock);
	up_write(&nilfs->ns_segctor_sem);

	nilfs_dispose_list(sbi, &garbage_list, 1);
}