1/*
2 * segment.c - NILFS segment constructor.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
21 *
22 */
23
24#include <linux/pagemap.h>
25#include <linux/buffer_head.h>
26#include <linux/writeback.h>
27#include <linux/bio.h>
28#include <linux/completion.h>
29#include <linux/blkdev.h>
30#include <linux/backing-dev.h>
31#include <linux/freezer.h>
32#include <linux/kthread.h>
33#include <linux/crc32.h>
34#include <linux/pagevec.h>
 35#include <linux/slab.h>
36#include "nilfs.h"
37#include "btnode.h"
38#include "page.h"
39#include "segment.h"
40#include "sufile.h"
41#include "cpfile.h"
42#include "ifile.h"
43#include "segbuf.h"
44
45
46/*
47 * Segment constructor
48 */
49#define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */
50
51#define SC_MAX_SEGDELTA 64 /* Upper limit of the number of segments
52 appended in collection retry loop */
53
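/*
 * Descriptive note (derived from nilfs_segctor_collect() below): when a
 * collection pass fails with -E2BIG, the constructor extends the log by
 * "nadd" segments and retries, doubling nadd on each retry up to this
 * limit: nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA).
 */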
54/* Construction mode */
55enum {
56 SC_LSEG_SR = 1, /* Make a logical segment having a super root */
57 SC_LSEG_DSYNC, /* Flush data blocks of a given file and make
58 a logical segment without a super root */
59 SC_FLUSH_FILE, /* Flush data files, leads to segment writes without
60 creating a checkpoint */
61 SC_FLUSH_DAT, /* Flush DAT file. This also creates segments without
62 a checkpoint */
63};
64
65/* Stage numbers of dirty block collection */
66enum {
67 NILFS_ST_INIT = 0,
68 NILFS_ST_GC, /* Collecting dirty blocks for GC */
69 NILFS_ST_FILE,
70 NILFS_ST_IFILE,
71 NILFS_ST_CPFILE,
72 NILFS_ST_SUFILE,
73 NILFS_ST_DAT,
74 NILFS_ST_SR, /* Super root */
75 NILFS_ST_DSYNC, /* Data sync blocks */
76 NILFS_ST_DONE,
77};
78
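/*
 * Descriptive note (derived from nilfs_segctor_collect_blocks()): a full
 * SC_LSEG_SR construction walks these stages in order, INIT -> GC ->
 * FILE -> IFILE -> CPFILE -> SUFILE -> DAT -> SR -> DONE.  SC_FLUSH_FILE
 * stops after the FILE stage, SC_FLUSH_DAT jumps straight to the DAT
 * stage, and SC_LSEG_DSYNC jumps to the DSYNC stage.
 */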
79/* State flags of collection */
80#define NILFS_CF_NODE 0x0001 /* Collecting node blocks */
81#define NILFS_CF_IFILE_STARTED 0x0002 /* IFILE stage has started */
 82#define NILFS_CF_SUFREED 0x0004 /* segment usages have been freed */
 83#define NILFS_CF_HISTORY_MASK (NILFS_CF_IFILE_STARTED | NILFS_CF_SUFREED)
84
85/* Operations depending on the construction mode and file type */
86struct nilfs_sc_operations {
87 int (*collect_data)(struct nilfs_sc_info *, struct buffer_head *,
88 struct inode *);
89 int (*collect_node)(struct nilfs_sc_info *, struct buffer_head *,
90 struct inode *);
91 int (*collect_bmap)(struct nilfs_sc_info *, struct buffer_head *,
92 struct inode *);
93 void (*write_data_binfo)(struct nilfs_sc_info *,
94 struct nilfs_segsum_pointer *,
95 union nilfs_binfo *);
96 void (*write_node_binfo)(struct nilfs_sc_info *,
97 struct nilfs_segsum_pointer *,
98 union nilfs_binfo *);
99};
100
101/*
102 * Other definitions
103 */
104static void nilfs_segctor_start_timer(struct nilfs_sc_info *);
105static void nilfs_segctor_do_flush(struct nilfs_sc_info *, int);
106static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *);
107static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
108
109#define nilfs_cnt32_gt(a, b) \
110 (typecheck(__u32, a) && typecheck(__u32, b) && \
111 ((__s32)(b) - (__s32)(a) < 0))
112#define nilfs_cnt32_ge(a, b) \
113 (typecheck(__u32, a) && typecheck(__u32, b) && \
114 ((__s32)(a) - (__s32)(b) >= 0))
115#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
116#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
117
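/*
 * Illustrative example (not part of the original source): these
 * comparisons stay correct across 32-bit wrap-around.  With
 * a = 0x00000001 and b = 0xfffffffe, nilfs_cnt32_gt(a, b) computes
 * (__s32)(b) - (__s32)(a) = -3 < 0 and reports "a is newer than b",
 * whereas a plain unsigned "a > b" would give the opposite answer.
 */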
118static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
119{
120 struct nilfs_transaction_info *cur_ti = current->journal_info;
121 void *save = NULL;
122
123 if (cur_ti) {
124 if (cur_ti->ti_magic == NILFS_TI_MAGIC)
125 return ++cur_ti->ti_count;
126 else {
127 /*
128 * If the journal_info field is occupied by another FS,
129 * it is saved and will be restored on
130 * nilfs_transaction_commit().
131 */
132 printk(KERN_WARNING
133 "NILFS warning: journal info from a different "
134 "FS\n");
135 save = current->journal_info;
136 }
137 }
138 if (!ti) {
139 ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS);
140 if (!ti)
141 return -ENOMEM;
142 ti->ti_flags = NILFS_TI_DYNAMIC_ALLOC;
143 } else {
144 ti->ti_flags = 0;
145 }
146 ti->ti_count = 0;
147 ti->ti_save = save;
148 ti->ti_magic = NILFS_TI_MAGIC;
149 current->journal_info = ti;
150 return 0;
151}
152
153/**
154 * nilfs_transaction_begin - start indivisible file operations.
155 * @sb: super block
156 * @ti: nilfs_transaction_info
157 * @vacancy_check: flags for vacancy rate checks
158 *
159 * nilfs_transaction_begin() acquires a reader/writer semaphore, called
160 * the segment semaphore, to make segment construction and write tasks
161 * exclusive of each other.  It is used in pairs with nilfs_transaction_commit().
162 * The region enclosed by these two functions can be nested.  To avoid a
163 * deadlock, the semaphore is only acquired or released in the outermost call.
164 *
165 * This function allocates a nilfs_transaction_info struct to keep context
166 * information on it.  It is initialized and hooked onto the current task in
167 * the outermost call.  If a pre-allocated struct is given to @ti, it is used
168 * instead; otherwise a new struct is allocated from a slab cache.
169 *
170 * When the @vacancy_check flag is set, this function checks the amount of
171 * free space and waits for the GC to reclaim disk space if capacity is low.
172 *
173 * Return Value: On success, 0 is returned.  On error, one of the following
174 * negative error codes is returned.
175 *
176 * %-ENOMEM - Insufficient memory available.
177 *
178 * %-ENOSPC - No space left on device
179 */
180int nilfs_transaction_begin(struct super_block *sb,
181 struct nilfs_transaction_info *ti,
182 int vacancy_check)
183{
184 struct the_nilfs *nilfs;
185 int ret = nilfs_prepare_segment_lock(ti);
186
187 if (unlikely(ret < 0))
188 return ret;
189 if (ret > 0)
190 return 0;
191
192 sb_start_intwrite(sb);
193
194 nilfs = sb->s_fs_info;
195 down_read(&nilfs->ns_segctor_sem);
196 if (vacancy_check && nilfs_near_disk_full(nilfs)) {
197 up_read(&nilfs->ns_segctor_sem);
198 ret = -ENOSPC;
199 goto failed;
200 }
201 return 0;
202
203 failed:
204 ti = current->journal_info;
205 current->journal_info = ti->ti_save;
206 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
207 kmem_cache_free(nilfs_transaction_cachep, ti);
208 sb_end_intwrite(sb);
209 return ret;
210}
211
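/*
 * Usage sketch (illustrative only, not part of the original source):
 * a typical caller brackets its updates with the begin/commit pair
 * described above; do_update() stands for an arbitrary, hypothetical
 * block-modifying operation.
 *
 *	struct nilfs_transaction_info ti;
 *	int err;
 *
 *	err = nilfs_transaction_begin(sb, &ti, 1);
 *	if (err)
 *		return err;
 *	err = do_update(inode);
 *	if (err) {
 *		nilfs_transaction_abort(sb);
 *		return err;
 *	}
 *	return nilfs_transaction_commit(sb);
 */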
212/**
213 * nilfs_transaction_commit - commit indivisible file operations.
214 * @sb: super block
215 *
216 * nilfs_transaction_commit() releases the read semaphore which is
217 * acquired by nilfs_transaction_begin().  This is performed only in
218 * the outermost call of this function.  If the commit flag is set,
219 * nilfs_transaction_commit() sets a timer to start the segment
220 * constructor.  If the sync flag is set, it starts construction
221 * directly.
222 */
223int nilfs_transaction_commit(struct super_block *sb)
224{
225 struct nilfs_transaction_info *ti = current->journal_info;
226 struct the_nilfs *nilfs = sb->s_fs_info;
227 int err = 0;
228
229 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
230 ti->ti_flags |= NILFS_TI_COMMIT;
231 if (ti->ti_count > 0) {
232 ti->ti_count--;
233 return 0;
234 }
235 if (nilfs->ns_writer) {
236 struct nilfs_sc_info *sci = nilfs->ns_writer;
237
238 if (ti->ti_flags & NILFS_TI_COMMIT)
239 nilfs_segctor_start_timer(sci);
240 if (atomic_read(&nilfs->ns_ndirtyblks) > sci->sc_watermark)
241 nilfs_segctor_do_flush(sci, 0);
242 }
243 up_read(&nilfs->ns_segctor_sem);
244 current->journal_info = ti->ti_save;
245
246 if (ti->ti_flags & NILFS_TI_SYNC)
247 err = nilfs_construct_segment(sb);
248 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
249 kmem_cache_free(nilfs_transaction_cachep, ti);
250 sb_end_intwrite(sb);
251 return err;
252}
253
254void nilfs_transaction_abort(struct super_block *sb)
255{
256 struct nilfs_transaction_info *ti = current->journal_info;
257 struct the_nilfs *nilfs = sb->s_fs_info;
258
259 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
260 if (ti->ti_count > 0) {
261 ti->ti_count--;
262 return;
263 }
264 up_read(&nilfs->ns_segctor_sem);
265
266 current->journal_info = ti->ti_save;
267 if (ti->ti_flags & NILFS_TI_DYNAMIC_ALLOC)
268 kmem_cache_free(nilfs_transaction_cachep, ti);
269 sb_end_intwrite(sb);
270}
271
272void nilfs_relax_pressure_in_lock(struct super_block *sb)
273{
274 struct the_nilfs *nilfs = sb->s_fs_info;
275 struct nilfs_sc_info *sci = nilfs->ns_writer;
276
277 if (!sci || !sci->sc_flush_request)
278 return;
279
280 set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
281 up_read(&nilfs->ns_segctor_sem);
282
283 down_write(&nilfs->ns_segctor_sem);
284 if (sci->sc_flush_request &&
285 test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags)) {
286 struct nilfs_transaction_info *ti = current->journal_info;
287
288 ti->ti_flags |= NILFS_TI_WRITER;
289 nilfs_segctor_do_immediate_flush(sci);
290 ti->ti_flags &= ~NILFS_TI_WRITER;
291 }
292 downgrade_write(&nilfs->ns_segctor_sem);
293}
294
295static void nilfs_transaction_lock(struct super_block *sb,
296 struct nilfs_transaction_info *ti,
297 int gcflag)
298{
299 struct nilfs_transaction_info *cur_ti = current->journal_info;
300 struct the_nilfs *nilfs = sb->s_fs_info;
301 struct nilfs_sc_info *sci = nilfs->ns_writer;
302
303 WARN_ON(cur_ti);
304 ti->ti_flags = NILFS_TI_WRITER;
305 ti->ti_count = 0;
306 ti->ti_save = cur_ti;
307 ti->ti_magic = NILFS_TI_MAGIC;
308 current->journal_info = ti;
309
310 for (;;) {
311 down_write(&nilfs->ns_segctor_sem);
312 if (!test_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags))
313 break;
314
315 nilfs_segctor_do_immediate_flush(sci);
316
317 up_write(&nilfs->ns_segctor_sem);
318 yield();
319 }
320 if (gcflag)
321 ti->ti_flags |= NILFS_TI_GC;
322}
323
324static void nilfs_transaction_unlock(struct super_block *sb)
325{
326 struct nilfs_transaction_info *ti = current->journal_info;
327 struct the_nilfs *nilfs = sb->s_fs_info;
328
329 BUG_ON(ti == NULL || ti->ti_magic != NILFS_TI_MAGIC);
330 BUG_ON(ti->ti_count > 0);
331
332 up_write(&nilfs->ns_segctor_sem);
333 current->journal_info = ti->ti_save;
334}
335
336static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
337 struct nilfs_segsum_pointer *ssp,
338 unsigned bytes)
339{
340 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
341 unsigned blocksize = sci->sc_super->s_blocksize;
342 void *p;
343
344 if (unlikely(ssp->offset + bytes > blocksize)) {
345 ssp->offset = 0;
346 BUG_ON(NILFS_SEGBUF_BH_IS_LAST(ssp->bh,
347 &segbuf->sb_segsum_buffers));
348 ssp->bh = NILFS_SEGBUF_NEXT_BH(ssp->bh);
349 }
350 p = ssp->bh->b_data + ssp->offset;
351 ssp->offset += bytes;
352 return p;
353}
354
355/**
356 * nilfs_segctor_reset_segment_buffer - reset the current segment buffer
357 * @sci: nilfs_sc_info
358 */
359static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
360{
361 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
362 struct buffer_head *sumbh;
363 unsigned sumbytes;
364 unsigned flags = 0;
365 int err;
366
367 if (nilfs_doing_gc())
368 flags = NILFS_SS_GC;
369 err = nilfs_segbuf_reset(segbuf, flags, sci->sc_seg_ctime, sci->sc_cno);
370 if (unlikely(err))
371 return err;
372
373 sumbh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
374 sumbytes = segbuf->sb_sum.sumbytes;
375 sci->sc_finfo_ptr.bh = sumbh; sci->sc_finfo_ptr.offset = sumbytes;
376 sci->sc_binfo_ptr.bh = sumbh; sci->sc_binfo_ptr.offset = sumbytes;
377 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
378 return 0;
379}
380
381static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
382{
383 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
384 if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs))
385 return -E2BIG; /* The current segment is filled up
386 (internal code) */
387 sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
388 return nilfs_segctor_reset_segment_buffer(sci);
389}
390
391static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci)
392{
393 struct nilfs_segment_buffer *segbuf = sci->sc_curseg;
394 int err;
395
396 if (segbuf->sb_sum.nblocks >= segbuf->sb_rest_blocks) {
397 err = nilfs_segctor_feed_segment(sci);
398 if (err)
399 return err;
400 segbuf = sci->sc_curseg;
401 }
402 err = nilfs_segbuf_extend_payload(segbuf, &segbuf->sb_super_root);
403 if (likely(!err))
404 segbuf->sb_sum.flags |= NILFS_SS_SR;
405 return err;
406}
407
408/*
409 * Functions for making segment summary and payloads
410 */
411static int nilfs_segctor_segsum_block_required(
412 struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp,
413 unsigned binfo_size)
414{
415 unsigned blocksize = sci->sc_super->s_blocksize;
416 /* Sizes of finfo and binfo are small enough compared to the blocksize */
417
418 return ssp->offset + binfo_size +
419 (!sci->sc_blk_cnt ? sizeof(struct nilfs_finfo) : 0) >
420 blocksize;
421}
422
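/*
 * Illustrative note (not part of the original source): the check above
 * asks whether the next finfo/binfo entry would run past the end of the
 * current summary block.  When it returns true,
 * nilfs_segctor_add_file_block() extends the segment summary with
 * nilfs_segbuf_extend_segsum(), and nilfs_segctor_map_segsum_entry()
 * then restarts at offset 0 of the next summary buffer when the
 * overflow actually occurs.
 */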
423static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
424 struct inode *inode)
425{
426 sci->sc_curseg->sb_sum.nfinfo++;
427 sci->sc_binfo_ptr = sci->sc_finfo_ptr;
428 nilfs_segctor_map_segsum_entry(
429 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
430
431 if (NILFS_I(inode)->i_root &&
432     !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
433 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
434 /* skip finfo */
435}
436
437static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci,
438 struct inode *inode)
439{
440 struct nilfs_finfo *finfo;
441 struct nilfs_inode_info *ii;
442 struct nilfs_segment_buffer *segbuf;
443 __u64 cno;
444
445 if (sci->sc_blk_cnt == 0)
446 return;
447
448 ii = NILFS_I(inode);
449
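	/*
	 * Checkpoint number recorded in the finfo: GC inodes carry their
	 * own checkpoint number, blocks of the root metadata files are
	 * recorded with cno 0, and all other files use the checkpoint
	 * currently being written (sc_cno).
	 */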
450 if (test_bit(NILFS_I_GCINODE, &ii->i_state))
451 cno = ii->i_cno;
452 else if (NILFS_ROOT_METADATA_FILE(inode->i_ino))
453 cno = 0;
454 else
455 cno = sci->sc_cno;
456
457 finfo = nilfs_segctor_map_segsum_entry(sci, &sci->sc_finfo_ptr,
458 sizeof(*finfo));
459 finfo->fi_ino = cpu_to_le64(inode->i_ino);
460 finfo->fi_nblocks = cpu_to_le32(sci->sc_blk_cnt);
461 finfo->fi_ndatablk = cpu_to_le32(sci->sc_datablk_cnt);
462 finfo->fi_cno = cpu_to_le64(cno);
463
464 segbuf = sci->sc_curseg;
465 segbuf->sb_sum.sumbytes = sci->sc_binfo_ptr.offset +
466 sci->sc_super->s_blocksize * (segbuf->sb_sum.nsumblk - 1);
467 sci->sc_finfo_ptr = sci->sc_binfo_ptr;
468 sci->sc_blk_cnt = sci->sc_datablk_cnt = 0;
469}
470
471static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
472 struct buffer_head *bh,
473 struct inode *inode,
474 unsigned binfo_size)
475{
476 struct nilfs_segment_buffer *segbuf;
477 int required, err = 0;
478
479 retry:
480 segbuf = sci->sc_curseg;
481 required = nilfs_segctor_segsum_block_required(
482 sci, &sci->sc_binfo_ptr, binfo_size);
483 if (segbuf->sb_sum.nblocks + required + 1 > segbuf->sb_rest_blocks) {
484 nilfs_segctor_end_finfo(sci, inode);
485 err = nilfs_segctor_feed_segment(sci);
486 if (err)
487 return err;
488 goto retry;
489 }
490 if (unlikely(required)) {
491 err = nilfs_segbuf_extend_segsum(segbuf);
492 if (unlikely(err))
493 goto failed;
494 }
495 if (sci->sc_blk_cnt == 0)
496 nilfs_segctor_begin_finfo(sci, inode);
497
498 nilfs_segctor_map_segsum_entry(sci, &sci->sc_binfo_ptr, binfo_size);
499 /* Substitution to vblocknr is delayed until update_blocknr() */
500 nilfs_segbuf_add_file_buffer(segbuf, bh);
501 sci->sc_blk_cnt++;
502 failed:
503 return err;
504}
505
9ff05123
RK
506/*
507 * Callback functions that enumerate, mark, and collect dirty blocks
508 */
509static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
510 struct buffer_head *bh, struct inode *inode)
511{
512 int err;
513
9ff05123 514 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
e828949e
RK
515 if (err < 0)
516 return err;
9ff05123
RK
517
518 err = nilfs_segctor_add_file_block(sci, bh, inode,
519 sizeof(struct nilfs_binfo_v));
520 if (!err)
521 sci->sc_datablk_cnt++;
522 return err;
523}
524
525static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
526 struct buffer_head *bh,
527 struct inode *inode)
528{
e828949e 529 return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
9ff05123
RK
530}
531
532static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
533 struct buffer_head *bh,
534 struct inode *inode)
535{
1f5abe7e 536 WARN_ON(!buffer_dirty(bh));
9ff05123
RK
537 return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
538}
539
540static void nilfs_write_file_data_binfo(struct nilfs_sc_info *sci,
541 struct nilfs_segsum_pointer *ssp,
542 union nilfs_binfo *binfo)
543{
544 struct nilfs_binfo_v *binfo_v = nilfs_segctor_map_segsum_entry(
545 sci, ssp, sizeof(*binfo_v));
546 *binfo_v = binfo->bi_v;
547}
548
549static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci,
550 struct nilfs_segsum_pointer *ssp,
551 union nilfs_binfo *binfo)
552{
553 __le64 *vblocknr = nilfs_segctor_map_segsum_entry(
554 sci, ssp, sizeof(*vblocknr));
555 *vblocknr = binfo->bi_v.bi_vblocknr;
556}
557
4e819509 558static struct nilfs_sc_operations nilfs_sc_file_ops = {
9ff05123
RK
559 .collect_data = nilfs_collect_file_data,
560 .collect_node = nilfs_collect_file_node,
561 .collect_bmap = nilfs_collect_file_bmap,
562 .write_data_binfo = nilfs_write_file_data_binfo,
563 .write_node_binfo = nilfs_write_file_node_binfo,
564};
565
566static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
567 struct buffer_head *bh, struct inode *inode)
568{
569 int err;
570
571 err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
e828949e
RK
572 if (err < 0)
573 return err;
9ff05123
RK
574
575 err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
576 if (!err)
577 sci->sc_datablk_cnt++;
578 return err;
579}
580
581static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
582 struct buffer_head *bh, struct inode *inode)
583{
1f5abe7e 584 WARN_ON(!buffer_dirty(bh));
9ff05123
RK
585 return nilfs_segctor_add_file_block(sci, bh, inode,
586 sizeof(struct nilfs_binfo_dat));
587}
588
589static void nilfs_write_dat_data_binfo(struct nilfs_sc_info *sci,
590 struct nilfs_segsum_pointer *ssp,
591 union nilfs_binfo *binfo)
592{
593 __le64 *blkoff = nilfs_segctor_map_segsum_entry(sci, ssp,
594 sizeof(*blkoff));
595 *blkoff = binfo->bi_dat.bi_blkoff;
596}
597
598static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci,
599 struct nilfs_segsum_pointer *ssp,
600 union nilfs_binfo *binfo)
601{
602 struct nilfs_binfo_dat *binfo_dat =
603 nilfs_segctor_map_segsum_entry(sci, ssp, sizeof(*binfo_dat));
604 *binfo_dat = binfo->bi_dat;
605}
606
4e819509 607static struct nilfs_sc_operations nilfs_sc_dat_ops = {
9ff05123
RK
608 .collect_data = nilfs_collect_dat_data,
609 .collect_node = nilfs_collect_file_node,
610 .collect_bmap = nilfs_collect_dat_bmap,
611 .write_data_binfo = nilfs_write_dat_data_binfo,
612 .write_node_binfo = nilfs_write_dat_node_binfo,
613};
614
4e819509 615static struct nilfs_sc_operations nilfs_sc_dsync_ops = {
9ff05123
RK
616 .collect_data = nilfs_collect_file_data,
617 .collect_node = NULL,
618 .collect_bmap = NULL,
619 .write_data_binfo = nilfs_write_file_data_binfo,
620 .write_node_binfo = NULL,
621};
622
f30bf3e4
RK
623static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
624 struct list_head *listp,
625 size_t nlimit,
626 loff_t start, loff_t end)
9ff05123 627{
9ff05123
RK
628 struct address_space *mapping = inode->i_mapping;
629 struct pagevec pvec;
f30bf3e4
RK
630 pgoff_t index = 0, last = ULONG_MAX;
631 size_t ndirties = 0;
632 int i;
9ff05123 633
f30bf3e4
RK
634 if (unlikely(start != 0 || end != LLONG_MAX)) {
635 /*
636 * A valid range is given for sync-ing data pages. The
637 * range is rounded to per-page; extra dirty buffers
638 * may be included if blocksize < pagesize.
639 */
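		/*
		 * Example (illustrative): with 4 KiB pages and a 1 KiB
		 * block size, a range of [0, 2048) still selects the
		 * whole first page, so dirty buffers at page offsets
		 * 2048 and 3072 are collected as well.
		 */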
640 index = start >> PAGE_SHIFT;
641 last = end >> PAGE_SHIFT;
642 }
643 pagevec_init(&pvec, 0);
644 repeat:
f30bf3e4
RK
645 if (unlikely(index > last) ||
646 !pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
647 min_t(pgoff_t, last - index,
648 PAGEVEC_SIZE - 1) + 1))
649 return ndirties;
9ff05123
RK
650
651 for (i = 0; i < pagevec_count(&pvec); i++) {
652 struct buffer_head *bh, *head;
653 struct page *page = pvec.pages[i];
654
f30bf3e4
RK
655 if (unlikely(page->index > last))
656 break;
657
aa405b1f
RK
658 lock_page(page);
659 if (!page_has_buffers(page))
660 create_empty_buffers(page, 1 << inode->i_blkbits, 0);
661 unlock_page(page);
9ff05123
RK
662
663 bh = head = page_buffers(page);
664 do {
7f42ec39 665 if (!buffer_dirty(bh) || buffer_async_write(bh))
f30bf3e4
RK
666 continue;
667 get_bh(bh);
668 list_add_tail(&bh->b_assoc_buffers, listp);
669 ndirties++;
670 if (unlikely(ndirties >= nlimit)) {
671 pagevec_release(&pvec);
672 cond_resched();
673 return ndirties;
9ff05123 674 }
f30bf3e4 675 } while (bh = bh->b_this_page, bh != head);
9ff05123
RK
676 }
677 pagevec_release(&pvec);
678 cond_resched();
f30bf3e4 679 goto repeat;
9ff05123
RK
680}
681
682static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
683 struct list_head *listp)
684{
685 struct nilfs_inode_info *ii = NILFS_I(inode);
686 struct address_space *mapping = &ii->i_btnode_cache;
687 struct pagevec pvec;
688 struct buffer_head *bh, *head;
689 unsigned int i;
690 pgoff_t index = 0;
691
692 pagevec_init(&pvec, 0);
693
694 while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
695 PAGEVEC_SIZE)) {
696 for (i = 0; i < pagevec_count(&pvec); i++) {
697 bh = head = page_buffers(pvec.pages[i]);
698 do {
7f42ec39
VD
699 if (buffer_dirty(bh) &&
700 !buffer_async_write(bh)) {
9ff05123
RK
701 get_bh(bh);
702 list_add_tail(&bh->b_assoc_buffers,
703 listp);
704 }
705 bh = bh->b_this_page;
706 } while (bh != head);
707 }
708 pagevec_release(&pvec);
709 cond_resched();
710 }
711}
712
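/*
 * Descriptive note (derived from the function below): inodes queued on
 * @head are unhooked in batches of at most SC_N_INODEVEC under
 * ns_inode_lock; their references are dropped with iput() only after
 * the lock has been released, unless they are still dirty and get
 * re-queued to ns_dirty_files instead.
 */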
713static void nilfs_dispose_list(struct the_nilfs *nilfs,
714 struct list_head *head, int force)
715{
716 struct nilfs_inode_info *ii, *n;
717 struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii;
718 unsigned nv = 0;
719
720 while (!list_empty(head)) {
693dd321 721 spin_lock(&nilfs->ns_inode_lock);
9ff05123
RK
722 list_for_each_entry_safe(ii, n, head, i_dirty) {
723 list_del_init(&ii->i_dirty);
724 if (force) {
725 if (unlikely(ii->i_bh)) {
726 brelse(ii->i_bh);
727 ii->i_bh = NULL;
728 }
729 } else if (test_bit(NILFS_I_DIRTY, &ii->i_state)) {
730 set_bit(NILFS_I_QUEUED, &ii->i_state);
731 list_add_tail(&ii->i_dirty,
693dd321 732 &nilfs->ns_dirty_files);
9ff05123
RK
733 continue;
734 }
735 ivec[nv++] = ii;
736 if (nv == SC_N_INODEVEC)
737 break;
738 }
693dd321 739 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
740
741 for (pii = ivec; nv > 0; pii++, nv--)
742 iput(&(*pii)->vfs_inode);
743 }
744}
745
7ef3ff2f
RK
746static void nilfs_iput_work_func(struct work_struct *work)
747{
748 struct nilfs_sc_info *sci = container_of(work, struct nilfs_sc_info,
749 sc_iput_work);
750 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
751
752 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 0);
753}
754
e912a5b6
RK
755static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs,
756 struct nilfs_root *root)
9ff05123 757{
9ff05123
RK
758 int ret = 0;
759
e912a5b6 760 if (nilfs_mdt_fetch_dirty(root->ifile))
9ff05123
RK
761 ret++;
762 if (nilfs_mdt_fetch_dirty(nilfs->ns_cpfile))
763 ret++;
764 if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile))
765 ret++;
365e215c
RK
766 if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat))
767 ret++;
9ff05123
RK
768 return ret;
769}
770
771static int nilfs_segctor_clean(struct nilfs_sc_info *sci)
772{
773 return list_empty(&sci->sc_dirty_files) &&
774 !test_bit(NILFS_SC_DIRTY, &sci->sc_flags) &&
071cb4b8 775 sci->sc_nfreesegs == 0 &&
9ff05123
RK
776 (!nilfs_doing_gc() || list_empty(&sci->sc_gc_inodes));
777}
778
779static int nilfs_segctor_confirm(struct nilfs_sc_info *sci)
780{
e3154e97 781 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
782 int ret = 0;
783
693dd321 784 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
9ff05123
RK
785 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
786
693dd321
RK
787 spin_lock(&nilfs->ns_inode_lock);
788 if (list_empty(&nilfs->ns_dirty_files) && nilfs_segctor_clean(sci))
9ff05123
RK
789 ret++;
790
693dd321 791 spin_unlock(&nilfs->ns_inode_lock);
9ff05123
RK
792 return ret;
793}
794
795static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci)
796{
e3154e97 797 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123 798
e912a5b6 799 nilfs_mdt_clear_dirty(sci->sc_root->ifile);
9ff05123
RK
800 nilfs_mdt_clear_dirty(nilfs->ns_cpfile);
801 nilfs_mdt_clear_dirty(nilfs->ns_sufile);
365e215c 802 nilfs_mdt_clear_dirty(nilfs->ns_dat);
9ff05123
RK
803}
804
805static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
806{
e3154e97 807 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
808 struct buffer_head *bh_cp;
809 struct nilfs_checkpoint *raw_cp;
810 int err;
811
812 /* XXX: this interface will be changed */
813 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1,
814 &raw_cp, &bh_cp);
815 if (likely(!err)) {
816 /* This code is duplicated in cpfile, but it is needed to
817 collect the checkpoint even if it was not newly
818 created */
5fc7b141 819 mark_buffer_dirty(bh_cp);
9ff05123
RK
820 nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
821 nilfs_cpfile_put_checkpoint(
822 nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
1f5abe7e
RK
823 } else
824 WARN_ON(err == -EINVAL || err == -ENOENT);
825
9ff05123
RK
826 return err;
827}
828
829static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
830{
e3154e97 831 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
832 struct buffer_head *bh_cp;
833 struct nilfs_checkpoint *raw_cp;
834 int err;
835
836 err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
837 &raw_cp, &bh_cp);
838 if (unlikely(err)) {
1f5abe7e 839 WARN_ON(err == -EINVAL || err == -ENOENT);
9ff05123
RK
840 goto failed_ibh;
841 }
842 raw_cp->cp_snapshot_list.ssl_next = 0;
843 raw_cp->cp_snapshot_list.ssl_prev = 0;
844 raw_cp->cp_inodes_count =
e5f7f848 845 cpu_to_le64(atomic64_read(&sci->sc_root->inodes_count));
9ff05123 846 raw_cp->cp_blocks_count =
e5f7f848 847 cpu_to_le64(atomic64_read(&sci->sc_root->blocks_count));
9ff05123
RK
848 raw_cp->cp_nblk_inc =
849 cpu_to_le64(sci->sc_nblk_inc + sci->sc_nblk_this_inc);
850 raw_cp->cp_create = cpu_to_le64(sci->sc_seg_ctime);
851 raw_cp->cp_cno = cpu_to_le64(nilfs->ns_cno);
458c5b08 852
c96fa464
RK
853 if (test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
854 nilfs_checkpoint_clear_minor(raw_cp);
855 else
856 nilfs_checkpoint_set_minor(raw_cp);
857
e912a5b6
RK
858 nilfs_write_inode_common(sci->sc_root->ifile,
859 &raw_cp->cp_ifile_inode, 1);
9ff05123
RK
860 nilfs_cpfile_put_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
861 return 0;
862
863 failed_ibh:
864 return err;
865}
866
867static void nilfs_fill_in_file_bmap(struct inode *ifile,
868 struct nilfs_inode_info *ii)
869
870{
871 struct buffer_head *ibh;
872 struct nilfs_inode *raw_inode;
873
874 if (test_bit(NILFS_I_BMAP, &ii->i_state)) {
875 ibh = ii->i_bh;
876 BUG_ON(!ibh);
877 raw_inode = nilfs_ifile_map_inode(ifile, ii->vfs_inode.i_ino,
878 ibh);
879 nilfs_bmap_write(ii->i_bmap, raw_inode);
880 nilfs_ifile_unmap_inode(ifile, ii->vfs_inode.i_ino, ibh);
881 }
882}
883
e912a5b6 884static void nilfs_segctor_fill_in_file_bmap(struct nilfs_sc_info *sci)
9ff05123
RK
885{
886 struct nilfs_inode_info *ii;
887
888 list_for_each_entry(ii, &sci->sc_dirty_files, i_dirty) {
e912a5b6 889 nilfs_fill_in_file_bmap(sci->sc_root->ifile, ii);
9ff05123
RK
890 set_bit(NILFS_I_COLLECTED, &ii->i_state);
891 }
9ff05123
RK
892}
893
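/*
 * Layout note (descriptive, derived from the function below): the super
 * root block begins with a struct nilfs_super_root header, followed by
 * the on-disk inodes of the DAT, cpfile and sufile at
 * NILFS_SR_DAT_OFFSET(isz), NILFS_SR_CPFILE_OFFSET(isz) and
 * NILFS_SR_SUFILE_OFFSET(isz) respectively; everything beyond
 * NILFS_SR_BYTES(isz) up to the block size is zero-filled.
 */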
894static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
895 struct the_nilfs *nilfs)
896{
1e2b68bf
RK
897 struct buffer_head *bh_sr;
898 struct nilfs_super_root *raw_sr;
56eb5538 899 unsigned isz, srsz;
9ff05123 900
1e2b68bf
RK
901 bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;
902 raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
56eb5538
RK
903 isz = nilfs->ns_inode_size;
904 srsz = NILFS_SR_BYTES(isz);
1e2b68bf 905
56eb5538 906 raw_sr->sr_bytes = cpu_to_le16(srsz);
9ff05123
RK
907 raw_sr->sr_nongc_ctime
908 = cpu_to_le64(nilfs_doing_gc() ?
909 nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
910 raw_sr->sr_flags = 0;
911
365e215c 912 nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr +
3961f0e2
RK
913 NILFS_SR_DAT_OFFSET(isz), 1);
914 nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
915 NILFS_SR_CPFILE_OFFSET(isz), 1);
916 nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
917 NILFS_SR_SUFILE_OFFSET(isz), 1);
56eb5538 918 memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
9ff05123
RK
919}
920
921static void nilfs_redirty_inodes(struct list_head *head)
922{
923 struct nilfs_inode_info *ii;
924
925 list_for_each_entry(ii, head, i_dirty) {
926 if (test_bit(NILFS_I_COLLECTED, &ii->i_state))
927 clear_bit(NILFS_I_COLLECTED, &ii->i_state);
928 }
929}
930
931static void nilfs_drop_collected_inodes(struct list_head *head)
932{
933 struct nilfs_inode_info *ii;
934
935 list_for_each_entry(ii, head, i_dirty) {
936 if (!test_and_clear_bit(NILFS_I_COLLECTED, &ii->i_state))
937 continue;
938
b9f66140 939 clear_bit(NILFS_I_INODE_SYNC, &ii->i_state);
9ff05123
RK
940 set_bit(NILFS_I_UPDATED, &ii->i_state);
941 }
942}
943
9ff05123
RK
944static int nilfs_segctor_apply_buffers(struct nilfs_sc_info *sci,
945 struct inode *inode,
946 struct list_head *listp,
947 int (*collect)(struct nilfs_sc_info *,
948 struct buffer_head *,
949 struct inode *))
950{
951 struct buffer_head *bh, *n;
952 int err = 0;
953
954 if (collect) {
955 list_for_each_entry_safe(bh, n, listp, b_assoc_buffers) {
956 list_del_init(&bh->b_assoc_buffers);
957 err = collect(sci, bh, inode);
958 brelse(bh);
959 if (unlikely(err))
960 goto dispose_buffers;
961 }
962 return 0;
963 }
964
965 dispose_buffers:
966 while (!list_empty(listp)) {
0cc12838
RK
967 bh = list_first_entry(listp, struct buffer_head,
968 b_assoc_buffers);
9ff05123
RK
969 list_del_init(&bh->b_assoc_buffers);
970 brelse(bh);
971 }
972 return err;
973}
974
f30bf3e4
RK
975static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci)
976{
977 /* Remaining number of blocks within segment buffer */
978 return sci->sc_segbuf_nblocks -
979 (sci->sc_nblk_this_inc + sci->sc_curseg->sb_sum.nblocks);
980}
981
9ff05123
RK
982static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci,
983 struct inode *inode,
984 struct nilfs_sc_operations *sc_ops)
985{
986 LIST_HEAD(data_buffers);
987 LIST_HEAD(node_buffers);
f30bf3e4 988 int err;
9ff05123
RK
989
990 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
f30bf3e4
RK
991 size_t n, rest = nilfs_segctor_buffer_rest(sci);
992
993 n = nilfs_lookup_dirty_data_buffers(
994 inode, &data_buffers, rest + 1, 0, LLONG_MAX);
995 if (n > rest) {
996 err = nilfs_segctor_apply_buffers(
9ff05123 997 sci, inode, &data_buffers,
f30bf3e4
RK
998 sc_ops->collect_data);
999 BUG_ON(!err); /* always receive -E2BIG or true error */
9ff05123
RK
1000 goto break_or_fail;
1001 }
1002 }
1003 nilfs_lookup_dirty_node_buffers(inode, &node_buffers);
1004
1005 if (!(sci->sc_stage.flags & NILFS_CF_NODE)) {
1006 err = nilfs_segctor_apply_buffers(
1007 sci, inode, &data_buffers, sc_ops->collect_data);
1008 if (unlikely(err)) {
1009 /* dispose node list */
1010 nilfs_segctor_apply_buffers(
1011 sci, inode, &node_buffers, NULL);
1012 goto break_or_fail;
1013 }
1014 sci->sc_stage.flags |= NILFS_CF_NODE;
1015 }
1016 /* Collect node */
1017 err = nilfs_segctor_apply_buffers(
1018 sci, inode, &node_buffers, sc_ops->collect_node);
1019 if (unlikely(err))
1020 goto break_or_fail;
1021
1022 nilfs_bmap_lookup_dirty_buffers(NILFS_I(inode)->i_bmap, &node_buffers);
1023 err = nilfs_segctor_apply_buffers(
1024 sci, inode, &node_buffers, sc_ops->collect_bmap);
1025 if (unlikely(err))
1026 goto break_or_fail;
1027
1028 nilfs_segctor_end_finfo(sci, inode);
1029 sci->sc_stage.flags &= ~NILFS_CF_NODE;
1030
1031 break_or_fail:
1032 return err;
1033}
1034
1035static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
1036 struct inode *inode)
1037{
1038 LIST_HEAD(data_buffers);
f30bf3e4
RK
1039 size_t n, rest = nilfs_segctor_buffer_rest(sci);
1040 int err;
9ff05123 1041
f30bf3e4
RK
1042 n = nilfs_lookup_dirty_data_buffers(inode, &data_buffers, rest + 1,
1043 sci->sc_dsync_start,
1044 sci->sc_dsync_end);
1045
1046 err = nilfs_segctor_apply_buffers(sci, inode, &data_buffers,
1047 nilfs_collect_file_data);
1048 if (!err) {
9ff05123 1049 nilfs_segctor_end_finfo(sci, inode);
f30bf3e4
RK
1050 BUG_ON(n > rest);
1051 /* always receive -E2BIG or true error if n > rest */
1052 }
9ff05123
RK
1053 return err;
1054}
1055
1056static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
1057{
e3154e97 1058 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
9ff05123
RK
1059 struct list_head *head;
1060 struct nilfs_inode_info *ii;
071cb4b8 1061 size_t ndone;
9ff05123
RK
1062 int err = 0;
1063
1064 switch (sci->sc_stage.scnt) {
1065 case NILFS_ST_INIT:
1066 /* Pre-processes */
1067 sci->sc_stage.flags = 0;
1068
1069 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags)) {
1070 sci->sc_nblk_inc = 0;
1071 sci->sc_curseg->sb_sum.flags = NILFS_SS_LOGBGN;
1072 if (mode == SC_LSEG_DSYNC) {
1073 sci->sc_stage.scnt = NILFS_ST_DSYNC;
1074 goto dsync_mode;
1075 }
1076 }
1077
1078 sci->sc_stage.dirty_file_ptr = NULL;
1079 sci->sc_stage.gc_inode_ptr = NULL;
1080 if (mode == SC_FLUSH_DAT) {
1081 sci->sc_stage.scnt = NILFS_ST_DAT;
1082 goto dat_stage;
1083 }
1084 sci->sc_stage.scnt++; /* Fall through */
1085 case NILFS_ST_GC:
1086 if (nilfs_doing_gc()) {
1087 head = &sci->sc_gc_inodes;
1088 ii = list_prepare_entry(sci->sc_stage.gc_inode_ptr,
1089 head, i_dirty);
1090 list_for_each_entry_continue(ii, head, i_dirty) {
1091 err = nilfs_segctor_scan_file(
1092 sci, &ii->vfs_inode,
1093 &nilfs_sc_file_ops);
1094 if (unlikely(err)) {
1095 sci->sc_stage.gc_inode_ptr = list_entry(
1096 ii->i_dirty.prev,
1097 struct nilfs_inode_info,
1098 i_dirty);
1099 goto break_or_fail;
1100 }
1101 set_bit(NILFS_I_COLLECTED, &ii->i_state);
1102 }
1103 sci->sc_stage.gc_inode_ptr = NULL;
1104 }
1105 sci->sc_stage.scnt++; /* Fall through */
1106 case NILFS_ST_FILE:
1107 head = &sci->sc_dirty_files;
1108 ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head,
1109 i_dirty);
1110 list_for_each_entry_continue(ii, head, i_dirty) {
1111 clear_bit(NILFS_I_DIRTY, &ii->i_state);
1112
1113 err = nilfs_segctor_scan_file(sci, &ii->vfs_inode,
1114 &nilfs_sc_file_ops);
1115 if (unlikely(err)) {
1116 sci->sc_stage.dirty_file_ptr =
1117 list_entry(ii->i_dirty.prev,
1118 struct nilfs_inode_info,
1119 i_dirty);
1120 goto break_or_fail;
1121 }
1122 /* sci->sc_stage.dirty_file_ptr = NILFS_I(inode); */
1123 /* XXX: required ? */
1124 }
1125 sci->sc_stage.dirty_file_ptr = NULL;
1126 if (mode == SC_FLUSH_FILE) {
1127 sci->sc_stage.scnt = NILFS_ST_DONE;
1128 return 0;
1129 }
9ff05123
RK
1130 sci->sc_stage.scnt++;
1131 sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED;
1132 /* Fall through */
1133 case NILFS_ST_IFILE:
e912a5b6 1134 err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile,
9ff05123
RK
1135 &nilfs_sc_file_ops);
1136 if (unlikely(err))
1137 break;
1138 sci->sc_stage.scnt++;
1139 /* Creating a checkpoint */
1140 err = nilfs_segctor_create_checkpoint(sci);
1141 if (unlikely(err))
1142 break;
1143 /* Fall through */
1144 case NILFS_ST_CPFILE:
1145 err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile,
1146 &nilfs_sc_file_ops);
1147 if (unlikely(err))
1148 break;
1149 sci->sc_stage.scnt++; /* Fall through */
1150 case NILFS_ST_SUFILE:
071cb4b8
RK
1151 err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs,
1152 sci->sc_nfreesegs, &ndone);
1153 if (unlikely(err)) {
1154 nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1155 sci->sc_freesegs, ndone,
1156 NULL);
9ff05123 1157 break;
071cb4b8
RK
1158 }
1159 sci->sc_stage.flags |= NILFS_CF_SUFREED;
1160
9ff05123
RK
1161 err = nilfs_segctor_scan_file(sci, nilfs->ns_sufile,
1162 &nilfs_sc_file_ops);
1163 if (unlikely(err))
1164 break;
1165 sci->sc_stage.scnt++; /* Fall through */
1166 case NILFS_ST_DAT:
1167 dat_stage:
365e215c 1168 err = nilfs_segctor_scan_file(sci, nilfs->ns_dat,
9ff05123
RK
1169 &nilfs_sc_dat_ops);
1170 if (unlikely(err))
1171 break;
1172 if (mode == SC_FLUSH_DAT) {
1173 sci->sc_stage.scnt = NILFS_ST_DONE;
1174 return 0;
1175 }
1176 sci->sc_stage.scnt++; /* Fall through */
1177 case NILFS_ST_SR:
1178 if (mode == SC_LSEG_SR) {
1179 /* Appending a super root */
1180 err = nilfs_segctor_add_super_root(sci);
1181 if (unlikely(err))
1182 break;
1183 }
1184 /* End of a logical segment */
1185 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1186 sci->sc_stage.scnt = NILFS_ST_DONE;
1187 return 0;
1188 case NILFS_ST_DSYNC:
1189 dsync_mode:
1190 sci->sc_curseg->sb_sum.flags |= NILFS_SS_SYNDT;
f30bf3e4 1191 ii = sci->sc_dsync_inode;
9ff05123
RK
1192 if (!test_bit(NILFS_I_BUSY, &ii->i_state))
1193 break;
1194
1195 err = nilfs_segctor_scan_file_dsync(sci, &ii->vfs_inode);
1196 if (unlikely(err))
1197 break;
9ff05123
RK
1198 sci->sc_curseg->sb_sum.flags |= NILFS_SS_LOGEND;
1199 sci->sc_stage.scnt = NILFS_ST_DONE;
1200 return 0;
1201 case NILFS_ST_DONE:
1202 return 0;
1203 default:
1204 BUG();
1205 }
1206
1207 break_or_fail:
1208 return err;
1209}
1210
a694291a
RK
1211/**
1212 * nilfs_segctor_begin_construction - setup segment buffer to make a new log
1213 * @sci: nilfs_sc_info
1214 * @nilfs: nilfs object
1215 */
9ff05123
RK
1216static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
1217 struct the_nilfs *nilfs)
1218{
a694291a 1219 struct nilfs_segment_buffer *segbuf, *prev;
9ff05123 1220 __u64 nextnum;
a694291a 1221 int err, alloc = 0;
9ff05123 1222
a694291a
RK
1223 segbuf = nilfs_segbuf_new(sci->sc_super);
1224 if (unlikely(!segbuf))
1225 return -ENOMEM;
9ff05123 1226
a694291a
RK
1227 if (list_empty(&sci->sc_write_logs)) {
1228 nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
1229 nilfs->ns_pseg_offset, nilfs);
1230 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1231 nilfs_shift_to_next_segment(nilfs);
1232 nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
1233 }
9ff05123 1234
a694291a
RK
1235 segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
1236 nextnum = nilfs->ns_nextnum;
1237
1238 if (nilfs->ns_segnum == nilfs->ns_nextnum)
1239 /* Start from the head of a new full segment */
1240 alloc++;
1241 } else {
1242 /* Continue logs */
1243 prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1244 nilfs_segbuf_map_cont(segbuf, prev);
1245 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
1246 nextnum = prev->sb_nextnum;
1247
1248 if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
1249 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
1250 segbuf->sb_sum.seg_seq++;
1251 alloc++;
1252 }
9ff05123 1253 }
9ff05123 1254
61a189e9 1255 err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
a694291a
RK
1256 if (err)
1257 goto failed;
9ff05123 1258
a694291a 1259 if (alloc) {
cece5520 1260 err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
a694291a
RK
1261 if (err)
1262 goto failed;
1263 }
9ff05123
RK
1264 nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
1265
a694291a
RK
1266 BUG_ON(!list_empty(&sci->sc_segbufs));
1267 list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
1268 sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
cece5520 1269 return 0;
a694291a
RK
1270
1271 failed:
1272 nilfs_segbuf_free(segbuf);
1273 return err;
9ff05123
RK
1274}
1275
1276static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
1277 struct the_nilfs *nilfs, int nadd)
1278{
e29df395 1279 struct nilfs_segment_buffer *segbuf, *prev;
9ff05123
RK
1280 struct inode *sufile = nilfs->ns_sufile;
1281 __u64 nextnextnum;
1282 LIST_HEAD(list);
1283 int err, ret, i;
1284
1285 prev = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
1286 /*
1287 * Since the segment specified with nextnum might be allocated during
1288 * the previous construction, the buffer including its segusage may
1289 * not be dirty. The following call ensures that the buffer is dirty
1290 * and will pin the buffer in memory until the sufile is written.
1291 */
61a189e9 1292 err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
9ff05123
RK
1293 if (unlikely(err))
1294 return err;
1295
1296 for (i = 0; i < nadd; i++) {
1297 /* extend segment info */
1298 err = -ENOMEM;
1299 segbuf = nilfs_segbuf_new(sci->sc_super);
1300 if (unlikely(!segbuf))
1301 goto failed;
1302
1303 /* map this buffer to region of segment on-disk */
cece5520 1304 nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
9ff05123
RK
1305 sci->sc_segbuf_nblocks += segbuf->sb_rest_blocks;
1306
1307 /* allocate the next next full segment */
1308 err = nilfs_sufile_alloc(sufile, &nextnextnum);
1309 if (unlikely(err))
1310 goto failed_segbuf;
1311
1312 segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq + 1;
1313 nilfs_segbuf_set_next_segnum(segbuf, nextnextnum, nilfs);
1314
1315 list_add_tail(&segbuf->sb_list, &list);
1316 prev = segbuf;
1317 }
0935db74 1318 list_splice_tail(&list, &sci->sc_segbufs);
9ff05123
RK
1319 return 0;
1320
1321 failed_segbuf:
1322 nilfs_segbuf_free(segbuf);
1323 failed:
e29df395 1324 list_for_each_entry(segbuf, &list, sb_list) {
9ff05123 1325 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1326 WARN_ON(ret); /* never fails */
9ff05123 1327 }
e29df395 1328 nilfs_destroy_logs(&list);
9ff05123
RK
1329 return err;
1330}
1331
a694291a
RK
1332static void nilfs_free_incomplete_logs(struct list_head *logs,
1333 struct the_nilfs *nilfs)
9ff05123 1334{
a694291a
RK
1335 struct nilfs_segment_buffer *segbuf, *prev;
1336 struct inode *sufile = nilfs->ns_sufile;
9284ad2a 1337 int ret;
9ff05123 1338
a694291a 1339 segbuf = NILFS_FIRST_SEGBUF(logs);
9ff05123 1340 if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
a694291a 1341 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1342 WARN_ON(ret); /* never fails */
9ff05123 1343 }
9284ad2a 1344 if (atomic_read(&segbuf->sb_err)) {
9ff05123
RK
1345 /* Case 1: The first segment failed */
1346 if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
1347 /* Case 1a: Partial segment appended into an existing
1348 segment */
1349 nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start,
1350 segbuf->sb_fseg_end);
1351 else /* Case 1b: New full segment */
1352 set_nilfs_discontinued(nilfs);
9ff05123
RK
1353 }
1354
a694291a
RK
1355 prev = segbuf;
1356 list_for_each_entry_continue(segbuf, logs, sb_list) {
1357 if (prev->sb_nextnum != segbuf->sb_nextnum) {
1358 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1359 WARN_ON(ret); /* never fails */
1360 }
9284ad2a
RK
1361 if (atomic_read(&segbuf->sb_err) &&
1362 segbuf->sb_segnum != nilfs->ns_nextnum)
1363 /* Case 2: extended segment (!= next) failed */
a694291a
RK
1364 nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
1365 prev = segbuf;
9ff05123 1366 }
9ff05123
RK
1367}
1368
1369static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
1370 struct inode *sufile)
1371{
1372 struct nilfs_segment_buffer *segbuf;
9ff05123
RK
1373 unsigned long live_blocks;
1374 int ret;
1375
1376 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
9ff05123
RK
1377 live_blocks = segbuf->sb_sum.nblocks +
1378 (segbuf->sb_pseg_start - segbuf->sb_fseg_start);
071ec54d
RK
1379 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1380 live_blocks,
1381 sci->sc_seg_ctime);
1382 WARN_ON(ret); /* always succeed because the segusage is dirty */
9ff05123
RK
1383 }
1384}
1385
a694291a 1386static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
9ff05123
RK
1387{
1388 struct nilfs_segment_buffer *segbuf;
9ff05123
RK
1389 int ret;
1390
a694291a 1391 segbuf = NILFS_FIRST_SEGBUF(logs);
071ec54d
RK
1392 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1393 segbuf->sb_pseg_start -
1394 segbuf->sb_fseg_start, 0);
1395 WARN_ON(ret); /* always succeed because the segusage is dirty */
9ff05123 1396
a694291a 1397 list_for_each_entry_continue(segbuf, logs, sb_list) {
071ec54d
RK
1398 ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
1399 0, 0);
1f5abe7e 1400 WARN_ON(ret); /* always succeed */
9ff05123
RK
1401 }
1402}
1403
1404static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
1405 struct nilfs_segment_buffer *last,
1406 struct inode *sufile)
1407{
e29df395 1408 struct nilfs_segment_buffer *segbuf = last;
9ff05123
RK
1409 int ret;
1410
e29df395 1411 list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
9ff05123
RK
1412 sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
1413 ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
1f5abe7e 1414 WARN_ON(ret);
9ff05123 1415 }
e29df395 1416 nilfs_truncate_logs(&sci->sc_segbufs, last);
9ff05123
RK
1417}
1418
1419
1420static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
1421 struct the_nilfs *nilfs, int mode)
1422{
1423 struct nilfs_cstage prev_stage = sci->sc_stage;
1424 int err, nadd = 1;
1425
1426 /* Collection retry loop */
1427 for (;;) {
9ff05123
RK
1428 sci->sc_nblk_this_inc = 0;
1429 sci->sc_curseg = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
1430
1431 err = nilfs_segctor_reset_segment_buffer(sci);
1432 if (unlikely(err))
1433 goto failed;
1434
1435 err = nilfs_segctor_collect_blocks(sci, mode);
1436 sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
1437 if (!err)
1438 break;
1439
1440 if (unlikely(err != -E2BIG))
1441 goto failed;
1442
1443 /* The current segment is filled up */
1444 if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
1445 break;
1446
2d8428ac
RK
1447 nilfs_clear_logs(&sci->sc_segbufs);
1448
071cb4b8
RK
1449 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1450 err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1451 sci->sc_freesegs,
1452 sci->sc_nfreesegs,
1453 NULL);
1454 WARN_ON(err); /* do not happen */
70f2fe3a 1455 sci->sc_stage.flags &= ~NILFS_CF_SUFREED;
071cb4b8 1456 }
70f2fe3a
AR
1457
1458 err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
1459 if (unlikely(err))
1460 return err;
1461
9ff05123
RK
1462 nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
1463 sci->sc_stage = prev_stage;
1464 }
1465 nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
1466 return 0;
1467
1468 failed:
1469 return err;
1470}
1471
1472static void nilfs_list_replace_buffer(struct buffer_head *old_bh,
1473 struct buffer_head *new_bh)
1474{
1475 BUG_ON(!list_empty(&new_bh->b_assoc_buffers));
1476
1477 list_replace_init(&old_bh->b_assoc_buffers, &new_bh->b_assoc_buffers);
1478 /* The caller must release old_bh */
1479}
1480
1481static int
1482nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci,
1483 struct nilfs_segment_buffer *segbuf,
1484 int mode)
1485{
1486 struct inode *inode = NULL;
1487 sector_t blocknr;
1488 unsigned long nfinfo = segbuf->sb_sum.nfinfo;
1489 unsigned long nblocks = 0, ndatablk = 0;
1490 struct nilfs_sc_operations *sc_op = NULL;
1491 struct nilfs_segsum_pointer ssp;
1492 struct nilfs_finfo *finfo = NULL;
1493 union nilfs_binfo binfo;
1494 struct buffer_head *bh, *bh_org;
1495 ino_t ino = 0;
1496 int err = 0;
1497
1498 if (!nfinfo)
1499 goto out;
1500
1501 blocknr = segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk;
1502 ssp.bh = NILFS_SEGBUF_FIRST_BH(&segbuf->sb_segsum_buffers);
1503 ssp.offset = sizeof(struct nilfs_segment_summary);
1504
1505 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
1e2b68bf 1506 if (bh == segbuf->sb_super_root)
9ff05123
RK
1507 break;
1508 if (!finfo) {
1509 finfo = nilfs_segctor_map_segsum_entry(
1510 sci, &ssp, sizeof(*finfo));
1511 ino = le64_to_cpu(finfo->fi_ino);
1512 nblocks = le32_to_cpu(finfo->fi_nblocks);
1513 ndatablk = le32_to_cpu(finfo->fi_ndatablk);
1514
aa405b1f 1515 inode = bh->b_page->mapping->host;
9ff05123
RK
1516
1517 if (mode == SC_LSEG_DSYNC)
1518 sc_op = &nilfs_sc_dsync_ops;
1519 else if (ino == NILFS_DAT_INO)
1520 sc_op = &nilfs_sc_dat_ops;
1521 else /* file blocks */
1522 sc_op = &nilfs_sc_file_ops;
1523 }
1524 bh_org = bh;
1525 get_bh(bh_org);
1526 err = nilfs_bmap_assign(NILFS_I(inode)->i_bmap, &bh, blocknr,
1527 &binfo);
1528 if (bh != bh_org)
1529 nilfs_list_replace_buffer(bh_org, bh);
1530 brelse(bh_org);
1531 if (unlikely(err))
1532 goto failed_bmap;
1533
1534 if (ndatablk > 0)
1535 sc_op->write_data_binfo(sci, &ssp, &binfo);
1536 else
1537 sc_op->write_node_binfo(sci, &ssp, &binfo);
1538
1539 blocknr++;
1540 if (--nblocks == 0) {
1541 finfo = NULL;
1542 if (--nfinfo == 0)
1543 break;
1544 } else if (ndatablk > 0)
1545 ndatablk--;
1546 }
1547 out:
1548 return 0;
1549
1550 failed_bmap:
9ff05123
RK
1551 return err;
1552}
1553
1554static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
1555{
1556 struct nilfs_segment_buffer *segbuf;
1557 int err;
1558
1559 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1560 err = nilfs_segctor_update_payload_blocknr(sci, segbuf, mode);
1561 if (unlikely(err))
1562 return err;
1563 nilfs_segbuf_fill_in_segsum(segbuf);
1564 }
1565 return 0;
1566}
1567
1cb2d38c 1568static void nilfs_begin_page_io(struct page *page)
9ff05123
RK
1569{
1570 if (!page || PageWriteback(page))
1571 /* For split b-tree node pages, this function may be called
1572 more than once.  We ignore the second and later calls by this check. */
1cb2d38c 1573 return;
9ff05123
RK
1574
1575 lock_page(page);
1576 clear_page_dirty_for_io(page);
1577 set_page_writeback(page);
1578 unlock_page(page);
9ff05123
RK
1579}
1580
1cb2d38c 1581static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
9ff05123
RK
1582{
1583 struct nilfs_segment_buffer *segbuf;
1584 struct page *bd_page = NULL, *fs_page = NULL;
9ff05123 1585
9ff05123
RK
1586 list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
1587 struct buffer_head *bh;
1588
1589 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1590 b_assoc_buffers) {
7f42ec39 1591 set_buffer_async_write(bh);
9ff05123
RK
1592 if (bh->b_page != bd_page) {
1593 if (bd_page) {
1594 lock_page(bd_page);
1595 clear_page_dirty_for_io(bd_page);
1596 set_page_writeback(bd_page);
1597 unlock_page(bd_page);
1598 }
1599 bd_page = bh->b_page;
1600 }
1601 }
1602
1603 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1604 b_assoc_buffers) {
7f42ec39 1605 set_buffer_async_write(bh);
1e2b68bf 1606 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1607 if (bh->b_page != bd_page) {
1608 lock_page(bd_page);
1609 clear_page_dirty_for_io(bd_page);
1610 set_page_writeback(bd_page);
1611 unlock_page(bd_page);
1612 bd_page = bh->b_page;
1613 }
1614 break;
1615 }
1616 if (bh->b_page != fs_page) {
1cb2d38c 1617 nilfs_begin_page_io(fs_page);
9ff05123
RK
1618 fs_page = bh->b_page;
1619 }
1620 }
1621 }
1622 if (bd_page) {
1623 lock_page(bd_page);
1624 clear_page_dirty_for_io(bd_page);
1625 set_page_writeback(bd_page);
1626 unlock_page(bd_page);
1627 }
1cb2d38c 1628 nilfs_begin_page_io(fs_page);
9ff05123
RK
1629}
1630
1631static int nilfs_segctor_write(struct nilfs_sc_info *sci,
9c965bac 1632 struct the_nilfs *nilfs)
9ff05123 1633{
d1c6b72a 1634 int ret;
9ff05123 1635
d1c6b72a 1636 ret = nilfs_write_logs(&sci->sc_segbufs, nilfs);
a694291a
RK
1637 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
1638 return ret;
9ff05123
RK
1639}
1640
9ff05123
RK
1641static void nilfs_end_page_io(struct page *page, int err)
1642{
1643 if (!page)
1644 return;
1645
a9777845 1646 if (buffer_nilfs_node(page_buffers(page)) && !PageWriteback(page)) {
8227b297
RK
1647 /*
1648 * For b-tree node pages, this function may be called twice
1649 * or more because they might be split in a segment.
1650 */
a9777845
RK
1651 if (PageDirty(page)) {
1652 /*
1653 * For pages holding split b-tree node buffers, dirty
1654 * flag on the buffers may be cleared discretely.
1655 * In that case, the page is once redirtied for
1656 * remaining buffers, and it must be cancelled if
1657 * all the buffers get cleaned later.
1658 */
1659 lock_page(page);
1660 if (nilfs_page_buffers_clean(page))
1661 __nilfs_clear_page_dirty(page);
1662 unlock_page(page);
1663 }
9ff05123 1664 return;
a9777845 1665 }
9ff05123 1666
1cb2d38c
RK
1667 if (!err) {
1668 if (!nilfs_page_buffers_clean(page))
1669 __set_page_dirty_nobuffers(page);
1670 ClearPageError(page);
1671 } else {
1672 __set_page_dirty_nobuffers(page);
1673 SetPageError(page);
9ff05123 1674 }
1cb2d38c
RK
1675
1676 end_page_writeback(page);
9ff05123
RK
1677}
1678
1cb2d38c 1679static void nilfs_abort_logs(struct list_head *logs, int err)
9ff05123
RK
1680{
1681 struct nilfs_segment_buffer *segbuf;
1682 struct page *bd_page = NULL, *fs_page = NULL;
a694291a 1683 struct buffer_head *bh;
9ff05123 1684
a694291a
RK
1685 if (list_empty(logs))
1686 return;
9ff05123 1687
a694291a 1688 list_for_each_entry(segbuf, logs, sb_list) {
9ff05123
RK
1689 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1690 b_assoc_buffers) {
7f42ec39 1691 clear_buffer_async_write(bh);
9ff05123
RK
1692 if (bh->b_page != bd_page) {
1693 if (bd_page)
1694 end_page_writeback(bd_page);
1695 bd_page = bh->b_page;
1696 }
1697 }
1698
1699 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1700 b_assoc_buffers) {
7f42ec39 1701 clear_buffer_async_write(bh);
1e2b68bf 1702 if (bh == segbuf->sb_super_root) {
9ff05123
RK
1703 if (bh->b_page != bd_page) {
1704 end_page_writeback(bd_page);
1705 bd_page = bh->b_page;
1706 }
1707 break;
1708 }
1709 if (bh->b_page != fs_page) {
1710 nilfs_end_page_io(fs_page, err);
9ff05123
RK
1711 fs_page = bh->b_page;
1712 }
1713 }
1714 }
1715 if (bd_page)
1716 end_page_writeback(bd_page);
1717
1718 nilfs_end_page_io(fs_page, err);
a694291a
RK
1719}
1720
1721static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
1722 struct the_nilfs *nilfs, int err)
1723{
1724 LIST_HEAD(logs);
1725 int ret;
1726
1727 list_splice_tail_init(&sci->sc_write_logs, &logs);
1728 ret = nilfs_wait_on_logs(&logs);
1cb2d38c 1729 nilfs_abort_logs(&logs, ret ? : err);
a694291a
RK
1730
1731 list_splice_tail_init(&sci->sc_segbufs, &logs);
1732 nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
1733 nilfs_free_incomplete_logs(&logs, nilfs);
a694291a
RK
1734
1735 if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
1736 ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
1737 sci->sc_freesegs,
1738 sci->sc_nfreesegs,
1739 NULL);
1740 WARN_ON(ret); /* do not happen */
1741 }
1742
1743 nilfs_destroy_logs(&logs);
9ff05123
RK
1744}
1745
1746static void nilfs_set_next_segment(struct the_nilfs *nilfs,
1747 struct nilfs_segment_buffer *segbuf)
1748{
1749 nilfs->ns_segnum = segbuf->sb_segnum;
1750 nilfs->ns_nextnum = segbuf->sb_nextnum;
1751 nilfs->ns_pseg_offset = segbuf->sb_pseg_start - segbuf->sb_fseg_start
1752 + segbuf->sb_sum.nblocks;
1753 nilfs->ns_seg_seq = segbuf->sb_sum.seg_seq;
1754 nilfs->ns_ctime = segbuf->sb_sum.ctime;
1755}
1756
1757static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
1758{
1759 struct nilfs_segment_buffer *segbuf;
1760 struct page *bd_page = NULL, *fs_page = NULL;
e3154e97 1761 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1e2b68bf 1762 int update_sr = false;
9ff05123 1763
a694291a 1764 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
9ff05123
RK
1765 struct buffer_head *bh;
1766
1767 list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
1768 b_assoc_buffers) {
1769 set_buffer_uptodate(bh);
1770 clear_buffer_dirty(bh);
7f42ec39 1771 clear_buffer_async_write(bh);
9ff05123
RK
1772 if (bh->b_page != bd_page) {
1773 if (bd_page)
1774 end_page_writeback(bd_page);
1775 bd_page = bh->b_page;
1776 }
1777 }
1778 /*
1779 * We assume that the buffers which belong to the same page
1780 * continue over the buffer list.
1781 * Under this assumption, the last BHs of pages is
1782 * identifiable by the discontinuity of bh->b_page
1783 * (page != fs_page).
1784 *
1785 * For B-tree node blocks, however, this assumption is not
1786 * guaranteed. The cleanup code of B-tree node pages needs
1787 * special care.
1788 */
1789 list_for_each_entry(bh, &segbuf->sb_payload_buffers,
1790 b_assoc_buffers) {
1791 set_buffer_uptodate(bh);
1792 clear_buffer_dirty(bh);
1793 clear_buffer_async_write(bh);
1794 clear_buffer_delay(bh);
1795 clear_buffer_nilfs_volatile(bh);
1796 clear_buffer_nilfs_redirected(bh);
1797 if (bh == segbuf->sb_super_root) {
1798 if (bh->b_page != bd_page) {
1799 end_page_writeback(bd_page);
1800 bd_page = bh->b_page;
1801 }
1802 update_sr = true;
1803 break;
1804 }
1805 if (bh->b_page != fs_page) {
1806 nilfs_end_page_io(fs_page, 0);
1807 fs_page = bh->b_page;
1808 }
1809 }
1810
1811 if (!nilfs_segbuf_simplex(segbuf)) {
1812 if (segbuf->sb_sum.flags & NILFS_SS_LOGBGN) {
1813 set_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1814 sci->sc_lseg_stime = jiffies;
1815 }
1816 if (segbuf->sb_sum.flags & NILFS_SS_LOGEND)
1817 clear_bit(NILFS_SC_UNCLOSED, &sci->sc_flags);
1818 }
1819 }
1820 /*
1821 * Since pages may continue over multiple segment buffers,
1822 * end of the last page must be checked outside of the loop.
1823 */
1824 if (bd_page)
1825 end_page_writeback(bd_page);
1826
1827 nilfs_end_page_io(fs_page, 0);
1828
1829 nilfs_drop_collected_inodes(&sci->sc_dirty_files);
1830
1831 if (nilfs_doing_gc())
1832 nilfs_drop_collected_inodes(&sci->sc_gc_inodes);
1833 else
1834 nilfs->ns_nongc_ctime = sci->sc_seg_ctime;
1835
1836 sci->sc_nblk_inc += sci->sc_nblk_this_inc;
1837
1838 segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
1839 nilfs_set_next_segment(nilfs, segbuf);
1840
1841 if (update_sr) {
1842 nilfs->ns_flushed_device = 0;
1843 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start,
1844 segbuf->sb_sum.seg_seq, nilfs->ns_cno++);
1845
1846 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
1847 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1848 set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1849 nilfs_segctor_clear_metadata_dirty(sci);
1850 } else
1851 clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
1852}
1853
1854static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
1855{
1856 int ret;
1857
1858 ret = nilfs_wait_on_logs(&sci->sc_write_logs);
1859 if (!ret) {
1860 nilfs_segctor_complete_write(sci);
1861 nilfs_destroy_logs(&sci->sc_write_logs);
1862 }
1863 return ret;
1864}
1865
1866static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
1867 struct the_nilfs *nilfs)
1868{
1869 struct nilfs_inode_info *ii, *n;
1870 struct inode *ifile = sci->sc_root->ifile;
1871
1872 spin_lock(&nilfs->ns_inode_lock);
1873 retry:
1874 list_for_each_entry_safe(ii, n, &nilfs->ns_dirty_files, i_dirty) {
1875 if (!ii->i_bh) {
1876 struct buffer_head *ibh;
1877 int err;
1878
1879 spin_unlock(&nilfs->ns_inode_lock);
1880 err = nilfs_ifile_get_inode_block(
1881 ifile, ii->vfs_inode.i_ino, &ibh);
1882 if (unlikely(err)) {
1883 nilfs_warning(sci->sc_super, __func__,
1884 "failed to get inode block.\n");
1885 return err;
1886 }
1887 mark_buffer_dirty(ibh);
1888 nilfs_mdt_mark_dirty(ifile);
1889 spin_lock(&nilfs->ns_inode_lock);
1890 if (likely(!ii->i_bh))
1891 ii->i_bh = ibh;
1892 else
1893 brelse(ibh);
1894 goto retry;
1895 }
1896
1897 clear_bit(NILFS_I_QUEUED, &ii->i_state);
1898 set_bit(NILFS_I_BUSY, &ii->i_state);
1899 list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
1900 }
1901 spin_unlock(&nilfs->ns_inode_lock);
1902
1903 return 0;
1904}
1905
1906static void nilfs_segctor_drop_written_files(struct nilfs_sc_info *sci,
1907 struct the_nilfs *nilfs)
1908 {
1909 struct nilfs_inode_info *ii, *n;
1910 int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
1911 int defer_iput = false;
1912
1913 spin_lock(&nilfs->ns_inode_lock);
1914 list_for_each_entry_safe(ii, n, &sci->sc_dirty_files, i_dirty) {
1915 if (!test_and_clear_bit(NILFS_I_UPDATED, &ii->i_state) ||
1916 test_bit(NILFS_I_DIRTY, &ii->i_state))
1917 continue;
1918
1919 clear_bit(NILFS_I_BUSY, &ii->i_state);
1920 brelse(ii->i_bh);
1921 ii->i_bh = NULL;
1922 list_del_init(&ii->i_dirty);
1923 if (!ii->vfs_inode.i_nlink || during_mount) {
1924 /*
1925 * Defer calling iput() to avoid deadlocks if
1926 * i_nlink == 0 or mount is not yet finished.
1927 */
1928 list_add_tail(&ii->i_dirty, &sci->sc_iput_queue);
1929 defer_iput = true;
1930 } else {
1931 spin_unlock(&nilfs->ns_inode_lock);
1932 iput(&ii->vfs_inode);
1933 spin_lock(&nilfs->ns_inode_lock);
1934 }
1935 }
1936 spin_unlock(&nilfs->ns_inode_lock);
1937
1938 if (defer_iput)
1939 schedule_work(&sci->sc_iput_work);
1940}
1941
1942/*
1943 * Main procedure of segment constructor
1944 */
1945static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
1946{
1947 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
1948 int err;
1949
1950 sci->sc_stage.scnt = NILFS_ST_INIT;
6c43f410 1951 sci->sc_cno = nilfs->ns_cno;
1952
1953 err = nilfs_segctor_collect_dirty_files(sci, nilfs);
1954 if (unlikely(err))
1955 goto out;
1956
1957 if (nilfs_test_metadata_dirty(nilfs, sci->sc_root))
1958 set_bit(NILFS_SC_DIRTY, &sci->sc_flags);
1959
1960 if (nilfs_segctor_clean(sci))
1961 goto out;
1962
1963 do {
1964 sci->sc_stage.flags &= ~NILFS_CF_HISTORY_MASK;
1965
1966 err = nilfs_segctor_begin_construction(sci, nilfs);
1967 if (unlikely(err))
1968 goto out;
1969
1970 /* Update time stamp */
1971 sci->sc_seg_ctime = get_seconds();
1972
1973 err = nilfs_segctor_collect(sci, nilfs, mode);
1974 if (unlikely(err))
1975 goto failed;
1976
1977 /* Avoid empty segment */
1978 if (sci->sc_stage.scnt == NILFS_ST_DONE &&
1979 nilfs_segbuf_empty(sci->sc_curseg)) {
1980 nilfs_segctor_abort_construction(sci, nilfs, 1);
1981 goto out;
1982 }
1983
1984 err = nilfs_segctor_assign(sci, mode);
1985 if (unlikely(err))
1986 goto failed;
1987
1988 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
1989 nilfs_segctor_fill_in_file_bmap(sci);
1990
1991 if (mode == SC_LSEG_SR &&
1992 sci->sc_stage.scnt >= NILFS_ST_CPFILE) {
1993 err = nilfs_segctor_fill_in_checkpoint(sci);
1994 if (unlikely(err))
1995 goto failed_to_write;
1996
1997 nilfs_segctor_fill_in_super_root(sci, nilfs);
1998 }
1999 nilfs_segctor_update_segusage(sci, nilfs->ns_sufile);
2000
2001 /* Write partial segments */
2002 nilfs_segctor_prepare_write(sci);
2003
2004 nilfs_add_checksums_on_logs(&sci->sc_segbufs,
2005 nilfs->ns_crc_seed);
2006
2007 err = nilfs_segctor_write(sci, nilfs);
2008 if (unlikely(err))
2009 goto failed_to_write;
2010
2011 if (sci->sc_stage.scnt == NILFS_ST_DONE ||
2012 nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
2013 /*
2014 * At this point, we avoid double buffering
2015 * for blocksize < pagesize because page dirty
2016 * flag is turned off during write and dirty
2017 * buffers are not properly collected for
2018 * pages crossing over segments.
2019 */
2020 err = nilfs_segctor_wait(sci);
2021 if (err)
2022 goto failed_to_write;
2023 }
2024 } while (sci->sc_stage.scnt != NILFS_ST_DONE);
2025
2026 out:
2027 nilfs_segctor_drop_written_files(sci, nilfs);
2028 return err;
2029
2030 failed_to_write:
2031 if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
2032 nilfs_redirty_inodes(&sci->sc_dirty_files);
2033
2034 failed:
2035 if (nilfs_doing_gc())
2036 nilfs_redirty_inodes(&sci->sc_gc_inodes);
2037 nilfs_segctor_abort_construction(sci, nilfs, err);
2038 goto out;
2039}
2040
2041/**
2042 * nilfs_segctor_start_timer - set timer of background write
2043 * @sci: nilfs_sc_info
2044 *
2045 * If the timer has already been set, it ignores the new request.
2046 * This function MUST be called within a section locking the segment
2047 * semaphore.
2048 */
2049static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
2050{
2051 spin_lock(&sci->sc_state_lock);
2052 if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
2053 sci->sc_timer.expires = jiffies + sci->sc_interval;
2054 add_timer(&sci->sc_timer);
2055 sci->sc_state |= NILFS_SEGCTOR_COMMIT;
2056 }
2057 spin_unlock(&sci->sc_state_lock);
2058}
2059
2060static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
2061{
2062 spin_lock(&sci->sc_state_lock);
2063 if (!(sci->sc_flush_request & (1 << bn))) {
2064 unsigned long prev_req = sci->sc_flush_request;
2065
2066 sci->sc_flush_request |= (1 << bn);
2067 if (!prev_req)
2068 wake_up(&sci->sc_wait_daemon);
2069 }
2070 spin_unlock(&sci->sc_state_lock);
2071}
2072
2073/**
2074 * nilfs_flush_segment - trigger a segment construction for resource control
2075 * @sb: super block
2076 * @ino: inode number of the file to be flushed out.
2077 */
2078void nilfs_flush_segment(struct super_block *sb, ino_t ino)
2079{
2080 struct the_nilfs *nilfs = sb->s_fs_info;
2081 struct nilfs_sc_info *sci = nilfs->ns_writer;
2082
2083 if (!sci || nilfs_doing_construction())
2084 return;
2085 nilfs_segctor_do_flush(sci, NILFS_MDT_INODE(sb, ino) ? ino : 0);
2086 /* assign bit 0 to data files */
2087}
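
/*
 * Illustrative sketch, not part of the original source: a write path
 * could use nilfs_flush_segment() to nudge segctord once dirty blocks
 * pile up.  The helper name, the use of NILFS_SC_DEFAULT_WATERMARK as
 * the threshold, and the exact trigger condition are assumptions made
 * for this example only.
 */
static inline void example_flush_on_pressure(struct super_block *sb,
					     struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	/* Ask segctord for a non-blocking flush; nilfs_flush_segment()
	   maps regular files to bit 0 and metadata files to their
	   inode-number bit. */
	if (atomic_read(&nilfs->ns_ndirtyblks) > NILFS_SC_DEFAULT_WATERMARK)
		nilfs_flush_segment(sb, inode->i_ino);
}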
2088
2089struct nilfs_segctor_wait_request {
2090 wait_queue_t wq;
2091 __u32 seq;
2092 int err;
2093 atomic_t done;
2094};
2095
2096static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
2097{
2098 struct nilfs_segctor_wait_request wait_req;
2099 int err = 0;
2100
2101 spin_lock(&sci->sc_state_lock);
2102 init_wait(&wait_req.wq);
2103 wait_req.err = 0;
2104 atomic_set(&wait_req.done, 0);
2105 wait_req.seq = ++sci->sc_seq_request;
2106 spin_unlock(&sci->sc_state_lock);
2107
2108 init_waitqueue_entry(&wait_req.wq, current);
2109 add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
2110 set_current_state(TASK_INTERRUPTIBLE);
2111 wake_up(&sci->sc_wait_daemon);
2112
2113 for (;;) {
2114 if (atomic_read(&wait_req.done)) {
2115 err = wait_req.err;
2116 break;
2117 }
2118 if (!signal_pending(current)) {
2119 schedule();
2120 continue;
2121 }
2122 err = -ERESTARTSYS;
2123 break;
2124 }
2125 finish_wait(&sci->sc_wait_request, &wait_req.wq);
2126 return err;
2127}
2128
2129static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
2130{
2131 struct nilfs_segctor_wait_request *wrq, *n;
2132 unsigned long flags;
2133
2134 spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
2135 list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
2136 wq.task_list) {
2137 if (!atomic_read(&wrq->done) &&
2138 nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
2139 wrq->err = err;
2140 atomic_set(&wrq->done, 1);
2141 }
2142 if (atomic_read(&wrq->done)) {
2143 wrq->wq.func(&wrq->wq,
2144 TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
2145 0, NULL);
2146 }
2147 }
2148 spin_unlock_irqrestore(&sci->sc_wait_request.lock, flags);
2149}
2150
2151/**
2152 * nilfs_construct_segment - construct a logical segment
2153 * @sb: super block
2154 *
2155 * Return Value: On success, 0 is returned. On errors, one of the following
2156 * negative error codes is returned.
2157 *
2158 * %-EROFS - Read only filesystem.
2159 *
2160 * %-EIO - I/O error
2161 *
2162 * %-ENOSPC - No space left on device (only in a panic state).
2163 *
2164 * %-ERESTARTSYS - Interrupted.
2165 *
2166 * %-ENOMEM - Insufficient memory available.
2167 */
2168int nilfs_construct_segment(struct super_block *sb)
2169{
2170 struct the_nilfs *nilfs = sb->s_fs_info;
2171 struct nilfs_sc_info *sci = nilfs->ns_writer;
2172 struct nilfs_transaction_info *ti;
2173 int err;
2174
2175 if (!sci)
2176 return -EROFS;
2177
2178 /* A call inside transactions causes a deadlock. */
2179 BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
2180
2181 err = nilfs_segctor_sync(sci);
2182 return err;
2183}
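
/*
 * Illustrative sketch, not part of the original source: a sync-style
 * caller is assumed to force a full log write through
 * nilfs_construct_segment() and to treat a read-only mount as "nothing
 * to do".  The helper name is hypothetical.
 */
static int example_force_checkpoint(struct super_block *sb)
{
	int err;

	/* Queues a request to segctord and waits for the construction
	   to complete (see nilfs_segctor_sync() above). */
	err = nilfs_construct_segment(sb);
	if (err == -EROFS)
		err = 0;	/* no log writer on a read-only mount */
	return err;
}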
2184
2185/**
2186 * nilfs_construct_dsync_segment - construct a data-only logical segment
2187 * @sb: super block
2188 * @inode: inode whose data blocks should be written out
2189 * @start: start byte offset
2190 * @end: end byte offset (inclusive)
2191 *
2192 * Return Value: On success, 0 is returned. On errors, one of the following
2193 * negative error codes is returned.
2194 *
2195 * %-EROFS - Read only filesystem.
2196 *
2197 * %-EIO - I/O error
2198 *
2199 * %-ENOSPC - No space left on device (only in a panic state).
2200 *
2201 * %-ERESTARTSYS - Interrupted.
2202 *
2203 * %-ENOMEM - Insufficient memory available.
2204 */
2205int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
2206 loff_t start, loff_t end)
2207 {
2208 struct the_nilfs *nilfs = sb->s_fs_info;
2209 struct nilfs_sc_info *sci = nilfs->ns_writer;
2210 struct nilfs_inode_info *ii;
2211 struct nilfs_transaction_info ti;
2212 int err = 0;
2213
2214 if (!sci)
2215 return -EROFS;
2216
2217 nilfs_transaction_lock(sb, &ti, 0);
2218
2219 ii = NILFS_I(inode);
2220 if (test_bit(NILFS_I_INODE_SYNC, &ii->i_state) ||
2221 nilfs_test_opt(nilfs, STRICT_ORDER) ||
2222 test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2223 nilfs_discontinued(nilfs)) {
2224 nilfs_transaction_unlock(sb);
2225 err = nilfs_segctor_sync(sci);
2226 return err;
2227 }
2228
2229 spin_lock(&nilfs->ns_inode_lock);
2230 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
2231 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
2232 spin_unlock(&nilfs->ns_inode_lock);
2233 nilfs_transaction_unlock(sb);
2234 return 0;
2235 }
2236 spin_unlock(&nilfs->ns_inode_lock);
2237 sci->sc_dsync_inode = ii;
2238 sci->sc_dsync_start = start;
2239 sci->sc_dsync_end = end;
2240
2241 err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC);
2242 if (!err)
2243 nilfs->ns_flushed_device = 0;
2244
2245 nilfs_transaction_unlock(sb);
2246 return err;
2247}
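
/*
 * Illustrative sketch, not part of the original source: an fsync-style
 * path could restrict the log to a file's data blocks when only data
 * integrity is requested, and fall back to a full construction
 * otherwise.  The wrapper and its datasync convention are assumptions.
 */
static int example_fsync_range(struct inode *inode, loff_t start,
			       loff_t end, int datasync)
{
	struct super_block *sb = inode->i_sb;

	if (datasync)
		/* Data-only log for [start, end]; the callee may still
		   fall back to a regular construction. */
		return nilfs_construct_dsync_segment(sb, inode, start, end);
	return nilfs_construct_segment(sb);
}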
2248
2249#define FLUSH_FILE_BIT (0x1) /* data file only */
2250#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
2251
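/*
 * Example of how these bits are used (explanatory note, not original
 * source text): nilfs_flush_segment() records a request for a regular
 * file as bit 0 (FLUSH_FILE_BIT) and a request for a metadata file as
 * bit "ino", so a pending DAT flush appears as FLUSH_DAT_BIT in
 * sc_flush_request.  nilfs_segctor_flush_mode() then selects
 * SC_FLUSH_FILE or SC_FLUSH_DAT while only the corresponding bit is
 * set and the checkpoint-frequency window has not expired, and falls
 * back to a full SC_LSEG_SR construction otherwise.
 */
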
2252/**
2253 * nilfs_segctor_accept - record accepted sequence count of log-write requests
2254 * @sci: segment constructor object
2255 */
2256static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
2257 {
2258 spin_lock(&sci->sc_state_lock);
2259 sci->sc_seq_accepted = sci->sc_seq_request;
2260 spin_unlock(&sci->sc_state_lock);
2261 del_timer_sync(&sci->sc_timer);
2262}
2263
2264/**
2265 * nilfs_segctor_notify - notify the result of request to caller threads
2266 * @sci: segment constructor object
2267 * @mode: mode of log forming
2268 * @err: error code to be notified
2269 */
2270static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
9ff05123
RK
2271{
2272 /* Clear requests (even when the construction failed) */
2273 spin_lock(&sci->sc_state_lock);
2274
2275 if (mode == SC_LSEG_SR) {
2276 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
2277 sci->sc_seq_done = sci->sc_seq_accepted;
2278 nilfs_segctor_wakeup(sci, err);
2279 sci->sc_flush_request = 0;
2280 } else {
2281 if (mode == SC_FLUSH_FILE)
2282 sci->sc_flush_request &= ~FLUSH_FILE_BIT;
2283 else if (mode == SC_FLUSH_DAT)
2284 sci->sc_flush_request &= ~FLUSH_DAT_BIT;
2285
2286 /* re-enable timer if checkpoint creation was not done */
2287 if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2288 time_before(jiffies, sci->sc_timer.expires))
2289 add_timer(&sci->sc_timer);
2290 }
2291 spin_unlock(&sci->sc_state_lock);
2292}
2293
2294/**
2295 * nilfs_segctor_construct - form logs and write them to disk
2296 * @sci: segment constructor object
2297 * @mode: mode of log forming
2298 */
2299static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
9ff05123 2300{
e3154e97 2301 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
d26493b6 2302 struct nilfs_super_block **sbp;
9ff05123
RK
2303 int err = 0;
2304
2305 nilfs_segctor_accept(sci);
2306
2307 if (nilfs_discontinued(nilfs))
2308 mode = SC_LSEG_SR;
2309 if (!nilfs_segctor_confirm(sci))
2310 err = nilfs_segctor_do_construct(sci, mode);
2311
2312 if (likely(!err)) {
2313 if (mode != SC_FLUSH_DAT)
2314 atomic_set(&nilfs->ns_ndirtyblks, 0);
2315 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) &&
2316 nilfs_discontinued(nilfs)) {
2317 down_write(&nilfs->ns_sem);
2318 err = -EIO;
2319 sbp = nilfs_prepare_super(sci->sc_super,
2320 nilfs_sb_will_flip(nilfs));
2321 if (likely(sbp)) {
2322 nilfs_set_log_cursor(sbp[0], nilfs);
2323 err = nilfs_commit_super(sci->sc_super,
2324 NILFS_SB_COMMIT);
2325 }
2326 up_write(&nilfs->ns_sem);
2327 }
2328 }
2329
2330 nilfs_segctor_notify(sci, mode, err);
2331 return err;
2332}
2333
2334static void nilfs_construction_timeout(unsigned long data)
2335{
2336 struct task_struct *p = (struct task_struct *)data;
2337 wake_up_process(p);
2338}
2339
2340static void
2341nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2342{
2343 struct nilfs_inode_info *ii, *n;
2344
2345 list_for_each_entry_safe(ii, n, head, i_dirty) {
2346 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2347 continue;
2348 list_del_init(&ii->i_dirty);
2349 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2350 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2351 iput(&ii->vfs_inode);
2352 }
2353}
2354
2355int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
2356 void **kbufs)
2357 {
2358 struct the_nilfs *nilfs = sb->s_fs_info;
2359 struct nilfs_sc_info *sci = nilfs->ns_writer;
2360 struct nilfs_transaction_info ti;
2361 int err;
2362
2363 if (unlikely(!sci))
2364 return -EROFS;
2365
2366 nilfs_transaction_lock(sb, &ti, 1);
2367
2368 err = nilfs_mdt_save_to_shadow_map(nilfs->ns_dat);
2369 if (unlikely(err))
2370 goto out_unlock;
2371
2372 err = nilfs_ioctl_prepare_clean_segments(nilfs, argv, kbufs);
2373 if (unlikely(err)) {
2374 nilfs_mdt_restore_from_shadow_map(nilfs->ns_dat);
9ff05123 2375 goto out_unlock;
2376 }
2377
2378 sci->sc_freesegs = kbufs[4];
2379 sci->sc_nfreesegs = argv[4].v_nmembs;
2380 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
2381
2382 for (;;) {
2383 err = nilfs_segctor_construct(sci, SC_LSEG_SR);
2384 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes);
2385
2386 if (likely(!err))
2387 break;
2388
2389 nilfs_warning(sb, __func__,
2390 "segment construction failed. (err=%d)", err);
2391 set_current_state(TASK_INTERRUPTIBLE);
2392 schedule_timeout(sci->sc_interval);
2393 }
2394 if (nilfs_test_opt(nilfs, DISCARD)) {
2395 int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
2396 sci->sc_nfreesegs);
2397 if (ret) {
2398 printk(KERN_WARNING
2399 "NILFS warning: error %d on discard request, "
2400 "turning discards off for the device\n", ret);
2401 nilfs_clear_opt(nilfs, DISCARD);
2402 }
2403 }
2404
2405 out_unlock:
2406 sci->sc_freesegs = NULL;
2407 sci->sc_nfreesegs = 0;
2408 nilfs_mdt_clear_shadow_map(nilfs->ns_dat);
2409 nilfs_transaction_unlock(sb);
2410 return err;
2411}
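
/*
 * Illustrative sketch, not part of the original source: the cleaner
 * (GC) ioctl path is assumed to copy in the five nilfs_argv
 * descriptors and their payload buffers before delegating to
 * nilfs_clean_segments().  The caller name and the error reporting
 * are assumptions for the example.
 */
static int example_run_cleaner_pass(struct super_block *sb,
				    struct nilfs_argv *argv, void **kbufs)
{
	int ret;

	/* argv[4]/kbufs[4] carry the segment numbers to free, matching
	   the sc_freesegs/sc_nfreesegs assignment above. */
	ret = nilfs_clean_segments(sb, argv, kbufs);
	if (ret)
		printk(KERN_ERR "NILFS: cleaner pass failed (err=%d)\n", ret);
	return ret;
}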
2412
2413static void nilfs_segctor_thread_construct(struct nilfs_sc_info *sci, int mode)
2414{
2415 struct nilfs_transaction_info ti;
2416
2417 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2418 nilfs_segctor_construct(sci, mode);
2419
2420 /*
2421 * Unclosed segment should be retried. We do this using sc_timer.
2422 * Timeout of sc_timer will invoke complete construction which leads
2423 * to close the current logical segment.
2424 */
2425 if (test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags))
2426 nilfs_segctor_start_timer(sci);
2427
2428 nilfs_transaction_unlock(sci->sc_super);
2429}
2430
2431static void nilfs_segctor_do_immediate_flush(struct nilfs_sc_info *sci)
2432{
2433 int mode = 0;
2434 int err;
2435
2436 spin_lock(&sci->sc_state_lock);
2437 mode = (sci->sc_flush_request & FLUSH_DAT_BIT) ?
2438 SC_FLUSH_DAT : SC_FLUSH_FILE;
2439 spin_unlock(&sci->sc_state_lock);
2440
2441 if (mode) {
2442 err = nilfs_segctor_do_construct(sci, mode);
2443
2444 spin_lock(&sci->sc_state_lock);
2445 sci->sc_flush_request &= (mode == SC_FLUSH_FILE) ?
2446 ~FLUSH_FILE_BIT : ~FLUSH_DAT_BIT;
2447 spin_unlock(&sci->sc_state_lock);
2448 }
2449 clear_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
2450}
2451
2452static int nilfs_segctor_flush_mode(struct nilfs_sc_info *sci)
2453{
2454 if (!test_bit(NILFS_SC_UNCLOSED, &sci->sc_flags) ||
2455 time_before(jiffies, sci->sc_lseg_stime + sci->sc_mjcp_freq)) {
2456 if (!(sci->sc_flush_request & ~FLUSH_FILE_BIT))
2457 return SC_FLUSH_FILE;
2458 else if (!(sci->sc_flush_request & ~FLUSH_DAT_BIT))
2459 return SC_FLUSH_DAT;
2460 }
2461 return SC_LSEG_SR;
2462}
2463
2464/**
2465 * nilfs_segctor_thread - main loop of the segment constructor thread.
2466 * @arg: pointer to a struct nilfs_sc_info.
2467 *
2468 * nilfs_segctor_thread() initializes a timer and serves as a daemon
2469 * to execute segment constructions.
2470 */
2471static int nilfs_segctor_thread(void *arg)
2472{
2473 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg;
2474 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2475 int timeout = 0;
2476
2477 sci->sc_timer.data = (unsigned long)current;
2478 sci->sc_timer.function = nilfs_construction_timeout;
2479
2480 /* start sync. */
2481 sci->sc_task = current;
2482 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
2483 printk(KERN_INFO
2484 "segctord starting. Construction interval = %lu seconds, "
2485 "CP frequency < %lu seconds\n",
2486 sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
2487
2488 spin_lock(&sci->sc_state_lock);
2489 loop:
2490 for (;;) {
2491 int mode;
2492
2493 if (sci->sc_state & NILFS_SEGCTOR_QUIT)
2494 goto end_thread;
2495
2496 if (timeout || sci->sc_seq_request != sci->sc_seq_done)
2497 mode = SC_LSEG_SR;
2498 else if (!sci->sc_flush_request)
2499 break;
2500 else
2501 mode = nilfs_segctor_flush_mode(sci);
2502
2503 spin_unlock(&sci->sc_state_lock);
2504 nilfs_segctor_thread_construct(sci, mode);
2505 spin_lock(&sci->sc_state_lock);
2506 timeout = 0;
2507 }
2508
2509
2510 if (freezing(current)) {
2511 spin_unlock(&sci->sc_state_lock);
2512 try_to_freeze();
2513 spin_lock(&sci->sc_state_lock);
2514 } else {
2515 DEFINE_WAIT(wait);
2516 int should_sleep = 1;
2517
2518 prepare_to_wait(&sci->sc_wait_daemon, &wait,
2519 TASK_INTERRUPTIBLE);
2520
2521 if (sci->sc_seq_request != sci->sc_seq_done)
2522 should_sleep = 0;
2523 else if (sci->sc_flush_request)
2524 should_sleep = 0;
2525 else if (sci->sc_state & NILFS_SEGCTOR_COMMIT)
2526 should_sleep = time_before(jiffies,
2527 sci->sc_timer.expires);
2528
2529 if (should_sleep) {
2530 spin_unlock(&sci->sc_state_lock);
2531 schedule();
2532 spin_lock(&sci->sc_state_lock);
2533 }
2534 finish_wait(&sci->sc_wait_daemon, &wait);
2535 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
2536 time_after_eq(jiffies, sci->sc_timer.expires));
2537
2538 if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs))
2539 set_nilfs_discontinued(nilfs);
2540 }
2541 goto loop;
2542
2543 end_thread:
2544 spin_unlock(&sci->sc_state_lock);
2545
2546 /* end sync. */
2547 sci->sc_task = NULL;
2548 wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
2549 return 0;
2550}
2551
2552static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
2553{
2554 struct task_struct *t;
2555
2556 t = kthread_run(nilfs_segctor_thread, sci, "segctord");
2557 if (IS_ERR(t)) {
2558 int err = PTR_ERR(t);
2559
2560 printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
2561 err);
2562 return err;
2563 }
2564 wait_event(sci->sc_wait_task, sci->sc_task != NULL);
2565 return 0;
2566}
2567
2568static void nilfs_segctor_kill_thread(struct nilfs_sc_info *sci)
2569 __acquires(&sci->sc_state_lock)
2570 __releases(&sci->sc_state_lock)
2571{
2572 sci->sc_state |= NILFS_SEGCTOR_QUIT;
2573
2574 while (sci->sc_task) {
2575 wake_up(&sci->sc_wait_daemon);
2576 spin_unlock(&sci->sc_state_lock);
2577 wait_event(sci->sc_wait_task, sci->sc_task == NULL);
2578 spin_lock(&sci->sc_state_lock);
2579 }
2580}
2581
2582/*
2583 * Setup & clean-up functions
2584 */
2585 static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2586 struct nilfs_root *root)
2587 {
2588 struct the_nilfs *nilfs = sb->s_fs_info;
2589 struct nilfs_sc_info *sci;
2590
2591 sci = kzalloc(sizeof(*sci), GFP_KERNEL);
2592 if (!sci)
2593 return NULL;
2594
2595 sci->sc_super = sb;
2596
2597 nilfs_get_root(root);
2598 sci->sc_root = root;
2599
2600 init_waitqueue_head(&sci->sc_wait_request);
2601 init_waitqueue_head(&sci->sc_wait_daemon);
2602 init_waitqueue_head(&sci->sc_wait_task);
2603 spin_lock_init(&sci->sc_state_lock);
2604 INIT_LIST_HEAD(&sci->sc_dirty_files);
2605 INIT_LIST_HEAD(&sci->sc_segbufs);
2606 INIT_LIST_HEAD(&sci->sc_write_logs);
2607 INIT_LIST_HEAD(&sci->sc_gc_inodes);
2608 INIT_LIST_HEAD(&sci->sc_iput_queue);
2609 INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
2610 init_timer(&sci->sc_timer);
2611
2612 sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
2613 sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
2614 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2615
2616 if (nilfs->ns_interval)
2617 sci->sc_interval = HZ * nilfs->ns_interval;
2618 if (nilfs->ns_watermark)
2619 sci->sc_watermark = nilfs->ns_watermark;
2620 return sci;
2621}
2622
2623static void nilfs_segctor_write_out(struct nilfs_sc_info *sci)
2624{
2625 int ret, retrycount = NILFS_SC_CLEANUP_RETRY;
2626
2627 /* The segctord thread was stopped and its timer was removed.
2628 But some tasks remain. */
2629 do {
2630 struct nilfs_transaction_info ti;
2631
2632 nilfs_transaction_lock(sci->sc_super, &ti, 0);
2633 ret = nilfs_segctor_construct(sci, SC_LSEG_SR);
2634 nilfs_transaction_unlock(sci->sc_super);
2635
2636 flush_work(&sci->sc_iput_work);
2637
2638 } while (ret && retrycount-- > 0);
2639}
2640
2641/**
2642 * nilfs_segctor_destroy - destroy the segment constructor.
2643 * @sci: nilfs_sc_info
2644 *
2645 * nilfs_segctor_destroy() kills the segctord thread and frees
2646 * the nilfs_sc_info struct.
2647 * Caller must hold the segment semaphore.
2648 */
2649static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
2650{
2651 struct the_nilfs *nilfs = sci->sc_super->s_fs_info;
2652 int flag;
2653
2654 up_write(&nilfs->ns_segctor_sem);
2655
2656 spin_lock(&sci->sc_state_lock);
2657 nilfs_segctor_kill_thread(sci);
2658 flag = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) || sci->sc_flush_request
2659 || sci->sc_seq_request != sci->sc_seq_done);
2660 spin_unlock(&sci->sc_state_lock);
2661
2662 if (flush_work(&sci->sc_iput_work))
2663 flag = true;
2664
2665 if (flag || !nilfs_segctor_confirm(sci))
2666 nilfs_segctor_write_out(sci);
2667
2668 if (!list_empty(&sci->sc_dirty_files)) {
2669 nilfs_warning(sci->sc_super, __func__,
2670 "dirty file(s) after the final construction\n");
2671 nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
2672 }
2673
2674 if (!list_empty(&sci->sc_iput_queue)) {
2675 nilfs_warning(sci->sc_super, __func__,
2676 "iput queue is not empty\n");
2677 nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
2678 }
2679
2680 WARN_ON(!list_empty(&sci->sc_segbufs));
2681 WARN_ON(!list_empty(&sci->sc_write_logs));
2682
2683 nilfs_put_root(sci->sc_root);
2684
2685 down_write(&nilfs->ns_segctor_sem);
2686
2687 del_timer_sync(&sci->sc_timer);
2688 kfree(sci);
2689}
2690
2691/**
2692 * nilfs_attach_log_writer - attach log writer
2693 * @sb: super block instance
2694 * @root: root object of the current filesystem tree
2695 *
2696 * This allocates a log writer object, initializes it, and starts the
2697 * log writer.
2698 *
2699 * Return Value: On success, 0 is returned. On error, one of the following
2700 * negative error codes is returned.
2701 *
2702 * %-ENOMEM - Insufficient memory available.
2703 */
2704 int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
2705 {
2706 struct the_nilfs *nilfs = sb->s_fs_info;
2707 int err;
2708
2709 if (nilfs->ns_writer) {
2710 /*
2711 * This happens if the filesystem was remounted
2712 * read/write after nilfs_error degenerated it into a
2713 * read-only mount.
2714 */
2715 nilfs_detach_log_writer(sb);
2716 }
2717
2718 nilfs->ns_writer = nilfs_segctor_new(sb, root);
2719 if (!nilfs->ns_writer)
2720 return -ENOMEM;
2721
2722 err = nilfs_segctor_start_thread(nilfs->ns_writer);
2723 if (err) {
2724 kfree(nilfs->ns_writer);
2725 nilfs->ns_writer = NULL;
2726 }
2727 return err;
2728}
2729
2730/**
2731 * nilfs_detach_log_writer - destroy log writer
2732 * @sb: super block instance
9ff05123 2733 *
2734 * This kills log writer daemon, frees the log writer object, and
2735 * destroys list of dirty files.
9ff05123 2736 */
2737 void nilfs_detach_log_writer(struct super_block *sb)
2738 {
2739 struct the_nilfs *nilfs = sb->s_fs_info;
2740 LIST_HEAD(garbage_list);
2741
2742 down_write(&nilfs->ns_segctor_sem);
2743 if (nilfs->ns_writer) {
2744 nilfs_segctor_destroy(nilfs->ns_writer);
2745 nilfs->ns_writer = NULL;
2746 }
2747
2748 /* Force to free the list of dirty files */
2749 spin_lock(&nilfs->ns_inode_lock);
2750 if (!list_empty(&nilfs->ns_dirty_files)) {
2751 list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
2752 nilfs_warning(sb, __func__,
2753 "Hit dirty file after stopped log writer\n");
2754 }
2755 spin_unlock(&nilfs->ns_inode_lock);
2756 up_write(&nilfs->ns_segctor_sem);
2757
2758 nilfs_dispose_list(nilfs, &garbage_list, 1);
2759 }
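
/*
 * Illustrative sketch, not part of the original source: the log writer
 * is assumed to be attached once a writable mount has resolved its
 * root checkpoint, and detached while the super block is being torn
 * down or degraded to read-only.  The helper names are hypothetical.
 */
static int example_enable_writer(struct super_block *sb,
				 struct nilfs_root *root)
{
	/* Allocates the segment constructor and starts segctord. */
	return nilfs_attach_log_writer(sb, root);
}

static void example_disable_writer(struct super_block *sb)
{
	/* Stops segctord and disposes of any remaining dirty files. */
	nilfs_detach_log_writer(sb);
}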