/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>

struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	__u64 blknum = 0;
	int err = 0, ret;
	struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode));
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else { /* abnormal error */
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

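/*
 * For WB_SYNC_ALL writeback, flush this inode's data blocks to disk by
 * constructing a data-sync segment over the requested range; all other
 * writeback is left to the NILFS segment constructor.
 */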
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

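/*
 * Dirty the page's buffers and, if any buffer was newly dirtied, account
 * the page's blocks as dirty file data so that the segment constructor
 * will pick them up.
 */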
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_buffers(page);

	if (ret) {
		struct inode *inode = page->mapping->host;
		struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(sbi, inode, nr_dirty);
	}
	return ret;
}

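/*
 * Each write_begin/write_end pair runs inside a NILFS transaction; the
 * transaction opened here is committed in nilfs_write_end(), or aborted
 * below if block_write_begin() fails.
 */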
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;

		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

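/*
 * Direct I/O reads are handed straight to blockdev_direct_IO(); blocks
 * instantiated beyond i_size by a failed extending write are trimmed off
 * again afterwards.
 */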
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, nilfs_get_block, NULL);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.sync_page		= block_sync_page,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};

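/*
 * nilfs_new_inode() allocates a fresh inode in the ifile of the checkpoint
 * root that @dir belongs to and initializes it from the parent directory.
 */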
struct inode *nilfs_new_inode(struct inode *dir, int mode)
{
	struct super_block *sb = dir->i_sb;
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = NILFS_I(dir)->i_flags;
	if (S_ISLNK(mode))
		ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
	if (!S_ISDIR(mode))
		ii->i_flags &= ~NILFS_DIRSYNC_FL;

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	inode->i_nlink = 0;
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

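/*
 * Propagate the NILFS-specific flags kept in nilfs_inode_info->i_flags to
 * the generic VFS flags of the inode.
 */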
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & NILFS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & NILFS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & NILFS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
#ifndef NILFS_ATIME_DISABLE
	if (flags & NILFS_NOATIME_FL)
#endif
		inode->i_flags |= S_NOATIME;
	if (flags & NILFS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

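/*
 * Fill in a VFS inode from its on-disk nilfs_inode entry, including the
 * block mapping (bmap) for regular files, directories and symlinks.
 */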
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

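/*
 * Read the on-disk inode @ino from the ifile of @root and set up the inode
 * operations according to its file type.
 */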
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(sb);
	struct inode *dat = nilfs_dat_inode(sbi->s_nilfs);
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
	return err;
}

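/*
 * Comparison callback for iget5_locked()/ilookup5(): an inode matches only
 * if both the inode number and the checkpoint root agree; GC inodes must
 * additionally match the requested checkpoint number.
 */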
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

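/*
 * Copy the in-core inode fields back into the on-disk nilfs_inode entry;
 * the bmap is written out as well when @has_bmap is nonzero.
 */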
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

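/*
 * Truncate the block mapping from block @from upward, cutting at most
 * NILFS_MAX_TRUNCATE_BLOCKS per pass so that memory pressure can be
 * relaxed between passes.
 */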
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

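/*
 * Final disposal of an inode: truncate its bmap, remove its entry from the
 * ifile and drop the per-root inode count, all inside a transaction.
 */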
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail. */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);

	nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

 out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
{
	struct nilfs_root *root;

	if (flags & IPERM_FLAG_RCU)
		return -ECHILD;

	root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask, flags, NULL);
}

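/*
 * Look up the buffer head holding this inode's on-disk entry in the ifile
 * and cache it in ii->i_bh; a reference to the buffer is returned via @pbh.
 */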
int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode,
			   struct buffer_head **pbh)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&sbi->s_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&sbi->s_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&sbi->s_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&sbi->s_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&sbi->s_inode_lock);
	}
	return ret;
}

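/*
 * Account @nr_dirty dirty blocks to the filesystem and queue the inode on
 * the list of dirty files so that the segment constructor will write it out.
 */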
int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode,
			 unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&sbi->s_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(sbi->s_super, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&sbi->s_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_del(&ii->i_dirty);
		list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&sbi->s_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(sbi, inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	nilfs_mdt_mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

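/*
 * fiemap implementation: walk the requested range and report extents,
 * merging contiguous block runs and delayed-allocation extents.
 */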
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}