/*
 *  Daniel Pirkl <daniel.pirkl@email.cz>
 *  Charles University, Faculty of Mathematics and Physics
 *
 *  linux/fs/ext2/inode.c
 *
 *  Copyright (C) 1992, 1993, 1994, 1995
 *  Remy Card (card@masi.ibp.fr)
 *  Laboratoire MASI - Institut Blaise Pascal
 *  Universite Pierre et Marie Curie (Paris VI)
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie (sct@dcs.ed.ac.uk), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <asm/uaccess.h>

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>

#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
static int ufs_block_to_path(struct inode *inode, sector_t i_block, unsigned offsets[4])
{
	struct ufs_sb_private_info *uspi = UFS_SB(inode->i_sb)->s_uspi;
	int ptrs = uspi->s_apb;
	int ptrs_bits = uspi->s_apbshift;
	const long direct_blocks = UFS_NDADDR,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	UFSD("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks);
	if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ((i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = UFS_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = UFS_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = UFS_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ufs_warning(inode->i_sb, "ufs_block_to_path", "block > big");
	}
	return n;
}
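/*
 * The returned depth is the number of offsets filled in: 1 for a direct
 * block, 2 for a single-indirect path, and so on up to 4.  For
 * illustration (numbers assumed, not taken from a real superblock): with
 * ptrs == 2048 pointers per indirect block (ptrs_bits == 11), logical
 * block UFS_NDADDR + 5 maps to offsets[] = { UFS_IND_BLOCK, 5 } and a
 * depth of 2.
 */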
struct Indirect {
	struct buffer_head *bh;
	union {
		__fs32	key32;
		__fs64	key64;
	};
	void *p;
};

typedef struct Indirect Indirect;
static inline int grow_chain32(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs32 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key32 = *(__fs32 *)(to->p = v);
		for (p = from; p <= to && p->key32 == *(__fs32 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
static inline int grow_chain64(struct ufs_inode_info *ufsi,
			       struct buffer_head *bh, __fs64 *v,
			       Indirect *from, Indirect *to)
{
	Indirect *p;
	unsigned seq;

	to->bh = bh;
	do {
		seq = read_seqbegin(&ufsi->meta_lock);
		to->key64 = *(__fs64 *)(to->p = v);
		for (p = from; p <= to && p->key64 == *(__fs64 *)p->p; p++)
			;
	} while (read_seqretry(&ufsi->meta_lock, seq));
	return (p > to);
}
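/*
 * Both helpers extend the in-memory chain of Indirect entries by one link
 * under ufsi->meta_lock (a seqlock): the new key is sampled, and the loop
 * re-checks that every link from 'from' to 'to' still matches the pointer
 * it was read from.  A zero return means the on-disk chain changed under
 * us (e.g. a concurrent truncate) and the caller should restart the walk.
 */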
/*
 * Returns the location of the fragment from
 * the beginning of the filesystem.
 */

static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	u64 mask = (u64) uspi->s_apbmask >> uspi->s_fpbshift;
	int shift = uspi->s_apbshift - uspi->s_fpbshift;
	Indirect chain[4], *q = chain;
	unsigned *p;
	unsigned flags = UFS_SB(sb)->s_flags;
	u64 res = 0;

	UFSD(": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",
		uspi->s_fpbshift, uspi->s_apbmask,
		(unsigned long long)mask);

	if (depth == 0)
		goto no_block;

again:
	p = offsets;

	if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
		goto ufs2;

	if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
		goto changed;
	if (!q->key32)
		goto no_block;
	while (--depth) {
		__fs32 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs32_to_cpu(sb, q->key32) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs32 *)bh->b_data + (n & mask);
		if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key32)
			goto no_block;
	}
	res = fs32_to_cpu(sb, q->key32);
	goto found;

ufs2:
	if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
		goto changed;
	if (!q->key64)
		goto no_block;

	while (--depth) {
		__fs64 *ptr;
		struct buffer_head *bh;
		unsigned n = *p++;

		bh = sb_bread(sb, uspi->s_sbbase +
				  fs64_to_cpu(sb, q->key64) + (n >> shift));
		if (!bh)
			goto no_block;
		ptr = (__fs64 *)bh->b_data + (n & mask);
		if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
			goto changed;
		if (!q->key64)
			goto no_block;
	}
	res = fs64_to_cpu(sb, q->key64);
found:
	res += uspi->s_sbbase;
no_block:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	return res;

changed:
	while (q > chain) {
		brelse(q->bh);
		q--;
	}
	goto again;
}
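/*
 * Note on the shift/mask arithmetic above: an indirect block occupies
 * uspi->s_fpb fragments and sb_bread() reads one fragment at a time, so
 * for entry n of an indirect block, n >> shift selects the fragment that
 * holds the entry and n & mask is the index within that fragment.  The
 * final result is offset by uspi->s_sbbase, the base of the filesystem.
 */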
/**
 * ufs_inode_getfrag() - allocate new fragment(s)
 * @inode: pointer to inode
 * @fragment: number of `fragment' which holds the pointer
 *   to the newly allocated fragment(s)
 * @new_fragment: number of newly allocated fragment(s)
 * @required: how many fragment(s) we require
 * @err: set if something goes wrong
 * @phys: pointer to where we save the physical number of the newly allocated
 *   fragments, NULL if we are not allocating data (e.g. indirect blocks)
 * @new: set if we allocate a new block
 * @locked_page: for ufs_new_fragments()
 */
static struct buffer_head *
ufs_inode_getfrag(struct inode *inode, u64 fragment,
		  sector_t new_fragment, unsigned int required, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff, lastblockoff;
	u64 tmp, goal, lastfrag, block, lastblock;
	void *p, *p2;

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, required %u, "
	     "metadata %d\n", inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, required, !phys);

        /* TODO : to be done for write support
        if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
             goto ufs2;
         */

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);
	p = ufs_get_direct_data_ptr(uspi, ufsi, block);

	goal = 0;

	tmp = ufs_data_ptr_to_cpu(sb, p);

	lastfrag = ufsi->i_lastfrag;
	if (tmp && fragment < lastfrag) {
		if (!phys) {
			return sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
		} else {
			*phys = uspi->s_sbbase + tmp + blockoff;
			return NULL;
		}
	}

	lastblock = ufs_fragstoblks (lastfrag);
	lastblockoff = ufs_fragnum (lastfrag);
	/*
	 * We will extend file into new block beyond last allocated block
	 */
	if (lastblock < block) {
		/*
		 * We must reallocate last allocated block
		 */
		if (lastblockoff) {
			p2 = ufs_get_direct_data_ptr(uspi, ufsi, lastblock);
			tmp = ufs_new_fragments(inode, p2, lastfrag,
						ufs_data_ptr_to_cpu(sb, p2),
						uspi->s_fpb - lastblockoff,
						err, locked_page);
			if (!tmp)
				return NULL;
			lastfrag = ufsi->i_lastfrag;
		}
		tmp = ufs_data_ptr_to_cpu(sb,
					  ufs_get_direct_data_ptr(uspi, ufsi,
								  lastblock));
		if (tmp)
			goal = tmp + uspi->s_fpb;
		tmp = ufs_new_fragments (inode, p, fragment - blockoff,
					 goal, required + blockoff,
					 err,
					 phys != NULL ? locked_page : NULL);
	} else if (lastblock == block) {
	/*
	 * We will extend last allocated block
	 */
		tmp = ufs_new_fragments(inode, p, fragment -
					(blockoff - lastblockoff),
					ufs_data_ptr_to_cpu(sb, p),
					required + (blockoff - lastblockoff),
					err, phys != NULL ? locked_page : NULL);
	} else /* (lastblock > block) */ {
	/*
	 * We will allocate new block before last allocated block
	 */
		if (block) {
			tmp = ufs_data_ptr_to_cpu(sb,
						  ufs_get_direct_data_ptr(uspi, ufsi, block - 1));
			if (tmp)
				goal = tmp + uspi->s_fpb;
		}
		tmp = ufs_new_fragments(inode, p, fragment - blockoff,
					goal, uspi->s_fpb, err,
					phys != NULL ? locked_page : NULL);
	}
	if (!tmp)
		return NULL;

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		result = NULL;
		*err = 0;
		*new = 1;
	}

	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode))
		ufs_sync_inode (inode);
	mark_inode_dirty(inode);
	return result;

     /* This part : To be implemented ....
	Required only for writing, not required for READ-ONLY.
ufs2:

	u2_block = ufs_fragstoblks(fragment);
	u2_blockoff = ufs_fragnum(fragment);
	p = ufsi->i_u1.u2_i_data + block;
	goal = 0;

	tmp = fs32_to_cpu(sb, *p);
	lastfrag = ufsi->i_lastfrag;

     */
}
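/*
 * Note on the interface above: when @phys is NULL the caller wants the
 * buffer_head of an (indirect) metadata block and gets it via sb_getblk();
 * when @phys is non-NULL the caller is mapping file data, so only the
 * physical fragment number is reported back through *phys and *new is set
 * when a fresh block was allocated.  ufs_getfrag_block() below uses both
 * styles through its GET_INODE_... and GET_INDIRECT_... macros.
 */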
/**
 * ufs_inode_getblock() - allocate new block
 * @inode: pointer to inode
 * @bh: pointer to the block which holds a "pointer" to the newly allocated block
 * @fragment: number of `fragment' which holds the pointer
 *   to the newly allocated block
 * @new_fragment: number of the newly allocated fragment
 *   (the block will hold this fragment and also uspi->s_fpb-1)
 * @err: see ufs_inode_getfrag()
 * @phys: see ufs_inode_getfrag()
 * @new: see ufs_inode_getfrag()
 * @locked_page: see ufs_inode_getfrag()
 */
static struct buffer_head *
ufs_inode_getblock(struct inode *inode, struct buffer_head *bh,
		  u64 fragment, sector_t new_fragment, int *err,
		  long *phys, int *new, struct page *locked_page)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * result;
	unsigned blockoff;
	u64 tmp, goal, block;
	void *p;

	block = ufs_fragstoblks (fragment);
	blockoff = ufs_fragnum (fragment);

	UFSD("ENTER, ino %lu, fragment %llu, new_fragment %llu, metadata %d\n",
	     inode->i_ino, (unsigned long long)fragment,
	     (unsigned long long)new_fragment, !phys);

	result = NULL;
	if (!bh)
		goto out;
	if (!buffer_uptodate(bh)) {
		ll_rw_block (READ, 1, &bh);
		wait_on_buffer (bh);
		if (!buffer_uptodate(bh))
			goto out;
	}
	if (uspi->fs_magic == UFS2_MAGIC)
		p = (__fs64 *)bh->b_data + block;
	else
		p = (__fs32 *)bh->b_data + block;

	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (tmp) {
		if (!phys)
			result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
		else
			*phys = uspi->s_sbbase + tmp + blockoff;
		goto out;
	}

	if (block && (uspi->fs_magic == UFS2_MAGIC ?
		      (tmp = fs64_to_cpu(sb, ((__fs64 *)bh->b_data)[block-1])) :
		      (tmp = fs32_to_cpu(sb, ((__fs32 *)bh->b_data)[block-1]))))
		goal = tmp + uspi->s_fpb;
	else
		goal = bh->b_blocknr + uspi->s_fpb;
	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal,
				uspi->s_fpb, err, locked_page);
	if (!tmp)
		goto out;

	if (!phys) {
		result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
	} else {
		*phys = uspi->s_sbbase + tmp + blockoff;
		*new = 1;
	}

	mark_buffer_dirty(bh);
	if (IS_SYNC(inode))
		sync_dirty_buffer(bh);
	inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	brelse (bh);
	return result;
}
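/*
 * The goal heuristic above: if the previous slot in the same indirect
 * block already points at data, try to allocate the new block right
 * after it; otherwise fall back to allocating just past the indirect
 * block itself (bh->b_blocknr), keeping related blocks close on disk.
 */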
/*
 * ufs_getfrag_block() - `get_block_t' function, interface between UFS and
 * readpage, writepage and so on
 */

static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create)
{
	struct super_block * sb = inode->i_sb;
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi = sbi->s_uspi;
	struct buffer_head * bh;
	int err = 0, new = 0;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);
	unsigned long ptr,phys;
	u64 phys64 = 0;

	if (!create) {
		phys64 = ufs_frag_map(inode, offsets, depth);
		if (phys64) {
			phys64 += fragment & uspi->s_fpbmask;
			map_bh(bh_result, sb, phys64);
		}
		return 0;
	}

        /* This code entered only while writing ....? */

	mutex_lock(&UFS_I(inode)->truncate_mutex);

	UFSD("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment);
	if (fragment >
	    ((UFS_NDADDR + uspi->s_apb + uspi->s_2apb + uspi->s_3apb)
	     << uspi->s_fpbshift))
		goto abort_too_big;

	ptr = fragment;

	/*
	 * ok, these macros clean the logic up a bit and make
	 * it much more readable:
	 */
#define GET_INODE_DATABLOCK(x) \
	ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new,\
			  bh_result->b_page)
#define GET_INODE_PTR(x) \
	ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL,\
			  bh_result->b_page)
#define GET_INDIRECT_DATABLOCK(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, &phys, &new, bh_result->b_page)
#define GET_INDIRECT_PTR(x) \
	ufs_inode_getblock(inode, bh, x, fragment,	\
			  &err, NULL, NULL, NULL)

	if (ptr < UFS_NDIR_FRAGMENT) {
		bh = GET_INODE_DATABLOCK(ptr);
		goto out;
	}
	ptr -= UFS_NDIR_FRAGMENT;
	if (ptr < (1 << (uspi->s_apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_IND_FRAGMENT + (ptr >> uspi->s_apbshift));
		goto get_indirect;
	}
	ptr -= 1 << (uspi->s_apbshift + uspi->s_fpbshift);
	if (ptr < (1 << (uspi->s_2apbshift + uspi->s_fpbshift))) {
		bh = GET_INODE_PTR(UFS_DIND_FRAGMENT + (ptr >> uspi->s_2apbshift));
		goto get_double;
	}
	ptr -= 1 << (uspi->s_2apbshift + uspi->s_fpbshift);
	bh = GET_INODE_PTR(UFS_TIND_FRAGMENT + (ptr >> uspi->s_3apbshift));
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_2apbshift) & uspi->s_apbmask);
get_double:
	bh = GET_INDIRECT_PTR((ptr >> uspi->s_apbshift) & uspi->s_apbmask);
get_indirect:
	bh = GET_INDIRECT_DATABLOCK(ptr & uspi->s_apbmask);

#undef GET_INODE_DATABLOCK
#undef GET_INODE_PTR
#undef GET_INDIRECT_DATABLOCK
#undef GET_INDIRECT_PTR

out:
	if (err)
		goto abort;
	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, sb, phys);
abort:
	mutex_unlock(&UFS_I(inode)->truncate_mutex);

	return err;

abort_too_big:
	ufs_warning(sb, "ufs_get_block", "block > big");
	goto abort;
}
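/*
 * On the write path the file-relative fragment number is peeled level by
 * level: each "ptr -= ..." step subtracts the span covered by the
 * preceding indirection level, so what remains indexes the next level.
 * GET_INODE_PTR()/GET_INDIRECT_PTR() allocate whole blocks of indirect
 * metadata (phys == NULL), while GET_INODE_DATABLOCK() and
 * GET_INDIRECT_DATABLOCK() allocate the data fragment itself and report
 * it back through 'phys'/'new' for bh_result.
 */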
static int ufs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page,ufs_getfrag_block,wbc);
}

static int ufs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page,ufs_getfrag_block);
}

int ufs_prepare_chunk(struct page *page, loff_t pos, unsigned len)
{
	return __block_write_begin(page, pos, len, ufs_getfrag_block);
}
static void ufs_truncate_blocks(struct inode *);

static void ufs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ufs_truncate_blocks(inode);
	}
}

static int ufs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				ufs_getfrag_block);
	if (unlikely(ret))
		ufs_write_failed(mapping, pos + len);

	return ret;
}

static int ufs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (ret < len)
		ufs_write_failed(mapping, pos + len);
	return ret;
}
static sector_t ufs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping,block,ufs_getfrag_block);
}

const struct address_space_operations ufs_aops = {
	.readpage = ufs_readpage,
	.writepage = ufs_writepage,
	.write_begin = ufs_write_begin,
	.write_end = ufs_write_end,
	.bmap = ufs_bmap
};
static void ufs_set_inode_ops(struct inode *inode)
{
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ufs_file_inode_operations;
		inode->i_fop = &ufs_file_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ufs_dir_inode_operations;
		inode->i_fop = &ufs_dir_operations;
		inode->i_mapping->a_ops = &ufs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks) {
			inode->i_op = &ufs_fast_symlink_inode_operations;
			inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
		} else {
			inode->i_op = &ufs_symlink_inode_operations;
			inode->i_mapping->a_ops = &ufs_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
static int ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, ufs_get_inode_uid(sb, ufs_inode));
	i_gid_write(inode, ufs_get_inode_gid(sb, ufs_inode));

	inode->i_size = fs64_to_cpu(sb, ufs_inode->ui_size);
	inode->i_atime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_atime.tv_sec);
	inode->i_ctime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_ctime.tv_sec);
	inode->i_mtime.tv_sec = fs32_to_cpu(sb, ufs_inode->ui_mtime.tv_sec);
	inode->i_mtime.tv_nsec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_ctime.tv_nsec = 0;
	inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs_inode->ui_u2.ui_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
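/*
 * The i_u1 union mirrors the on-disk layout: for device nodes and for
 * regular files/directories that own blocks, ui_addr (the block address
 * array) is copied into i_data; otherwise the same bytes are treated as
 * the target of a fast (inline) symlink and NUL-terminated.
 */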
static int ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	umode_t mode;

	UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
	/*
	 * Copy data to the in-core inode.
	 */
	inode->i_mode = mode = fs16_to_cpu(sb, ufs2_inode->ui_mode);
	set_nlink(inode, fs16_to_cpu(sb, ufs2_inode->ui_nlink));
	if (inode->i_nlink == 0) {
		ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino);
		return -1;
	}

	/*
	 * Linux now has 32-bit uid and gid, so we can support EFT.
	 */
	i_uid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_uid));
	i_gid_write(inode, fs32_to_cpu(sb, ufs2_inode->ui_gid));

	inode->i_size = fs64_to_cpu(sb, ufs2_inode->ui_size);
	inode->i_atime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_atime);
	inode->i_ctime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_ctime);
	inode->i_mtime.tv_sec = fs64_to_cpu(sb, ufs2_inode->ui_mtime);
	inode->i_atime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_atimensec);
	inode->i_ctime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_ctimensec);
	inode->i_mtime.tv_nsec = fs32_to_cpu(sb, ufs2_inode->ui_mtimensec);
	inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
	inode->i_generation = fs32_to_cpu(sb, ufs2_inode->ui_gen);
	ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
	/*
	ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
	ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
	*/

	if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
		memcpy(ufsi->i_u1.u2_i_data, &ufs2_inode->ui_u2.ui_addr,
		       sizeof(ufs2_inode->ui_u2.ui_addr));
	} else {
		memcpy(ufsi->i_u1.i_symlink, ufs2_inode->ui_u2.ui_symlink,
		       sizeof(ufs2_inode->ui_u2.ui_symlink) - 1);
		ufsi->i_u1.i_symlink[sizeof(ufs2_inode->ui_u2.ui_symlink) - 1] = 0;
	}
	return 0;
}
struct inode *ufs_iget(struct super_block *sb, unsigned long ino)
{
	struct ufs_inode_info *ufsi;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;
	struct inode *inode;
	int err;

	UFSD("ENTER, ino %lu\n", ino);

	if (ino < UFS_ROOTINO || ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
			    ino);
		return ERR_PTR(-EIO);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ufsi = UFS_I(inode);

	bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
			    inode->i_ino);
		goto bad_inode;
	}
	if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		err = ufs2_read_inode(inode,
				      ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;

		err = ufs1_read_inode(inode,
				      ufs_inode + ufs_inotofsbo(inode->i_ino));
	}
	brelse(bh);
	if (err)
		goto bad_inode;

	ufsi->i_lastfrag =
		(inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
	ufsi->i_dir_start_lookup = 0;

	ufs_set_inode_ops(inode);

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(-EIO);
}
static void ufs1_update_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode));
	ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atime.tv_usec = 0;
	ufs_inode->ui_ctime.tv_sec = cpu_to_fs32(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctime.tv_usec = 0;
	ufs_inode->ui_mtime.tv_sec = cpu_to_fs32(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtime.tv_usec = 0;
	ufs_inode->ui_blocks = cpu_to_fs32(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if ((UFS_SB(sb)->s_flags & UFS_UID_MASK) == UFS_UID_EFT) {
		ufs_inode->ui_u3.ui_sun.ui_shadow = cpu_to_fs32(sb, ufsi->i_shadow);
		ufs_inode->ui_u3.ui_sun.ui_oeftflag = cpu_to_fs32(sb, ufsi->i_oeftflag);
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs_inode));
}
static void ufs2_update_inode(struct inode *inode, struct ufs2_inode *ufs_inode)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_inode_info *ufsi = UFS_I(inode);

	ufs_inode->ui_mode = cpu_to_fs16(sb, inode->i_mode);
	ufs_inode->ui_nlink = cpu_to_fs16(sb, inode->i_nlink);

	ufs_inode->ui_uid = cpu_to_fs32(sb, i_uid_read(inode));
	ufs_inode->ui_gid = cpu_to_fs32(sb, i_gid_read(inode));

	ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size);
	ufs_inode->ui_atime = cpu_to_fs64(sb, inode->i_atime.tv_sec);
	ufs_inode->ui_atimensec = cpu_to_fs32(sb, inode->i_atime.tv_nsec);
	ufs_inode->ui_ctime = cpu_to_fs64(sb, inode->i_ctime.tv_sec);
	ufs_inode->ui_ctimensec = cpu_to_fs32(sb, inode->i_ctime.tv_nsec);
	ufs_inode->ui_mtime = cpu_to_fs64(sb, inode->i_mtime.tv_sec);
	ufs_inode->ui_mtimensec = cpu_to_fs32(sb, inode->i_mtime.tv_nsec);

	ufs_inode->ui_blocks = cpu_to_fs64(sb, inode->i_blocks);
	ufs_inode->ui_flags = cpu_to_fs32(sb, ufsi->i_flags);
	ufs_inode->ui_gen = cpu_to_fs32(sb, inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* ufs_inode->ui_u2.ui_addr.ui_db[0] = cpu_to_fs32(sb, inode->i_rdev); */
		ufs_inode->ui_u2.ui_addr.ui_db[0] = ufsi->i_u1.u2_i_data[0];
	} else if (inode->i_blocks) {
		memcpy(&ufs_inode->ui_u2.ui_addr, ufsi->i_u1.u2_i_data,
		       sizeof(ufs_inode->ui_u2.ui_addr));
	} else {
		memcpy(&ufs_inode->ui_u2.ui_symlink, ufsi->i_u1.i_symlink,
		       sizeof(ufs_inode->ui_u2.ui_symlink));
	}

	if (!inode->i_nlink)
		memset (ufs_inode, 0, sizeof(struct ufs2_inode));
}
static int ufs_update_inode(struct inode * inode, int do_sync)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct buffer_head * bh;

	UFSD("ENTER, ino %lu\n", inode->i_ino);

	if (inode->i_ino < UFS_ROOTINO ||
	    inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
		ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
		return -1;
	}

	bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
	if (!bh) {
		ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
		return -1;
	}
	if (uspi->fs_magic == UFS2_MAGIC) {
		struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;

		ufs2_update_inode(inode,
				  ufs2_inode + ufs_inotofsbo(inode->i_ino));
	} else {
		struct ufs_inode *ufs_inode = (struct ufs_inode *) bh->b_data;

		ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
	}

	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);
	brelse (bh);

	UFSD("EXIT\n");
	return 0;
}

int ufs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return ufs_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

int ufs_sync_inode (struct inode *inode)
{
	return ufs_update_inode (inode, 1);
}
void ufs_evict_inode(struct inode * inode)
{
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode))
		want_delete = 1;

	truncate_inode_pages_final(&inode->i_data);
	if (want_delete) {
		inode->i_size = 0;
		if (inode->i_blocks)
			ufs_truncate_blocks(inode);
	}

	invalidate_inode_buffers(inode);
	clear_inode(inode);

	if (want_delete)
		ufs_free_inode(inode);
}
struct to_free {
	struct inode *inode;
	u64 to;
	unsigned count;
};

static inline void free_data(struct to_free *ctx, u64 from, unsigned count)
{
	if (ctx->count && ctx->to != from) {
		ufs_free_blocks(ctx->inode, ctx->to - ctx->count, ctx->count);
		ctx->count = 0;
	}
	ctx->count += count;
	ctx->to = from + count;
}
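/*
 * free_data() batches physically contiguous runs: as long as successive
 * calls pass fragments that continue where the previous block ended, the
 * run only grows; a discontiguous call (or the flush call
 * free_data(&ctx, 0, 0)) releases the accumulated run with a single
 * ufs_free_blocks() call.  For illustration (s_fpb == 8 assumed): freeing
 * the blocks starting at fragments 100, 108 and 116 ends up as one
 * ufs_free_blocks(inode, 100, 24).
 */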
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)
static void ufs_trunc_direct(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block * sb;
	struct ufs_sb_private_info * uspi;
	void *p;
	u64 frag1, frag2, frag3, frag4, block1, block2;
	struct to_free ctx = {.inode = inode};
	unsigned i;
	u64 tmp;

	UFSD("ENTER: ino %lu\n", inode->i_ino);

	sb = inode->i_sb;
	uspi = UFS_SB(sb)->s_uspi;

	frag1 = DIRECT_FRAGMENT;
	frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
	frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
	frag3 = frag4 & ~uspi->s_fpbmask;
	block1 = block2 = 0;
	if (frag2 > frag3) {
		frag2 = frag4;
		frag3 = frag4 = 0;
	} else if (frag2 < frag3) {
		block1 = ufs_fragstoblks (frag2);
		block2 = ufs_fragstoblks (frag3);
	}

	UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu,"
	     " frag3 %llu, frag4 %llu\n", inode->i_ino,
	     (unsigned long long)frag1, (unsigned long long)frag2,
	     (unsigned long long)block1, (unsigned long long)block2,
	     (unsigned long long)frag3, (unsigned long long)frag4);

	if (frag1 >= frag2)
		goto next1;

	/*
	 * Free first free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic (sb, "ufs_trunc_direct", "internal error");
	frag2 -= frag1;
	frag1 = ufs_fragnum (frag1);

	ufs_free_fragments(inode, tmp + frag1, frag2);

next1:
	/*
	 * Free whole blocks
	 */
	for (i = block1; i < block2; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		tmp = ufs_data_ptr_to_cpu(sb, p);
		if (!tmp)
			continue;
		write_seqlock(&ufsi->meta_lock);
		ufs_data_ptr_clear(uspi, p);
		write_sequnlock(&ufsi->meta_lock);

		free_data(&ctx, tmp, uspi->s_fpb);
	}

	free_data(&ctx, 0, 0);

	if (frag3 >= frag4)
		goto next3;

	/*
	 * Free last free fragments
	 */
	p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3));
	tmp = ufs_data_ptr_to_cpu(sb, p);
	if (!tmp)
		ufs_panic(sb, "ufs_truncate_direct", "internal error");
	frag4 = ufs_fragnum (frag4);
	write_seqlock(&ufsi->meta_lock);
	ufs_data_ptr_clear(uspi, p);
	write_sequnlock(&ufsi->meta_lock);

	ufs_free_fragments (inode, tmp, frag4);
next3:

	UFSD("EXIT: ino %lu\n", inode->i_ino);
}
static void free_full_branch(struct inode *inode, u64 ind_block, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	struct ufs_buffer_head *ubh = ubh_bread(sb, ind_block, uspi->s_bsize);
	unsigned i;

	if (!ubh)
		return;

	if (--depth) {
		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_full_branch(inode, block, depth);
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = 0; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block)
				free_data(&ctx, block, uspi->s_fpb);
		}
		free_data(&ctx, 0, 0);
	}

	ubh_bforget(ubh);
	ufs_free_blocks(inode, ind_block, uspi->s_fpb);
}
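/*
 * 'depth' counts the remaining indirection levels: while more than one
 * level is left, each entry names another indirect block and is recursed
 * into; at the last level the entries are data block addresses and are
 * released through the free_data() batching helper.  The indirect block
 * itself is freed last.
 */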
static void free_branch_tail(struct inode *inode, unsigned from, struct ufs_buffer_head *ubh, int depth)
{
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i;

	if (--depth) {
		for (i = from; i < uspi->s_apb ; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_full_branch(inode, block, depth);
			}
		}
	} else {
		struct to_free ctx = {.inode = inode};

		for (i = from; i < uspi->s_apb; i++) {
			void *p = ubh_get_data_ptr(uspi, ubh, i);
			u64 block = ufs_data_ptr_to_cpu(sb, p);
			if (block) {
				write_seqlock(&UFS_I(inode)->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&UFS_I(inode)->meta_lock);
				ubh_mark_buffer_dirty(ubh);
				free_data(&ctx, block, uspi->s_fpb);
			}
		}
		free_data(&ctx, 0, 0);
	}
	if (IS_SYNC(inode) && ubh_buffer_dirty(ubh))
		ubh_sync_block(ubh);
	ubh_brelse(ubh);
}
static int ufs_alloc_lastblock(struct inode *inode, loff_t size)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;
	struct address_space *mapping = inode->i_mapping;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned i, end;
	sector_t lastfrag;
	struct page *lastpage;
	struct buffer_head *bh;
	u64 phys64;

	lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift;

	if (!lastfrag)
		goto out;

	lastfrag--;

	lastpage = ufs_get_locked_page(mapping, lastfrag >>
				       (PAGE_CACHE_SHIFT - inode->i_blkbits));
	if (IS_ERR(lastpage)) {
		err = -EIO;
		goto out;
	}

	end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1);
	bh = page_buffers(lastpage);
	for (i = 0; i < end; ++i)
		bh = bh->b_this_page;

	err = ufs_getfrag_block(inode, lastfrag, bh, 1);

	if (unlikely(err))
		goto out_unlock;

	if (buffer_new(bh)) {
		clear_buffer_new(bh);
		unmap_underlying_metadata(bh->b_bdev,
					  bh->b_blocknr);
		/*
		 * we do not zeroize fragment, because of
		 * if it maped to hole, it already contains zeroes
		 */
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		set_page_dirty(lastpage);
	}

	if (lastfrag >= UFS_IND_FRAGMENT) {
		end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1;
		phys64 = bh->b_blocknr + 1;
		for (i = 0; i < end; ++i) {
			bh = sb_getblk(sb, i + phys64);
			lock_buffer(bh);
			memset(bh->b_data, 0, sb->s_blocksize);
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			unlock_buffer(bh);
			sync_dirty_buffer(bh);
			brelse(bh);
		}
	}
out_unlock:
	ufs_put_locked_page(lastpage);
out:
	return err;
}
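/*
 * Called from ufs_truncate() before the size change: it makes sure the
 * fragment that will contain the new end-of-file is actually mapped
 * (allocating it if needed) and, when that fragment lies in the indirect
 * range and therefore sits in a full block, zeroes the remaining
 * fragments of that block on disk.
 */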
static void __ufs_truncate_blocks(struct inode *inode)
{
	struct ufs_inode_info *ufsi = UFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
	unsigned offsets[4];
	int depth = ufs_block_to_path(inode, DIRECT_BLOCK, offsets);
	int depth2;
	unsigned i;
	struct ufs_buffer_head *ubh[3];
	void *p;
	u64 block;

	if (!depth)
		return;

	/* find the last non-zero in offsets[] */
	for (depth2 = depth - 1; depth2; depth2--)
		if (offsets[depth2])
			break;

	mutex_lock(&ufsi->truncate_mutex);
	if (depth == 1) {
		ufs_trunc_direct(inode);
		offsets[0] = UFS_IND_BLOCK;
	} else {
		/* get the blocks that should be partially emptied */
		p = ufs_get_direct_data_ptr(uspi, ufsi, offsets[0]);
		for (i = 0; i < depth2; i++) {
			offsets[i]++;	/* next branch is fully freed */
			block = ufs_data_ptr_to_cpu(sb, p);
			if (!block)
				break;
			ubh[i] = ubh_bread(sb, block, uspi->s_bsize);
			if (!ubh[i]) {
				write_seqlock(&ufsi->meta_lock);
				ufs_data_ptr_clear(uspi, p);
				write_sequnlock(&ufsi->meta_lock);
				break;
			}
			p = ubh_get_data_ptr(uspi, ubh[i], offsets[i + 1]);
		}
		while (i--)
			free_branch_tail(inode, offsets[i + 1], ubh[i], depth - i - 1);
	}
	for (i = offsets[0]; i <= UFS_TIND_BLOCK; i++) {
		p = ufs_get_direct_data_ptr(uspi, ufsi, i);
		block = ufs_data_ptr_to_cpu(sb, p);
		if (block) {
			write_seqlock(&ufsi->meta_lock);
			ufs_data_ptr_clear(uspi, p);
			write_sequnlock(&ufsi->meta_lock);
			free_full_branch(inode, block, i - UFS_IND_BLOCK + 1);
		}
	}
	ufsi->i_lastfrag = DIRECT_FRAGMENT;
	mark_inode_dirty(inode);
	mutex_unlock(&ufsi->truncate_mutex);
}
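/*
 * Truncation strategy: the branch that still contains the new last block
 * is only trimmed (free_branch_tail() frees the entries past offsets[] at
 * each level), while every branch wholly beyond the new size is freed in
 * one piece with free_full_branch().  All pointer updates are made with
 * meta_lock held so ufs_frag_map() can detect concurrent changes, and the
 * whole operation is serialized against block allocation by
 * truncate_mutex.
 */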
static int ufs_truncate(struct inode *inode, loff_t size)
{
	int err = 0;

	UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n",
	     inode->i_ino, (unsigned long long)size,
	     (unsigned long long)i_size_read(inode));

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	err = ufs_alloc_lastblock(inode, size);
	if (err)
		goto out;

	block_truncate_page(inode->i_mapping, size, ufs_getfrag_block);

	truncate_setsize(inode, size);

	__ufs_truncate_blocks(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
out:
	UFSD("EXIT: err %d\n", err);
	return err;
}
void ufs_truncate_blocks(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;
	__ufs_truncate_blocks(inode);
}
int ufs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
		error = ufs_truncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

const struct inode_operations ufs_file_inode_operations = {
	.setattr = ufs_setattr,
};