block: remove per-queue plugging
fs/nilfs2/inode.c
1 /*
2 * inode.c - NILFS inode operations.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Ryusuke Konishi <ryusuke@osrg.net>
21 *
22 */
23
24 #include <linux/buffer_head.h>
25 #include <linux/gfp.h>
26 #include <linux/mpage.h>
27 #include <linux/writeback.h>
28 #include <linux/uio.h>
29 #include "nilfs.h"
30 #include "btnode.h"
31 #include "segment.h"
32 #include "page.h"
33 #include "mdt.h"
34 #include "cpfile.h"
35 #include "ifile.h"
36
37 struct nilfs_iget_args {
38 u64 ino;
39 __u64 cno;
40 struct nilfs_root *root;
41 int for_gc;
42 };
43
44 /**
45 * nilfs_get_block() - get a file block on the filesystem (callback function)
46 * @inode: inode struct of the target file
47 * @blkoff: file block number
48 * @bh_result: buffer head to be mapped on
49 * @create: indicates whether the block should be allocated when it has
50 * not been allocated yet.
51 *
52 * This function does not issue an actual read request for the specified
53 * data block; that is done by the VFS.
54 */
55 int nilfs_get_block(struct inode *inode, sector_t blkoff,
56 struct buffer_head *bh_result, int create)
57 {
58 struct nilfs_inode_info *ii = NILFS_I(inode);
59 __u64 blknum = 0;
60 int err = 0, ret;
61 struct inode *dat = NILFS_I_NILFS(inode)->ns_dat;
62 unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;
63
64 down_read(&NILFS_MDT(dat)->mi_sem);
65 ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
66 up_read(&NILFS_MDT(dat)->mi_sem);
67 if (ret >= 0) { /* found */
68 map_bh(bh_result, inode->i_sb, blknum);
69 if (ret > 0)
70 bh_result->b_size = (ret << inode->i_blkbits);
71 goto out;
72 }
73 /* data block was not found */
74 if (ret == -ENOENT && create) {
75 struct nilfs_transaction_info ti;
76
77 bh_result->b_blocknr = 0;
78 err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
79 if (unlikely(err))
80 goto out;
81 err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
82 (unsigned long)bh_result);
83 if (unlikely(err != 0)) {
84 if (err == -EEXIST) {
85 /*
86 * The get_block() function can be called from
87 * multiple callers for the same inode.  However,
88 * the page containing this block must be locked
89 * in this case.
90 */
91 printk(KERN_WARNING
92 "nilfs_get_block: a race condition "
93 "while inserting a data block. "
94 "(inode number=%lu, file block "
95 "offset=%llu)\n",
96 inode->i_ino,
97 (unsigned long long)blkoff);
98 err = 0;
99 }
100 nilfs_transaction_abort(inode->i_sb);
101 goto out;
102 }
103 nilfs_mark_inode_dirty(inode);
104 nilfs_transaction_commit(inode->i_sb); /* never fails */
105 /* Error handling should be detailed */
106 set_buffer_new(bh_result);
107 set_buffer_delay(bh_result);
108 map_bh(bh_result, inode->i_sb, 0); /* disk block number must be
109 changed to the proper value */
110 } else if (ret == -ENOENT) {
111 /* a missing block is not an error (e.g. a hole); return
112 without setting the mapped state flag. */
113 ;
114 } else {
115 err = ret;
116 }
117
118 out:
119 return err;
120 }
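/*
 * Illustrative sketch, not part of the nilfs2 sources: the get_block
 * contract as implemented above.  A caller sets bh_result->b_size to the
 * number of bytes it wants mapped; on success the buffer is mapped and
 * b_size may be shrunk to the contiguous extent actually found.  The
 * helper below is hypothetical and kept out of the build with #if 0.
 */
#if 0	/* hypothetical caller, for illustration only */
static int example_lookup_block(struct inode *inode, sector_t blkoff)
{
	struct buffer_head bh = { .b_size = 1 << inode->i_blkbits };
	int err;

	/* look up one block without allocating (create == 0) */
	err = nilfs_get_block(inode, blkoff, &bh, 0);
	if (!err && buffer_mapped(&bh))
		pr_debug("blkoff %llu maps to block %llu\n",
			 (unsigned long long)blkoff,
			 (unsigned long long)bh.b_blocknr);
	return err;
}
#endif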
121
122 /**
123 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
124 * address_space_operations.
125 * @file: file struct of the file to be read
126 * @page: the page to be read
127 */
128 static int nilfs_readpage(struct file *file, struct page *page)
129 {
130 return mpage_readpage(page, nilfs_get_block);
131 }
132
133 /**
134 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
135 * address_space_operations.
136 * @file: file struct of the file to be read
137 * @mapping: address_space struct used for reading multiple pages
138 * @pages: the pages to be read
139 * @nr_pages: number of pages to be read
140 */
141 static int nilfs_readpages(struct file *file, struct address_space *mapping,
142 struct list_head *pages, unsigned nr_pages)
143 {
144 return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
145 }
146
147 static int nilfs_writepages(struct address_space *mapping,
148 struct writeback_control *wbc)
149 {
150 struct inode *inode = mapping->host;
151 int err = 0;
152
153 if (wbc->sync_mode == WB_SYNC_ALL)
154 err = nilfs_construct_dsync_segment(inode->i_sb, inode,
155 wbc->range_start,
156 wbc->range_end);
157 return err;
158 }
159
160 static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
161 {
162 struct inode *inode = page->mapping->host;
163 int err;
164
165 redirty_page_for_writepage(wbc, page);
166 unlock_page(page);
167
168 if (wbc->sync_mode == WB_SYNC_ALL) {
169 err = nilfs_construct_segment(inode->i_sb);
170 if (unlikely(err))
171 return err;
172 } else if (wbc->for_reclaim)
173 nilfs_flush_segment(inode->i_sb, inode->i_ino);
174
175 return 0;
176 }
177
178 static int nilfs_set_page_dirty(struct page *page)
179 {
180 int ret = __set_page_dirty_buffers(page);
181
182 if (ret) {
183 struct inode *inode = page->mapping->host;
184 unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
185
186 nilfs_set_file_dirty(inode, nr_dirty);
187 }
188 return ret;
189 }
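/*
 * Worked example (illustrative): with 4KB pages (PAGE_SHIFT == 12) and a
 * 1KB block size (i_blkbits == 10), nr_dirty above is 1 << 2 == 4, so
 * every block of a newly dirtied page gets accounted through
 * nilfs_set_file_dirty() and ns_ndirtyblks.
 */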
190
191 static int nilfs_write_begin(struct file *file, struct address_space *mapping,
192 loff_t pos, unsigned len, unsigned flags,
193 struct page **pagep, void **fsdata)
194
195 {
196 struct inode *inode = mapping->host;
197 int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);
198
199 if (unlikely(err))
200 return err;
201
202 err = block_write_begin(mapping, pos, len, flags, pagep,
203 nilfs_get_block);
204 if (unlikely(err)) {
205 loff_t isize = mapping->host->i_size;
206 if (pos + len > isize)
207 vmtruncate(mapping->host, isize);
208
209 nilfs_transaction_abort(inode->i_sb);
210 }
211 return err;
212 }
213
214 static int nilfs_write_end(struct file *file, struct address_space *mapping,
215 loff_t pos, unsigned len, unsigned copied,
216 struct page *page, void *fsdata)
217 {
218 struct inode *inode = mapping->host;
219 unsigned start = pos & (PAGE_CACHE_SIZE - 1);
220 unsigned nr_dirty;
221 int err;
222
223 nr_dirty = nilfs_page_count_clean_buffers(page, start,
224 start + copied);
225 copied = generic_write_end(file, mapping, pos, len, copied, page,
226 fsdata);
227 nilfs_set_file_dirty(inode, nr_dirty);
228 err = nilfs_transaction_commit(inode->i_sb);
229 return err ? : copied;
230 }
231
232 static ssize_t
233 nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
234 loff_t offset, unsigned long nr_segs)
235 {
236 struct file *file = iocb->ki_filp;
237 struct inode *inode = file->f_mapping->host;
238 ssize_t size;
239
240 if (rw == WRITE)
241 return 0;
242
243 /* Needs synchronization with the cleaner */
244 size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
245 offset, nr_segs, nilfs_get_block, NULL);
246
247 /*
248 * In case of error, an extending write may have instantiated a few
249 * blocks outside i_size.  Trim these off again.
250 */
251 if (unlikely((rw & WRITE) && size < 0)) {
252 loff_t isize = i_size_read(inode);
253 loff_t end = offset + iov_length(iov, nr_segs);
254
255 if (end > isize)
256 vmtruncate(inode, isize);
257 }
258
259 return size;
260 }
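/*
 * Note (an assumption about the generic VFS write path of this kernel
 * generation, not something stated in this file): because the method
 * above returns 0 for writes, an O_DIRECT write makes no progress on
 * the direct path and the generic write code falls back to buffered
 * writes; only reads are served by blockdev_direct_IO().  A
 * hypothetical user-space probe of that behaviour:
 *
 *	int fd = open("/mnt/nilfs2/file", O_RDWR | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	read(fd, buf, 4096);	// served by nilfs_direct_IO()
 *	write(fd, buf, 4096);	// falls back to the page cache
 */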
261
262 const struct address_space_operations nilfs_aops = {
263 .writepage = nilfs_writepage,
264 .readpage = nilfs_readpage,
265 .writepages = nilfs_writepages,
266 .set_page_dirty = nilfs_set_page_dirty,
267 .readpages = nilfs_readpages,
268 .write_begin = nilfs_write_begin,
269 .write_end = nilfs_write_end,
270 /* .releasepage = nilfs_releasepage, */
271 .invalidatepage = block_invalidatepage,
272 .direct_IO = nilfs_direct_IO,
273 .is_partially_uptodate = block_is_partially_uptodate,
274 };
275
276 struct inode *nilfs_new_inode(struct inode *dir, int mode)
277 {
278 struct super_block *sb = dir->i_sb;
279 struct nilfs_sb_info *sbi = NILFS_SB(sb);
280 struct inode *inode;
281 struct nilfs_inode_info *ii;
282 struct nilfs_root *root;
283 int err = -ENOMEM;
284 ino_t ino;
285
286 inode = new_inode(sb);
287 if (unlikely(!inode))
288 goto failed;
289
290 mapping_set_gfp_mask(inode->i_mapping,
291 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
292
293 root = NILFS_I(dir)->i_root;
294 ii = NILFS_I(inode);
295 ii->i_state = 1 << NILFS_I_NEW;
296 ii->i_root = root;
297
298 err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
299 if (unlikely(err))
300 goto failed_ifile_create_inode;
301 /* the reference count of i_bh is inherited from nilfs_mdt_read_block() */
302
303 atomic_inc(&root->inodes_count);
304 inode_init_owner(inode, dir, mode);
305 inode->i_ino = ino;
306 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
307
308 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
309 err = nilfs_bmap_read(ii->i_bmap, NULL);
310 if (err < 0)
311 goto failed_bmap;
312
313 set_bit(NILFS_I_BMAP, &ii->i_state);
314 /* No lock is needed; iget() ensures it. */
315 }
316
317 ii->i_flags = NILFS_I(dir)->i_flags;
318 if (S_ISLNK(mode))
319 ii->i_flags &= ~(NILFS_IMMUTABLE_FL | NILFS_APPEND_FL);
320 if (!S_ISDIR(mode))
321 ii->i_flags &= ~NILFS_DIRSYNC_FL;
322
323 /* ii->i_file_acl = 0; */
324 /* ii->i_dir_acl = 0; */
325 ii->i_dir_start_lookup = 0;
326 nilfs_set_inode_flags(inode);
327 spin_lock(&sbi->s_next_gen_lock);
328 inode->i_generation = sbi->s_next_generation++;
329 spin_unlock(&sbi->s_next_gen_lock);
330 insert_inode_hash(inode);
331
332 err = nilfs_init_acl(inode, dir);
333 if (unlikely(err))
334 goto failed_acl; /* never occurs. When nilfs_init_acl() is
335 supported, proper cancellation of the
336 above jobs should be considered */
337
338 return inode;
339
340 failed_acl:
341 failed_bmap:
342 inode->i_nlink = 0;
343 iput(inode); /* raw_inode will be deleted through
344 generic_delete_inode() */
345 goto failed;
346
347 failed_ifile_create_inode:
348 make_bad_inode(inode);
349 iput(inode); /* if i_nlink == 1, generic_forget_inode() will be
350 called */
351 failed:
352 return ERR_PTR(err);
353 }
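/*
 * Illustrative sketch, loosely modelled on the create path in namei.c
 * (hypothetical helper, kept out of the build): nilfs_new_inode() is
 * meant to run inside a transaction, and the caller commits or aborts
 * that transaction depending on the outcome.
 */
#if 0	/* example only */
static int example_create(struct inode *dir, struct dentry *dentry, int mode)
{
	struct nilfs_transaction_info ti;
	struct inode *inode;
	int err;

	err = nilfs_transaction_begin(dir->i_sb, &ti, 1);
	if (err)
		return err;

	inode = nilfs_new_inode(dir, mode);
	err = PTR_ERR(inode);
	if (!IS_ERR(inode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
		err = 0;	/* directory entry insertion omitted here */
	}
	if (!err)
		err = nilfs_transaction_commit(dir->i_sb);
	else
		nilfs_transaction_abort(dir->i_sb);
	return err;
}
#endif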
354
355 void nilfs_set_inode_flags(struct inode *inode)
356 {
357 unsigned int flags = NILFS_I(inode)->i_flags;
358
359 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
360 S_DIRSYNC);
361 if (flags & NILFS_SYNC_FL)
362 inode->i_flags |= S_SYNC;
363 if (flags & NILFS_APPEND_FL)
364 inode->i_flags |= S_APPEND;
365 if (flags & NILFS_IMMUTABLE_FL)
366 inode->i_flags |= S_IMMUTABLE;
367 #ifndef NILFS_ATIME_DISABLE
368 if (flags & NILFS_NOATIME_FL)
369 #endif
370 inode->i_flags |= S_NOATIME;
371 if (flags & NILFS_DIRSYNC_FL)
372 inode->i_flags |= S_DIRSYNC;
373 mapping_set_gfp_mask(inode->i_mapping,
374 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
375 }
376
377 int nilfs_read_inode_common(struct inode *inode,
378 struct nilfs_inode *raw_inode)
379 {
380 struct nilfs_inode_info *ii = NILFS_I(inode);
381 int err;
382
383 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
384 inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
385 inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
386 inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
387 inode->i_size = le64_to_cpu(raw_inode->i_size);
388 inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
389 inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
390 inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
391 inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
392 inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
393 inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
394 if (inode->i_nlink == 0 && inode->i_mode == 0)
395 return -EINVAL; /* this inode is deleted */
396
397 inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
398 ii->i_flags = le32_to_cpu(raw_inode->i_flags);
399 #if 0
400 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
401 ii->i_dir_acl = S_ISREG(inode->i_mode) ?
402 0 : le32_to_cpu(raw_inode->i_dir_acl);
403 #endif
404 ii->i_dir_start_lookup = 0;
405 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
406
407 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
408 S_ISLNK(inode->i_mode)) {
409 err = nilfs_bmap_read(ii->i_bmap, raw_inode);
410 if (err < 0)
411 return err;
412 set_bit(NILFS_I_BMAP, &ii->i_state);
413 /* No lock is needed; iget() ensures it. */
414 }
415 return 0;
416 }
417
418 static int __nilfs_read_inode(struct super_block *sb,
419 struct nilfs_root *root, unsigned long ino,
420 struct inode *inode)
421 {
422 struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
423 struct buffer_head *bh;
424 struct nilfs_inode *raw_inode;
425 int err;
426
427 down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
428 err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
429 if (unlikely(err))
430 goto bad_inode;
431
432 raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);
433
434 err = nilfs_read_inode_common(inode, raw_inode);
435 if (err)
436 goto failed_unmap;
437
438 if (S_ISREG(inode->i_mode)) {
439 inode->i_op = &nilfs_file_inode_operations;
440 inode->i_fop = &nilfs_file_operations;
441 inode->i_mapping->a_ops = &nilfs_aops;
442 } else if (S_ISDIR(inode->i_mode)) {
443 inode->i_op = &nilfs_dir_inode_operations;
444 inode->i_fop = &nilfs_dir_operations;
445 inode->i_mapping->a_ops = &nilfs_aops;
446 } else if (S_ISLNK(inode->i_mode)) {
447 inode->i_op = &nilfs_symlink_inode_operations;
448 inode->i_mapping->a_ops = &nilfs_aops;
449 } else {
450 inode->i_op = &nilfs_special_inode_operations;
451 init_special_inode(
452 inode, inode->i_mode,
453 huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
454 }
455 nilfs_ifile_unmap_inode(root->ifile, ino, bh);
456 brelse(bh);
457 up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
458 nilfs_set_inode_flags(inode);
459 return 0;
460
461 failed_unmap:
462 nilfs_ifile_unmap_inode(root->ifile, ino, bh);
463 brelse(bh);
464
465 bad_inode:
466 up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
467 return err;
468 }
469
470 static int nilfs_iget_test(struct inode *inode, void *opaque)
471 {
472 struct nilfs_iget_args *args = opaque;
473 struct nilfs_inode_info *ii;
474
475 if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
476 return 0;
477
478 ii = NILFS_I(inode);
479 if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
480 return !args->for_gc;
481
482 return args->for_gc && args->cno == ii->i_cno;
483 }
484
485 static int nilfs_iget_set(struct inode *inode, void *opaque)
486 {
487 struct nilfs_iget_args *args = opaque;
488
489 inode->i_ino = args->ino;
490 if (args->for_gc) {
491 NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
492 NILFS_I(inode)->i_cno = args->cno;
493 NILFS_I(inode)->i_root = NULL;
494 } else {
495 if (args->root && args->ino == NILFS_ROOT_INO)
496 nilfs_get_root(args->root);
497 NILFS_I(inode)->i_root = args->root;
498 }
499 return 0;
500 }
501
502 struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
503 unsigned long ino)
504 {
505 struct nilfs_iget_args args = {
506 .ino = ino, .root = root, .cno = 0, .for_gc = 0
507 };
508
509 return ilookup5(sb, ino, nilfs_iget_test, &args);
510 }
511
512 struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
513 unsigned long ino)
514 {
515 struct nilfs_iget_args args = {
516 .ino = ino, .root = root, .cno = 0, .for_gc = 0
517 };
518
519 return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
520 }
521
522 struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
523 unsigned long ino)
524 {
525 struct inode *inode;
526 int err;
527
528 inode = nilfs_iget_locked(sb, root, ino);
529 if (unlikely(!inode))
530 return ERR_PTR(-ENOMEM);
531 if (!(inode->i_state & I_NEW))
532 return inode;
533
534 err = __nilfs_read_inode(sb, root, ino, inode);
535 if (unlikely(err)) {
536 iget_failed(inode);
537 return ERR_PTR(err);
538 }
539 unlock_new_inode(inode);
540 return inode;
541 }
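/*
 * Usage sketch (hypothetical caller): a directory lookup that has
 * resolved a name to an inode number typically instantiates the inode
 * against the directory's checkpoint root, e.g.:
 *
 *	inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *
 * Repeated calls for the same (root, ino) pair return the cached
 * in-core inode; only the first call reads the on-disk inode through
 * __nilfs_read_inode().
 */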
542
543 struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
544 __u64 cno)
545 {
546 struct nilfs_iget_args args = {
547 .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
548 };
549 struct inode *inode;
550 int err;
551
552 inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
553 if (unlikely(!inode))
554 return ERR_PTR(-ENOMEM);
555 if (!(inode->i_state & I_NEW))
556 return inode;
557
558 err = nilfs_init_gcinode(inode);
559 if (unlikely(err)) {
560 iget_failed(inode);
561 return ERR_PTR(err);
562 }
563 unlock_new_inode(inode);
564 return inode;
565 }
566
567 void nilfs_write_inode_common(struct inode *inode,
568 struct nilfs_inode *raw_inode, int has_bmap)
569 {
570 struct nilfs_inode_info *ii = NILFS_I(inode);
571
572 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
573 raw_inode->i_uid = cpu_to_le32(inode->i_uid);
574 raw_inode->i_gid = cpu_to_le32(inode->i_gid);
575 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
576 raw_inode->i_size = cpu_to_le64(inode->i_size);
577 raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
578 raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
579 raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
580 raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
581 raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
582
583 raw_inode->i_flags = cpu_to_le32(ii->i_flags);
584 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
585
586 if (has_bmap)
587 nilfs_bmap_write(ii->i_bmap, raw_inode);
588 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
589 raw_inode->i_device_code =
590 cpu_to_le64(huge_encode_dev(inode->i_rdev));
591 /* When the on-disk inode is extended, nilfs->ns_inode_size should be
592 checked before filling in any appended fields */
593 }
594
595 void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
596 {
597 ino_t ino = inode->i_ino;
598 struct nilfs_inode_info *ii = NILFS_I(inode);
599 struct inode *ifile = ii->i_root->ifile;
600 struct nilfs_inode *raw_inode;
601
602 raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);
603
604 if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
605 memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
606 set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
607
608 nilfs_write_inode_common(inode, raw_inode, 0);
609 /* XXX: calling with has_bmap = 0 is a workaround to avoid a
610 bmap deadlock.  This delays the update of i_bmap until just
611 before writing */
612 nilfs_ifile_unmap_inode(ifile, ino, ibh);
613 }
614
615 #define NILFS_MAX_TRUNCATE_BLOCKS 16384 /* 64MB for 4KB block */
616
617 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
618 unsigned long from)
619 {
620 unsigned long b;
621 int ret;
622
623 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
624 return;
625 repeat:
626 ret = nilfs_bmap_last_key(ii->i_bmap, &b);
627 if (ret == -ENOENT)
628 return;
629 else if (ret < 0)
630 goto failed;
631
632 if (b < from)
633 return;
634
635 b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
636 ret = nilfs_bmap_truncate(ii->i_bmap, b);
637 nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
638 if (!ret || (ret == -ENOMEM &&
639 nilfs_bmap_truncate(ii->i_bmap, b) == 0))
640 goto repeat;
641
642 failed:
643 nilfs_warning(ii->vfs_inode.i_sb, __func__,
644 "failed to truncate bmap (ino=%lu, err=%d)",
645 ii->vfs_inode.i_ino, ret);
646 }
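/*
 * Worked example (illustrative): suppose the last allocated key is
 * 40000 and `from` is 0.  The first pass truncates the bmap at
 * 40000 - 16384 = 23616; if the new last key is 23615, the next pass
 * truncates at 23615 - 16384 = 7231, and the final pass reaches 0.
 * nilfs_relax_pressure_in_lock() runs between passes to relieve memory
 * pressure while a huge truncation is in progress.
 */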
647
648 void nilfs_truncate(struct inode *inode)
649 {
650 unsigned long blkoff;
651 unsigned int blocksize;
652 struct nilfs_transaction_info ti;
653 struct super_block *sb = inode->i_sb;
654 struct nilfs_inode_info *ii = NILFS_I(inode);
655
656 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
657 return;
658 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
659 return;
660
661 blocksize = sb->s_blocksize;
662 blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
663 nilfs_transaction_begin(sb, &ti, 0); /* never fails */
664
665 block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
666
667 nilfs_truncate_bmap(ii, blkoff);
668
669 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
670 if (IS_SYNC(inode))
671 nilfs_set_transaction_flag(NILFS_TI_SYNC);
672
673 nilfs_mark_inode_dirty(inode);
674 nilfs_set_file_dirty(inode, 0);
675 nilfs_transaction_commit(sb);
676 /* May construct a logical segment and may fail in sync mode.
677 But truncate has no return value. */
678 }
679
680 static void nilfs_clear_inode(struct inode *inode)
681 {
682 struct nilfs_inode_info *ii = NILFS_I(inode);
683 struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
684
685 /*
686 * Free resources allocated in nilfs_read_inode(), here.
687 */
688 BUG_ON(!list_empty(&ii->i_dirty));
689 brelse(ii->i_bh);
690 ii->i_bh = NULL;
691
692 if (mdi && mdi->mi_palloc_cache)
693 nilfs_palloc_destroy_cache(inode);
694
695 if (test_bit(NILFS_I_BMAP, &ii->i_state))
696 nilfs_bmap_clear(ii->i_bmap);
697
698 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
699
700 if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
701 nilfs_put_root(ii->i_root);
702 }
703
704 void nilfs_evict_inode(struct inode *inode)
705 {
706 struct nilfs_transaction_info ti;
707 struct super_block *sb = inode->i_sb;
708 struct nilfs_inode_info *ii = NILFS_I(inode);
709
710 if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
711 if (inode->i_data.nrpages)
712 truncate_inode_pages(&inode->i_data, 0);
713 end_writeback(inode);
714 nilfs_clear_inode(inode);
715 return;
716 }
717 nilfs_transaction_begin(sb, &ti, 0); /* never fails */
718
719 if (inode->i_data.nrpages)
720 truncate_inode_pages(&inode->i_data, 0);
721
722 /* TODO: some of the following operations may fail. */
723 nilfs_truncate_bmap(ii, 0);
724 nilfs_mark_inode_dirty(inode);
725 end_writeback(inode);
726
727 nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
728 atomic_dec(&ii->i_root->inodes_count);
729
730 nilfs_clear_inode(inode);
731
732 if (IS_SYNC(inode))
733 nilfs_set_transaction_flag(NILFS_TI_SYNC);
734 nilfs_transaction_commit(sb);
735 /* May construct a logical segment and may fail in sync mode.
736 But delete_inode has no return value. */
737 }
738
739 int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
740 {
741 struct nilfs_transaction_info ti;
742 struct inode *inode = dentry->d_inode;
743 struct super_block *sb = inode->i_sb;
744 int err;
745
746 err = inode_change_ok(inode, iattr);
747 if (err)
748 return err;
749
750 err = nilfs_transaction_begin(sb, &ti, 0);
751 if (unlikely(err))
752 return err;
753
754 if ((iattr->ia_valid & ATTR_SIZE) &&
755 iattr->ia_size != i_size_read(inode)) {
756 err = vmtruncate(inode, iattr->ia_size);
757 if (unlikely(err))
758 goto out_err;
759 }
760
761 setattr_copy(inode, iattr);
762 mark_inode_dirty(inode);
763
764 if (iattr->ia_valid & ATTR_MODE) {
765 err = nilfs_acl_chmod(inode);
766 if (unlikely(err))
767 goto out_err;
768 }
769
770 return nilfs_transaction_commit(sb);
771
772 out_err:
773 nilfs_transaction_abort(sb);
774 return err;
775 }
776
777 int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
778 {
779 struct nilfs_root *root;
780
781 if (flags & IPERM_FLAG_RCU)
782 return -ECHILD;
783
784 root = NILFS_I(inode)->i_root;
785 if ((mask & MAY_WRITE) && root &&
786 root->cno != NILFS_CPTREE_CURRENT_CNO)
787 return -EROFS; /* snapshot is not writable */
788
789 return generic_permission(inode, mask, flags, NULL);
790 }
791
792 int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
793 {
794 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
795 struct nilfs_inode_info *ii = NILFS_I(inode);
796 int err;
797
798 spin_lock(&sbi->s_inode_lock);
799 if (ii->i_bh == NULL) {
800 spin_unlock(&sbi->s_inode_lock);
801 err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
802 inode->i_ino, pbh);
803 if (unlikely(err))
804 return err;
805 spin_lock(&sbi->s_inode_lock);
806 if (ii->i_bh == NULL)
807 ii->i_bh = *pbh;
808 else {
809 brelse(*pbh);
810 *pbh = ii->i_bh;
811 }
812 } else
813 *pbh = ii->i_bh;
814
815 get_bh(*pbh);
816 spin_unlock(&sbi->s_inode_lock);
817 return 0;
818 }
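/*
 * Note on the locking pattern above: ii->i_bh is checked once under
 * s_inode_lock, the lock is dropped for the possibly blocking ifile
 * read, and the field is re-checked before being installed, so two
 * racing callers end up sharing a single buffer_head reference instead
 * of leaking one.
 */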
819
820 int nilfs_inode_dirty(struct inode *inode)
821 {
822 struct nilfs_inode_info *ii = NILFS_I(inode);
823 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
824 int ret = 0;
825
826 if (!list_empty(&ii->i_dirty)) {
827 spin_lock(&sbi->s_inode_lock);
828 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
829 test_bit(NILFS_I_BUSY, &ii->i_state);
830 spin_unlock(&sbi->s_inode_lock);
831 }
832 return ret;
833 }
834
835 int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
836 {
837 struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb);
838 struct nilfs_inode_info *ii = NILFS_I(inode);
839
840 atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks);
841
842 if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
843 return 0;
844
845 spin_lock(&sbi->s_inode_lock);
846 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
847 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
848 /* Because this routine may race with nilfs_dispose_list(),
849 we have to check NILFS_I_QUEUED here, too. */
850 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
851 /* This will happen when somebody is freeing
852 this inode. */
853 nilfs_warning(sbi->s_super, __func__,
854 "cannot get inode (ino=%lu)\n",
855 inode->i_ino);
856 spin_unlock(&sbi->s_inode_lock);
857 return -EINVAL; /* NILFS_I_DIRTY may remain for
858 freeing inode */
859 }
860 list_del(&ii->i_dirty);
861 list_add_tail(&ii->i_dirty, &sbi->s_dirty_files);
862 set_bit(NILFS_I_QUEUED, &ii->i_state);
863 }
864 spin_unlock(&sbi->s_inode_lock);
865 return 0;
866 }
867
868 int nilfs_mark_inode_dirty(struct inode *inode)
869 {
870 struct buffer_head *ibh;
871 int err;
872
873 err = nilfs_load_inode_block(inode, &ibh);
874 if (unlikely(err)) {
875 nilfs_warning(inode->i_sb, __func__,
876 "failed to reget inode block.\n");
877 return err;
878 }
879 nilfs_update_inode(inode, ibh);
880 nilfs_mdt_mark_buffer_dirty(ibh);
881 nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
882 brelse(ibh);
883 return 0;
884 }
885
886 /**
887 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
888 * @inode: inode of the file to be registered.
889 *
890 * nilfs_dirty_inode() loads an inode block containing the specified
891 * @inode and copies data from the inode to the corresponding
892 * nilfs_inode entry in that block. This operation is excluded from the segment
893 * construction. This function can be called both as a single operation
894 * and as a part of indivisible file operations.
895 */
896 void nilfs_dirty_inode(struct inode *inode)
897 {
898 struct nilfs_transaction_info ti;
899 struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
900
901 if (is_bad_inode(inode)) {
902 nilfs_warning(inode->i_sb, __func__,
903 "tried to mark bad_inode dirty. ignored.\n");
904 dump_stack();
905 return;
906 }
907 if (mdi) {
908 nilfs_mdt_mark_dirty(inode);
909 return;
910 }
911 nilfs_transaction_begin(inode->i_sb, &ti, 0);
912 nilfs_mark_inode_dirty(inode);
913 nilfs_transaction_commit(inode->i_sb); /* never fails */
914 }
915
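/*
 * Overview of nilfs_fiemap() below (descriptive summary): the function
 * walks the requested block range, alternating between uncommitted
 * (delayed-allocation) extents found via nilfs_find_uncommitted_extent()
 * and committed extents found via nilfs_bmap_lookup_contig(), merging
 * physically contiguous runs before reporting each extent with
 * fiemap_fill_next_extent().
 */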
916 int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
917 __u64 start, __u64 len)
918 {
919 struct the_nilfs *nilfs = NILFS_I_NILFS(inode);
920 __u64 logical = 0, phys = 0, size = 0;
921 __u32 flags = 0;
922 loff_t isize;
923 sector_t blkoff, end_blkoff;
924 sector_t delalloc_blkoff;
925 unsigned long delalloc_blklen;
926 unsigned int blkbits = inode->i_blkbits;
927 int ret, n;
928
929 ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
930 if (ret)
931 return ret;
932
933 mutex_lock(&inode->i_mutex);
934
935 isize = i_size_read(inode);
936
937 blkoff = start >> blkbits;
938 end_blkoff = (start + len - 1) >> blkbits;
939
940 delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
941 &delalloc_blkoff);
942
943 do {
944 __u64 blkphy;
945 unsigned int maxblocks;
946
947 if (delalloc_blklen && blkoff == delalloc_blkoff) {
948 if (size) {
949 /* End of the current extent */
950 ret = fiemap_fill_next_extent(
951 fieinfo, logical, phys, size, flags);
952 if (ret)
953 break;
954 }
955 if (blkoff > end_blkoff)
956 break;
957
958 flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
959 logical = blkoff << blkbits;
960 phys = 0;
961 size = delalloc_blklen << blkbits;
962
963 blkoff = delalloc_blkoff + delalloc_blklen;
964 delalloc_blklen = nilfs_find_uncommitted_extent(
965 inode, blkoff, &delalloc_blkoff);
966 continue;
967 }
968
969 /*
970 * Limit the number of blocks that we look up so as
971 * not to get into the next delayed allocation extent.
972 */
973 maxblocks = INT_MAX;
974 if (delalloc_blklen)
975 maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
976 maxblocks);
977 blkphy = 0;
978
979 down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
980 n = nilfs_bmap_lookup_contig(
981 NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
982 up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
983
984 if (n < 0) {
985 int past_eof;
986
987 if (unlikely(n != -ENOENT))
988 break; /* error */
989
990 /* HOLE */
991 blkoff++;
992 past_eof = ((blkoff << blkbits) >= isize);
993
994 if (size) {
995 /* End of the current extent */
996
997 if (past_eof)
998 flags |= FIEMAP_EXTENT_LAST;
999
1000 ret = fiemap_fill_next_extent(
1001 fieinfo, logical, phys, size, flags);
1002 if (ret)
1003 break;
1004 size = 0;
1005 }
1006 if (blkoff > end_blkoff || past_eof)
1007 break;
1008 } else {
1009 if (size) {
1010 if (phys && blkphy << blkbits == phys + size) {
1011 /* The current extent goes on */
1012 size += n << blkbits;
1013 } else {
1014 /* Terminate the current extent */
1015 ret = fiemap_fill_next_extent(
1016 fieinfo, logical, phys, size,
1017 flags);
1018 if (ret || blkoff > end_blkoff)
1019 break;
1020
1021 /* Start another extent */
1022 flags = FIEMAP_EXTENT_MERGED;
1023 logical = blkoff << blkbits;
1024 phys = blkphy << blkbits;
1025 size = n << blkbits;
1026 }
1027 } else {
1028 /* Start a new extent */
1029 flags = FIEMAP_EXTENT_MERGED;
1030 logical = blkoff << blkbits;
1031 phys = blkphy << blkbits;
1032 size = n << blkbits;
1033 }
1034 blkoff += n;
1035 }
1036 cond_resched();
1037 } while (true);
1038
1039 /* If ret is 1 then we just hit the end of the extent array */
1040 if (ret == 1)
1041 ret = 0;
1042
1043 mutex_unlock(&inode->i_mutex);
1044 return ret;
1045 }