f2fs: add an option to avoid unnecessary BUG_ONs
fs/f2fs/file.c
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

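/*
 * page_mkwrite handler: called when a shared mmap'ed page is about to become
 * writable.  A data block is reserved for the faulting page (under
 * f2fs_lock_op so it cannot race with checkpoint), then the page is zeroed
 * beyond EOF if needed and marked dirty and up-to-date.
 */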
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	wait_on_page_writeback(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

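/*
 * Look up the parent directory's inode number through any cached dentry
 * alias of @inode.  Returns 1 and stores the number in *pino on success,
 * 0 if no alias exists or the on-disk dentry could not be updated.
 */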
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}

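/*
 * fsync/fdatasync entry point.  After writing back the file's dirty data,
 * decide between a full checkpoint (need_cp) and the cheaper roll-forward
 * path that only flushes the inode's node pages followed by a block-device
 * flush.
 */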
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (f2fs_readonly(inode->i_sb))
		return 0;

	trace_f2fs_sync_file_enter(inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	mutex_lock(&inode->i_mutex);

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;

	if (need_cp) {
		nid_t pino;

		F2FS_I(inode)->xattr_ver = 0;

		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
		if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
					get_parent_ino(inode, &pino)) {
			F2FS_I(inode)->i_pino = pino;
			file_got_pino(inode);
			mark_inode_dirty_sync(inode);
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
	} else {
		/* if there is no written node page, write its inode page */
		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
			mark_inode_dirty_sync(inode);
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	}
out:
	mutex_unlock(&inode->i_mutex);
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

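/*
 * Free up to @count block addresses stored in dn->node_page starting at
 * dn->ofs_in_node: each valid address is cleared in the extent cache and
 * invalidated in the segment info, then the inode's valid block count is
 * decremented once at the end.  Returns the number of blocks freed.
 */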
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}
	if (nr_free) {
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

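/* Zero the tail of the data page that straddles the new EOF when @from is not page-aligned. */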
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return;

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		f2fs_put_page(page, 1);
		return;
	}
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

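/*
 * Truncate all data blocks at or beyond @from: the partially used node is
 * trimmed with truncate_data_blocks_range(), the remaining node blocks are
 * released through truncate_inode_blocks(), and finally the page containing
 * the new EOF is zeroed out.
 */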
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0;
	int err;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		f2fs_unlock_op(sbi);
		trace_f2fs_truncate_blocks_exit(inode, err);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE(F2FS_I(inode));
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	f2fs_bug_on(count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	truncate_partial_data_page(inode, from);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

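/* Truncate the inode's blocks down to i_size; only regular files, directories and symlinks carry data blocks. */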
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}

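/*
 * ->getattr: i_blocks is maintained in f2fs block units (4KB), so the
 * left shift by 3 converts it to the 512-byte sector count expected in
 * stat->blocks.
 */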
int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
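/*
 * Like setattr_copy(), but defers the mode change: the new mode is stashed
 * in the in-memory inode via set_acl_inode() so f2fs_setattr() can apply it
 * after f2fs_acl_chmod() has run.
 */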
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

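/*
 * ->setattr: size changes are applied immediately via truncate_setsize()
 * and f2fs_truncate(); mode changes go through __setattr_copy() and
 * f2fs_acl_chmod() so the ACL entries stay consistent with the new mode.
 */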
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
			attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
		f2fs_balance_fs(F2FS_SB(inode->i_sb));
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

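/*
 * Zero @len bytes starting at @start within the data page at @index,
 * allocating the page (and its block) if it does not exist yet.
 */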
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

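/*
 * Free every allocated data block in the page range [pg_start, pg_end);
 * holes (missing dnodes or NULL_ADDR entries) are simply skipped.
 */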
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

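/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the range,
 * drop the fully covered pages from the page cache, and release their
 * blocks with truncate_hole().
 */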
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

			f2fs_balance_fs(sbi);

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

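/*
 * Preallocate blocks for the byte range [offset, offset + len): walk every
 * page in the range and reserve a new block wherever the address is still
 * NULL_ADDR, extending i_size afterwards unless FALLOC_FL_KEEP_SIZE is set.
 */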
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		f2fs_lock_op(sbi);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		if (ret) {
			f2fs_unlock_op(sbi);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				f2fs_unlock_op(sbi);
				break;
			}
		}
		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

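/* fallocate() entry point: only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE are supported. */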
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

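/*
 * Restrict the FS_*_FL flags an inode can carry depending on its type:
 * directories may use everything, regular files lose DIRSYNC/TOPDIR, and
 * other inode types keep only NODUMP and NOATIME.
 */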
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

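/*
 * ioctl handler for F2FS_IOC_GETFLAGS/SETFLAGS.  SETFLAGS takes a write
 * reference on the mount, checks ownership and CAP_LINUX_IMMUTABLE for
 * append/immutable changes, and only then updates fi->i_flags.
 */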
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case F2FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;

		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}

		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);

		oldflags = fi->i_flags;

		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}

		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write_file(filp);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.open		= generic_file_open,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};