f2fs: recover data requested by fdatasync
/*
 * fs/f2fs/file.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include <trace/events/f2fs.h>

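/*
 * Make a faulted page writable: reserve a data block for the faulting
 * index under the fs-wide operation lock, re-validate the page against
 * the mapping and i_size, and zero the tail of a page that straddles
 * EOF so stale bytes are never exposed.
 */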
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr;
	struct dnode_of_data dn;
	int err, ilock;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	/* block allocation */
	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
	if (err) {
		mutex_unlock_op(sbi, ilock);
		goto out;
	}

	old_blk_addr = dn.data_blkaddr;

	if (old_blk_addr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, ilock);
			goto out;
		}
	}
	f2fs_put_dnode(&dn);
	mutex_unlock_op(sbi, ilock);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page)) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_CACHE_SHIFT) > i_size_read(inode)) {
		unsigned offset;
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

mapped:
	/* fill the page */
	wait_on_page_writeback(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

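/*
 * Look up the parent directory's inode number through any cached dentry
 * alias of @inode. Returns 1 and stores the number in *pino on success,
 * or 0 when the inode has no dentry alias.
 */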
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	inode = igrab(dentry->d_parent->d_inode);
	dput(dentry);

	*pino = inode->i_ino;
	iput(inode);
	return 1;
}

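/*
 * fsync/fdatasync entry point. After the dirty data pages are flushed,
 * decide whether a full checkpoint is needed: non-regular or multi-link
 * files, a wrong parent ino, too little space for roll-forward recovery,
 * or an un-checkpointed parent node all force f2fs_sync_fs(). Otherwise
 * only the inode's node pages are written back and the block device
 * cache is flushed, which is enough for roll-forward recovery to restore
 * the file after a sudden power-off.
 */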
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (f2fs_readonly(inode->i_sb))
		return 0;

	trace_f2fs_sync_file_enter(inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	mutex_lock(&inode->i_mutex);

	/*
	 * Both fdatasync() and fsync() should be recoverable after a
	 * sudden power-off.
	 */
	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;

	if (need_cp) {
		nid_t pino;

		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);
		if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
					get_parent_ino(inode, &pino)) {
			F2FS_I(inode)->i_pino = pino;
			file_got_pino(inode);
			mark_inode_dirty_sync(inode);
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
	} else {
		/* if there is no written node page, write its inode page */
		while (!sync_node_pages(sbi, inode->i_ino, &wbc)) {
			mark_inode_dirty_sync(inode);
			ret = f2fs_write_inode(inode, NULL);
			if (ret)
				goto out;
		}
		filemap_fdatawait_range(sbi->node_inode->i_mapping,
							0, LONG_MAX);
		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
	}
out:
	mutex_unlock(&inode->i_mutex);
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	return ret;
}

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

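/*
 * Free up to @count block addresses in the node page referenced by @dn,
 * starting at dn->ofs_in_node: each valid address is removed from the
 * extent cache and invalidated, and the inode's valid block count is
 * decreased accordingly. Returns the number of blocks actually freed.
 */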
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	int nr_free = 0, ofs = dn->ofs_in_node;
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct f2fs_node *raw_node;
	__le32 *addr;

	raw_node = page_address(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		update_extent_cache(NULL_ADDR, dn);
		invalidate_blocks(sbi, blkaddr);
		nr_free++;
	}
	if (nr_free) {
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
		sync_inode_page(dn);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

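/*
 * Zero the bytes of the page containing the new EOF that lie beyond
 * @from. A no-op when @from is page-aligned or the page cannot be read.
 */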
static void truncate_partial_data_page(struct inode *inode, u64 from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	struct page *page;

	if (!offset)
		return;

	page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
	if (IS_ERR(page))
		return;

	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		f2fs_put_page(page, 1);
		return;
	}
	wait_on_page_writeback(page);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

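/*
 * Release all data blocks at or beyond @from: the node page covering
 * @from is trimmed in place, the remaining node blocks are freed via
 * truncate_inode_blocks(), and the page holding the new EOF is zeroed
 * past the truncation point.
 */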
static int truncate_blocks(struct inode *inode, u64 from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, ilock = -1;
	int err;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)
			((from + blocksize - 1) >> (sbi->log_blocksize));

	ilock = mutex_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		mutex_unlock_op(sbi, ilock);
		trace_f2fs_truncate_blocks_exit(inode, err);
		return err;
	}

	if (IS_INODE(dn.node_page))
		count = ADDRS_PER_INODE;
	else
		count = ADDRS_PER_BLOCK;

	count -= dn.ofs_in_node;
	BUG_ON(count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
	mutex_unlock_op(sbi, ilock);

	/* lastly zero out the first data page */
	truncate_partial_data_page(inode, from);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}

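/*
 * Truncate an inode to i_size. Only regular files, directories, and
 * symlinks own data blocks; all other inode types are ignored.
 */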
void f2fs_truncate(struct inode *inode)
{
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return;

	trace_f2fs_truncate(inode);

	if (!truncate_blocks(inode, i_size_read(inode))) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
}

int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	generic_fillattr(inode, stat);
	/* i_blocks counts 4KB blocks; shift by 3 to report 512-byte sectors */
	stat->blocks <<= 3;
	return 0;
}

#ifdef CONFIG_F2FS_FS_POSIX_ACL
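/*
 * A variant of setattr_copy(): the new mode is parked with
 * set_acl_inode() so it can be applied once the ACLs are updated.
 */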
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif

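/*
 * Apply attribute changes. A size change truncates the blocks right
 * away; a mode change goes through f2fs_acl_chmod() so that the ACL
 * entries and the mode bits stay consistent.
 */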
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
			attr->ia_size != i_size_read(inode)) {
		truncate_setsize(inode, attr->ia_size);
		f2fs_truncate(inode);
		f2fs_balance_fs(F2FS_SB(inode->i_sb));
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = f2fs_acl_chmod(inode);
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

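/*
 * Zero @len bytes starting at @start within the data page at @index,
 * allocating the page if it does not exist yet. A failure to get the
 * page is silently ignored.
 */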
static void fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	int ilock;

	if (!len)
		return;

	f2fs_balance_fs(sbi);

	ilock = mutex_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	mutex_unlock_op(sbi, ilock);

	if (!IS_ERR(page)) {
		wait_on_page_writeback(page);
		zero_user(page, start, len);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

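/*
 * Drop every allocated data block in the page range [@pg_start,
 * @pg_end). Holes (-ENOENT from the node lookup) are skipped; any other
 * lookup error aborts the walk.
 */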
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}

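/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at both ends of the
 * range, drop the whole pages in between, and shrink i_size to @offset
 * when the hole reaches or passes EOF and FALLOC_FL_KEEP_SIZE is unset.
 */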
static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		fill_zero(inode, pg_start, off_start,
						off_end - off_start);
	} else {
		if (off_start)
			fill_zero(inode, pg_start++, off_start,
					PAGE_CACHE_SIZE - off_start);
		if (off_end)
			fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
			int ilock;

			f2fs_balance_fs(sbi);

			blk_start = pg_start << PAGE_CACHE_SHIFT;
			blk_end = pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			ilock = mutex_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			mutex_unlock_op(sbi, ilock);
		}
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) <= (offset + len)) {
		i_size_write(inode, offset);
		mark_inode_dirty(inode);
	}

	return ret;
}

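/*
 * Preallocation for fallocate(): reserve a new block for every hole in
 * the page range and, unless FALLOC_FL_KEEP_SIZE is set, grow i_size to
 * cover what was allocated.
 */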
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;
		int ilock;

		ilock = mutex_lock_op(sbi);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
		if (ret) {
			mutex_unlock_op(sbi, ilock);
			break;
		}

		if (dn.data_blkaddr == NULL_ADDR) {
			ret = reserve_new_block(&dn);
			if (ret) {
				f2fs_put_dnode(&dn);
				mutex_unlock_op(sbi, ilock);
				break;
			}
		}
		f2fs_put_dnode(&dn);
		mutex_unlock_op(sbi, ilock);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = (index << PAGE_CACHE_SHIFT) + off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
	}

	return ret;
}

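/*
 * fallocate() entry point: only preallocation and hole punching are
 * supported, each optionally combined with FALLOC_FL_KEEP_SIZE.
 */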
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		ret = punch_hole(inode, offset, len, mode);
	else
		ret = expand_inode_data(inode, offset, len, mode);

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}
	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}

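/*
 * ioctl handler implementing the ext2-style F2FS_IOC_GETFLAGS and
 * F2FS_IOC_SETFLAGS inode flag interface. Toggling APPEND or IMMUTABLE
 * requires CAP_LINUX_IMMUTABLE, and only FS_FL_USER_MODIFIABLE bits may
 * be changed from user space.
 */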
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags;
	int ret;

	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		flags = fi->i_flags & FS_FL_USER_VISIBLE;
		return put_user(flags, (int __user *) arg);
	case F2FS_IOC_SETFLAGS:
	{
		unsigned int oldflags;

		ret = mnt_want_write_file(filp);
		if (ret)
			return ret;

		if (!inode_owner_or_capable(inode)) {
			ret = -EACCES;
			goto out;
		}

		if (get_user(flags, (int __user *) arg)) {
			ret = -EFAULT;
			goto out;
		}

		flags = f2fs_mask_flags(inode->i_mode, flags);

		mutex_lock(&inode->i_mutex);

		oldflags = fi->i_flags;

		if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
			if (!capable(CAP_LINUX_IMMUTABLE)) {
				mutex_unlock(&inode->i_mutex);
				ret = -EPERM;
				goto out;
			}
		}

		flags = flags & FS_FL_USER_MODIFIABLE;
		flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
		fi->i_flags = flags;
		mutex_unlock(&inode->i_mutex);

		f2fs_set_inode_flags(inode);
		inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
out:
		mnt_drop_write_file(filp);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.open		= generic_file_open,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
};