f2fs: add inline_data recovery routine
/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

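/*
 * Check whether @inode is a candidate for keeping its data inline: the
 * inline_data mount option must be enabled, i_blocks must stay within the
 * inode block, an optional xattr node block and at most one data block,
 * and i_size must not exceed MAX_INLINE_DATA bytes.
 */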
bool f2fs_may_inline(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t nr_blocks;
	loff_t i_size;

	if (!test_opt(sbi, INLINE_DATA))
		return false;

	nr_blocks = F2FS_I(inode)->i_xattr_nid ? 3 : 2;
	if (inode->i_blocks > nr_blocks)
		return false;

	i_size = i_size_read(inode);
	if (i_size > MAX_INLINE_DATA)
		return false;

	return true;
}

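/*
 * Serve a read of an inline file: copy MAX_INLINE_DATA bytes from the
 * inline area of the inode block into @page and mark the page uptodate,
 * so no separate data block is read.
 */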
int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *ipage;
	void *src_addr, *dst_addr;

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	zero_user_segment(page, INLINE_DATA_OFFSET,
			INLINE_DATA_OFFSET + MAX_INLINE_DATA);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	f2fs_put_page(ipage, 1);

	SetPageUptodate(page);
	unlock_page(page);

	return 0;
}

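/*
 * Convert an inline inode into a regular one: reserve block 0, copy the
 * inline data into @page and write that page back synchronously, and only
 * after the writeback completes clear the inline area and FI_INLINE_DATA,
 * so the data stays consistent if the conversion is interrupted.
 */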
static int __f2fs_convert_inline_data(struct inode *inode, struct page *page)
{
	int err;
	struct page *ipage;
	struct dnode_of_data dn;
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	f2fs_lock_op(sbi);
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		f2fs_unlock_op(sbi);
		return PTR_ERR(ipage);
	}

	/*
	 * i_addr[0] is not used for inline data,
	 * so reserving a new block will not destroy the inline data
	 */
	set_new_dnode(&dn, inode, ipage, ipage, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err) {
		f2fs_put_page(ipage, 1);
		f2fs_unlock_op(sbi);
		return err;
	}

	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap(page);
	SetPageUptodate(page);

	/* write the data page back to keep the data consistent */
	set_page_writeback(page);
	write_data_page(page, &dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, &dn);
	f2fs_wait_on_page_writeback(page, DATA, true);

	/* clear inline data and flag after data writeback */
	zero_user_segment(ipage, INLINE_DATA_OFFSET,
			INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
	stat_dec_inline_inode(inode);

	sync_inode_page(&dn);
	f2fs_put_page(ipage, 1);
	f2fs_unlock_op(sbi);

	return err;
}

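/*
 * Convert only when needed: the inode must currently hold inline data and
 * the pending operation would push its size beyond MAX_INLINE_DATA.
 */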
int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size)
{
	struct page *page;
	int err;

	if (!f2fs_has_inline_data(inode))
		return 0;
	else if (to_size <= MAX_INLINE_DATA)
		return 0;

	page = grab_cache_page_write_begin(inode->i_mapping, 0, AOP_FLAG_NOFS);
	if (!page)
		return -ENOMEM;

	err = __f2fs_convert_inline_data(inode, page);
	f2fs_put_page(page, 1);
	return err;
}

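/*
 * Write @size bytes from @page into the inline area of the inode block.
 * If the inode is not flagged inline yet, its first data block is
 * released and FI_INLINE_DATA is set, switching the inode to inline form.
 */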
int f2fs_write_inline_data(struct inode *inode,
			   struct page *page, unsigned size)
{
	void *src_addr, *dst_addr;
	struct page *ipage;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;
	ipage = dn.inode_page;

	zero_user_segment(ipage, INLINE_DATA_OFFSET,
			INLINE_DATA_OFFSET + MAX_INLINE_DATA);
	src_addr = kmap(page);
	dst_addr = inline_data_addr(ipage);
	memcpy(dst_addr, src_addr, size);
	kunmap(page);

	/* Release the first data block if it is allocated */
	if (!f2fs_has_inline_data(inode)) {
		truncate_data_blocks_range(&dn, 1);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		stat_inc_inline_inode(inode);
	}

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);

	return 0;
}

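/*
 * Roll-forward recovery hook for inline data.  @npage is the logged node
 * page found during recovery; the policy below decides whether to copy
 * its inline data into the inode, drop stale inline data, or fall back
 * to normal data block recovery.  Returns -1 once the inline data from
 * @npage has been recovered here, 0 otherwise.
 */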
int recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return -1;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(IS_ERR(ipage));
		zero_user_segment(ipage, INLINE_DATA_OFFSET,
				INLINE_DATA_OFFSET + MAX_INLINE_DATA);
		clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0);
		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		goto process_inline;
	}
	return 0;
}