Commit | Line | Data |
---|---|---|
0a8165d7 | 1 | /* |
eb47b800 JK |
2 | * fs/f2fs/data.c |
3 | * | |
4 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | |
5 | * http://www.samsung.com/ | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | */ | |
11 | #include <linux/fs.h> | |
12 | #include <linux/f2fs_fs.h> | |
13 | #include <linux/buffer_head.h> | |
14 | #include <linux/mpage.h> | |
15 | #include <linux/writeback.h> | |
16 | #include <linux/backing-dev.h> | |
17 | #include <linux/blkdev.h> | |
18 | #include <linux/bio.h> | |
690e4a3e | 19 | #include <linux/prefetch.h> |
e2e40f2c | 20 | #include <linux/uio.h> |
f1e88660 | 21 | #include <linux/cleancache.h> |
eb47b800 JK |
22 | |
23 | #include "f2fs.h" | |
24 | #include "node.h" | |
25 | #include "segment.h" | |
db9f7c1a | 26 | #include "trace.h" |
848753aa | 27 | #include <trace/events/f2fs.h> |
eb47b800 | 28 | |
429511cd CY |
29 | static struct kmem_cache *extent_tree_slab; |
30 | static struct kmem_cache *extent_node_slab; | |
31 | ||
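/*
 * Read completion callback: encrypted bios are passed to the crypto
 * workqueue for decryption on success; otherwise every page is marked
 * uptodate (or flagged with an error) and unlocked, and the bio is freed.
 */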
93dfe2ac JK |
32 | static void f2fs_read_end_io(struct bio *bio, int err) |
33 | { | |
f568849e LT |
34 | struct bio_vec *bvec; |
35 | int i; | |
93dfe2ac | 36 | |
4375a336 JK |
37 | if (f2fs_bio_encrypted(bio)) { |
38 | if (err) { | |
39 | f2fs_release_crypto_ctx(bio->bi_private); | |
40 | } else { | |
41 | f2fs_end_io_crypto_work(bio->bi_private, bio); | |
42 | return; | |
43 | } | |
44 | } | |
45 | ||
12377024 CY |
46 | bio_for_each_segment_all(bvec, bio, i) { |
47 | struct page *page = bvec->bv_page; | |
f1e88660 JK |
48 | |
49 | if (!err) { | |
50 | SetPageUptodate(page); | |
51 | } else { | |
52 | ClearPageUptodate(page); | |
53 | SetPageError(page); | |
54 | } | |
55 | unlock_page(page); | |
56 | } | |
f1e88660 JK |
57 | bio_put(bio); |
58 | } | |
59 | ||
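/*
 * Write completion callback: end writeback on each page, stop
 * checkpointing on I/O errors, and wake up checkpoint waiters once the
 * last F2FS_WRITEBACK page has completed.
 */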
93dfe2ac JK |
60 | static void f2fs_write_end_io(struct bio *bio, int err) |
61 | { | |
1b1f559f | 62 | struct f2fs_sb_info *sbi = bio->bi_private; |
f568849e LT |
63 | struct bio_vec *bvec; |
64 | int i; | |
93dfe2ac | 65 | |
f568849e | 66 | bio_for_each_segment_all(bvec, bio, i) { |
93dfe2ac JK |
67 | struct page *page = bvec->bv_page; |
68 | ||
4375a336 JK |
69 | f2fs_restore_and_release_control_page(&page); |
70 | ||
f568849e | 71 | if (unlikely(err)) { |
cf779cab | 72 | set_page_dirty(page); |
93dfe2ac | 73 | set_bit(AS_EIO, &page->mapping->flags); |
744602cf | 74 | f2fs_stop_checkpoint(sbi); |
93dfe2ac JK |
75 | } |
76 | end_page_writeback(page); | |
77 | dec_page_count(sbi, F2FS_WRITEBACK); | |
f568849e | 78 | } |
93dfe2ac | 79 | |
93dfe2ac JK |
80 | if (!get_pages(sbi, F2FS_WRITEBACK) && |
81 | !list_empty(&sbi->cp_wait.task_list)) | |
82 | wake_up(&sbi->cp_wait); | |
83 | ||
84 | bio_put(bio); | |
85 | } | |
86 | ||
940a6d34 GZ |
87 | /* |
88 | * Low-level block read/write IO operations. | |
89 | */ | |
90 | static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, | |
91 | int npages, bool is_read) | |
92 | { | |
93 | struct bio *bio; | |
94 | ||
95 | /* No failure on bio allocation */ | |
96 | bio = bio_alloc(GFP_NOIO, npages); | |
97 | ||
98 | bio->bi_bdev = sbi->sb->s_bdev; | |
55cf9cb6 | 99 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); |
940a6d34 | 100 | bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io; |
12377024 | 101 | bio->bi_private = is_read ? NULL : sbi; |
940a6d34 GZ |
102 | |
103 | return bio; | |
104 | } | |
105 | ||
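/* Submit the bio merged so far for this log, if any, and clear io->bio. */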
458e6197 | 106 | static void __submit_merged_bio(struct f2fs_bio_info *io) |
93dfe2ac | 107 | { |
458e6197 | 108 | struct f2fs_io_info *fio = &io->fio; |
93dfe2ac JK |
109 | |
110 | if (!io->bio) | |
111 | return; | |
112 | ||
6a8f8ca5 | 113 | if (is_read_io(fio->rw)) |
2ace38e0 | 114 | trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); |
6a8f8ca5 | 115 | else |
2ace38e0 | 116 | trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); |
940a6d34 | 117 | |
6a8f8ca5 | 118 | submit_bio(fio->rw, io->bio); |
93dfe2ac JK |
119 | io->bio = NULL; |
120 | } | |
121 | ||
122 | void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, | |
458e6197 | 123 | enum page_type type, int rw) |
93dfe2ac JK |
124 | { |
125 | enum page_type btype = PAGE_TYPE_OF_BIO(type); | |
126 | struct f2fs_bio_info *io; | |
127 | ||
128 | io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype]; | |
129 | ||
df0f8dc0 | 130 | down_write(&io->io_rwsem); |
458e6197 JK |
131 | |
132 | /* change META to META_FLUSH in the checkpoint procedure */ | |
133 | if (type >= META_FLUSH) { | |
134 | io->fio.type = META_FLUSH; | |
0f7b2abd JK |
135 | if (test_opt(sbi, NOBARRIER)) |
136 | io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO; | |
137 | else | |
138 | io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; | |
458e6197 JK |
139 | } |
140 | __submit_merged_bio(io); | |
df0f8dc0 | 141 | up_write(&io->io_rwsem); |
93dfe2ac JK |
142 | } |
143 | ||
144 | /* | |
145 | * Fill the locked page with data located in the block address. | |
146 | * Return unlocked page. | |
147 | */ | |
05ca3632 | 148 | int f2fs_submit_page_bio(struct f2fs_io_info *fio) |
93dfe2ac | 149 | { |
93dfe2ac | 150 | struct bio *bio; |
4375a336 | 151 | struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page; |
93dfe2ac | 152 | |
2ace38e0 | 153 | trace_f2fs_submit_page_bio(page, fio); |
05ca3632 | 154 | f2fs_trace_ios(fio, 0); |
93dfe2ac JK |
155 | |
156 | /* Allocate a new bio */ | |
05ca3632 | 157 | bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw)); |
93dfe2ac JK |
158 | |
159 | if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { | |
160 | bio_put(bio); | |
161 | f2fs_put_page(page, 1); | |
162 | return -EFAULT; | |
163 | } | |
164 | ||
cf04e8eb | 165 | submit_bio(fio->rw, bio); |
93dfe2ac JK |
166 | return 0; |
167 | } | |
168 | ||
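/*
 * Merged I/O path: append the page to the in-flight bio for this page
 * type, submitting the pending bio first whenever the new block is not
 * contiguous with it or the I/O attributes differ.
 */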
05ca3632 | 169 | void f2fs_submit_page_mbio(struct f2fs_io_info *fio) |
93dfe2ac | 170 | { |
05ca3632 | 171 | struct f2fs_sb_info *sbi = fio->sbi; |
458e6197 | 172 | enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); |
93dfe2ac | 173 | struct f2fs_bio_info *io; |
940a6d34 | 174 | bool is_read = is_read_io(fio->rw); |
4375a336 | 175 | struct page *bio_page; |
93dfe2ac | 176 | |
940a6d34 | 177 | io = is_read ? &sbi->read_io : &sbi->write_io[btype]; |
93dfe2ac | 178 | |
cf04e8eb | 179 | verify_block_addr(sbi, fio->blk_addr); |
93dfe2ac | 180 | |
df0f8dc0 | 181 | down_write(&io->io_rwsem); |
93dfe2ac | 182 | |
940a6d34 | 183 | if (!is_read) |
93dfe2ac JK |
184 | inc_page_count(sbi, F2FS_WRITEBACK); |
185 | ||
cf04e8eb | 186 | if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 || |
458e6197 JK |
187 | io->fio.rw != fio->rw)) |
188 | __submit_merged_bio(io); | |
93dfe2ac JK |
189 | alloc_new: |
190 | if (io->bio == NULL) { | |
90a893c7 | 191 | int bio_blocks = MAX_BIO_BLOCKS(sbi); |
940a6d34 | 192 | |
cf04e8eb | 193 | io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read); |
458e6197 | 194 | io->fio = *fio; |
93dfe2ac JK |
195 | } |
196 | ||
4375a336 JK |
197 | bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; |
198 | ||
199 | if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < | |
93dfe2ac | 200 | PAGE_CACHE_SIZE) { |
458e6197 | 201 | __submit_merged_bio(io); |
93dfe2ac JK |
202 | goto alloc_new; |
203 | } | |
204 | ||
cf04e8eb | 205 | io->last_block_in_bio = fio->blk_addr; |
05ca3632 | 206 | f2fs_trace_ios(fio, 0); |
93dfe2ac | 207 | |
df0f8dc0 | 208 | up_write(&io->io_rwsem); |
05ca3632 | 209 | trace_f2fs_submit_page_mbio(fio->page, fio); |
93dfe2ac JK |
210 | } |
211 | ||
0a8165d7 | 212 | /* |
eb47b800 JK |
213 | * Lock ordering for the change of data block address: |
214 | * ->data_page | |
215 | * ->node_page | |
216 | * update block addresses in the node page | |
217 | */ | |
216a620a | 218 | void set_data_blkaddr(struct dnode_of_data *dn) |
eb47b800 JK |
219 | { |
220 | struct f2fs_node *rn; | |
221 | __le32 *addr_array; | |
222 | struct page *node_page = dn->node_page; | |
223 | unsigned int ofs_in_node = dn->ofs_in_node; | |
224 | ||
5514f0aa | 225 | f2fs_wait_on_page_writeback(node_page, NODE); |
eb47b800 | 226 | |
45590710 | 227 | rn = F2FS_NODE(node_page); |
eb47b800 JK |
228 | |
229 | /* Get physical address of data block */ | |
230 | addr_array = blkaddr_in_node(rn); | |
e1509cf2 | 231 | addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr); |
eb47b800 JK |
232 | set_page_dirty(node_page); |
233 | } | |
234 | ||
235 | int reserve_new_block(struct dnode_of_data *dn) | |
236 | { | |
4081363f | 237 | struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); |
eb47b800 | 238 | |
6bacf52f | 239 | if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) |
eb47b800 | 240 | return -EPERM; |
cfb271d4 | 241 | if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) |
eb47b800 JK |
242 | return -ENOSPC; |
243 | ||
c01e2853 NJ |
244 | trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); |
245 | ||
eb47b800 | 246 | dn->data_blkaddr = NEW_ADDR; |
216a620a | 247 | set_data_blkaddr(dn); |
a18ff063 | 248 | mark_inode_dirty(dn->inode); |
eb47b800 JK |
249 | sync_inode_page(dn); |
250 | return 0; | |
251 | } | |
252 | ||
b600965c HL |
253 | int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) |
254 | { | |
255 | bool need_put = dn->inode_page ? false : true; | |
256 | int err; | |
257 | ||
258 | err = get_dnode_of_data(dn, index, ALLOC_NODE); | |
259 | if (err) | |
260 | return err; | |
a8865372 | 261 | |
b600965c HL |
262 | if (dn->data_blkaddr == NULL_ADDR) |
263 | err = reserve_new_block(dn); | |
a8865372 | 264 | if (err || need_put) |
b600965c HL |
265 | f2fs_put_dnode(dn); |
266 | return err; | |
267 | } | |
268 | ||
429511cd CY |
269 | static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi, |
270 | struct extent_tree *et, struct extent_info *ei, | |
271 | struct rb_node *parent, struct rb_node **p) | |
272 | { | |
273 | struct extent_node *en; | |
274 | ||
275 | en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC); | |
276 | if (!en) | |
277 | return NULL; | |
278 | ||
279 | en->ei = *ei; | |
280 | INIT_LIST_HEAD(&en->list); | |
281 | ||
282 | rb_link_node(&en->rb_node, parent, p); | |
283 | rb_insert_color(&en->rb_node, &et->root); | |
284 | et->count++; | |
285 | atomic_inc(&sbi->total_ext_node); | |
286 | return en; | |
287 | } | |
288 | ||
289 | static void __detach_extent_node(struct f2fs_sb_info *sbi, | |
290 | struct extent_tree *et, struct extent_node *en) | |
291 | { | |
292 | rb_erase(&en->rb_node, &et->root); | |
293 | et->count--; | |
294 | atomic_dec(&sbi->total_ext_node); | |
62c8af65 CY |
295 | |
296 | if (et->cached_en == en) | |
297 | et->cached_en = NULL; | |
429511cd CY |
298 | } |
299 | ||
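/*
 * Find the in-memory extent tree of @inode, allocating and initializing
 * one on first use, and take a reference on it.
 */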
93dfc526 CY |
300 | static struct extent_tree *__grab_extent_tree(struct inode *inode) |
301 | { | |
302 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
303 | struct extent_tree *et; | |
304 | nid_t ino = inode->i_ino; | |
305 | ||
306 | down_write(&sbi->extent_tree_lock); | |
307 | et = radix_tree_lookup(&sbi->extent_tree_root, ino); | |
308 | if (!et) { | |
309 | et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS); | |
310 | f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et); | |
311 | memset(et, 0, sizeof(struct extent_tree)); | |
312 | et->ino = ino; | |
313 | et->root = RB_ROOT; | |
314 | et->cached_en = NULL; | |
315 | rwlock_init(&et->lock); | |
316 | atomic_set(&et->refcount, 0); | |
317 | et->count = 0; | |
318 | sbi->total_ext_tree++; | |
319 | } | |
320 | atomic_inc(&et->refcount); | |
321 | up_write(&sbi->extent_tree_lock); | |
322 | ||
3e72f721 JK |
323 | /* never dies until evict_inode */
324 | F2FS_I(inode)->extent_tree = et; | |
325 | ||
93dfc526 CY |
326 | return et; |
327 | } | |
328 | ||
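/*
 * Find the extent node covering @fofs, checking the per-tree cached node
 * before walking the rb-tree. Callers hold et->lock.
 */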
429511cd CY |
329 | static struct extent_node *__lookup_extent_tree(struct extent_tree *et, |
330 | unsigned int fofs) | |
331 | { | |
332 | struct rb_node *node = et->root.rb_node; | |
333 | struct extent_node *en; | |
334 | ||
62c8af65 CY |
335 | if (et->cached_en) { |
336 | struct extent_info *cei = &et->cached_en->ei; | |
337 | ||
338 | if (cei->fofs <= fofs && cei->fofs + cei->len > fofs) | |
339 | return et->cached_en; | |
340 | } | |
341 | ||
429511cd CY |
342 | while (node) { |
343 | en = rb_entry(node, struct extent_node, rb_node); | |
344 | ||
244f4fc1 | 345 | if (fofs < en->ei.fofs) |
429511cd | 346 | node = node->rb_left; |
244f4fc1 | 347 | else if (fofs >= en->ei.fofs + en->ei.len) |
429511cd | 348 | node = node->rb_right; |
244f4fc1 | 349 | else |
429511cd CY |
350 | return en; |
351 | } | |
352 | return NULL; | |
353 | } | |
354 | ||
355 | static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi, | |
356 | struct extent_tree *et, struct extent_node *en) | |
357 | { | |
358 | struct extent_node *prev; | |
359 | struct rb_node *node; | |
360 | ||
361 | node = rb_prev(&en->rb_node); | |
362 | if (!node) | |
363 | return NULL; | |
364 | ||
365 | prev = rb_entry(node, struct extent_node, rb_node); | |
366 | if (__is_back_mergeable(&en->ei, &prev->ei)) { | |
367 | en->ei.fofs = prev->ei.fofs; | |
368 | en->ei.blk = prev->ei.blk; | |
369 | en->ei.len += prev->ei.len; | |
370 | __detach_extent_node(sbi, et, prev); | |
371 | return prev; | |
372 | } | |
373 | return NULL; | |
374 | } | |
375 | ||
376 | static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi, | |
377 | struct extent_tree *et, struct extent_node *en) | |
378 | { | |
379 | struct extent_node *next; | |
380 | struct rb_node *node; | |
381 | ||
382 | node = rb_next(&en->rb_node); | |
383 | if (!node) | |
384 | return NULL; | |
385 | ||
386 | next = rb_entry(node, struct extent_node, rb_node); | |
387 | if (__is_front_mergeable(&en->ei, &next->ei)) { | |
388 | en->ei.len += next->ei.len; | |
389 | __detach_extent_node(sbi, et, next); | |
390 | return next; | |
391 | } | |
392 | return NULL; | |
393 | } | |
394 | ||
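/*
 * Insert @ei into the extent rb-tree, merging it with a neighbouring
 * extent when possible; a node made redundant by merging is returned
 * through @den so the caller can free it.
 */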
395 | static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, | |
396 | struct extent_tree *et, struct extent_info *ei, | |
397 | struct extent_node **den) | |
398 | { | |
399 | struct rb_node **p = &et->root.rb_node; | |
400 | struct rb_node *parent = NULL; | |
401 | struct extent_node *en; | |
402 | ||
403 | while (*p) { | |
404 | parent = *p; | |
405 | en = rb_entry(parent, struct extent_node, rb_node); | |
406 | ||
407 | if (ei->fofs < en->ei.fofs) { | |
408 | if (__is_front_mergeable(ei, &en->ei)) { | |
409 | f2fs_bug_on(sbi, !den); | |
410 | en->ei.fofs = ei->fofs; | |
411 | en->ei.blk = ei->blk; | |
412 | en->ei.len += ei->len; | |
413 | *den = __try_back_merge(sbi, et, en); | |
3e72f721 | 414 | goto update_out; |
429511cd CY |
415 | } |
416 | p = &(*p)->rb_left; | |
417 | } else if (ei->fofs >= en->ei.fofs + en->ei.len) { | |
418 | if (__is_back_mergeable(ei, &en->ei)) { | |
419 | f2fs_bug_on(sbi, !den); | |
420 | en->ei.len += ei->len; | |
421 | *den = __try_front_merge(sbi, et, en); | |
3e72f721 | 422 | goto update_out; |
429511cd CY |
423 | } |
424 | p = &(*p)->rb_right; | |
425 | } else { | |
426 | f2fs_bug_on(sbi, 1); | |
427 | } | |
428 | } | |
429 | ||
3e72f721 JK |
430 | en = __attach_extent_node(sbi, et, ei, parent, p); |
431 | if (!en) | |
432 | return NULL; | |
433 | update_out: | |
434 | if (en->ei.len > et->largest.len) | |
435 | et->largest = en->ei; | |
436 | et->cached_en = en; | |
437 | return en; | |
429511cd CY |
438 | } |
439 | ||
440 | static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi, | |
441 | struct extent_tree *et, bool free_all) | |
442 | { | |
443 | struct rb_node *node, *next; | |
444 | struct extent_node *en; | |
445 | unsigned int count = et->count; | |
446 | ||
447 | node = rb_first(&et->root); | |
448 | while (node) { | |
449 | next = rb_next(node); | |
450 | en = rb_entry(node, struct extent_node, rb_node); | |
451 | ||
452 | if (free_all) { | |
453 | spin_lock(&sbi->extent_lock); | |
454 | if (!list_empty(&en->list)) | |
455 | list_del_init(&en->list); | |
456 | spin_unlock(&sbi->extent_lock); | |
457 | } | |
458 | ||
459 | if (free_all || list_empty(&en->list)) { | |
460 | __detach_extent_node(sbi, et, en); | |
461 | kmem_cache_free(extent_node_slab, en); | |
462 | } | |
463 | node = next; | |
464 | } | |
465 | ||
466 | return count - et->count; | |
467 | } | |
468 | ||
3e72f721 JK |
469 | static void __drop_largest_extent(struct inode *inode, pgoff_t fofs) |
470 | { | |
471 | struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest; | |
472 | ||
473 | if (largest->fofs <= fofs && largest->fofs + largest->len > fofs) | |
474 | largest->len = 0; | |
475 | } | |
476 | ||
477 | void f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) | |
028a41e8 CY |
478 | { |
479 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
480 | struct extent_tree *et; | |
481 | struct extent_node *en; | |
482 | struct extent_info ei; | |
483 | ||
3e72f721 | 484 | if (!f2fs_may_extent_tree(inode)) |
028a41e8 CY |
485 | return; |
486 | ||
487 | et = __grab_extent_tree(inode); | |
488 | ||
3e72f721 JK |
489 | if (!i_ext || le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN) |
490 | return; | |
028a41e8 CY |
491 | |
492 | set_extent_info(&ei, le32_to_cpu(i_ext->fofs), | |
493 | le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len)); | |
494 | ||
3e72f721 JK |
495 | write_lock(&et->lock); |
496 | if (et->count) | |
497 | goto out; | |
498 | ||
028a41e8 CY |
499 | en = __insert_extent_tree(sbi, et, &ei, NULL); |
500 | if (en) { | |
028a41e8 CY |
501 | spin_lock(&sbi->extent_lock); |
502 | list_add_tail(&en->list, &sbi->extent_list); | |
503 | spin_unlock(&sbi->extent_lock); | |
504 | } | |
505 | out: | |
506 | write_unlock(&et->lock); | |
028a41e8 CY |
507 | } |
508 | ||
429511cd CY |
509 | static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, |
510 | struct extent_info *ei) | |
511 | { | |
512 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
3e72f721 | 513 | struct extent_tree *et = F2FS_I(inode)->extent_tree; |
429511cd CY |
514 | struct extent_node *en; |
515 | ||
3e72f721 | 516 | f2fs_bug_on(sbi, !et); |
1ec4610c | 517 | |
3e72f721 | 518 | trace_f2fs_lookup_extent_tree_start(inode, pgofs); |
429511cd CY |
519 | |
520 | read_lock(&et->lock); | |
521 | en = __lookup_extent_tree(et, pgofs); | |
522 | if (en) { | |
523 | *ei = en->ei; | |
524 | spin_lock(&sbi->extent_lock); | |
525 | if (!list_empty(&en->list)) | |
526 | list_move_tail(&en->list, &sbi->extent_list); | |
244f4fc1 | 527 | et->cached_en = en; |
429511cd CY |
528 | spin_unlock(&sbi->extent_lock); |
529 | stat_inc_read_hit(sbi->sb); | |
530 | } | |
531 | stat_inc_total_hit(sbi->sb); | |
532 | read_unlock(&et->lock); | |
533 | ||
1ec4610c | 534 | trace_f2fs_lookup_extent_tree_end(inode, pgofs, en); |
429511cd CY |
535 | return en ? true : false; |
536 | } | |
537 | ||
3e72f721 JK |
538 | /* return true if the on-disk extent should be updated */
539 | static bool f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs, | |
429511cd CY |
540 | block_t blkaddr) |
541 | { | |
542 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
3e72f721 | 543 | struct extent_tree *et = F2FS_I(inode)->extent_tree; |
429511cd CY |
544 | struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL; |
545 | struct extent_node *den = NULL; | |
3e72f721 | 546 | struct extent_info ei, dei, prev; |
429511cd CY |
547 | unsigned int endofs; |
548 | ||
3e72f721 JK |
549 | if (!et) |
550 | return false; | |
1ec4610c | 551 | |
3e72f721 | 552 | trace_f2fs_update_extent_tree(inode, fofs, blkaddr); |
429511cd CY |
553 | |
554 | write_lock(&et->lock); | |
555 | ||
3e72f721 JK |
556 | if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) { |
557 | write_unlock(&et->lock); | |
558 | return false; | |
559 | } | |
560 | ||
561 | prev = et->largest; | |
562 | dei.len = 0; | |
563 | ||
564 | /* we do not guarantee that the largest extent is cached all the time */ | |
565 | __drop_largest_extent(inode, fofs); | |
566 | ||
429511cd CY |
567 | /* 1. lookup and remove existing extent info in cache */ |
568 | en = __lookup_extent_tree(et, fofs); | |
569 | if (!en) | |
570 | goto update_extent; | |
571 | ||
572 | dei = en->ei; | |
573 | __detach_extent_node(sbi, et, en); | |
574 | ||
575 | /* 2. if extent can be split more, split and insert the left part */ | |
576 | if (dei.len > 1) { | |
577 | /* insert left part of split extent into cache */ | |
578 | if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) { | |
579 | set_extent_info(&ei, dei.fofs, dei.blk, | |
580 | fofs - dei.fofs); | |
581 | en1 = __insert_extent_tree(sbi, et, &ei, NULL); | |
582 | } | |
583 | ||
584 | /* insert right part of split extent into cache */ | |
585 | endofs = dei.fofs + dei.len - 1; | |
586 | if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) { | |
587 | set_extent_info(&ei, fofs + 1, | |
7a2cb678 | 588 | fofs - dei.fofs + dei.blk + 1, endofs - fofs); |
429511cd CY |
589 | en2 = __insert_extent_tree(sbi, et, &ei, NULL); |
590 | } | |
591 | } | |
592 | ||
593 | update_extent: | |
594 | /* 3. update extent in extent cache */ | |
595 | if (blkaddr) { | |
596 | set_extent_info(&ei, fofs, blkaddr, 1); | |
597 | en3 = __insert_extent_tree(sbi, et, &ei, &den); | |
3e72f721 JK |
598 | |
599 | /* give up extent_cache, if split and small updates happen */ | |
600 | if (dei.len >= 1 && | |
601 | prev.len < F2FS_MIN_EXTENT_LEN && | |
602 | et->largest.len < F2FS_MIN_EXTENT_LEN) { | |
603 | et->largest.len = 0; | |
604 | set_inode_flag(F2FS_I(inode), FI_NO_EXTENT); | |
605 | } | |
429511cd CY |
606 | } |
607 | ||
608 | /* 4. update in global extent list */ | |
609 | spin_lock(&sbi->extent_lock); | |
610 | if (en && !list_empty(&en->list)) | |
611 | list_del(&en->list); | |
612 | /* | |
613 | * en1 and en2 were split from en; they become smaller and smaller
614 | * fragments after several splits. So if the length is smaller
615 | * than F2FS_MIN_EXTENT_LEN, we do not add them to the extent tree.
616 | */ | |
617 | if (en1) | |
618 | list_add_tail(&en1->list, &sbi->extent_list); | |
619 | if (en2) | |
620 | list_add_tail(&en2->list, &sbi->extent_list); | |
621 | if (en3) { | |
622 | if (list_empty(&en3->list)) | |
623 | list_add_tail(&en3->list, &sbi->extent_list); | |
624 | else | |
625 | list_move_tail(&en3->list, &sbi->extent_list); | |
626 | } | |
627 | if (den && !list_empty(&den->list)) | |
628 | list_del(&den->list); | |
629 | spin_unlock(&sbi->extent_lock); | |
630 | ||
631 | /* 5. release extent node */ | |
632 | if (en) | |
633 | kmem_cache_free(extent_node_slab, en); | |
634 | if (den) | |
635 | kmem_cache_free(extent_node_slab, den); | |
636 | ||
3e72f721 JK |
637 | if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) |
638 | __free_extent_tree(sbi, et, true); | |
0bdee482 | 639 | |
3e72f721 | 640 | write_unlock(&et->lock); |
0bdee482 | 641 | |
3e72f721 | 642 | return !__is_extent_same(&prev, &et->largest); |
0bdee482 CY |
643 | } |
644 | ||
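/*
 * Shrinker entry: detach up to @nr_shrink extent nodes from the global
 * LRU list, free the detached nodes, and reclaim any unreferenced, empty
 * extent trees. Returns the number of objects released.
 */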
554df79e | 645 | unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink) |
429511cd CY |
646 | { |
647 | struct extent_tree *treevec[EXT_TREE_VEC_SIZE]; | |
648 | struct extent_node *en, *tmp; | |
649 | unsigned long ino = F2FS_ROOT_INO(sbi); | |
3e72f721 | 650 | struct radix_tree_root *root = &sbi->extent_tree_root; |
429511cd | 651 | unsigned int found; |
1ec4610c | 652 | unsigned int node_cnt = 0, tree_cnt = 0; |
429511cd | 653 | |
1dcc336b | 654 | if (!test_opt(sbi, EXTENT_CACHE)) |
554df79e | 655 | return 0; |
429511cd CY |
656 | |
657 | spin_lock(&sbi->extent_lock); | |
658 | list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) { | |
659 | if (!nr_shrink--) | |
660 | break; | |
661 | list_del_init(&en->list); | |
662 | } | |
663 | spin_unlock(&sbi->extent_lock); | |
664 | ||
3e72f721 | 665 | if (!down_write_trylock(&sbi->extent_tree_lock)) |
554df79e JK |
666 | goto out; |
667 | ||
3e72f721 | 668 | while ((found = radix_tree_gang_lookup(root, |
429511cd CY |
669 | (void **)treevec, ino, EXT_TREE_VEC_SIZE))) { |
670 | unsigned i; | |
671 | ||
672 | ino = treevec[found - 1]->ino + 1; | |
673 | for (i = 0; i < found; i++) { | |
674 | struct extent_tree *et = treevec[i]; | |
675 | ||
429511cd | 676 | write_lock(&et->lock); |
1ec4610c | 677 | node_cnt += __free_extent_tree(sbi, et, false); |
429511cd | 678 | write_unlock(&et->lock); |
3e72f721 JK |
679 | if (!atomic_read(&et->refcount) && !et->count) { |
680 | radix_tree_delete(root, et->ino); | |
681 | kmem_cache_free(extent_tree_slab, et); | |
682 | sbi->total_ext_tree--; | |
683 | tree_cnt++; | |
684 | } | |
429511cd CY |
685 | } |
686 | } | |
687 | up_write(&sbi->extent_tree_lock); | |
554df79e | 688 | out: |
1ec4610c | 689 | trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt); |
554df79e JK |
690 | |
691 | return node_cnt + tree_cnt; | |
429511cd CY |
692 | } |
693 | ||
3e72f721 | 694 | unsigned int f2fs_destroy_extent_node(struct inode *inode) |
429511cd CY |
695 | { |
696 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
3e72f721 | 697 | struct extent_tree *et = F2FS_I(inode)->extent_tree; |
1ec4610c | 698 | unsigned int node_cnt = 0; |
429511cd | 699 | |
93dfc526 | 700 | if (!et) |
3e72f721 | 701 | return 0; |
429511cd | 702 | |
429511cd | 703 | write_lock(&et->lock); |
1ec4610c | 704 | node_cnt = __free_extent_tree(sbi, et, true); |
429511cd CY |
705 | write_unlock(&et->lock); |
706 | ||
3e72f721 JK |
707 | return node_cnt; |
708 | } | |
429511cd | 709 | |
3e72f721 JK |
710 | void f2fs_destroy_extent_tree(struct inode *inode) |
711 | { | |
712 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
713 | struct extent_tree *et = F2FS_I(inode)->extent_tree; | |
714 | unsigned int node_cnt = 0; | |
715 | ||
716 | if (!et) | |
717 | return; | |
718 | ||
719 | if (inode->i_nlink && !is_bad_inode(inode) && et->count) { | |
720 | atomic_dec(&et->refcount); | |
721 | return; | |
429511cd | 722 | } |
3e72f721 JK |
723 | |
724 | /* free all extent info belonging to this extent tree */
725 | node_cnt = f2fs_destroy_extent_node(inode); | |
726 | ||
727 | /* delete extent tree entry in radix tree */ | |
728 | down_write(&sbi->extent_tree_lock); | |
729 | atomic_dec(&et->refcount); | |
429511cd CY |
730 | f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count); |
731 | radix_tree_delete(&sbi->extent_tree_root, inode->i_ino); | |
732 | kmem_cache_free(extent_tree_slab, et); | |
733 | sbi->total_ext_tree--; | |
734 | up_write(&sbi->extent_tree_lock); | |
eb47b800 | 735 | |
3e72f721 | 736 | F2FS_I(inode)->extent_tree = NULL; |
028a41e8 | 737 | |
3e72f721 JK |
738 | trace_f2fs_destroy_extent_tree(inode, node_cnt); |
739 | return; | |
028a41e8 CY |
740 | } |
741 | ||
7e4dde79 CY |
742 | static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs, |
743 | struct extent_info *ei) | |
744 | { | |
3e72f721 | 745 | if (!f2fs_may_extent_tree(inode)) |
91c5d9bc CY |
746 | return false; |
747 | ||
3e72f721 | 748 | return f2fs_lookup_extent_tree(inode, pgofs, ei); |
7e4dde79 CY |
749 | } |
750 | ||
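/*
 * Record dn->data_blkaddr in the extent cache; the inode page is synced
 * when the largest on-disk extent has changed.
 */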
751 | void f2fs_update_extent_cache(struct dnode_of_data *dn) | |
752 | { | |
753 | struct f2fs_inode_info *fi = F2FS_I(dn->inode); | |
754 | pgoff_t fofs; | |
755 | ||
3e72f721 | 756 | if (!f2fs_may_extent_tree(dn->inode)) |
91c5d9bc CY |
757 | return; |
758 | ||
3e72f721 JK |
759 | f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR); |
760 | ||
7e4dde79 CY |
761 | fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + |
762 | dn->ofs_in_node; | |
763 | ||
3e72f721 | 764 | if (f2fs_update_extent_tree(dn->inode, fofs, dn->data_blkaddr)) |
c11abd1a | 765 | sync_inode_page(dn); |
eb47b800 JK |
766 | } |
767 | ||
43f3eae1 | 768 | struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw) |
eb47b800 | 769 | { |
eb47b800 JK |
770 | struct address_space *mapping = inode->i_mapping; |
771 | struct dnode_of_data dn; | |
772 | struct page *page; | |
cb3bc9ee | 773 | struct extent_info ei; |
eb47b800 | 774 | int err; |
cf04e8eb | 775 | struct f2fs_io_info fio = { |
05ca3632 | 776 | .sbi = F2FS_I_SB(inode), |
cf04e8eb | 777 | .type = DATA, |
43f3eae1 | 778 | .rw = rw, |
4375a336 | 779 | .encrypted_page = NULL, |
cf04e8eb | 780 | }; |
eb47b800 | 781 | |
4375a336 JK |
782 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) |
783 | return read_mapping_page(mapping, index, NULL); | |
784 | ||
9ac1349a | 785 | page = grab_cache_page(mapping, index); |
650495de JK |
786 | if (!page) |
787 | return ERR_PTR(-ENOMEM); | |
788 | ||
cb3bc9ee CY |
789 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { |
790 | dn.data_blkaddr = ei.blk + index - ei.fofs; | |
791 | goto got_it; | |
792 | } | |
793 | ||
eb47b800 | 794 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
266e97a8 | 795 | err = get_dnode_of_data(&dn, index, LOOKUP_NODE); |
650495de JK |
796 | if (err) { |
797 | f2fs_put_page(page, 1); | |
eb47b800 | 798 | return ERR_PTR(err); |
650495de | 799 | } |
eb47b800 JK |
800 | f2fs_put_dnode(&dn); |
801 | ||
6bacf52f | 802 | if (unlikely(dn.data_blkaddr == NULL_ADDR)) { |
650495de | 803 | f2fs_put_page(page, 1); |
eb47b800 | 804 | return ERR_PTR(-ENOENT); |
650495de | 805 | } |
cb3bc9ee | 806 | got_it: |
43f3eae1 JK |
807 | if (PageUptodate(page)) { |
808 | unlock_page(page); | |
eb47b800 | 809 | return page; |
43f3eae1 | 810 | } |
eb47b800 | 811 | |
d59ff4df JK |
812 | /* |
813 | * A new dentry page is allocated but not able to be written, since its | |
814 | * new inode page couldn't be allocated due to -ENOSPC. | |
815 | * In such a case, its blkaddr can remain NEW_ADDR.
816 | * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
817 | */ | |
818 | if (dn.data_blkaddr == NEW_ADDR) { | |
819 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | |
820 | SetPageUptodate(page); | |
43f3eae1 | 821 | unlock_page(page); |
d59ff4df JK |
822 | return page; |
823 | } | |
eb47b800 | 824 | |
cf04e8eb | 825 | fio.blk_addr = dn.data_blkaddr; |
05ca3632 JK |
826 | fio.page = page; |
827 | err = f2fs_submit_page_bio(&fio); | |
393ff91f | 828 | if (err) |
eb47b800 | 829 | return ERR_PTR(err); |
43f3eae1 JK |
830 | return page; |
831 | } | |
832 | ||
833 | struct page *find_data_page(struct inode *inode, pgoff_t index) | |
834 | { | |
835 | struct address_space *mapping = inode->i_mapping; | |
836 | struct page *page; | |
837 | ||
838 | page = find_get_page(mapping, index); | |
839 | if (page && PageUptodate(page)) | |
840 | return page; | |
841 | f2fs_put_page(page, 0); | |
842 | ||
843 | page = get_read_data_page(inode, index, READ_SYNC); | |
844 | if (IS_ERR(page)) | |
845 | return page; | |
846 | ||
847 | if (PageUptodate(page)) | |
848 | return page; | |
849 | ||
850 | wait_on_page_locked(page); | |
851 | if (unlikely(!PageUptodate(page))) { | |
852 | f2fs_put_page(page, 0); | |
853 | return ERR_PTR(-EIO); | |
854 | } | |
855 | return page; | |
856 | } | |
857 | ||
858 | /* | |
859 | * If it tries to access a hole, return an error. | |
860 | * Because the callers (functions in dir.c and GC) should be able to know
861 | * whether this page exists or not. | |
862 | */ | |
863 | struct page *get_lock_data_page(struct inode *inode, pgoff_t index) | |
864 | { | |
865 | struct address_space *mapping = inode->i_mapping; | |
866 | struct page *page; | |
867 | repeat: | |
868 | page = get_read_data_page(inode, index, READ_SYNC); | |
869 | if (IS_ERR(page)) | |
870 | return page; | |
393ff91f | 871 | |
43f3eae1 | 872 | /* wait for read completion */ |
393ff91f | 873 | lock_page(page); |
6bacf52f | 874 | if (unlikely(!PageUptodate(page))) { |
393ff91f JK |
875 | f2fs_put_page(page, 1); |
876 | return ERR_PTR(-EIO); | |
eb47b800 | 877 | } |
6bacf52f | 878 | if (unlikely(page->mapping != mapping)) { |
afcb7ca0 JK |
879 | f2fs_put_page(page, 1); |
880 | goto repeat; | |
eb47b800 JK |
881 | } |
882 | return page; | |
883 | } | |
884 | ||
0a8165d7 | 885 | /* |
eb47b800 JK |
886 | * Caller ensures that this data page is never allocated. |
887 | * A new zero-filled data page is allocated in the page cache. | |
39936837 | 888 | * |
4f4124d0 CY |
889 | * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and |
890 | * f2fs_unlock_op(). | |
a8865372 | 891 | * Note that ipage is set only by make_empty_dir.
eb47b800 | 892 | */ |
64aa7ed9 | 893 | struct page *get_new_data_page(struct inode *inode, |
a8865372 | 894 | struct page *ipage, pgoff_t index, bool new_i_size) |
eb47b800 | 895 | { |
eb47b800 JK |
896 | struct address_space *mapping = inode->i_mapping; |
897 | struct page *page; | |
898 | struct dnode_of_data dn; | |
899 | int err; | |
01f28610 JK |
900 | repeat: |
901 | page = grab_cache_page(mapping, index); | |
902 | if (!page) | |
903 | return ERR_PTR(-ENOMEM); | |
eb47b800 | 904 | |
a8865372 | 905 | set_new_dnode(&dn, inode, ipage, NULL, 0); |
b600965c | 906 | err = f2fs_reserve_block(&dn, index); |
01f28610 JK |
907 | if (err) { |
908 | f2fs_put_page(page, 1); | |
eb47b800 | 909 | return ERR_PTR(err); |
a8865372 | 910 | } |
01f28610 JK |
911 | if (!ipage) |
912 | f2fs_put_dnode(&dn); | |
eb47b800 JK |
913 | |
914 | if (PageUptodate(page)) | |
01f28610 | 915 | goto got_it; |
eb47b800 JK |
916 | |
917 | if (dn.data_blkaddr == NEW_ADDR) { | |
918 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | |
393ff91f | 919 | SetPageUptodate(page); |
eb47b800 | 920 | } else { |
4375a336 | 921 | f2fs_put_page(page, 1); |
a8865372 | 922 | |
4375a336 JK |
923 | page = get_read_data_page(inode, index, READ_SYNC); |
924 | if (IS_ERR(page)) | |
afcb7ca0 | 925 | goto repeat; |
4375a336 JK |
926 | |
927 | /* wait for read completion */ | |
928 | lock_page(page); | |
eb47b800 | 929 | } |
01f28610 | 930 | got_it: |
eb47b800 JK |
931 | if (new_i_size && |
932 | i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) { | |
933 | i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT)); | |
699489bb JK |
934 | /* Only the directory inode sets new_i_size */ |
935 | set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); | |
eb47b800 JK |
936 | } |
937 | return page; | |
938 | } | |
939 | ||
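/*
 * Allocate a new on-disk block for the position described by @dn, store
 * it in the node page, and extend i_size if the file grew.
 */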
bfad7c2d JK |
940 | static int __allocate_data_block(struct dnode_of_data *dn) |
941 | { | |
4081363f | 942 | struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); |
976e4c50 | 943 | struct f2fs_inode_info *fi = F2FS_I(dn->inode); |
bfad7c2d | 944 | struct f2fs_summary sum; |
bfad7c2d | 945 | struct node_info ni; |
38aa0889 | 946 | int seg = CURSEG_WARM_DATA; |
976e4c50 | 947 | pgoff_t fofs; |
bfad7c2d JK |
948 | |
949 | if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) | |
950 | return -EPERM; | |
df6136ef CY |
951 | |
952 | dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); | |
953 | if (dn->data_blkaddr == NEW_ADDR) | |
954 | goto alloc; | |
955 | ||
bfad7c2d JK |
956 | if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) |
957 | return -ENOSPC; | |
958 | ||
df6136ef | 959 | alloc: |
bfad7c2d JK |
960 | get_node_info(sbi, dn->nid, &ni); |
961 | set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); | |
962 | ||
38aa0889 JK |
963 | if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page) |
964 | seg = CURSEG_DIRECT_IO; | |
965 | ||
df6136ef CY |
966 | allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr, |
967 | &sum, seg); | |
216a620a | 968 | set_data_blkaddr(dn); |
bfad7c2d | 969 | |
976e4c50 JK |
970 | /* update i_size */ |
971 | fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + | |
972 | dn->ofs_in_node; | |
973 | if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT)) | |
974 | i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT)); | |
975 | ||
3e72f721 JK |
976 | /* direct IO doesn't use extent cache to maximize the performance */ |
977 | __drop_largest_extent(dn->inode, fofs); | |
978 | ||
bfad7c2d JK |
979 | return 0; |
980 | } | |
981 | ||
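/*
 * Preallocate on-disk blocks for the byte range [offset, offset + count),
 * one dnode page at a time.
 */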
59b802e5 JK |
982 | static void __allocate_data_blocks(struct inode *inode, loff_t offset, |
983 | size_t count) | |
984 | { | |
985 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
986 | struct dnode_of_data dn; | |
987 | u64 start = F2FS_BYTES_TO_BLK(offset); | |
988 | u64 len = F2FS_BYTES_TO_BLK(count); | |
989 | bool allocated; | |
990 | u64 end_offset; | |
991 | ||
992 | while (len) { | |
993 | f2fs_balance_fs(sbi); | |
994 | f2fs_lock_op(sbi); | |
995 | ||
996 | /* When reading holes, we need its node page */ | |
997 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
998 | if (get_dnode_of_data(&dn, start, ALLOC_NODE)) | |
999 | goto out; | |
1000 | ||
1001 | allocated = false; | |
1002 | end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); | |
1003 | ||
1004 | while (dn.ofs_in_node < end_offset && len) { | |
d6d4f1cb CY |
1005 | block_t blkaddr; |
1006 | ||
1007 | blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); | |
df6136ef | 1008 | if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) { |
59b802e5 JK |
1009 | if (__allocate_data_block(&dn)) |
1010 | goto sync_out; | |
1011 | allocated = true; | |
1012 | } | |
1013 | len--; | |
1014 | start++; | |
1015 | dn.ofs_in_node++; | |
1016 | } | |
1017 | ||
1018 | if (allocated) | |
1019 | sync_inode_page(&dn); | |
1020 | ||
1021 | f2fs_put_dnode(&dn); | |
1022 | f2fs_unlock_op(sbi); | |
1023 | } | |
1024 | return; | |
1025 | ||
1026 | sync_out: | |
1027 | if (allocated) | |
1028 | sync_inode_page(&dn); | |
1029 | f2fs_put_dnode(&dn); | |
1030 | out: | |
1031 | f2fs_unlock_op(sbi); | |
1032 | return; | |
1033 | } | |
1034 | ||
0a8165d7 | 1035 | /* |
003a3e1d JK |
1036 | * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
1037 | * f2fs_map_blocks structure. | |
4f4124d0 CY |
1038 | * If original data blocks are allocated, then give them to blockdev. |
1039 | * Otherwise, | |
1040 | * a. preallocate requested block addresses | |
1041 | * b. do not use extent cache for better performance | |
1042 | * c. give the block addresses to blockdev | |
eb47b800 | 1043 | */ |
003a3e1d JK |
1044 | static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, |
1045 | int create, bool fiemap) | |
eb47b800 | 1046 | { |
003a3e1d | 1047 | unsigned int maxblocks = map->m_len; |
eb47b800 | 1048 | struct dnode_of_data dn; |
bfad7c2d JK |
1049 | int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA; |
1050 | pgoff_t pgofs, end_offset; | |
1051 | int err = 0, ofs = 1; | |
a2e7d1bf | 1052 | struct extent_info ei; |
bfad7c2d | 1053 | bool allocated = false; |
eb47b800 | 1054 | |
003a3e1d JK |
1055 | map->m_len = 0; |
1056 | map->m_flags = 0; | |
1057 | ||
1058 | /* it only supports block size == page size */ | |
1059 | pgofs = (pgoff_t)map->m_lblk; | |
eb47b800 | 1060 | |
7e4dde79 | 1061 | if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) { |
003a3e1d JK |
1062 | map->m_pblk = ei.blk + pgofs - ei.fofs; |
1063 | map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); | |
1064 | map->m_flags = F2FS_MAP_MAPPED; | |
bfad7c2d | 1065 | goto out; |
a2e7d1bf | 1066 | } |
bfad7c2d | 1067 | |
59b802e5 | 1068 | if (create) |
4081363f | 1069 | f2fs_lock_op(F2FS_I_SB(inode)); |
eb47b800 JK |
1070 | |
1071 | /* When reading holes, we need its node page */ | |
1072 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
bfad7c2d | 1073 | err = get_dnode_of_data(&dn, pgofs, mode); |
1ec79083 | 1074 | if (err) { |
bfad7c2d JK |
1075 | if (err == -ENOENT) |
1076 | err = 0; | |
1077 | goto unlock_out; | |
848753aa | 1078 | } |
ccfb3000 | 1079 | if (dn.data_blkaddr == NEW_ADDR && !fiemap) |
1ec79083 | 1080 | goto put_out; |
eb47b800 | 1081 | |
bfad7c2d | 1082 | if (dn.data_blkaddr != NULL_ADDR) { |
003a3e1d JK |
1083 | map->m_flags = F2FS_MAP_MAPPED; |
1084 | map->m_pblk = dn.data_blkaddr; | |
7f63eb77 JK |
1085 | if (dn.data_blkaddr == NEW_ADDR) |
1086 | map->m_flags |= F2FS_MAP_UNWRITTEN; | |
bfad7c2d JK |
1087 | } else if (create) { |
1088 | err = __allocate_data_block(&dn); | |
1089 | if (err) | |
1090 | goto put_out; | |
1091 | allocated = true; | |
003a3e1d JK |
1092 | map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED; |
1093 | map->m_pblk = dn.data_blkaddr; | |
bfad7c2d JK |
1094 | } else { |
1095 | goto put_out; | |
1096 | } | |
1097 | ||
6403eb1f | 1098 | end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); |
003a3e1d | 1099 | map->m_len = 1; |
bfad7c2d JK |
1100 | dn.ofs_in_node++; |
1101 | pgofs++; | |
1102 | ||
1103 | get_next: | |
1104 | if (dn.ofs_in_node >= end_offset) { | |
1105 | if (allocated) | |
1106 | sync_inode_page(&dn); | |
1107 | allocated = false; | |
1108 | f2fs_put_dnode(&dn); | |
1109 | ||
1110 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
1111 | err = get_dnode_of_data(&dn, pgofs, mode); | |
1ec79083 | 1112 | if (err) { |
bfad7c2d JK |
1113 | if (err == -ENOENT) |
1114 | err = 0; | |
1115 | goto unlock_out; | |
1116 | } | |
ccfb3000 | 1117 | if (dn.data_blkaddr == NEW_ADDR && !fiemap) |
1ec79083 JK |
1118 | goto put_out; |
1119 | ||
6403eb1f | 1120 | end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); |
bfad7c2d | 1121 | } |
eb47b800 | 1122 | |
003a3e1d | 1123 | if (maxblocks > map->m_len) { |
bfad7c2d JK |
1124 | block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); |
1125 | if (blkaddr == NULL_ADDR && create) { | |
1126 | err = __allocate_data_block(&dn); | |
1127 | if (err) | |
1128 | goto sync_out; | |
1129 | allocated = true; | |
003a3e1d | 1130 | map->m_flags |= F2FS_MAP_NEW; |
bfad7c2d JK |
1131 | blkaddr = dn.data_blkaddr; |
1132 | } | |
e1c42045 | 1133 | /* Give more consecutive addresses for the readahead */ |
7f63eb77 JK |
1134 | if ((map->m_pblk != NEW_ADDR && |
1135 | blkaddr == (map->m_pblk + ofs)) || | |
1136 | (map->m_pblk == NEW_ADDR && | |
1137 | blkaddr == NEW_ADDR)) { | |
bfad7c2d JK |
1138 | ofs++; |
1139 | dn.ofs_in_node++; | |
1140 | pgofs++; | |
003a3e1d | 1141 | map->m_len++; |
bfad7c2d JK |
1142 | goto get_next; |
1143 | } | |
eb47b800 | 1144 | } |
bfad7c2d JK |
1145 | sync_out: |
1146 | if (allocated) | |
1147 | sync_inode_page(&dn); | |
1148 | put_out: | |
eb47b800 | 1149 | f2fs_put_dnode(&dn); |
bfad7c2d JK |
1150 | unlock_out: |
1151 | if (create) | |
4081363f | 1152 | f2fs_unlock_op(F2FS_I_SB(inode)); |
bfad7c2d | 1153 | out: |
003a3e1d | 1154 | trace_f2fs_map_blocks(inode, map, err); |
bfad7c2d | 1155 | return err; |
eb47b800 JK |
1156 | } |
1157 | ||
003a3e1d JK |
1158 | static int __get_data_block(struct inode *inode, sector_t iblock, |
1159 | struct buffer_head *bh, int create, bool fiemap) | |
1160 | { | |
1161 | struct f2fs_map_blocks map; | |
1162 | int ret; | |
1163 | ||
1164 | map.m_lblk = iblock; | |
1165 | map.m_len = bh->b_size >> inode->i_blkbits; | |
1166 | ||
1167 | ret = f2fs_map_blocks(inode, &map, create, fiemap); | |
1168 | if (!ret) { | |
1169 | map_bh(bh, inode->i_sb, map.m_pblk); | |
1170 | bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; | |
1171 | bh->b_size = map.m_len << inode->i_blkbits; | |
1172 | } | |
1173 | return ret; | |
1174 | } | |
1175 | ||
ccfb3000 JK |
1176 | static int get_data_block(struct inode *inode, sector_t iblock, |
1177 | struct buffer_head *bh_result, int create) | |
1178 | { | |
1179 | return __get_data_block(inode, iblock, bh_result, create, false); | |
1180 | } | |
1181 | ||
1182 | static int get_data_block_fiemap(struct inode *inode, sector_t iblock, | |
1183 | struct buffer_head *bh_result, int create) | |
1184 | { | |
1185 | return __get_data_block(inode, iblock, bh_result, create, true); | |
1186 | } | |
1187 | ||
7f63eb77 JK |
1188 | static inline sector_t logical_to_blk(struct inode *inode, loff_t offset) |
1189 | { | |
1190 | return (offset >> inode->i_blkbits); | |
1191 | } | |
1192 | ||
1193 | static inline loff_t blk_to_logical(struct inode *inode, sector_t blk) | |
1194 | { | |
1195 | return (blk << inode->i_blkbits); | |
1196 | } | |
1197 | ||
9ab70134 JK |
1198 | int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
1199 | u64 start, u64 len) | |
1200 | { | |
7f63eb77 JK |
1201 | struct buffer_head map_bh; |
1202 | sector_t start_blk, last_blk; | |
1203 | loff_t isize = i_size_read(inode); | |
1204 | u64 logical = 0, phys = 0, size = 0; | |
1205 | u32 flags = 0; | |
1206 | bool past_eof = false, whole_file = false; | |
1207 | int ret = 0; | |
1208 | ||
1209 | ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); | |
1210 | if (ret) | |
1211 | return ret; | |
1212 | ||
1213 | mutex_lock(&inode->i_mutex); | |
1214 | ||
1215 | if (len >= isize) { | |
1216 | whole_file = true; | |
1217 | len = isize; | |
1218 | } | |
1219 | ||
1220 | if (logical_to_blk(inode, len) == 0) | |
1221 | len = blk_to_logical(inode, 1); | |
1222 | ||
1223 | start_blk = logical_to_blk(inode, start); | |
1224 | last_blk = logical_to_blk(inode, start + len - 1); | |
1225 | next: | |
1226 | memset(&map_bh, 0, sizeof(struct buffer_head)); | |
1227 | map_bh.b_size = len; | |
1228 | ||
1229 | ret = get_data_block_fiemap(inode, start_blk, &map_bh, 0); | |
1230 | if (ret) | |
1231 | goto out; | |
1232 | ||
1233 | /* HOLE */ | |
1234 | if (!buffer_mapped(&map_bh)) { | |
1235 | start_blk++; | |
1236 | ||
1237 | if (!past_eof && blk_to_logical(inode, start_blk) >= isize) | |
1238 | past_eof = 1; | |
1239 | ||
1240 | if (past_eof && size) { | |
1241 | flags |= FIEMAP_EXTENT_LAST; | |
1242 | ret = fiemap_fill_next_extent(fieinfo, logical, | |
1243 | phys, size, flags); | |
1244 | } else if (size) { | |
1245 | ret = fiemap_fill_next_extent(fieinfo, logical, | |
1246 | phys, size, flags); | |
1247 | size = 0; | |
1248 | } | |
1249 | ||
1250 | /* if we have holes up to/past EOF then we're done */ | |
1251 | if (start_blk > last_blk || past_eof || ret) | |
1252 | goto out; | |
1253 | } else { | |
1254 | if (start_blk > last_blk && !whole_file) { | |
1255 | ret = fiemap_fill_next_extent(fieinfo, logical, | |
1256 | phys, size, flags); | |
1257 | goto out; | |
1258 | } | |
1259 | ||
1260 | /* | |
1261 | * if size != 0 then we know we already have an extent | |
1262 | * to add, so add it. | |
1263 | */ | |
1264 | if (size) { | |
1265 | ret = fiemap_fill_next_extent(fieinfo, logical, | |
1266 | phys, size, flags); | |
1267 | if (ret) | |
1268 | goto out; | |
1269 | } | |
1270 | ||
1271 | logical = blk_to_logical(inode, start_blk); | |
1272 | phys = blk_to_logical(inode, map_bh.b_blocknr); | |
1273 | size = map_bh.b_size; | |
1274 | flags = 0; | |
1275 | if (buffer_unwritten(&map_bh)) | |
1276 | flags = FIEMAP_EXTENT_UNWRITTEN; | |
1277 | ||
1278 | start_blk += logical_to_blk(inode, size); | |
1279 | ||
1280 | /* | |
1281 | * If we are past the EOF, then we need to make sure as | |
1282 | * soon as we find a hole that the last extent we found | |
1283 | * is marked with FIEMAP_EXTENT_LAST | |
1284 | */ | |
1285 | if (!past_eof && logical + size >= isize) | |
1286 | past_eof = true; | |
1287 | } | |
1288 | cond_resched(); | |
1289 | if (fatal_signal_pending(current)) | |
1290 | ret = -EINTR; | |
1291 | else | |
1292 | goto next; | |
1293 | out: | |
1294 | if (ret == 1) | |
1295 | ret = 0; | |
1296 | ||
1297 | mutex_unlock(&inode->i_mutex); | |
1298 | return ret; | |
9ab70134 JK |
1299 | } |
1300 | ||
f1e88660 JK |
1301 | /* |
1302 | * This function was originally taken from fs/mpage.c, and customized for f2fs. | |
1303 | * The major change is that block_size == page_size in f2fs by default.
1304 | */ | |
1305 | static int f2fs_mpage_readpages(struct address_space *mapping, | |
1306 | struct list_head *pages, struct page *page, | |
1307 | unsigned nr_pages) | |
1308 | { | |
1309 | struct bio *bio = NULL; | |
1310 | unsigned page_idx; | |
1311 | sector_t last_block_in_bio = 0; | |
1312 | struct inode *inode = mapping->host; | |
1313 | const unsigned blkbits = inode->i_blkbits; | |
1314 | const unsigned blocksize = 1 << blkbits; | |
1315 | sector_t block_in_file; | |
1316 | sector_t last_block; | |
1317 | sector_t last_block_in_file; | |
1318 | sector_t block_nr; | |
1319 | struct block_device *bdev = inode->i_sb->s_bdev; | |
1320 | struct f2fs_map_blocks map; | |
1321 | ||
1322 | map.m_pblk = 0; | |
1323 | map.m_lblk = 0; | |
1324 | map.m_len = 0; | |
1325 | map.m_flags = 0; | |
1326 | ||
1327 | for (page_idx = 0; nr_pages; page_idx++, nr_pages--) { | |
1328 | ||
1329 | prefetchw(&page->flags); | |
1330 | if (pages) { | |
1331 | page = list_entry(pages->prev, struct page, lru); | |
1332 | list_del(&page->lru); | |
1333 | if (add_to_page_cache_lru(page, mapping, | |
1334 | page->index, GFP_KERNEL)) | |
1335 | goto next_page; | |
1336 | } | |
1337 | ||
1338 | block_in_file = (sector_t)page->index; | |
1339 | last_block = block_in_file + nr_pages; | |
1340 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> | |
1341 | blkbits; | |
1342 | if (last_block > last_block_in_file) | |
1343 | last_block = last_block_in_file; | |
1344 | ||
1345 | /* | |
1346 | * Map blocks using the previous result first. | |
1347 | */ | |
1348 | if ((map.m_flags & F2FS_MAP_MAPPED) && | |
1349 | block_in_file > map.m_lblk && | |
1350 | block_in_file < (map.m_lblk + map.m_len)) | |
1351 | goto got_it; | |
1352 | ||
1353 | /* | |
1354 | * Then do more f2fs_map_blocks() calls until we are | |
1355 | * done with this page. | |
1356 | */ | |
1357 | map.m_flags = 0; | |
1358 | ||
1359 | if (block_in_file < last_block) { | |
1360 | map.m_lblk = block_in_file; | |
1361 | map.m_len = last_block - block_in_file; | |
1362 | ||
1363 | if (f2fs_map_blocks(inode, &map, 0, false)) | |
1364 | goto set_error_page; | |
1365 | } | |
1366 | got_it: | |
1367 | if ((map.m_flags & F2FS_MAP_MAPPED)) { | |
1368 | block_nr = map.m_pblk + block_in_file - map.m_lblk; | |
1369 | SetPageMappedToDisk(page); | |
1370 | ||
1371 | if (!PageUptodate(page) && !cleancache_get_page(page)) { | |
1372 | SetPageUptodate(page); | |
1373 | goto confused; | |
1374 | } | |
1375 | } else { | |
1376 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | |
1377 | SetPageUptodate(page); | |
1378 | unlock_page(page); | |
1379 | goto next_page; | |
1380 | } | |
1381 | ||
1382 | /* | |
1383 | * This page will go to BIO. Do we need to send this | |
1384 | * BIO off first? | |
1385 | */ | |
1386 | if (bio && (last_block_in_bio != block_nr - 1)) { | |
1387 | submit_and_realloc: | |
1388 | submit_bio(READ, bio); | |
1389 | bio = NULL; | |
1390 | } | |
1391 | if (bio == NULL) { | |
4375a336 JK |
1392 | struct f2fs_crypto_ctx *ctx = NULL; |
1393 | ||
1394 | if (f2fs_encrypted_inode(inode) && | |
1395 | S_ISREG(inode->i_mode)) { | |
1396 | struct page *cpage; | |
1397 | ||
1398 | ctx = f2fs_get_crypto_ctx(inode); | |
1399 | if (IS_ERR(ctx)) | |
1400 | goto set_error_page; | |
1401 | ||
1402 | /* wait for the page to be moved by cleaning */
1403 | cpage = find_lock_page( | |
1404 | META_MAPPING(F2FS_I_SB(inode)), | |
1405 | block_nr); | |
1406 | if (cpage) { | |
1407 | f2fs_wait_on_page_writeback(cpage, | |
1408 | DATA); | |
1409 | f2fs_put_page(cpage, 1); | |
1410 | } | |
1411 | } | |
1412 | ||
f1e88660 JK |
1413 | bio = bio_alloc(GFP_KERNEL, |
1414 | min_t(int, nr_pages, bio_get_nr_vecs(bdev))); | |
4375a336 JK |
1415 | if (!bio) { |
1416 | if (ctx) | |
1417 | f2fs_release_crypto_ctx(ctx); | |
f1e88660 | 1418 | goto set_error_page; |
4375a336 | 1419 | } |
f1e88660 JK |
1420 | bio->bi_bdev = bdev; |
1421 | bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr); | |
12377024 | 1422 | bio->bi_end_io = f2fs_read_end_io; |
4375a336 | 1423 | bio->bi_private = ctx; |
f1e88660 JK |
1424 | } |
1425 | ||
1426 | if (bio_add_page(bio, page, blocksize, 0) < blocksize) | |
1427 | goto submit_and_realloc; | |
1428 | ||
1429 | last_block_in_bio = block_nr; | |
1430 | goto next_page; | |
1431 | set_error_page: | |
1432 | SetPageError(page); | |
1433 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); | |
1434 | unlock_page(page); | |
1435 | goto next_page; | |
1436 | confused: | |
1437 | if (bio) { | |
1438 | submit_bio(READ, bio); | |
1439 | bio = NULL; | |
1440 | } | |
1441 | unlock_page(page); | |
1442 | next_page: | |
1443 | if (pages) | |
1444 | page_cache_release(page); | |
1445 | } | |
1446 | BUG_ON(pages && !list_empty(pages)); | |
1447 | if (bio) | |
1448 | submit_bio(READ, bio); | |
1449 | return 0; | |
1450 | } | |
1451 | ||
eb47b800 JK |
1452 | static int f2fs_read_data_page(struct file *file, struct page *page) |
1453 | { | |
9ffe0fb5 | 1454 | struct inode *inode = page->mapping->host; |
b3d208f9 | 1455 | int ret = -EAGAIN; |
9ffe0fb5 | 1456 | |
c20e89cd CY |
1457 | trace_f2fs_readpage(page, DATA); |
1458 | ||
e1c42045 | 1459 | /* If the file has inline data, try to read it directly */ |
9ffe0fb5 HL |
1460 | if (f2fs_has_inline_data(inode)) |
1461 | ret = f2fs_read_inline_data(inode, page); | |
b3d208f9 | 1462 | if (ret == -EAGAIN) |
f1e88660 | 1463 | ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1); |
9ffe0fb5 | 1464 | return ret; |
eb47b800 JK |
1465 | } |
1466 | ||
1467 | static int f2fs_read_data_pages(struct file *file, | |
1468 | struct address_space *mapping, | |
1469 | struct list_head *pages, unsigned nr_pages) | |
1470 | { | |
9ffe0fb5 HL |
1471 | struct inode *inode = file->f_mapping->host; |
1472 | ||
1473 | /* If the file has inline data, skip readpages */ | |
1474 | if (f2fs_has_inline_data(inode)) | |
1475 | return 0; | |
1476 | ||
f1e88660 | 1477 | return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages); |
eb47b800 JK |
1478 | } |
1479 | ||
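/*
 * Write one data page: look up its block address, encrypt the page if
 * required, then either rewrite it in place (IPU) or write it to a newly
 * allocated block and update the extent cache (OPU).
 */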
05ca3632 | 1480 | int do_write_data_page(struct f2fs_io_info *fio) |
eb47b800 | 1481 | { |
05ca3632 | 1482 | struct page *page = fio->page; |
eb47b800 | 1483 | struct inode *inode = page->mapping->host; |
eb47b800 JK |
1484 | struct dnode_of_data dn; |
1485 | int err = 0; | |
1486 | ||
1487 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
266e97a8 | 1488 | err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE); |
eb47b800 JK |
1489 | if (err) |
1490 | return err; | |
1491 | ||
cf04e8eb | 1492 | fio->blk_addr = dn.data_blkaddr; |
eb47b800 JK |
1493 | |
1494 | /* This page is already truncated */ | |
2bca1e23 JK |
1495 | if (fio->blk_addr == NULL_ADDR) { |
1496 | ClearPageUptodate(page); | |
eb47b800 | 1497 | goto out_writepage; |
2bca1e23 | 1498 | } |
eb47b800 | 1499 | |
4375a336 JK |
1500 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { |
1501 | fio->encrypted_page = f2fs_encrypt(inode, fio->page); | |
1502 | if (IS_ERR(fio->encrypted_page)) { | |
1503 | err = PTR_ERR(fio->encrypted_page); | |
1504 | goto out_writepage; | |
1505 | } | |
1506 | } | |
1507 | ||
eb47b800 JK |
1508 | set_page_writeback(page); |
1509 | ||
1510 | /* | |
1511 | * If current allocation needs SSR, | |
1512 | * it is better to do in-place writes for the updated data.
1513 | */ | |
cf04e8eb | 1514 | if (unlikely(fio->blk_addr != NEW_ADDR && |
b25958b6 HL |
1515 | !is_cold_data(page) && |
1516 | need_inplace_update(inode))) { | |
05ca3632 | 1517 | rewrite_data_page(fio); |
fff04f90 | 1518 | set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE); |
8ce67cb0 | 1519 | trace_f2fs_do_write_data_page(page, IPU); |
eb47b800 | 1520 | } else { |
05ca3632 | 1521 | write_data_page(&dn, fio); |
216a620a | 1522 | set_data_blkaddr(&dn); |
7e4dde79 | 1523 | f2fs_update_extent_cache(&dn); |
8ce67cb0 | 1524 | trace_f2fs_do_write_data_page(page, OPU); |
fff04f90 | 1525 | set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); |
3c6c2beb JK |
1526 | if (page->index == 0) |
1527 | set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); | |
eb47b800 JK |
1528 | } |
1529 | out_writepage: | |
1530 | f2fs_put_dnode(&dn); | |
1531 | return err; | |
1532 | } | |
1533 | ||
1534 | static int f2fs_write_data_page(struct page *page, | |
1535 | struct writeback_control *wbc) | |
1536 | { | |
1537 | struct inode *inode = page->mapping->host; | |
4081363f | 1538 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
eb47b800 JK |
1539 | loff_t i_size = i_size_read(inode); |
1540 | const pgoff_t end_index = ((unsigned long long) i_size) | |
1541 | >> PAGE_CACHE_SHIFT; | |
9ffe0fb5 | 1542 | unsigned offset = 0; |
39936837 | 1543 | bool need_balance_fs = false; |
eb47b800 | 1544 | int err = 0; |
458e6197 | 1545 | struct f2fs_io_info fio = { |
05ca3632 | 1546 | .sbi = sbi, |
458e6197 | 1547 | .type = DATA, |
6c311ec6 | 1548 | .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE, |
05ca3632 | 1549 | .page = page, |
4375a336 | 1550 | .encrypted_page = NULL, |
458e6197 | 1551 | }; |
eb47b800 | 1552 | |
ecda0de3 CY |
1553 | trace_f2fs_writepage(page, DATA); |
1554 | ||
eb47b800 | 1555 | if (page->index < end_index) |
39936837 | 1556 | goto write; |
eb47b800 JK |
1557 | |
1558 | /* | |
1559 | * If the offset is out-of-range of file size, | |
1560 | * this page does not have to be written to disk. | |
1561 | */ | |
1562 | offset = i_size & (PAGE_CACHE_SIZE - 1); | |
76f60268 | 1563 | if ((page->index >= end_index + 1) || !offset) |
39936837 | 1564 | goto out; |
eb47b800 JK |
1565 | |
1566 | zero_user_segment(page, offset, PAGE_CACHE_SIZE); | |
39936837 | 1567 | write: |
caf0047e | 1568 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) |
eb47b800 | 1569 | goto redirty_out; |
1e84371f JK |
1570 | if (f2fs_is_drop_cache(inode)) |
1571 | goto out; | |
1572 | if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim && | |
1573 | available_free_memory(sbi, BASE_CHECK)) | |
1574 | goto redirty_out; | |
eb47b800 | 1575 | |
39936837 | 1576 | /* Dentry blocks are controlled by checkpoint */ |
eb47b800 | 1577 | if (S_ISDIR(inode->i_mode)) { |
cf779cab JK |
1578 | if (unlikely(f2fs_cp_error(sbi))) |
1579 | goto redirty_out; | |
05ca3632 | 1580 | err = do_write_data_page(&fio); |
8618b881 JK |
1581 | goto done; |
1582 | } | |
9ffe0fb5 | 1583 | |
cf779cab JK |
1584 | /* we should bypass data pages to let the kworker jobs proceed */
1585 | if (unlikely(f2fs_cp_error(sbi))) { | |
1586 | SetPageError(page); | |
a7ffdbe2 | 1587 | goto out; |
cf779cab JK |
1588 | } |
1589 | ||
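/*
 * Outside of reclaim, rebalance free segments after this write;
 * under reclaim with too few free sections, redirty the page and
 * let later writeback retry it.
 */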
8618b881 | 1590 | if (!wbc->for_reclaim) |
39936837 | 1591 | need_balance_fs = true; |
8618b881 | 1592 | else if (has_not_enough_free_secs(sbi, 0)) |
39936837 | 1593 | goto redirty_out; |
eb47b800 | 1594 | |
b3d208f9 | 1595 | err = -EAGAIN; |
8618b881 | 1596 | f2fs_lock_op(sbi); |
b3d208f9 JK |
1597 | if (f2fs_has_inline_data(inode)) |
1598 | err = f2fs_write_inline_data(inode, page); | |
1599 | if (err == -EAGAIN) | |
05ca3632 | 1600 | err = do_write_data_page(&fio); |
8618b881 JK |
1601 | f2fs_unlock_op(sbi); |
1602 | done: | |
1603 | if (err && err != -ENOENT) | |
1604 | goto redirty_out; | |
eb47b800 | 1605 | |
eb47b800 | 1606 | clear_cold_data(page); |
39936837 | 1607 | out: |
a7ffdbe2 | 1608 | inode_dec_dirty_pages(inode); |
2bca1e23 JK |
1609 | if (err) |
1610 | ClearPageUptodate(page); | |
eb47b800 | 1611 | unlock_page(page); |
39936837 | 1612 | if (need_balance_fs) |
eb47b800 | 1613 | f2fs_balance_fs(sbi); |
2aea39ec JK |
1614 | if (wbc->for_reclaim) |
1615 | f2fs_submit_merged_bio(sbi, DATA, WRITE); | |
eb47b800 JK |
1616 | return 0; |
1617 | ||
eb47b800 | 1618 | redirty_out: |
76f60268 | 1619 | redirty_page_for_writepage(wbc, page); |
8618b881 | 1620 | return AOP_WRITEPAGE_ACTIVATE; |
eb47b800 JK |
1621 | } |
1622 | ||
fa9150a8 NJ |
1623 | static int __f2fs_writepage(struct page *page, struct writeback_control *wbc, |
1624 | void *data) | |
1625 | { | |
1626 | struct address_space *mapping = data; | |
1627 | int ret = mapping->a_ops->writepage(page, wbc); | |
1628 | mapping_set_error(mapping, ret); | |
1629 | return ret; | |
1630 | } | |
1631 | ||
25ca923b | 1632 | static int f2fs_write_data_pages(struct address_space *mapping, |
eb47b800 JK |
1633 | struct writeback_control *wbc) |
1634 | { | |
1635 | struct inode *inode = mapping->host; | |
4081363f | 1636 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
5463e7c1 | 1637 | bool locked = false; |
eb47b800 | 1638 | int ret; |
50c8cdb3 | 1639 | long diff; |
eb47b800 | 1640 | |
e5748434 CY |
1641 | trace_f2fs_writepages(mapping->host, wbc, DATA); |
1642 | ||
cfb185a1 | 1643 | /* deal with chardevs and other special files */
1644 | if (!mapping->a_ops->writepage) | |
1645 | return 0; | |
1646 | ||
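/* for background writeback, skip directories that carry only a few dirty dentry pages while memory is still plentiful */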
87d6f890 | 1647 | if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE && |
a7ffdbe2 | 1648 | get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) && |
6fb03f3a | 1649 | available_free_memory(sbi, DIRTY_DENTS)) |
d3baf95d | 1650 | goto skip_write; |
87d6f890 | 1651 | |
d5669f7b JK |
1652 | /* during POR (power-on recovery), we don't need to trigger writepage at all. */
1653 | if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) | |
1654 | goto skip_write; | |
1655 | ||
50c8cdb3 | 1656 | diff = nr_pages_to_write(sbi, DATA, wbc); |
eb47b800 | 1657 | |
5463e7c1 JK |
1658 | if (!S_ISDIR(inode->i_mode)) { |
1659 | mutex_lock(&sbi->writepages); | |
1660 | locked = true; | |
1661 | } | |
fa9150a8 | 1662 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); |
5463e7c1 JK |
1663 | if (locked) |
1664 | mutex_unlock(&sbi->writepages); | |
458e6197 JK |
1665 | |
1666 | f2fs_submit_merged_bio(sbi, DATA, WRITE); | |
eb47b800 JK |
1667 | |
1668 | remove_dirty_dir_inode(inode); | |
1669 | ||
50c8cdb3 | 1670 | wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); |
eb47b800 | 1671 | return ret; |
d3baf95d JK |
1672 | |
1673 | skip_write: | |
a7ffdbe2 | 1674 | wbc->pages_skipped += get_dirty_pages(inode); |
d3baf95d | 1675 | return 0; |
eb47b800 JK |
1676 | } |
1677 | ||
3aab8f82 CY |
1678 | static void f2fs_write_failed(struct address_space *mapping, loff_t to) |
1679 | { | |
1680 | struct inode *inode = mapping->host; | |
1681 | ||
1682 | if (to > inode->i_size) { | |
1683 | truncate_pagecache(inode, inode->i_size); | |
764aa3e9 | 1684 | truncate_blocks(inode, inode->i_size, true); |
3aab8f82 CY |
1685 | } |
1686 | } | |
1687 | ||
eb47b800 JK |
1688 | static int f2fs_write_begin(struct file *file, struct address_space *mapping, |
1689 | loff_t pos, unsigned len, unsigned flags, | |
1690 | struct page **pagep, void **fsdata) | |
1691 | { | |
1692 | struct inode *inode = mapping->host; | |
4081363f | 1693 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
9ba69cf9 | 1694 | struct page *page, *ipage; |
eb47b800 JK |
1695 | pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; |
1696 | struct dnode_of_data dn; | |
1697 | int err = 0; | |
1698 | ||
62aed044 CY |
1699 | trace_f2fs_write_begin(inode, pos, len, flags); |
1700 | ||
eb47b800 | 1701 | f2fs_balance_fs(sbi); |
5f727395 JK |
1702 | |
1703 | /* | |
1704 | * We must check this here to avoid a deadlock between the inode page |
1705 | * and page #0. The locking rule for inline_data conversion is: |
1706 | * lock_page(page #0) -> lock_page(inode_page) | |
1707 | */ | |
1708 | if (index != 0) { | |
1709 | err = f2fs_convert_inline_inode(inode); | |
1710 | if (err) | |
1711 | goto fail; | |
1712 | } | |
afcb7ca0 | 1713 | repeat: |
eb47b800 | 1714 | page = grab_cache_page_write_begin(mapping, index, flags); |
3aab8f82 CY |
1715 | if (!page) { |
1716 | err = -ENOMEM; | |
1717 | goto fail; | |
1718 | } | |
d5f66990 | 1719 | |
eb47b800 JK |
1720 | *pagep = page; |
1721 | ||
e479556b | 1722 | f2fs_lock_op(sbi); |
9ba69cf9 JK |
1723 | |
1724 | /* check inline_data */ | |
1725 | ipage = get_node_page(sbi, inode->i_ino); | |
cd34e296 CY |
1726 | if (IS_ERR(ipage)) { |
1727 | err = PTR_ERR(ipage); | |
9ba69cf9 | 1728 | goto unlock_fail; |
cd34e296 | 1729 | } |
9ba69cf9 | 1730 | |
b3d208f9 JK |
1731 | set_new_dnode(&dn, inode, ipage, ipage, 0); |
1732 | ||
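/*
 * If the write still fits in the inline area, just load the inline
 * data into the page; otherwise convert the inode to a regular
 * block-mapped layout first.
 */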
9ba69cf9 | 1733 | if (f2fs_has_inline_data(inode)) { |
b3d208f9 JK |
1734 | if (pos + len <= MAX_INLINE_DATA) { |
1735 | read_inline_data(page, ipage); | |
1736 | set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); | |
1737 | sync_inode_page(&dn); | |
1738 | goto put_next; | |
b3d208f9 | 1739 | } |
5f727395 JK |
1740 | err = f2fs_convert_inline_page(&dn, page); |
1741 | if (err) | |
1742 | goto put_fail; | |
b600965c | 1743 | } |
9ba69cf9 JK |
1744 | err = f2fs_reserve_block(&dn, index); |
1745 | if (err) | |
8cdcb713 | 1746 | goto put_fail; |
b3d208f9 | 1747 | put_next: |
9ba69cf9 JK |
1748 | f2fs_put_dnode(&dn); |
1749 | f2fs_unlock_op(sbi); | |
1750 | ||
eb47b800 JK |
1751 | if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) |
1752 | return 0; | |
1753 | ||
b3d208f9 JK |
1754 | f2fs_wait_on_page_writeback(page, DATA); |
1755 | ||
eb47b800 JK |
1756 | if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { |
1757 | unsigned start = pos & (PAGE_CACHE_SIZE - 1); | |
1758 | unsigned end = start + len; | |
1759 | ||
1760 | /* Reading beyond i_size is simple: memset to zero */ | |
1761 | zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); | |
393ff91f | 1762 | goto out; |
eb47b800 JK |
1763 | } |
1764 | ||
b3d208f9 | 1765 | if (dn.data_blkaddr == NEW_ADDR) { |
eb47b800 JK |
1766 | zero_user_segment(page, 0, PAGE_CACHE_SIZE); |
1767 | } else { | |
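/* the block already exists on disk: read it in synchronously before the partial overwrite */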
cf04e8eb | 1768 | struct f2fs_io_info fio = { |
05ca3632 | 1769 | .sbi = sbi, |
cf04e8eb JK |
1770 | .type = DATA, |
1771 | .rw = READ_SYNC, | |
1772 | .blk_addr = dn.data_blkaddr, | |
05ca3632 | 1773 | .page = page, |
4375a336 | 1774 | .encrypted_page = NULL, |
cf04e8eb | 1775 | }; |
05ca3632 | 1776 | err = f2fs_submit_page_bio(&fio); |
9234f319 JK |
1777 | if (err) |
1778 | goto fail; | |
d54c795b | 1779 | |
393ff91f | 1780 | lock_page(page); |
6bacf52f | 1781 | if (unlikely(!PageUptodate(page))) { |
393ff91f | 1782 | f2fs_put_page(page, 1); |
3aab8f82 CY |
1783 | err = -EIO; |
1784 | goto fail; | |
eb47b800 | 1785 | } |
6bacf52f | 1786 | if (unlikely(page->mapping != mapping)) { |
afcb7ca0 JK |
1787 | f2fs_put_page(page, 1); |
1788 | goto repeat; | |
eb47b800 | 1789 | } |
4375a336 JK |
1790 | |
1791 | /* avoid symlink page */ | |
1792 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { | |
1793 | err = f2fs_decrypt_one(inode, page); | |
1794 | if (err) { | |
1795 | f2fs_put_page(page, 1); | |
1796 | goto fail; | |
1797 | } | |
1798 | } | |
eb47b800 | 1799 | } |
393ff91f | 1800 | out: |
eb47b800 JK |
1801 | SetPageUptodate(page); |
1802 | clear_cold_data(page); | |
1803 | return 0; | |
9ba69cf9 | 1804 | |
8cdcb713 JK |
1805 | put_fail: |
1806 | f2fs_put_dnode(&dn); | |
9ba69cf9 JK |
1807 | unlock_fail: |
1808 | f2fs_unlock_op(sbi); | |
b3d208f9 | 1809 | f2fs_put_page(page, 1); |
3aab8f82 CY |
1810 | fail: |
1811 | f2fs_write_failed(mapping, pos + len); | |
1812 | return err; | |
eb47b800 JK |
1813 | } |
1814 | ||
a1dd3c13 JK |
1815 | static int f2fs_write_end(struct file *file, |
1816 | struct address_space *mapping, | |
1817 | loff_t pos, unsigned len, unsigned copied, | |
1818 | struct page *page, void *fsdata) | |
1819 | { | |
1820 | struct inode *inode = page->mapping->host; | |
1821 | ||
dfb2bf38 CY |
1822 | trace_f2fs_write_end(inode, pos, len, copied); |
1823 | ||
34ba94ba | 1824 | set_page_dirty(page); |
a1dd3c13 JK |
1825 | |
1826 | if (pos + copied > i_size_read(inode)) { | |
1827 | i_size_write(inode, pos + copied); | |
1828 | mark_inode_dirty(inode); | |
1829 | update_inode_page(inode); | |
1830 | } | |
1831 | ||
75c3c8bc | 1832 | f2fs_put_page(page, 1); |
a1dd3c13 JK |
1833 | return copied; |
1834 | } | |
1835 | ||
6f673763 OS |
1836 | static int check_direct_IO(struct inode *inode, struct iov_iter *iter, |
1837 | loff_t offset) | |
944fcfc1 JK |
1838 | { |
1839 | unsigned blocksize_mask = inode->i_sb->s_blocksize - 1; | |
944fcfc1 | 1840 | |
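/* only direct writes must be block aligned; reads always pass this check */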
6f673763 | 1841 | if (iov_iter_rw(iter) == READ) |
944fcfc1 JK |
1842 | return 0; |
1843 | ||
1844 | if (offset & blocksize_mask) | |
1845 | return -EINVAL; | |
1846 | ||
5b46f25d AV |
1847 | if (iov_iter_alignment(iter) & blocksize_mask) |
1848 | return -EINVAL; | |
1849 | ||
944fcfc1 JK |
1850 | return 0; |
1851 | } | |
1852 | ||
22c6186e OS |
1853 | static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, |
1854 | loff_t offset) | |
eb47b800 JK |
1855 | { |
1856 | struct file *file = iocb->ki_filp; | |
3aab8f82 CY |
1857 | struct address_space *mapping = file->f_mapping; |
1858 | struct inode *inode = mapping->host; | |
1859 | size_t count = iov_iter_count(iter); | |
1860 | int err; | |
944fcfc1 | 1861 | |
b3d208f9 JK |
1862 | /* we don't need to use inline_data strictly */ |
1863 | if (f2fs_has_inline_data(inode)) { | |
1864 | err = f2fs_convert_inline_inode(inode); | |
1865 | if (err) | |
1866 | return err; | |
1867 | } | |
9ffe0fb5 | 1868 | |
fcc85a4d JK |
1869 | if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) |
1870 | return 0; | |
1871 | ||
6f673763 | 1872 | if (check_direct_IO(inode, iter, offset)) |
944fcfc1 JK |
1873 | return 0; |
1874 | ||
6f673763 | 1875 | trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); |
70407fad | 1876 | |
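/* preallocate every block covered by a direct write up front, rather than allocating block by block inside get_data_block */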
6f673763 | 1877 | if (iov_iter_rw(iter) == WRITE) |
59b802e5 JK |
1878 | __allocate_data_blocks(inode, offset, count); |
1879 | ||
17f8c842 | 1880 | err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block); |
6f673763 | 1881 | if (err < 0 && iov_iter_rw(iter) == WRITE) |
3aab8f82 | 1882 | f2fs_write_failed(mapping, offset + count); |
70407fad | 1883 | |
6f673763 | 1884 | trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); |
70407fad | 1885 | |
3aab8f82 | 1886 | return err; |
eb47b800 JK |
1887 | } |
1888 | ||
487261f3 CY |
1889 | void f2fs_invalidate_page(struct page *page, unsigned int offset, |
1890 | unsigned int length) | |
eb47b800 JK |
1891 | { |
1892 | struct inode *inode = page->mapping->host; | |
487261f3 | 1893 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
a7ffdbe2 | 1894 | |
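/* a partial-page invalidation of a regular or directory inode leaves the page's private data and dirty accounting untouched */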
487261f3 CY |
1895 | if (inode->i_ino >= F2FS_ROOT_INO(sbi) && |
1896 | (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) | |
a7ffdbe2 JK |
1897 | return; |
1898 | ||
487261f3 CY |
1899 | if (PageDirty(page)) { |
1900 | if (inode->i_ino == F2FS_META_INO(sbi)) | |
1901 | dec_page_count(sbi, F2FS_DIRTY_META); | |
1902 | else if (inode->i_ino == F2FS_NODE_INO(sbi)) | |
1903 | dec_page_count(sbi, F2FS_DIRTY_NODES); | |
1904 | else | |
1905 | inode_dec_dirty_pages(inode); | |
1906 | } | |
eb47b800 JK |
1907 | ClearPagePrivate(page); |
1908 | } | |
1909 | ||
487261f3 | 1910 | int f2fs_release_page(struct page *page, gfp_t wait) |
eb47b800 | 1911 | { |
f68daeeb JK |
1912 | /* If this is a dirty page, keep PagePrivate */
1913 | if (PageDirty(page)) | |
1914 | return 0; | |
1915 | ||
eb47b800 | 1916 | ClearPagePrivate(page); |
c3850aa1 | 1917 | return 1; |
eb47b800 JK |
1918 | } |
1919 | ||
1920 | static int f2fs_set_data_page_dirty(struct page *page) | |
1921 | { | |
1922 | struct address_space *mapping = page->mapping; | |
1923 | struct inode *inode = mapping->host; | |
1924 | ||
26c6b887 JK |
1925 | trace_f2fs_set_page_dirty(page, DATA); |
1926 | ||
eb47b800 | 1927 | SetPageUptodate(page); |
34ba94ba | 1928 | |
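/* pages of an atomic-write file are queued in memory and committed later rather than being marked dirty here */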
1e84371f | 1929 | if (f2fs_is_atomic_file(inode)) { |
34ba94ba JK |
1930 | register_inmem_page(inode, page); |
1931 | return 1; | |
1932 | } | |
1933 | ||
eb47b800 JK |
1934 | if (!PageDirty(page)) { |
1935 | __set_page_dirty_nobuffers(page); | |
a7ffdbe2 | 1936 | update_dirty_page(inode, page); |
eb47b800 JK |
1937 | return 1; |
1938 | } | |
1939 | return 0; | |
1940 | } | |
1941 | ||
c01e54b7 JK |
1942 | static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) |
1943 | { | |
454ae7e5 CY |
1944 | struct inode *inode = mapping->host; |
1945 | ||
b3d208f9 JK |
1946 | /* we don't need to use inline_data strictly */ |
1947 | if (f2fs_has_inline_data(inode)) { | |
1948 | int err = f2fs_convert_inline_inode(inode); | |
1949 | if (err) | |
1950 | return err; | |
1951 | } | |
bfad7c2d | 1952 | return generic_block_bmap(mapping, block, get_data_block); |
c01e54b7 JK |
1953 | } |
1954 | ||
429511cd CY |
1955 | void init_extent_cache_info(struct f2fs_sb_info *sbi) |
1956 | { | |
1957 | INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO); | |
1958 | init_rwsem(&sbi->extent_tree_lock); | |
1959 | INIT_LIST_HEAD(&sbi->extent_list); | |
1960 | spin_lock_init(&sbi->extent_lock); | |
1961 | sbi->total_ext_tree = 0; | |
1962 | atomic_set(&sbi->total_ext_node, 0); | |
1963 | } | |
1964 | ||
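/* slab caches backing the in-memory extent trees and their nodes, created once at module load */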
1965 | int __init create_extent_cache(void) | |
1966 | { | |
1967 | extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree", | |
1968 | sizeof(struct extent_tree)); | |
1969 | if (!extent_tree_slab) | |
1970 | return -ENOMEM; | |
1971 | extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node", | |
1972 | sizeof(struct extent_node)); | |
1973 | if (!extent_node_slab) { | |
1974 | kmem_cache_destroy(extent_tree_slab); | |
1975 | return -ENOMEM; | |
1976 | } | |
1977 | return 0; | |
1978 | } | |
1979 | ||
1980 | void destroy_extent_cache(void) | |
1981 | { | |
1982 | kmem_cache_destroy(extent_node_slab); | |
1983 | kmem_cache_destroy(extent_tree_slab); | |
1984 | } | |
1985 | ||
eb47b800 JK |
1986 | const struct address_space_operations f2fs_dblock_aops = { |
1987 | .readpage = f2fs_read_data_page, | |
1988 | .readpages = f2fs_read_data_pages, | |
1989 | .writepage = f2fs_write_data_page, | |
1990 | .writepages = f2fs_write_data_pages, | |
1991 | .write_begin = f2fs_write_begin, | |
a1dd3c13 | 1992 | .write_end = f2fs_write_end, |
eb47b800 | 1993 | .set_page_dirty = f2fs_set_data_page_dirty, |
487261f3 CY |
1994 | .invalidatepage = f2fs_invalidate_page, |
1995 | .releasepage = f2fs_release_page, | |
eb47b800 | 1996 | .direct_IO = f2fs_direct_IO, |
c01e54b7 | 1997 | .bmap = f2fs_bmap, |
eb47b800 | 1998 | }; |