/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

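/*
 * Completion callback for read bios: decrypt pages read into an encrypted
 * inode (or release the crypto context on error), then mark each page
 * up-to-date or failed and unlock it for waiting readers.
 */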
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			fscrypt_release_ctx(bio->bi_private);
		} else {
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			if (!PageUptodate(page))
				SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

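/*
 * Completion callback for write bios: on error, flag the mapping with
 * AS_EIO and stop checkpointing, since written data can no longer be
 * trusted.  Once the last in-flight write bio completes, wake up any
 * checkpoint waiter sleeping on cp_wait.
 */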
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		fscrypt_pullback_bio_page(&page, true);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi, true);
		}
		end_page_writeback(page);
	}
	if (atomic_dec_and_test(&sbi->nr_wb_bios) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}

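/*
 * Submit a bio, counting it in nr_wb_bios when it is a write.  On
 * host-managed SMR mounts, finish the current plug first so that DATA
 * and NODE writes are issued in order rather than resorted by the plug.
 */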
static inline void __submit_bio(struct f2fs_sb_info *sbi,
			struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		atomic_inc(&sbi->nr_wb_bios);
		if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
			current->plug && (type == DATA || type == NODE))
			blk_finish_plug(current->plug);
	}
	submit_bio(bio);
}

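/*
 * Flush the bio currently being merged in @io, tracing it as a read or
 * write submission and applying the cached op/op_flags before hand-off.
 */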
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->op))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}

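/*
 * Flush the merged bio for @type if it contains pages matching the given
 * inode/page/ino filter (an all-NULL filter matches everything).
 */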
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		if (test_opt(sbi, NOBARRIER))
			io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
								REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}

void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}

void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	bio->bi_rw = fio->op_flags;
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

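/*
 * Queue a page into the per-type merged bio instead of issuing it
 * immediately: consecutive block addresses submitted with identical
 * op/op_flags are batched into one large bio and sent down together.
 */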
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->op);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
	    (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) <
							PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

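/*
 * Read one data page at @index.  The extent cache is consulted first to
 * avoid a dnode lookup; encrypted regular files fall back to the generic
 * read path so the page gets decrypted on IO completion.
 */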
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers (functions in dir.c and GC) should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

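/*
 * Allocate an on-disk block for the dnode entry at dn->ofs_in_node and
 * extend i_size if the newly mapped block lies beyond it.  Blocks whose
 * address is already NEW_ADDR were counted when they were reserved, so
 * the free-space check is skipped for them.
 */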
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;
	blkcnt_t count = 1;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
		f2fs_i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_SHIFT));
	return 0;
}

ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	prealloc = 0;
	ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				err = __allocate_data_block(&dn);
				if (!err) {
					set_inode_flag(inode, FI_APPEND_WRITE);
					allocated = true;
				}
			}
			if (err)
				goto sync_out;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
				goto sync_out;
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	f2fs_put_dnode(&dn);

	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
	allocated = false;
	goto next_dnode;

sync_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

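/*
 * Thin buffer_head adaptors over f2fs_map_blocks() so the generic
 * direct-IO and bmap helpers can drive f2fs's mapping logic.
 */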
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/*
		 * A hole found beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}

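/*
 * Allocate a read bio for up to @nr_pages pages starting at @blkaddr.
 * For encrypted regular files, a crypto context is attached as
 * bi_private so that f2fs_read_end_io() can decrypt the pages in place.
 */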
struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
							unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * The major change is that f2fs assumes block_size == page_size by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index,
						  readahead_gfp_mask(mapping)))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_SIZE);
			if (!PageUptodate(page))
				SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			put_page(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

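/*
 * Write one data page to its on-disk location: look up the current block
 * address, encrypt the page if needed, and then either rewrite the block
 * in place (IPU) when in-place update is preferred, or write it out of
 * place (OPU).
 */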
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		gfp_t gfp_flags = GFP_NOFS;

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);
retry_encrypt:
		fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
								gfp_flags);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			if (err == -ENOMEM) {
				/* flush pending ios and wait for a while */
				f2fs_flush_merged_bios(F2FS_I_SB(inode));
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				gfp_flags |= __GFP_NOFAIL;
				err = 0;
				goto retry_encrypt;
			}
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, in-place writes are
	 * preferred for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(inode, FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_SHIFT;
	loff_t psize = (page->index + 1) << PAGE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	/* we should not write the 0'th page having a journal header */
	if (f2fs_is_volatile_file(inode) && (!page->index ||
			(!wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))))
		goto redirty_out;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(page->mapping, -EIO);
		goto out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		err = do_write_data_page(&fio);
		goto done;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	if (F2FS_I(inode)->last_disk_size < psize)
		F2FS_I(inode)->last_disk_size = psize;
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	unlock_page(page);
	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return err;
}

/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate
 * from warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);

	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = mapping->a_ops->writepage(page, wbc);
			if (unlikely(ret)) {
				done_index = page->index + 1;
				done = 1;
				break;
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(inode, FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	blk_finish_plug(&plug);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
	 */
	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_inode(inode);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}

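/*
 * Resolve the block address backing the page being written: handle the
 * inline-data cases, consult the extent cache, and take f2fs_lock_op()
 * only when a hole actually has to be filled.
 */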
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
				set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
	}
out_update:
	if (!PageUptodate(page))
		SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}

static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			   loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	int rw = iov_iter_rw(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;
	if (test_opt(F2FS_I_SB(inode), LFS))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
			f2fs_write_failed(mapping, offset + count);
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_SIZE || length != PAGE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	set_page_private(page, 0);
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	set_page_private(page, 0);
	ClearPagePrivate(page);
	return 1;
}

/*
 * This was copied from __set_page_dirty_buffers, which gives higher
 * performance on very high-speed storage (e.g., pmem).
 */
void f2fs_set_page_dirty_nobuffers(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (unlikely(!mapping))
		return;

	spin_lock(&mapping->private_lock);
	lock_page_memcg(page);
	SetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	spin_lock_irqsave(&mapping->tree_lock, flags);
	WARN_ON_ONCE(!PageUptodate(page));
	account_page_dirtied(page, mapping);
	radix_tree_tag_set(&mapping->page_tree,
			page_index(page), PAGECACHE_TAG_DIRTY);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	unlock_page_memcg(page);

	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * This page was previously registered, so we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};