f2fs: use sbi->write_mutex for write bios
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

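/*
 * Reserve one block for the slot addressed by @dn: account it in the
 * inode's valid block count and record NEW_ADDR as a placeholder until
 * a real block is allocated at write time.
 */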
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

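/*
 * f2fs caches a single contiguous extent per inode: ext.fofs is the
 * starting file offset, ext.blk_addr the matching start block address,
 * and ext.len the extent length in blocks.  If @pgofs falls inside the
 * cached extent, map @bh_result directly and count a read hit.
 */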
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

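/*
 * Keep the cached extent consistent with the new block address.  For
 * example, with a cached extent {fofs = 4, blk_addr = 100, len = 5}
 * (file blocks 4..8 at blocks 100..104), writing file block 9 to block
 * 105 back-merges the extent to len = 6, while rewriting file block 5
 * to a non-adjacent block splits the extent and keeps the larger
 * remainder (file blocks 6..8).
 */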
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

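/*
 * Find a referenced data page, reading it in if needed.  With @sync,
 * the read is issued with READ_SYNC and the result is verified before
 * returning; otherwise a readahead (READA) bio is issued and the page
 * is returned without waiting for it to become uptodate.
 */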
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/*
	 * After fallocate(), there is no cached page, but the block address
	 * was set to NEW_ADDR, which cannot be read from disk.
	 */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers in dir.c
 * and GC need to be able to tell whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since
	 * its new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr remains NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release the fs-wide lock by calling
 * f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir().
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

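/*
 * Read-completion callback: walk the bio_vec array backwards, mark each
 * page uptodate on success or set PageError on failure, then unlock the
 * page so that waiters (e.g. find_data_page()) can proceed.
 */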
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with data located at the given block address.
 * The page is unlocked by read_end_io() once the read completes.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	return 0;
}

/*
 * This function should be used only by the data read flow, so it does
 * not honor the "create" flag that would request block allocation.
 * The reason for this special handling is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the readahead */
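		/*
		 * Example: node entries {100, 101, 102, 250} starting at
		 * ofs_in_node yield i = 3, so a single 3-block mapping at
		 * block 100 is handed back for one merged read.
		 */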
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

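/*
 * Write out one dirty data page: look up its current block address and
 * either rewrite the block in place (when in-place update is
 * preferable, i.e. SSR) or allocate a new block out of place and
 * refresh the extent cache.
 */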
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by the checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

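/*
 * f2fs_write_data_pages() raises undersized writeback requests up to
 * this many pages, then subtracts the excess from wbc->nr_to_write
 * afterwards, so that data writes batch into larger bios.
 */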
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

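/*
 * Reserve the block backing @pos under f2fs_lock_op(), then prepare the
 * page contents for a partial write: zero the page for a hole or a
 * NEW_ADDR block, or read the existing block from disk.
 */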
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

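/*
 * Direct writes are not supported here: returning 0 for WRITE makes
 * the caller fall back to buffered I/O, which goes through the
 * log-structured allocator.  Direct reads are served through the
 * read-only block mapping.
 */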
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

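/*
 * Besides the normal dirty tagging, dirty directory inodes are queued
 * on the per-sb dirty list via set_dirty_dir_page() so that the next
 * checkpoint can write their dentry pages back.
 */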
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};