Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index eff046a792adcf8f50e66f26a7c47025e0ac6932..a46296f57b026e650f2a82950ae7f63a02fd2fb0 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,7 +257,8 @@ static int __commit_inmem_pages(struct inode *inode,
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
-               .rw = WRITE_SYNC | REQ_PRIO,
+               .op = REQ_OP_WRITE,
+               .op_flags = WRITE_SYNC | REQ_PRIO,
                .encrypted_page = NULL,
        };
        bool submit_bio = false;
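Note: this hunk is part of the 4.8-era block layer conversion that split the old bitfield `rw` into an operation (`op`) and modifier flags (`op_flags`). A minimal sketch of how such a pair ends up on a bio, assuming f2fs's submit path does the equivalent (the wrapper function here is hypothetical; bio_set_op_attrs() and the argument-less submit_bio() are the real 4.8 interfaces):

    #include <linux/bio.h>

    /* Hypothetical wrapper: apply a split op/op_flags pair to a bio. */
    static void submit_split_op(struct bio *bio, int op, int op_flags)
    {
            bio_set_op_attrs(bio, op, op_flags);    /* bio carries op | flags */
            submit_bio(bio);                        /* rw argument dropped in 4.8 */
    }
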
@@ -371,7 +372,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
                try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
 
        if (!available_free_memory(sbi, FREE_NIDS))
-               try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+               try_to_free_nids(sbi, MAX_FREE_NIDS);
+       else
+               build_free_nids(sbi);
 
        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
@@ -379,8 +382,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
                        excess_prefree_segs(sbi) ||
                        excess_dirty_nats(sbi) ||
                        (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
-               if (test_opt(sbi, DATA_FLUSH))
+               if (test_opt(sbi, DATA_FLUSH)) {
+                       struct blk_plug plug;
+
+                       blk_start_plug(&plug);
                        sync_dirty_inodes(sbi, FILE_INODE);
+                       blk_finish_plug(&plug);
+               }
                f2fs_sync_fs(sbi->sb, true);
                stat_inc_bg_cp_count(sbi->stat_info);
        }
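Note: two changes land in f2fs_balance_fs_bg(). First, the free-nid branch gains an else arm that pre-builds the free-nid cache when memory is plentiful, instead of only ever shrinking it (MAX_FREE_NIDS appears to be the named form of the old NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES bound). Second, sync_dirty_inodes() is wrapped in a block plug so its many small write-outs are merged and dispatched as one batch. A minimal sketch of the plugging API, with the batched work as a stand-in:

    #include <linux/blkdev.h>

    /* Sketch: batch small writes behind a per-task plug so the block
     * layer can merge them and dispatch the lot on unplug. */
    static void write_out_batch(void)
    {
            struct blk_plug plug;

            blk_start_plug(&plug);  /* bios queue on the current task */
            /* ... issue many small writes, e.g. sync_dirty_inodes() ... */
            blk_finish_plug(&plug); /* drain the queued bios to the device */
    }
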
@@ -406,7 +414,8 @@ repeat:
                fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
                bio->bi_bdev = sbi->sb->s_bdev;
-               ret = submit_bio_wait(WRITE_FLUSH, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               ret = submit_bio_wait(bio);
 
                llist_for_each_entry_safe(cmd, next,
                                          fcc->dispatch_list, llnode) {
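Note: the same block API conversion applied to the flush-merge thread: the operation moves onto the bio via bio_set_op_attrs(), and submit_bio_wait() loses its rw argument. A self-contained sketch of issuing a cache flush this way (error handling trimmed; allocating a zero-segment bio as the f2fs flush path does is an assumption):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int issue_flush(struct block_device *bdev)
    {
            struct bio *bio = bio_alloc(GFP_NOIO, 0);   /* no data pages */
            int ret;

            if (!bio)
                    return -ENOMEM;
            bio->bi_bdev = bdev;
            /* WRITE_FLUSH: a write op whose flags request a cache flush */
            bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
            ret = submit_bio_wait(bio);     /* the op now lives on the bio */
            bio_put(bio);
            return ret;
    }
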
@@ -439,7 +448,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 
                atomic_inc(&fcc->submit_flush);
                bio->bi_bdev = sbi->sb->s_bdev;
-               ret = submit_bio_wait(WRITE_FLUSH, bio);
+               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               ret = submit_bio_wait(bio);
                atomic_dec(&fcc->submit_flush);
                bio_put(bio);
                return ret;
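Note: the second flush call site, the direct-issue path in f2fs_issue_flush(), gets the identical conversion; both sites now build the bio the same way and call the argument-less submit_bio_wait().
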
@@ -670,6 +680,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
                        break;
 
                end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+               if (force && start && end != max_blocks
+                                       && (end - start) < cpc->trim_minlen)
+                       continue;
+
                __add_discard_entry(sbi, cpc, se, start, end);
        }
 }
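Note: during FITRIM (force), candidate runs shorter than cpc->trim_minlen are now dropped during the scan instead of being queued and filtered later; runs touching either edge of the map (start == 0 or end == max_blocks) stay exempt, presumably because they may continue into a neighboring map. A generic sketch of the run-scanning pattern (f2fs uses its own __find_rev_next_bit()/__find_rev_next_zero_bit(); the stock bitmap helpers stand in here):

    #include <linux/bitops.h>

    /* Walk a bitmap collecting [start, end) runs of set bits and
     * ignoring runs shorter than a minimum length. */
    static void scan_discard_runs(const unsigned long *map,
                                  unsigned long max, unsigned long minlen)
    {
            unsigned long start = 0, end;

            while ((start = find_next_bit(map, max, start)) < max) {
                    end = find_next_zero_bit(map, max, start + 1);
                    if (end - start >= minlen) {
                            /* record [start, end) as a discard candidate */
                    }
                    start = end;
            }
    }
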
@@ -707,6 +721,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
        unsigned int start = 0, end = -1;
+       unsigned int secno, start_segno;
+       bool force = (cpc->reason == CP_DISCARD);
 
        mutex_lock(&dirty_i->seglist_lock);
 
@@ -723,17 +739,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
                dirty_i->nr_dirty[PRE] -= end - start;
 
-               if (!test_opt(sbi, DISCARD))
+               if (force || !test_opt(sbi, DISCARD))
                        continue;
 
-               f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+               if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                                (end - start) << sbi->log_blocks_per_seg);
+                       continue;
+               }
+next:
+               secno = GET_SECNO(sbi, start);
+               start_segno = secno * sbi->segs_per_sec;
+               if (!IS_CURSEC(sbi, secno) &&
+                       !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+                       f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+                               sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+               start = start_segno + sbi->segs_per_sec;
+               if (start < end)
+                       goto next;
        }
        mutex_unlock(&dirty_i->seglist_lock);
 
        /* send small discards */
        list_for_each_entry_safe(entry, this, head, list) {
-               if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+               if (force && entry->len < cpc->trim_minlen)
                        goto skip;
                f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
                cpc->trimmed += entry->len;
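Note: with mode=lfs and multi-segment sections, prefree space is now discarded one whole section at a time, and only when the section is neither currently open (IS_CURSEC) nor holding valid blocks. As a worked example, with the common 4 KiB block / 2 MiB segment geometry (log_blocks_per_seg = 9) and segs_per_sec = 4, one such discard spans 4 << 9 = 2048 blocks, i.e. 8 MiB. The force (CP_DISCARD) case now skips this inline discard entirely and is served by the small-discard list below, where entries shorter than cpc->trim_minlen are dropped.
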
@@ -1221,6 +1251,9 @@ void allocate_new_segments(struct f2fs_sb_info *sbi)
 {
        int i;
 
+       if (test_opt(sbi, LFS))
+               return;
+
        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
                __allocate_new_segments(sbi, i);
 }
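Note: under mode=lfs, allocate_new_segments() becomes a no-op. The apparent intent is to keep segment selection strictly under the sequential log allocator's control, since pre-opening fresh segments per data temperature only serves allocation paths that LFS's append-only discipline forbids.
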
@@ -1412,7 +1445,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = META,
-               .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+               .op = REQ_OP_WRITE,
+               .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
                .old_blkaddr = page->index,
                .new_blkaddr = page->index,
                .page = page,
@@ -1420,7 +1454,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
        };
 
        if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
-               fio.rw &= ~REQ_META;
+               fio.op_flags &= ~REQ_META;
 
        set_page_writeback(page);
        f2fs_submit_page_mbio(&fio);
@@ -2384,7 +2418,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
        sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
        sm_info->rec_prefree_segments = sm_info->main_segments *
                                        DEF_RECLAIM_PREFREE_SEGMENTS / 100;
-       sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+       if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+               sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+       if (!test_opt(sbi, LFS))
+               sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
        sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
        sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
 
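Note: two independent tweaks in build_segment_manager(). The prefree reclaim threshold, computed as DEF_RECLAIM_PREFREE_SEGMENTS percent of the main segment count, is now capped at DEF_MAX_RECLAIM_PREFREE_SEGMENTS so large volumes do not build an enormous prefree backlog before checkpointing reclaims it; assuming the era's header value of 4096 segments and 2 MiB segments, the cap corresponds to 8 GiB. And the FSYNC in-place-update policy is enabled only outside LFS mode, since IPU rewrites blocks in place and would break the log-structured sequential-write pattern.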