/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
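/*
 * Rough sizing note (an added example, assuming the default ram_thresh of
 * 10): with 4GB of low memory, avail_ram * ram_thresh / 100 corresponds to
 * ~400MB worth of pages, so the FREE_NIDS and NAT_ENTRIES caches may each
 * grow to ~100MB (>> 2), and the DIRTY_DENTS, INO_ENTRIES and EXTENT_CACHE
 * footprints to ~200MB (>> 1), before this function starts returning false.
 */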
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
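/*
 * Note: each NAT block exists in two on-disk copies (segment_count_nat
 * counts the pair); next_nat_addr() picks the inactive copy and
 * set_to_next_nat() flips the version bit, so the memcpy above never
 * overwrites the copy that the last checkpoint still depends on.
 */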
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated,
		 * the previous nat entry can remain in the nat cache.
		 * So, reinitialize it with new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	down_write(&nm_i->nat_tree_lock);

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
	up_write(&nm_i->nat_tree_lock);
}
pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
	case 2:
		base += 2 * direct_blks;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
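/*
 * Worked example (an added note, assuming the default layout of 923 block
 * addresses in the inode and 1018 pointers per node block): blocks 0..922
 * are addressed by the inode itself (level 0); block 923 maps to
 * offset[0] = NODE_DIR1_BLOCK, offset[1] = 0 (level 1); the double-indirect
 * area starts at block 923 + 2*1018 + 2*1018*1018 = 2075607, which bounds
 * a file at roughly 3.94TB with 4KB blocks.
 */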
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is not RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
	}
	return err;
}
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
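/*
 * Note on ordering: partially-used indirect trees under the new EOF are
 * trimmed first via truncate_partial_nodes(), then whole subtrees are
 * freed one i_nid slot at a time, so nofs always tracks the node offset
 * of the subtree being dropped.
 */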
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}
struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Caller should act on the return value as follows.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = rw,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.blk_addr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	f2fs_put_page(apage, err ? 1 : 0);
}
/*
 * readahead MAX_RA_NODE number of node pages.
 */
void ra_node_pages(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}
struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1);

	lock_page(page);

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	f2fs_bug_on(sbi, nid != nid_of_node(page));
	return page;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}
void sync_inode_page(struct dnode_of_data *dn)
{
	int ret = 0;

	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		ret = update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		ret = update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		ret = update_inode_page(dn->inode);
	}
	dn->node_changed = ret ? true : false;
}
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	if (!f2fs_write_inline_data(inode, page))
		inode_dec_dirty_pages(inode);
	else
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				return -EIO;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (!ino && is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
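/*
 * Note: flushing in step order (0: indirect nodes, 1: dentry dnodes,
 * 2: file dnodes) batches node blocks of the same kind into sequential
 * writes.  Callers doing fsync pass a non-zero ino and therefore start
 * at step 2, touching only that inode's dnodes.
 */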
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.blk_addr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	unlock_page(page);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}
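/*
 * Return convention of add_free_nid(): -1 when the free nid cache is over
 * its memory budget, 0 when the nid is unusable (nid 0, already allocated,
 * or lost a radix-tree insertion race), and 1 when it was added.
 */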
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}
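/*
 * Note: one call scans at most FREE_NID_PAGES NAT blocks starting at
 * next_scan_nid, plus the NAT journal in the hot data summary, so the
 * cost per refill is bounded; alloc_nid() below simply retries until a
 * free nid shows up.
 */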
/*
 * If this function returns success, caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
			break;
		if (i->state == NID_ALLOC)
			continue;
		__del_from_free_nid_list(nm_i, i);
		kmem_cache_free(free_nid_slab, i);
		nm_i->fcnt--;
		nr_shrink--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}
void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_tmp_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
*sbi
)
1909 struct f2fs_nm_info
*nm_i
= NM_I(sbi
);
1910 struct curseg_info
*curseg
= CURSEG_I(sbi
, CURSEG_HOT_DATA
);
1911 struct f2fs_summary_block
*sum
= curseg
->sum_blk
;
1914 mutex_lock(&curseg
->curseg_mutex
);
1915 for (i
= 0; i
< nats_in_cursum(sum
); i
++) {
1916 struct nat_entry
*ne
;
1917 struct f2fs_nat_entry raw_ne
;
1918 nid_t nid
= le32_to_cpu(nid_in_journal(sum
, i
));
1920 raw_ne
= nat_in_journal(sum
, i
);
1922 ne
= __lookup_nat_cache(nm_i
, nid
);
1924 ne
= grab_nat_entry(nm_i
, nid
);
1925 node_info_from_raw_nat(&ne
->ni
, &raw_ne
);
1927 __set_nat_cache_dirty(nm_i
, ne
);
1929 update_nats_in_cursum(sum
, -i
);
1930 mutex_unlock(&curseg
->curseg_mutex
);
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		__clear_nat_cache_dirty(NM_I(sbi), ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into nat entry sets.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	up_write(&nm_i->nat_tree_lock);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
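/*
 * Example: if dirty_nat_cnt no longer fits in the NAT journal,
 * remove_nats_in_journal() folds the journalled entries back into their
 * sets first, so each set is then flushed either wholly to the journal
 * or wholly to its NAT block, never split across both.
 */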
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}
void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}