/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/* give 25%, 25%, 50%, 50% of memory to each component respectively */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		if (sbi->sb->s_bdi->dirty_exceeded)
			return false;
		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	}
	return res;
}
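/*
 * A worked example of the math above (a sketch, assuming a 1GB low-memory
 * zone, i.e. avail_ram = 262144 4KB pages, and the default ram_thresh of
 * 10): the budget is 262144 * 10 / 100 = 26214 pages; FREE_NIDS and
 * NAT_ENTRIES may each grow to a quarter of that (26214 >> 2 = 6553 pages,
 * ~25MB), while DIRTY_DENTS and INO_ENTRIES each get half
 * (26214 >> 1 = 13107 pages).
 */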
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
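/*
 * A brief sketch of what the copy above achieves: each NAT block has two
 * on-disk slots that alternate between checkpoints. The current slot is
 * copied into the "next" slot, the copy is dirtied, and set_to_next_nat()
 * flips the version bit so the coming checkpoint reads and writes the new
 * copy while the old one stays intact for recovery.
 */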
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}
bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsynced = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
		fsynced = true;
	up_read(&nm_i->nat_tree_lock);
	return fsynced;
}
bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	}
	up_write(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, its previous nat entry may
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(sbi, NAT_ENTRIES))
		return 0;

	down_write(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always succeeds.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	up_read(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
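/*
 * A worked example of the mapping above (assuming the usual 4KB-block
 * geometry: ADDRS_PER_INODE(fi) = 923 and ADDRS_PER_BLOCK =
 * NIDS_PER_BLOCK = 1018): for file block 1000, 1000 - 923 = 77 lands in
 * the first direct node, so level = 1, offset[] = { NODE_DIR1_BLOCK, 77 }
 * and noffset[1] = 1.
 */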
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if ro is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
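/*
 * A minimal usage sketch (hypothetical caller; error handling elided):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, page_index, LOOKUP_NODE);
 *	if (!err) {
 *		blkaddr = dn.data_blkaddr;
 *		f2fs_put_dnode(&dn);
 *	}
 */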
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	pgoff_t index;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	/* grab the index before the page reference is dropped */
	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
*dn
, unsigned int nofs
,
624 struct dnode_of_data rdn
= *dn
;
626 struct f2fs_node
*rn
;
628 unsigned int child_nofs
;
633 return NIDS_PER_BLOCK
+ 1;
635 trace_f2fs_truncate_nodes_enter(dn
->inode
, dn
->nid
, dn
->data_blkaddr
);
637 page
= get_node_page(F2FS_I_SB(dn
->inode
), dn
->nid
);
639 trace_f2fs_truncate_nodes_exit(dn
->inode
, PTR_ERR(page
));
640 return PTR_ERR(page
);
643 rn
= F2FS_NODE(page
);
645 for (i
= ofs
; i
< NIDS_PER_BLOCK
; i
++, freed
++) {
646 child_nid
= le32_to_cpu(rn
->in
.nid
[i
]);
650 ret
= truncate_dnode(&rdn
);
653 set_nid(page
, i
, 0, false);
656 child_nofs
= nofs
+ ofs
* (NIDS_PER_BLOCK
+ 1) + 1;
657 for (i
= ofs
; i
< NIDS_PER_BLOCK
; i
++) {
658 child_nid
= le32_to_cpu(rn
->in
.nid
[i
]);
659 if (child_nid
== 0) {
660 child_nofs
+= NIDS_PER_BLOCK
+ 1;
664 ret
= truncate_nodes(&rdn
, child_nofs
, 0, depth
- 1);
665 if (ret
== (NIDS_PER_BLOCK
+ 1)) {
666 set_nid(page
, i
, 0, false);
668 } else if (ret
< 0 && ret
!= -ENOENT
) {
676 /* remove current indirect node */
677 dn
->node_page
= page
;
681 f2fs_put_page(page
, 1);
683 trace_f2fs_truncate_nodes_exit(dn
->inode
, freed
);
687 f2fs_put_page(page
, 1);
688 trace_f2fs_truncate_nodes_exit(dn
->inode
, ret
);
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	if (get_dnode_of_data(&dn, 0, LOOKUP_NODE))
		return;

	if (truncate_xattr_node(inode, dn.inode_page)) {
		f2fs_put_dnode(&dn);
		return;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(NODE_MAPPING(sbi), dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should handle the return value as follows.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (!is_checkpointed_node(sbi, ino) &&
						!has_fsynced_inode(sbi, ino))
						set_dentry_mark(page, 1);
					else
						set_dentry_mark(page, 0);
				}
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);
			else
				wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(sbi->por_doing))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);
	unlock_page(page);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	if (PageDirty(page))
		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}
static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
								nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}
static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		down_read(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		up_read(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}
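/*
 * A note on the locking pattern above: radix_tree_preload() pre-allocates
 * tree nodes so that the later radix_tree_insert() cannot fail with
 * -ENOMEM while the spinlock is held; radix_tree_preload_end() must
 * follow on both the success and the collision path.
 */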
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
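/*
 * In short, free nids come from two places: NULL_ADDR entries in the
 * on-disk NAT pages scanned above, and NULL_ADDR entries still sitting in
 * the hot-data summary journal; journal entries that map to a real block
 * address are removed again since those nids are in use.
 */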
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
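/*
 * A minimal lifecycle sketch (hypothetical caller; error handling
 * elided):
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	...
 *	if (err)
 *		alloc_nid_failed(sbi, nid);	// back to NID_NEW or freed
 *	else
 *		alloc_nid_done(sbi, nid);	// drop the free_nid entry
 */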
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
	if (!available_free_memory(sbi, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
		need_free = true;
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}
void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are allocated in bd_inode's mapping tree.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages,
				int start, int nrpages)
{
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	struct address_space *mapping = inode->i_mapping;
	int i, page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (i = 0; page_idx < start + nrpages; page_idx++, i++) {
		/* alloc page in bd_inode for reading node summary info */
		pages[i] = grab_cache_page(mapping, page_idx);
		if (!pages[i])
			break;
		f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio);
	}

	f2fs_submit_merged_bio(sbi, META, READ);
	return i;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct inode *inode = sbi->sb->s_bdev->bd_inode;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(sbi);
	struct page *pages[bio_blocks];
	int i, idx, last_offset, nrpages, err = 0;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* readahead node pages */
		nrpages = ra_sum_pages(sbi, pages, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		for (idx = 0; idx < nrpages; idx++) {
			if (err)
				goto skip;

			lock_page(pages[idx]);
			if (unlikely(!PageUptodate(pages[idx]))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(pages[idx]);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}
			unlock_page(pages[idx]);
skip:
			page_cache_release(pages[idx]);
		}

		invalidate_mapping_pages(inode->i_mapping, addr,
							addr + nrpages);
	}
	return err;
}
*sbi
)
1807 struct f2fs_nm_info
*nm_i
= NM_I(sbi
);
1808 struct curseg_info
*curseg
= CURSEG_I(sbi
, CURSEG_HOT_DATA
);
1809 struct f2fs_summary_block
*sum
= curseg
->sum_blk
;
1812 mutex_lock(&curseg
->curseg_mutex
);
1813 for (i
= 0; i
< nats_in_cursum(sum
); i
++) {
1814 struct nat_entry
*ne
;
1815 struct f2fs_nat_entry raw_ne
;
1816 nid_t nid
= le32_to_cpu(nid_in_journal(sum
, i
));
1818 raw_ne
= nat_in_journal(sum
, i
);
1820 down_write(&nm_i
->nat_tree_lock
);
1821 ne
= __lookup_nat_cache(nm_i
, nid
);
1823 ne
= grab_nat_entry(nm_i
, nid
);
1824 node_info_from_raw_nat(&ne
->ni
, &raw_ne
);
1826 __set_nat_cache_dirty(nm_i
, ne
);
1827 up_write(&nm_i
->nat_tree_lock
);
1829 update_nats_in_cursum(sum
, -i
);
1830 mutex_unlock(&curseg
->curseg_mutex
);
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
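/*
 * The effect is an insertion sort: sets stay ordered by entry_cnt, so the
 * smallest sets are flushed first and get the best chance of fitting into
 * the remaining journal space, while oversized sets (>= max) go straight
 * to the tail and are written to NAT pages instead.
 */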
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (!__has_cursum_space(sum, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		mutex_lock(&curseg->curseg_mutex);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(sum,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(sum, offset);
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		down_write(&NM_I(sbi)->nat_tree_lock);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		up_write(&NM_I(sbi)->nat_tree_lock);

		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		mutex_unlock(&curseg->curseg_mutex);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct nat_entry_set *setvec[NATVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;
	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NATVEC_SIZE, setvec))) {
		unsigned int idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
							MAX_NAT_JENTRIES(sum));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide it by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned int idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}
void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}