/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;

bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50% and 50% of low memory to FREE_NIDS,
	 * NAT_ENTRIES, DIRTY_DENTS, INO_ENTRIES and EXTENT_CACHE
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
		if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

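/*
 * Worked example (illustrative numbers only): with 1 GiB of low memory
 * (avail_ram = 262144 pages of 4 KiB) and ram_thresh = 10 (assumed
 * default), the 25% classes get (262144 * 10 / 100) >> 2 = 6553 pages
 * and the 50% classes get (262144 * 10 / 100) >> 1 = 13107 pages.
 * Object counts are converted to pages the same way the code does:
 *
 *	mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> PAGE_SHIFT;
 *	res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 */
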
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	} else {
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	}
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, a stale nat entry for its
		 * previous user can remain in the nat cache, so
		 * reinitialize it with the new information
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

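/*
 * A minimal sketch of how the helpers above combine on a node-block
 * relocation; this is the pattern f2fs_write_node_page() below follows,
 * shown here for orientation only:
 *
 *	get_node_info(sbi, nid, &ni);		-- look up the old address
 *	fio.old_blkaddr = ni.blk_addr;
 *	write_node_page(nid, &fio);		-- allocates fio.new_blkaddr
 *	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
 */
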
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function always succeeds: on a nat cache miss it falls back to
 * the journal and then to the on-disk nat block, and caches the result.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	up_read(&nm_i->nat_tree_lock);
	/* cache nat entry */
	down_write(&nm_i->nat_tree_lock);
	cache_nat_entry(sbi, nid, &ne);
	up_write(&nm_i->nat_tree_lock);
}

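/*
 * Typical caller sketch for get_node_info(), grounded in truncate_node()
 * below. The lookup order above is nat cache -> nat journal -> on-disk
 * nat block, and a NULL_ADDR result means the node was never written:
 *
 *	struct node_info ni;
 *
 *	get_node_info(sbi, dn->nid, &ni);
 *	if (ni.blk_addr == NULL_ADDR)
 *		-- nothing on disk to invalidate
 */
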
/*
 * Readahead up to n sibling node pages, capped at NIDS_PER_BLOCK.
 */
static void ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

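/*
 * Worked example for get_next_page_offset() (constants assumed for a
 * default 4 KiB-block layout: direct_index = 923, direct_blks = 1018):
 * a missing direct node at cur_level == max_level == 1 gives base = 923
 * and skipped_unit = 1018, so for pgofs = 1500 the next candidate is
 *
 *	((1500 - 923) / 1018 + 1) * 1018 + 923 = 1941
 *
 * i.e. the whole hole covered by the absent node (923..1940) is skipped.
 */
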
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}

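/*
 * Worked example for get_node_path() (same assumed constants,
 * direct_index = 923, direct_blks = 1018): block 2000 falls past the
 * inode's direct pointers (2000 - 923 = 1077) and past the first direct
 * node (1077 - 1018 = 59), so the path is
 *
 *	level = 1, offset[] = { NODE_DIR2_BLOCK, 59 }, noffset[1] = 2
 *
 * i.e. block 2000 is entry 59 of the second direct node.
 */
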
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() when mode is ALLOC_NODE; the lookup modes
 * (LOOKUP_NODE and LOOKUP_NODE_RA) do not need to care about the lock.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, we should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
	}
	return err;
}

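/*
 * Minimal lookup sketch for get_dnode_of_data(), modelled on its callers
 * elsewhere in f2fs (not part of this file):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
 *	f2fs_put_dnode(&dn);
 *
 * An ALLOC_NODE caller would wrap this in f2fs_lock_op()/f2fs_unlock_op().
 */
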
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	pgoff_t index;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	/* remember the index before dropping our reference to the page */
	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	f2fs_i_xnid_write(inode, 0);

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = rw,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(!PageUptodate(page)))
		goto out_err;

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_bug_on(sbi, 1);
		ClearPageUptodate(page);
out_err:
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

void move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(sbi, PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index, end;
	struct pagevec pvec;
	struct page *last_page = NULL;

	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return -EIO;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						update_inode(inode, page);
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			}
			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
			ino, last_page->index);
		lock_page(last_page);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
	return ret ? -EIO : 0;
}

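/*
 * fsync usage sketch (hypothetical caller, modelled on f2fs's fsync path):
 *
 *	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
 *
 * With atomic == true, only the dnode written last carries the fsync
 * mark, so roll-forward recovery can tell where the fsync'ed node chain
 * of this inode ends.
 */
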
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				return -EIO;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = ULONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.old_blkaddr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

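/*
 * Return convention of add_free_nid(), as implemented above:
 *	-1: stop scanning, the free nid cache is over its memory budget
 *	 0: nid was not added (nid 0, already in use, or lost a race)
 *	 1: nid added to the free list
 * scan_nat_page() below only checks for "< 0" to abort the current scan.
 */
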
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* resume the scan from here next time to keep finding free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	up_read(&curseg->journal_rwsem);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_ALLOC_NID))
		return false;
#endif
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and the nat cache to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}

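/*
 * nid allocation protocol sketch, mirroring the ALLOC_NODE path in
 * get_dnode_of_data() above:
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	dn->nid = nid;
 *	page = new_node_page(dn, ofs, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	-- nid goes back on the free list
 *	else
 *		alloc_nid_done(sbi, nid);	-- free_nid entry is released
 */
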
1864 /*
1865 * alloc_nid() should be called prior to this function.
1866 */
1867 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1868 {
1869 struct f2fs_nm_info *nm_i = NM_I(sbi);
1870 struct free_nid *i;
1871
1872 spin_lock(&nm_i->free_nid_list_lock);
1873 i = __lookup_free_nid_list(nm_i, nid);
1874 f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1875 __del_from_free_nid_list(nm_i, i);
1876 spin_unlock(&nm_i->free_nid_list_lock);
1877
1878 kmem_cache_free(free_nid_slab, i);
1879 }
1880
1881 /*
1882 * alloc_nid() should be called prior to this function.
1883 */
1884 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1885 {
1886 struct f2fs_nm_info *nm_i = NM_I(sbi);
1887 struct free_nid *i;
1888 bool need_free = false;
1889
1890 if (!nid)
1891 return;
1892
1893 spin_lock(&nm_i->free_nid_list_lock);
1894 i = __lookup_free_nid_list(nm_i, nid);
1895 f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1896 if (!available_free_memory(sbi, FREE_NIDS)) {
1897 __del_from_free_nid_list(nm_i, i);
1898 need_free = true;
1899 } else {
1900 i->state = NID_NEW;
1901 nm_i->fcnt++;
1902 }
1903 spin_unlock(&nm_i->free_nid_list_lock);
1904
1905 if (need_free)
1906 kmem_cache_free(free_nid_slab, i);
1907 }
1908
1909 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
1910 {
1911 struct f2fs_nm_info *nm_i = NM_I(sbi);
1912 struct free_nid *i, *next;
1913 int nr = nr_shrink;
1914
1915 if (!mutex_trylock(&nm_i->build_lock))
1916 return 0;
1917
1918 spin_lock(&nm_i->free_nid_list_lock);
1919 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
1920 if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
1921 break;
1922 if (i->state == NID_ALLOC)
1923 continue;
1924 __del_from_free_nid_list(nm_i, i);
1925 kmem_cache_free(free_nid_slab, i);
1926 nm_i->fcnt--;
1927 nr_shrink--;
1928 }
1929 spin_unlock(&nm_i->free_nid_list_lock);
1930 mutex_unlock(&nm_i->build_lock);
1931
1932 return nr - nr_shrink;
1933 }
1934
1935 void recover_inline_xattr(struct inode *inode, struct page *page)
1936 {
1937 void *src_addr, *dst_addr;
1938 size_t inline_size;
1939 struct page *ipage;
1940 struct f2fs_inode *ri;
1941
1942 ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
1943 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
1944
1945 ri = F2FS_INODE(page);
1946 if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
1947 clear_inode_flag(inode, FI_INLINE_XATTR);
1948 goto update_inode;
1949 }
1950
1951 dst_addr = inline_xattr_addr(ipage);
1952 src_addr = inline_xattr_addr(page);
1953 inline_size = inline_xattr_size(inode);
1954
1955 f2fs_wait_on_page_writeback(ipage, NODE, true);
1956 memcpy(dst_addr, src_addr, inline_size);
1957 update_inode:
1958 update_inode(inode, ipage);
1959 f2fs_put_page(ipage, 1);
1960 }
1961
1962 void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
1963 {
1964 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1965 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
1966 nid_t new_xnid = nid_of_node(page);
1967 struct node_info ni;
1968
1969 /* 1: invalidate the previous xattr nid */
1970 if (!prev_xnid)
1971 goto recover_xnid;
1972
1973 /* Deallocate node address */
1974 get_node_info(sbi, prev_xnid, &ni);
1975 f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
1976 invalidate_blocks(sbi, ni.blk_addr);
1977 dec_valid_node_count(sbi, inode);
1978 set_node_addr(sbi, &ni, NULL_ADDR, false);
1979
1980 recover_xnid:
1981 /* 2: allocate new xattr nid */
1982 if (unlikely(!inc_valid_node_count(sbi, inode)))
1983 f2fs_bug_on(sbi, 1);
1984
1985 remove_free_nid(NM_I(sbi), new_xnid);
1986 get_node_info(sbi, new_xnid, &ni);
1987 ni.ino = inode->i_ino;
1988 set_node_addr(sbi, &ni, NEW_ADDR, false);
1989 f2fs_i_xnid_write(inode, new_xnid);
1990
1991 /* 3: update xattr blkaddr */
1992 refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
1993 set_node_addr(sbi, &ni, blkaddr, false);
1994 }
1995
1996 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
1997 {
1998 struct f2fs_inode *src, *dst;
1999 nid_t ino = ino_of_node(page);
2000 struct node_info old_ni, new_ni;
2001 struct page *ipage;
2002
2003 get_node_info(sbi, ino, &old_ni);
2004
2005 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2006 return -EINVAL;
2007
2008 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2009 if (!ipage)
2010 return -ENOMEM;
2011
2012 /* Should not use this inode from free nid list */
2013 remove_free_nid(NM_I(sbi), ino);
2014
2015 SetPageUptodate(ipage);
2016 fill_node_footer(ipage, ino, ino, 0, true);
2017
2018 src = F2FS_INODE(page);
2019 dst = F2FS_INODE(ipage);
2020
2021 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2022 dst->i_size = 0;
2023 dst->i_blocks = cpu_to_le64(1);
2024 dst->i_links = cpu_to_le32(1);
2025 dst->i_xattr_nid = 0;
2026 dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
2027
2028 new_ni = old_ni;
2029 new_ni.ino = ino;
2030
2031 if (unlikely(!inc_valid_node_count(sbi, NULL)))
2032 WARN_ON(1);
2033 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2034 inc_valid_inode_count(sbi);
2035 set_page_dirty(ipage);
2036 f2fs_put_page(ipage, 1);
2037 return 0;
2038 }
2039
2040 int restore_node_summary(struct f2fs_sb_info *sbi,
2041 unsigned int segno, struct f2fs_summary_block *sum)
2042 {
2043 struct f2fs_node *rn;
2044 struct f2fs_summary *sum_entry;
2045 block_t addr;
2046 int bio_blocks = MAX_BIO_BLOCKS(sbi);
2047 int i, idx, last_offset, nrpages;
2048
2049 /* scan the node segment */
2050 last_offset = sbi->blocks_per_seg;
2051 addr = START_BLOCK(sbi, segno);
2052 sum_entry = &sum->entries[0];
2053
2054 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2055 nrpages = min(last_offset - i, bio_blocks);
2056
2057 /* readahead node pages */
2058 ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2059
2060 for (idx = addr; idx < addr + nrpages; idx++) {
2061 struct page *page = get_tmp_page(sbi, idx);
2062
2063 rn = F2FS_NODE(page);
2064 sum_entry->nid = rn->footer.nid;
2065 sum_entry->version = 0;
2066 sum_entry->ofs_in_node = 0;
2067 sum_entry++;
2068 f2fs_put_page(page, 1);
2069 }
2070
2071 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2072 addr + nrpages);
2073 }
2074 return 0;
2075 }
2076
2077 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2078 {
2079 struct f2fs_nm_info *nm_i = NM_I(sbi);
2080 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2081 struct f2fs_journal *journal = curseg->journal;
2082 int i;
2083
2084 down_write(&curseg->journal_rwsem);
2085 for (i = 0; i < nats_in_cursum(journal); i++) {
2086 struct nat_entry *ne;
2087 struct f2fs_nat_entry raw_ne;
2088 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2089
2090 raw_ne = nat_in_journal(journal, i);
2091
2092 ne = __lookup_nat_cache(nm_i, nid);
2093 if (!ne) {
2094 ne = grab_nat_entry(nm_i, nid);
2095 node_info_from_raw_nat(&ne->ni, &raw_ne);
2096 }
2097 __set_nat_cache_dirty(nm_i, ne);
2098 }
2099 update_nats_in_cursum(journal, -i);
2100 up_write(&curseg->journal_rwsem);
2101 }
2102
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

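/*
 * Write one dirty NAT set back, either into the journal (when it still
 * has room for the whole set) or into the set's NAT block. Entries
 * still at NEW_ADDR are skipped, since those nodes have not been
 * written to disk yet; entries that ended up at NULL_ADDR describe
 * freed nodes, so their nids are returned to the free nid list.
 */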
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
					struct nat_entry_set *set)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two ways to flush nat entries:
	 * #1, flush nat entries to the journal in the current hot data
	 *     summary block.
	 * #2, flush nat entries to the nat page.
	 */
	if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;

		if (to_journal) {
			offset = lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR)
			add_free_nid(sbi, nid, false);
	}

	if (to_journal)
		up_write(&curseg->journal_rwsem);
	else
		f2fs_put_page(page, 1);

	f2fs_bug_on(sbi, set->entry_cnt);

	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
	kmem_cache_free(nat_entry_set_slab, set);
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store all the
	 * dirty nat entries, remove all entries from the journal and
	 * merge them into nat entry sets.
	 */
	if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	up_write(&nm_i->nat_tree_lock);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}

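/*
 * Size the node manager from the on-disk layout: max_nid comes from the
 * number of NAT blocks described in the superblock (NAT segments come
 * in pairs, hence the halving), and the NAT version bitmap is
 * duplicated from the checkpoint area so it can be updated in memory
 * as NAT blocks alternate between the two copies.
 */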
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes both segments of each pair, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

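/*
 * Mount-time entry point: allocate the node manager, initialize it from
 * the superblock and checkpoint, then prime the free nid list so the
 * first node allocations do not have to scan NAT pages synchronously.
 */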
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

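/*
 * Unmount-time teardown: drop the free nid list, then the NAT entry and
 * NAT set caches, walking each radix tree with gang lookups in batches
 * of NATVEC_SIZE/SETVEC_SIZE, and finally free the NAT bitmap copy.
 */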
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(sbi, i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->free_nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
			nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt may be nonzero if a checkpoint error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

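/*
 * Module init: create the three slab caches used above. On failure the
 * already-created caches are torn down in reverse order via the goto
 * ladder, so a partial init never leaks a cache.
 */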
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

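/* Module exit: destroy the slab caches in reverse order of creation. */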
void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}