/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += (sbi->im[i].ino_num *
				sizeof(struct ino_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
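
/*
 * Illustrative numbers (not from the original source): si_meminfo()
 * reports sizes in pages, and mem_size above is computed in pages too.
 * For example, with 4 GiB of low memory (1,048,576 4 KiB pages) and
 * ram_thresh == 10, the 25% classes (FREE_NIDS, NAT_ENTRIES) may grow to
 * (1048576 * 10 / 100) >> 2 = 26214 pages (~102 MiB), while the 50%
 * classes get (1048576 * 10 / 100) >> 1 = 52428 pages (~204 MiB).
 */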

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
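
/*
 * Background note: F2FS keeps two on-disk copies of each NAT block and
 * ping-pongs between them across checkpoints. Copying the current block
 * into the "next" location and flipping the per-block bit with
 * set_to_next_nat() is what makes the updated copy the valid one at the
 * next checkpoint.
 */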

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	if (get_nat_flag(ne, IS_DIRTY))
		return;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	list_move_tail(&ne->list, &head->entry_list);
	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (head) {
		list_move_tail(&ne->list, &nm_i->nat_entries);
		set_nat_flag(ne, IS_DIRTY, false);
		head->entry_cnt--;
		nm_i->dirty_nat_cnt--;
	}
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		node_info_from_raw_nat(&e->ni, ne);
	} else {
		f2fs_bug_on(sbi, nat_get_ino(e) != ne->ino ||
				nat_get_blkaddr(e) != ne->block_addr ||
				nat_get_version(e) != ne->version);
	}
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	up_read(&nm_i->nat_tree_lock);
	/* cache nat entry */
	down_write(&nm_i->nat_tree_lock);
	cache_nat_entry(sbi, nid, &ne);
	up_write(&nm_i->nat_tree_lock);
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		/* fall through */
	case 2:
		base += 2 * direct_blks;
		/* fall through */
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
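
/*
 * Worked example (illustrative, assuming the common 4 KiB block layout
 * where ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018 and direct_index is
 * 923): with max_level == 2 and cur_level == 1, a missing indirect-node
 * subtree covers skipped_unit = 1018 * 1018 blocks starting past
 * base = 2 * 1018 + 923, so the value returned is the first offset of
 * the next such subtree after pgofs; callers can use it to skip a whole
 * hole at once instead of probing block by block.
 */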

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
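
/*
 * Worked example (illustrative, assuming ADDRS_PER_INODE == 923 and
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): blocks 0..922 live in the
 * inode itself, so get_node_path() returns level 0; block 923 is the
 * first slot of the first direct node, returning level 1 with
 * offset[] = { NODE_DIR1_BLOCK, 0 }; block 923 + 2 * 1018 is the first
 * slot reached through NODE_IND1_BLOCK, returning level 2.
 */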

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
	}
	return err;
}

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	pgoff_t index;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	/* cache the page index before dropping our reference */
	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi), index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 1);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}

struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(sbi, old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller must release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	f2fs_bug_on(sbi, nid != nid_of_node(page));
	return page;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

void sync_inode_page(struct dnode_of_data *dn)
{
	int ret = 0;

	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		ret = update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		ret = update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		ret = update_inode_page(dn->inode);
	}
	dn->node_changed = ret ? true : false;
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

void move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(sbi, PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index, end;
	struct pagevec pvec;
	struct page *last_page = NULL;

	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc, bool atomic)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return -EIO;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			}
			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
	return ret ? -EIO : 0;
}

int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				return -EIO;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc))
				unlock_page(page);

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}
	return nwritten;
}

int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = ULONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	set_page_writeback(page);
	fio.old_blkaddr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, NODE, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	struct nat_entry *ne;

	if (!available_free_memory(sbi, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS)) {
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		radix_tree_preload_end();
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	radix_tree_preload_end();
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
		need_free = true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(sbi, start_nid, true) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	up_read(&curseg->journal_rwsem);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(FAULT_ALLOC_NID))
		return false;
#endif
	if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		list_for_each_entry(i, &nm_i->free_nid_list, list)
			if (i->state == NID_NEW)
				break;

		f2fs_bug_on(sbi, i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
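
/*
 * Typical calling pattern (see get_dnode_of_data() above): a caller
 * reserves a nid with alloc_nid(), tries to build the node page, and
 * then either commits the reservation with alloc_nid_done() or rolls
 * it back with alloc_nid_failed():
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset, NULL);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);
 *	else
 *		alloc_nid_done(sbi, nid);
 */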
1876
1877 /*
1878 * alloc_nid() should be called prior to this function.
1879 */
1880 void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
1881 {
1882 struct f2fs_nm_info *nm_i = NM_I(sbi);
1883 struct free_nid *i;
1884
1885 spin_lock(&nm_i->free_nid_list_lock);
1886 i = __lookup_free_nid_list(nm_i, nid);
1887 f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1888 __del_from_free_nid_list(nm_i, i);
1889 spin_unlock(&nm_i->free_nid_list_lock);
1890
1891 kmem_cache_free(free_nid_slab, i);
1892 }
1893
1894 /*
1895 * alloc_nid() should be called prior to this function.
1896 */
1897 void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
1898 {
1899 struct f2fs_nm_info *nm_i = NM_I(sbi);
1900 struct free_nid *i;
1901 bool need_free = false;
1902
1903 if (!nid)
1904 return;
1905
1906 spin_lock(&nm_i->free_nid_list_lock);
1907 i = __lookup_free_nid_list(nm_i, nid);
1908 f2fs_bug_on(sbi, !i || i->state != NID_ALLOC);
1909 if (!available_free_memory(sbi, FREE_NIDS)) {
1910 __del_from_free_nid_list(nm_i, i);
1911 need_free = true;
1912 } else {
1913 i->state = NID_NEW;
1914 nm_i->fcnt++;
1915 }
1916 spin_unlock(&nm_i->free_nid_list_lock);
1917
1918 if (need_free)
1919 kmem_cache_free(free_nid_slab, i);
1920 }
1921
1922 int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
1923 {
1924 struct f2fs_nm_info *nm_i = NM_I(sbi);
1925 struct free_nid *i, *next;
1926 int nr = nr_shrink;
1927
1928 if (!mutex_trylock(&nm_i->build_lock))
1929 return 0;
1930
1931 spin_lock(&nm_i->free_nid_list_lock);
1932 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
1933 if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
1934 break;
1935 if (i->state == NID_ALLOC)
1936 continue;
1937 __del_from_free_nid_list(nm_i, i);
1938 kmem_cache_free(free_nid_slab, i);
1939 nm_i->fcnt--;
1940 nr_shrink--;
1941 }
1942 spin_unlock(&nm_i->free_nid_list_lock);
1943 mutex_unlock(&nm_i->build_lock);
1944
1945 return nr - nr_shrink;
1946 }
1947
1948 void recover_inline_xattr(struct inode *inode, struct page *page)
1949 {
1950 void *src_addr, *dst_addr;
1951 size_t inline_size;
1952 struct page *ipage;
1953 struct f2fs_inode *ri;
1954
1955 ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
1956 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
1957
1958 ri = F2FS_INODE(page);
1959 if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
1960 clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
1961 goto update_inode;
1962 }
1963
1964 dst_addr = inline_xattr_addr(ipage);
1965 src_addr = inline_xattr_addr(page);
1966 inline_size = inline_xattr_size(inode);
1967
1968 f2fs_wait_on_page_writeback(ipage, NODE, true);
1969 memcpy(dst_addr, src_addr, inline_size);
1970 update_inode:
1971 update_inode(inode, ipage);
1972 f2fs_put_page(ipage, 1);
1973 }
1974
1975 void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
1976 {
1977 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1978 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
1979 nid_t new_xnid = nid_of_node(page);
1980 struct node_info ni;
1981
1982 /* 1: invalidate the previous xattr nid */
1983 if (!prev_xnid)
1984 goto recover_xnid;
1985
1986 /* Deallocate node address */
1987 get_node_info(sbi, prev_xnid, &ni);
1988 f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
1989 invalidate_blocks(sbi, ni.blk_addr);
1990 dec_valid_node_count(sbi, inode);
1991 set_node_addr(sbi, &ni, NULL_ADDR, false);
1992
1993 recover_xnid:
1994 /* 2: allocate new xattr nid */
1995 if (unlikely(!inc_valid_node_count(sbi, inode)))
1996 f2fs_bug_on(sbi, 1);
1997
1998 remove_free_nid(NM_I(sbi), new_xnid);
1999 get_node_info(sbi, new_xnid, &ni);
2000 ni.ino = inode->i_ino;
2001 set_node_addr(sbi, &ni, NEW_ADDR, false);
2002 F2FS_I(inode)->i_xattr_nid = new_xnid;
2003
2004 /* 3: update xattr blkaddr */
2005 refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
2006 set_node_addr(sbi, &ni, blkaddr, false);
2007
2008 update_inode_page(inode);
2009 }
2010
2011 int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2012 {
2013 struct f2fs_inode *src, *dst;
2014 nid_t ino = ino_of_node(page);
2015 struct node_info old_ni, new_ni;
2016 struct page *ipage;
2017
2018 get_node_info(sbi, ino, &old_ni);
2019
2020 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2021 return -EINVAL;
2022
2023 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2024 if (!ipage)
2025 return -ENOMEM;
2026
2027 /* Should not use this inode from free nid list */
2028 remove_free_nid(NM_I(sbi), ino);
2029
2030 SetPageUptodate(ipage);
2031 fill_node_footer(ipage, ino, ino, 0, true);
2032
2033 src = F2FS_INODE(page);
2034 dst = F2FS_INODE(ipage);
2035
2036 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2037 dst->i_size = 0;
2038 dst->i_blocks = cpu_to_le64(1);
2039 dst->i_links = cpu_to_le32(1);
2040 dst->i_xattr_nid = 0;
2041 dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;
2042
2043 new_ni = old_ni;
2044 new_ni.ino = ino;
2045
2046 if (unlikely(!inc_valid_node_count(sbi, NULL)))
2047 WARN_ON(1);
2048 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2049 inc_valid_inode_count(sbi);
2050 set_page_dirty(ipage);
2051 f2fs_put_page(ipage, 1);
2052 return 0;
2053 }
2054
2055 int restore_node_summary(struct f2fs_sb_info *sbi,
2056 unsigned int segno, struct f2fs_summary_block *sum)
2057 {
2058 struct f2fs_node *rn;
2059 struct f2fs_summary *sum_entry;
2060 block_t addr;
2061 int bio_blocks = MAX_BIO_BLOCKS(sbi);
2062 int i, idx, last_offset, nrpages;
2063
2064 /* scan the node segment */
2065 last_offset = sbi->blocks_per_seg;
2066 addr = START_BLOCK(sbi, segno);
2067 sum_entry = &sum->entries[0];
2068
2069 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2070 nrpages = min(last_offset - i, bio_blocks);
2071
2072 /* readahead node pages */
2073 ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2074
2075 for (idx = addr; idx < addr + nrpages; idx++) {
2076 struct page *page = get_tmp_page(sbi, idx);
2077
2078 rn = F2FS_NODE(page);
2079 sum_entry->nid = rn->footer.nid;
2080 sum_entry->version = 0;
2081 sum_entry->ofs_in_node = 0;
2082 sum_entry++;
2083 f2fs_put_page(page, 1);
2084 }
2085
2086 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2087 addr + nrpages);
2088 }
2089 return 0;
2090 }
2091
2092 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2093 {
2094 struct f2fs_nm_info *nm_i = NM_I(sbi);
2095 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2096 struct f2fs_journal *journal = curseg->journal;
2097 int i;
2098
2099 down_write(&curseg->journal_rwsem);
2100 for (i = 0; i < nats_in_cursum(journal); i++) {
2101 struct nat_entry *ne;
2102 struct f2fs_nat_entry raw_ne;
2103 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2104
2105 raw_ne = nat_in_journal(journal, i);
2106
2107 ne = __lookup_nat_cache(nm_i, nid);
2108 if (!ne) {
2109 ne = grab_nat_entry(nm_i, nid);
2110 node_info_from_raw_nat(&ne->ni, &raw_ne);
2111 }
2112 __set_nat_cache_dirty(nm_i, ne);
2113 }
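	/* all i journal nats now live in the nat cache: drop them here */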
2114 update_nats_in_cursum(journal, -i);
2115 up_write(&curseg->journal_rwsem);
2116 }
2117
2118 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2119 struct list_head *head, int max)
2120 {
2121 struct nat_entry_set *cur;
2122
2123 if (nes->entry_cnt >= max)
2124 goto add_out;
2125
2126 list_for_each_entry(cur, head, set_list) {
2127 if (cur->entry_cnt >= nes->entry_cnt) {
2128 list_add(&nes->set_list, cur->set_list.prev);
2129 return;
2130 }
2131 }
2132 add_out:
2133 list_add_tail(&nes->set_list, head);
2134 }
2135
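/*
 * __adjust_nat_entry_set() keeps the pending list sorted by entry_cnt
 * in ascending order, with any set too large for the journal
 * (entry_cnt >= max) appended at the tail.  Flushing the smallest sets
 * first lets as many of them as possible take the in-journal fast
 * path.  Worked example, with max = 10 and sets of 7, 3, 12 and 5
 * entries inserted in that order:
 *
 *	insert 7:  [7]
 *	insert 3:  [3, 7]
 *	insert 12: [3, 7, 12]	(12 >= max, so straight to the tail)
 *	insert 5:  [3, 5, 7, 12]
 */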
2136 static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2137 struct nat_entry_set *set)
2138 {
2139 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2140 struct f2fs_journal *journal = curseg->journal;
2141 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2142 bool to_journal = true;
2143 struct f2fs_nat_block *nat_blk;
2144 struct nat_entry *ne, *cur;
2145 struct page *page = NULL;
2146
2147 /*
2148 * there are two ways to flush nat entries:
2149 * #1, flush them to the journal in the current hot data summary block.
2150 * #2, flush them to the nat page.
2151 */
2152 if (!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2153 to_journal = false;
2154
2155 if (to_journal) {
2156 down_write(&curseg->journal_rwsem);
2157 } else {
2158 page = get_next_nat_page(sbi, start_nid);
2159 nat_blk = page_address(page);
2160 f2fs_bug_on(sbi, !nat_blk);
2161 }
2162
2163 /* flush dirty nats in nat entry set */
2164 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2165 struct f2fs_nat_entry *raw_ne;
2166 nid_t nid = nat_get_nid(ne);
2167 int offset;
2168
2169 if (nat_get_blkaddr(ne) == NEW_ADDR)
2170 continue;
2171
2172 if (to_journal) {
2173 offset = lookup_journal_in_cursum(journal,
2174 NAT_JOURNAL, nid, 1);
2175 f2fs_bug_on(sbi, offset < 0);
2176 raw_ne = &nat_in_journal(journal, offset);
2177 nid_in_journal(journal, offset) = cpu_to_le32(nid);
2178 } else {
2179 raw_ne = &nat_blk->entries[nid - start_nid];
2180 }
2181 raw_nat_from_node_info(raw_ne, &ne->ni);
2182 nat_reset_flag(ne);
2183 __clear_nat_cache_dirty(NM_I(sbi), ne);
2184 if (nat_get_blkaddr(ne) == NULL_ADDR)
2185 add_free_nid(sbi, nid, false);
2186 }
2187
2188 if (to_journal)
2189 up_write(&curseg->journal_rwsem);
2190 else
2191 f2fs_put_page(page, 1);
2192
2193 f2fs_bug_on(sbi, set->entry_cnt);
2194
2195 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2196 kmem_cache_free(nat_entry_set_slab, set);
2197 }
2198
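/*
 * Note that the destination is chosen once per set, not per entry
 * (recap of the logic above, for illustration):
 *
 *	if (__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
 *		// whole set fits: write via nat_in_journal(journal, offset)
 *	else
 *		// rewrite the full block behind get_next_nat_page()
 *
 * Entries that end up at NULL_ADDR are recycled onto the free nid list
 * by add_free_nid() before the set is destroyed.
 */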
2199 /*
2200 * This function is called during the checkpointing process.
2201 */
2202 void flush_nat_entries(struct f2fs_sb_info *sbi)
2203 {
2204 struct f2fs_nm_info *nm_i = NM_I(sbi);
2205 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2206 struct f2fs_journal *journal = curseg->journal;
2207 struct nat_entry_set *setvec[SETVEC_SIZE];
2208 struct nat_entry_set *set, *tmp;
2209 unsigned int found;
2210 nid_t set_idx = 0;
2211 LIST_HEAD(sets);
2212
2213 if (!nm_i->dirty_nat_cnt)
2214 return;
2215
2216 down_write(&nm_i->nat_tree_lock);
2217
2218 /*
2219 * if there is not enough space in the journal to store all dirty
2220 * nat entries, remove them from the journal and merge them into
2221 * the nat entry sets.
2222 */
2223 if (!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2224 remove_nats_in_journal(sbi);
2225
2226 while ((found = __gang_lookup_nat_set(nm_i,
2227 set_idx, SETVEC_SIZE, setvec))) {
2228 unsigned idx;
2229 set_idx = setvec[found - 1]->set + 1;
2230 for (idx = 0; idx < found; idx++)
2231 __adjust_nat_entry_set(setvec[idx], &sets,
2232 MAX_NAT_JENTRIES(journal));
2233 }
2234
2235 /* flush the dirty nat entry sets, smallest first */
2236 list_for_each_entry_safe(set, tmp, &sets, set_list)
2237 __flush_nat_entry_set(sbi, set);
2238
2239 up_write(&nm_i->nat_tree_lock);
2240
2241 f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
2242 }
2243
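/*
 * Checkpoint-time call flow (sketch): write_checkpoint() reaches this
 * point once no further NAT mutations can occur, which is why one
 * down_write(&nm_i->nat_tree_lock) can cover the whole pass:
 *
 *	flush_nat_entries(sbi)
 *	    remove_nats_in_journal(sbi)	// only if the journal is tight
 *	    __adjust_nat_entry_set(...)	// sort sets, smallest first
 *	    __flush_nat_entry_set(...)	// journal or NAT page per set
 */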
2244 static int init_node_manager(struct f2fs_sb_info *sbi)
2245 {
2246 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2247 struct f2fs_nm_info *nm_i = NM_I(sbi);
2248 unsigned char *version_bitmap;
2249 unsigned int nat_segs, nat_blocks;
2250
2251 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2252
2253 /* segment_count_nat counts both segments of each NAT pair, so divide by 2. */
2254 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2255 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2256
2257 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
2258
2259 /* unused nids: 0, node, and meta (root is counted as a valid node) */
2260 nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM;
2261 nm_i->fcnt = 0;
2262 nm_i->nat_cnt = 0;
2263 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2264 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2265 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2266
2267 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2268 INIT_LIST_HEAD(&nm_i->free_nid_list);
2269 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2270 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2271 INIT_LIST_HEAD(&nm_i->nat_entries);
2272
2273 mutex_init(&nm_i->build_lock);
2274 spin_lock_init(&nm_i->free_nid_list_lock);
2275 init_rwsem(&nm_i->nat_tree_lock);
2276
2277 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2278 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2279 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2280 if (!version_bitmap)
2281 return -EFAULT;
2282
2283 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2284 GFP_KERNEL);
2285 if (!nm_i->nat_bitmap)
2286 return -ENOMEM;
2287 return 0;
2288 }
2289
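/*
 * Worked example of the geometry above, assuming 4KB blocks and
 * 512-block segments (log_blocks_per_seg == 9): a superblock with
 * segment_count_nat == 8 provides 4 usable NAT segments (the other 4
 * hold the mirror copies), so
 *
 *	nat_blocks = 4 << 9 = 2048
 *	max_nid    = NAT_ENTRY_PER_BLOCK * nat_blocks
 *	           = 455 * 2048 = 931,840 addressable nodes
 *
 * where NAT_ENTRY_PER_BLOCK is 455 for 4KB blocks (4096 bytes / 9-byte
 * raw nat entries).
 */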
2290 int build_node_manager(struct f2fs_sb_info *sbi)
2291 {
2292 int err;
2293
2294 sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
2295 if (!sbi->nm_info)
2296 return -ENOMEM;
2297
2298 err = init_node_manager(sbi);
2299 if (err)
2300 return err;
2301
2302 build_free_nids(sbi);
2303 return 0;
2304 }
2305
2306 void destroy_node_manager(struct f2fs_sb_info *sbi)
2307 {
2308 struct f2fs_nm_info *nm_i = NM_I(sbi);
2309 struct free_nid *i, *next_i;
2310 struct nat_entry *natvec[NATVEC_SIZE];
2311 struct nat_entry_set *setvec[SETVEC_SIZE];
2312 nid_t nid = 0;
2313 unsigned int found;
2314
2315 if (!nm_i)
2316 return;
2317
2318 /* destroy free nid list */
2319 spin_lock(&nm_i->free_nid_list_lock);
2320 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
2321 f2fs_bug_on(sbi, i->state == NID_ALLOC);
2322 __del_from_free_nid_list(nm_i, i);
2323 nm_i->fcnt--;
2324 spin_unlock(&nm_i->free_nid_list_lock);
2325 kmem_cache_free(free_nid_slab, i);
2326 spin_lock(&nm_i->free_nid_list_lock);
2327 }
2328 f2fs_bug_on(sbi, nm_i->fcnt);
2329 spin_unlock(&nm_i->free_nid_list_lock);
2330
2331 /* destroy nat cache */
2332 down_write(&nm_i->nat_tree_lock);
2333 while ((found = __gang_lookup_nat_cache(nm_i,
2334 nid, NATVEC_SIZE, natvec))) {
2335 unsigned idx;
2336
2337 nid = nat_get_nid(natvec[found - 1]) + 1;
2338 for (idx = 0; idx < found; idx++)
2339 __del_from_nat_cache(nm_i, natvec[idx]);
2340 }
2341 f2fs_bug_on(sbi, nm_i->nat_cnt);
2342
2343 /* destroy nat set cache */
2344 nid = 0;
2345 while ((found = __gang_lookup_nat_set(nm_i,
2346 nid, SETVEC_SIZE, setvec))) {
2347 unsigned idx;
2348
2349 nid = setvec[found - 1]->set + 1;
2350 for (idx = 0; idx < found; idx++) {
2351 /* entry_cnt may be nonzero if a checkpoint error (cp_error) occurred */
2352 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
2353 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
2354 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
2355 }
2356 }
2357 up_write(&nm_i->nat_tree_lock);
2358
2359 kfree(nm_i->nat_bitmap);
2360 sbi->nm_info = NULL;
2361 kfree(nm_i);
2362 }
2363
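/*
 * Both teardown loops above rely on the radix-tree gang-lookup cursor
 * pattern: each pass fetches up to a vector's worth of entries
 * starting at @nid, then restarts one key past the last entry
 * returned, e.g.
 *
 *	nid = nat_get_nid(natvec[found - 1]) + 1;
 *
 * so the scan terminates even though entries are being deleted
 * underneath it.
 */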
2364 int __init create_node_manager_caches(void)
2365 {
2366 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
2367 sizeof(struct nat_entry));
2368 if (!nat_entry_slab)
2369 goto fail;
2370
2371 free_nid_slab = f2fs_kmem_cache_create("free_nid",
2372 sizeof(struct free_nid));
2373 if (!free_nid_slab)
2374 goto destroy_nat_entry;
2375
2376 nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
2377 sizeof(struct nat_entry_set));
2378 if (!nat_entry_set_slab)
2379 goto destroy_free_nid;
2380 return 0;
2381
2382 destroy_free_nid:
2383 kmem_cache_destroy(free_nid_slab);
2384 destroy_nat_entry:
2385 kmem_cache_destroy(nat_entry_slab);
2386 fail:
2387 return -ENOMEM;
2388 }
2389
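/*
 * The error handling above follows the usual kernel unwind idiom:
 * every failure jumps to a label that releases only what was already
 * allocated, in reverse order.  Generic shape (sketch only; alloc_a,
 * alloc_b and free_a are hypothetical):
 */
#if 0
	a = alloc_a();
	if (!a)
		goto fail;
	b = alloc_b();
	if (!b)
		goto destroy_a;
	return 0;
destroy_a:
	free_a(a);
fail:
	return -ENOMEM;
#endif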
2390 void destroy_node_manager_caches(void)
2391 {
2392 kmem_cache_destroy(nat_entry_set_slab);
2393 kmem_cache_destroy(free_nid_slab);
2394 kmem_cache_destroy(nat_entry_slab);
2395 }