/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: out-of-range nid=%x, run fsck to fix.",
				__func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50% and 50% memory to each component below,
	 * respectively
	 */
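	/*
	 * Illustrative arithmetic (a reading aid, not from the original
	 * source): suppose 4 GiB of low memory, i.e. avail_ram = 1048576
	 * pages with 4 KiB pages, and take ram_thresh = 10 for the sake of
	 * the example.  The overall budget is avail_ram * 10 / 100 =
	 * 104857 pages; FREE_NIDS and NAT_ENTRIES each get a quarter of
	 * that (">> 2", ~26214 pages), while DIRTY_DENTS, INO_ENTRIES and
	 * EXTENT_CACHE each get half (">> 1", ~52428 pages).  mem_size is
	 * compared against these limits after being converted to pages
	 * via PAGE_SHIFT.
	 */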
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* allow at most 20% of total RAM for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_radix_tree_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
	nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in the following conditions:
	 * 1. NEW_ADDR is updated to a valid block address;
	 * 2. an old block address is updated to a new one;
	 */
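	/*
	 * Concretely (an illustration, not from the original source): an
	 * entry dirtied while still at NEW_ADDR is flagged IS_PREALLOC and
	 * kept off any nat_entry_set; once it receives a real block address,
	 * or a clean entry's address changes, it joins its set and
	 * entry_cnt is bumped, which is later used when flushing the dirty
	 * entries of that set.
	 */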
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the
		 * new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!is_valid_data_blkaddr(sbi, new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

/*
 * This function returns 0 on success, or a negative errno if reading
 * the NAT page fails.
 */
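/*
 * Lookup-order sketch (a reading aid, not part of the original source):
 * f2fs_get_node_info() resolves a nid in three tiers, cheapest first:
 *
 *	1. the in-memory nat cache (nm_i->nat_root);
 *	2. the NAT journal kept in the CURSEG_HOT_DATA summary block;
 *	3. the on-disk NAT block, read via f2fs_get_meta_page().
 *
 * A typical caller only checks the return value and then consumes
 * ni.blk_addr, e.g.:
 *
 *	struct node_info ni;
 *
 *	err = f2fs_get_node_info(sbi, nid, &ni);
 *	if (err)
 *		return err;
 *	if (ni.blk_addr == NULL_ADDR)
 *		the node has been truncated
 */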
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * Readahead up to n node pages; callers typically pass MAX_RA_NODE.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
	case 2:
		base += 2 * direct_blks;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
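/*
 * Worked example (a reading aid, not from the original source), assuming
 * a 4 KiB block where ADDRS_PER_INODE() evaluates to 923 (its on-disk
 * maximum; inline xattr space reduces it) and ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018:
 *
 *	block 0		-> level 0, offset[0] = 0 (inside the inode)
 *	block 923	-> level 1, offset[0] = NODE_DIR1_BLOCK,
 *			   offset[1] = 0 (first direct node)
 *	block 1941	-> level 1, offset[0] = NODE_DIR2_BLOCK,
 *			   offset[1] = 0 (second direct node)
 *	block 2959	-> level 2, offset[0] = NODE_IND1_BLOCK,
 *			   offset[1] = 0, offset[2] = 0
 *
 * noffset[] counts node blocks along the same path, so noffset[1] is
 * 1 for DIR1, 2 for DIR2 and 3 for IND1, matching the code below.
 */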
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set to ALLOC_NODE; pure lookups
 * (LOOKUP_NODE*) do not need the lock.
 */
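/*
 * Minimal caller sketch (illustrative only; the surrounding variable
 * names are assumptions), following the usual pattern elsewhere in f2fs:
 *
 *	struct dnode_of_data dn;
 *	int err;
 *
 *	f2fs_lock_op(sbi);
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (!err) {
 *		... use dn.node_page / dn.data_blkaddr ...
 *		f2fs_put_dnode(&dn);
 *	}
 *	f2fs_unlock_op(sbi);
 */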
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* set up dnode_of_data to describe this direct node */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0)
		return level;

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
			"Inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino,
			(unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Callers should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
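/*
 * Caller-side sketch of that contract (illustrative only), mirroring
 * what __get_node_page() below does:
 *
 *	err = read_node_page(page, 0);
 *	if (err < 0) {
 *		f2fs_put_page(page, 1);		unlock and drop on error
 *		return ERR_PTR(err);
 *	} else if (err == LOCKED_PAGE) {
 *		page was already uptodate and stays locked
 *	}
 *	lock_page(page);
 *
 * On a 0 return the read bio has been submitted and the read completion
 * handler will unlock the page, so the caller re-acquires the lock to
 * wait for the I/O.
 */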
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	if (unlikely(ni.blk_addr == NULL_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
			"nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
						page->index, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

void f2fs_move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	pgoff_t last_idx = ULONG_MAX;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
	return ret ? -EIO : 0;
}

int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			/* give priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);

		if (ret)
			break;
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}

static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write them together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
	if (err)
		return err;

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]++;
	if (state == FREE_NID)
		list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}
2051
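/*
 * Set or clear one nid's bit in the in-memory free nid bitmap. The NAT
 * block covering the nid must already have been scanned, i.e. its bit
 * in nat_block_bitmap must be set; otherwise this is a no-op.
 */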
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                     - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i, FREE_NID);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

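/*
 * Scan one on-disk NAT block and register every nid whose block address
 * is NULL_ADDR as a free nid. A NEW_ADDR entry found on disk indicates
 * a corrupted NAT, so the scan is aborted with an error.
 */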
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

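/*
 * Harvest free nids from the in-memory free nid bitmaps of already
 * scanned NAT blocks (capped at MAX_FREE_NIDS), then reconcile the
 * result against the NAT entries cached in the current hot data
 * summary journal.
 */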
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}

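/*
 * Refill the free nid list, first from the free nid bitmaps (except at
 * mount time) and then by scanning up to FREE_NID_PAGES NAT pages
 * starting at next_scan_nid. Returns -EINVAL if a NAT page is corrupt.
 */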
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			ret = scan_nat_page(sbi, page, nid);
			f2fs_put_page(page, 1);

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_bug_on(sbi, !mount);
				f2fs_msg(sbi->sb, KERN_ERR,
					"NAT is corrupt, run fsck to fix it");
				return -EINVAL;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* record where the next scan should resume to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

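/*
 * Shrinker helper: drop up to nr_shrink cached free nids, but never
 * below MAX_FREE_NIDS, and only when no free nid build is in flight.
 * Returns the number of nids actually freed.
 */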
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
		if (nr_shrink <= 0 ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
			break;

		__remove_free_nid(sbi, i, FREE_NID);
		kmem_cache_free(free_nid_slab, i);
		nr_shrink--;
	}
	spin_unlock(&nm_i->nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

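/*
 * During roll-forward recovery, copy the inline xattr payload from the
 * recovered node page into the inode page, mirroring the recovered
 * inode's F2FS_INLINE_XATTR flag into the in-memory inode.
 */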
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		set_inode_flag(inode, FI_INLINE_XATTR);
	} else {
		clear_inode_flag(inode, FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

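/*
 * Recover the xattr node of an inode: invalidate the stale xattr node
 * block left from before the crash, allocate a fresh nid and node page
 * for it, and copy the xattr block over from the recovered page.
 */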
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}

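/*
 * Rebuild an inode page from a node page found during recovery: grab a
 * fresh inode page in the node mapping, copy the inode fields up to
 * i_ext, and reinitialize size, blocks, links and those extra
 * attributes that the on-disk inode actually carries.
 */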
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi->sb) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

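/*
 * Rebuild the node summary entries for one segment by reading every
 * node block in it (with readahead) and recording each block's nid.
 */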
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, BIO_MAX_PAGES);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

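/*
 * Drain all NAT entries from the current hot data summary journal into
 * the in-memory NAT cache, marking them dirty so they get written back
 * through NAT pages at the next checkpoint.
 */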
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * If a free nat in the journal has not been used after the
		 * last checkpoint, we should remove it from the available
		 * nids, since we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

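/*
 * Refresh the full/empty nat_bits for one NAT block after it has been
 * flushed: a block with no valid entries is marked empty, a block with
 * NAT_ENTRY_PER_BLOCK valid entries is marked full, and anything in
 * between clears both bits. Entry 0 is reserved and counted as valid.
 */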
static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (nat_blk->entries[i].block_addr != NULL_ADDR)
			valid++;
	}
	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}

static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
}

/*
 * This function is called during the checkpointing process.
 */
void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	/* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
	if (enabled_nat_bits(sbi, cpc)) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->dirty_nat_cnt)
		return;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into nat entry sets.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set, cpc);

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */
}

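/*
 * Load the nat_bits area (an 8-byte checkpoint version followed by the
 * full and empty NAT block bitmaps) from the tail of the checkpoint
 * segment, and disable nat_bits if the stored version does not match
 * the current checkpoint.
 */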
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page)) {
			disable_nat_bits(sbi, true);
			return PTR_ERR(page);
		}

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
	return 0;
}

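/*
 * Prime the free nid bitmaps from nat_bits: every NAT block flagged
 * empty has all of its nids marked free, and both empty and full
 * blocks are marked as already scanned in nat_block_bitmap.
 */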
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta, (and root counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
				sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

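/*
 * Allocate the per-NAT-block free nid bitmaps, the scanned-block bitmap
 * and the per-block free nid counters used by the free nid allocator.
 */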
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
					     nm_i->nat_blocks),
			     GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

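/*
 * Tear down the node manager at unmount: free the cached free nids,
 * the NAT entry and NAT set caches, and all associated bitmaps.
 */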
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when a cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kfree(nm_i->nat_bitmap);
	kfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}