f2fs: don't interrupt free nids building during nid allocation
Let build_free_nids support sync/async methods. In the allocation flow of nids, we use the synchronous method, so that we can avoid looping in alloc_nid when free memory is low; in unblock_operations and f2fs_balance_fs_bg we use the asynchronous method, where a low-memory condition can interrupt us. Signed-off-by: Chao Yu <yuchao0@huawei.com> Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
parent
eb0aa4b807
commit
3a2ad5672b
4 changed files with 13 additions and 15 deletions
|
@ -987,7 +987,7 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
|
|||
{
|
||||
up_write(&sbi->node_write);
|
||||
|
||||
build_free_nids(sbi);
|
||||
build_free_nids(sbi, false);
|
||||
f2fs_unlock_all(sbi);
|
||||
}
|
||||
|
||||
|
|
|
@ -2040,7 +2040,7 @@ void move_node_page(struct page *, int);
|
|||
int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
|
||||
struct writeback_control *, bool);
|
||||
int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
|
||||
void build_free_nids(struct f2fs_sb_info *);
|
||||
void build_free_nids(struct f2fs_sb_info *, bool);
|
||||
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
|
||||
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
|
||||
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
|
||||
|
|
|
@ -1733,9 +1733,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
|
|||
struct nat_entry *ne;
|
||||
int err;
|
||||
|
||||
if (!available_free_memory(sbi, FREE_NIDS))
|
||||
return -1;
|
||||
|
||||
/* 0 nid should not be used */
|
||||
if (unlikely(nid == 0))
|
||||
return 0;
|
||||
|
@ -1803,14 +1800,12 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
|
|||
|
||||
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
|
||||
f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
|
||||
if (blk_addr == NULL_ADDR) {
|
||||
if (add_free_nid(sbi, start_nid, true) < 0)
|
||||
break;
|
||||
}
|
||||
if (blk_addr == NULL_ADDR)
|
||||
add_free_nid(sbi, start_nid, true);
|
||||
}
|
||||
}
|
||||
|
||||
void __build_free_nids(struct f2fs_sb_info *sbi)
|
||||
void __build_free_nids(struct f2fs_sb_info *sbi, bool sync)
|
||||
{
|
||||
struct f2fs_nm_info *nm_i = NM_I(sbi);
|
||||
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
|
||||
|
@ -1822,6 +1817,9 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
|
|||
if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
|
||||
return;
|
||||
|
||||
if (!sync && !available_free_memory(sbi, FREE_NIDS))
|
||||
return;
|
||||
|
||||
/* readahead nat pages to be scanned */
|
||||
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
|
||||
META_NAT, true);
|
||||
|
@ -1864,10 +1862,10 @@ void __build_free_nids(struct f2fs_sb_info *sbi)
|
|||
nm_i->ra_nid_pages, META_NAT, false);
|
||||
}
|
||||
|
||||
void build_free_nids(struct f2fs_sb_info *sbi)
|
||||
void build_free_nids(struct f2fs_sb_info *sbi, bool sync)
|
||||
{
|
||||
mutex_lock(&NM_I(sbi)->build_lock);
|
||||
__build_free_nids(sbi);
|
||||
__build_free_nids(sbi, sync);
|
||||
mutex_unlock(&NM_I(sbi)->build_lock);
|
||||
}
|
||||
|
||||
|
@ -1906,7 +1904,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
|
|||
spin_unlock(&nm_i->nid_list_lock);
|
||||
|
||||
/* Let's scan nat pages and its caches to get free nids */
|
||||
build_free_nids(sbi);
|
||||
build_free_nids(sbi, true);
|
||||
goto retry;
|
||||
}
|
||||
|
||||
|
@ -2343,7 +2341,7 @@ int build_node_manager(struct f2fs_sb_info *sbi)
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
build_free_nids(sbi);
|
||||
build_free_nids(sbi, true);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -382,7 +382,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
|
|||
if (!available_free_memory(sbi, FREE_NIDS))
|
||||
try_to_free_nids(sbi, MAX_FREE_NIDS);
|
||||
else
|
||||
build_free_nids(sbi);
|
||||
build_free_nids(sbi, false);
|
||||
|
||||
/* checkpoint is the only way to shrink partial cached entries */
|
||||
if (!available_free_memory(sbi, NAT_ENTRIES) ||
|
||||
|
|
Loading…
Reference in a new issue