f2fs: introduce ra_meta_pages to readahead CP/NAT/SIT pages

This patch helps us clean up the readahead code by merging the ra_{sit,nat}_pages
functions into a single ra_meta_pages().
Additionally, the new function is used to readahead CP blocks in
recover_orphan_inodes() (the resulting call sites are summarized after the
diffstat below).

Change log from v1:
 o fix a deadloop bug pointed out by Jaegeuk Kim.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Authored by Chao Yu on 2014-02-07 16:11:53 +08:00; committed by Jaegeuk Kim
parent 3375f696bd
commit 662befda25
4 changed files with 90 additions and 79 deletions
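
Before the per-file hunks, here is a quick sketch of the three readahead call
sites this patch ends up with, collected from the hunks below (only the calls
are shown, with their enclosing functions named in comments; nothing here goes
beyond what the diff itself introduces):

	/* checkpoint.c, recover_orphan_inodes(): readahead the orphan blocks in the CP area */
	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	/* node.c, build_free_nids(): readahead the NAT pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	/* segment.c, build_sit_entries(): readahead SIT pages, one bio's worth per pass */
	readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);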

--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -75,6 +75,82 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	return page;
 }
 
+static inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
+{
+	switch (type) {
+	case META_NAT:
+		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
+	case META_SIT:
+		return SIT_BLK_CNT(sbi);
+	case META_CP:
+		return 0;
+	default:
+		BUG();
+	}
+}
+
+/*
+ * Readahead CP/NAT/SIT pages
+ */
+int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
+{
+	block_t prev_blk_addr = 0;
+	struct page *page;
+	int blkno = start;
+	int max_blks = get_max_meta_blks(sbi, type);
+
+	struct f2fs_io_info fio = {
+		.type = META,
+		.rw = READ_SYNC | REQ_META | REQ_PRIO
+	};
+
+	for (; nrpages-- > 0; blkno++) {
+		block_t blk_addr;
+
+		switch (type) {
+		case META_NAT:
+			/* get nat block addr */
+			if (unlikely(blkno >= max_blks))
+				blkno = 0;
+			blk_addr = current_nat_addr(sbi,
+					blkno * NAT_ENTRY_PER_BLOCK);
+			break;
+		case META_SIT:
+			/* get sit block addr */
+			if (unlikely(blkno >= max_blks))
+				goto out;
+			blk_addr = current_sit_addr(sbi,
+					blkno * SIT_ENTRY_PER_BLOCK);
+			if (blkno != start && prev_blk_addr + 1 != blk_addr)
+				goto out;
+			prev_blk_addr = blk_addr;
+			break;
+		case META_CP:
+			/* get cp block addr */
+			blk_addr = blkno;
+			break;
+		default:
+			BUG();
+		}
+
+		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
+		if (!page)
+			continue;
+		if (PageUptodate(page)) {
+			mark_page_accessed(page);
+			f2fs_put_page(page, 1);
+			continue;
+		}
+
+		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
+		mark_page_accessed(page);
+		f2fs_put_page(page, 0);
+	}
+out:
+	f2fs_submit_merged_bio(sbi, META, READ);
+	return blkno - start;
+}
+
 static int f2fs_write_meta_page(struct page *page,
 				struct writeback_control *wbc)
 {
@@ -298,6 +374,8 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	start_blk = __start_cp_addr(sbi) + 1;
 	orphan_blkaddr = __start_sum_addr(sbi) - 1;
 
+	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
+
 	for (i = 0; i < orphan_blkaddr; i++) {
 		struct page *page = get_meta_page(sbi, start_blk + i);
 		struct f2fs_orphan_block *orphan_blk;

--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -88,6 +88,15 @@ enum {
 	SIT_BITMAP
 };
 
+/*
+ * For CP/NAT/SIT readahead
+ */
+enum {
+	META_CP,
+	META_NAT,
+	META_SIT
+};
+
 /* for the list of orphan inodes */
 struct orphan_inode_entry {
 	struct list_head list;	/* list head */
@@ -1176,6 +1185,7 @@ void destroy_segment_manager_caches(void);
  */
 struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
 struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
+int ra_meta_pages(struct f2fs_sb_info *, int, int, int);
 long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
 int acquire_orphan_inode(struct f2fs_sb_info *);
 void release_orphan_inode(struct f2fs_sb_info *);

--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -82,42 +82,6 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
 	return dst_page;
 }
 
-/*
- * Readahead NAT pages
- */
-static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
-{
-	struct address_space *mapping = META_MAPPING(sbi);
-	struct f2fs_nm_info *nm_i = NM_I(sbi);
-	struct page *page;
-	pgoff_t index;
-	int i;
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
-		if (unlikely(nid >= nm_i->max_nid))
-			nid = 0;
-		index = current_nat_addr(sbi, nid);
-
-		page = grab_cache_page(mapping, index);
-		if (!page)
-			continue;
-		if (PageUptodate(page)) {
-			mark_page_accessed(page);
-			f2fs_put_page(page, 1);
-			continue;
-		}
-
-		f2fs_submit_page_mbio(sbi, page, index, &fio);
-		mark_page_accessed(page);
-		f2fs_put_page(page, 0);
-	}
-	f2fs_submit_merged_bio(sbi, META, READ);
-}
-
 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
 {
 	return radix_tree_lookup(&nm_i->nat_root, n);
@@ -1413,7 +1377,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
 		return;
 
 	/* readahead nat pages to be scanned */
-	ra_nat_pages(sbi, nid);
+	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);
 
 	while (1) {
 		struct page *page = get_current_nat_page(sbi, nid);

--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1576,47 +1576,6 @@ static int build_curseg(struct f2fs_sb_info *sbi)
 	return restore_curseg_summaries(sbi);
 }
 
-static int ra_sit_pages(struct f2fs_sb_info *sbi, int start, int nrpages)
-{
-	struct address_space *mapping = META_MAPPING(sbi);
-	struct page *page;
-	block_t blk_addr, prev_blk_addr = 0;
-	int sit_blk_cnt = SIT_BLK_CNT(sbi);
-	int blkno = start;
-
-	struct f2fs_io_info fio = {
-		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
-	};
-
-	for (; blkno < start + nrpages && blkno < sit_blk_cnt; blkno++) {
-		blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK);
-
-		if (blkno != start && prev_blk_addr + 1 != blk_addr)
-			break;
-		prev_blk_addr = blk_addr;
-repeat:
-		page = grab_cache_page(mapping, blk_addr);
-		if (!page) {
-			cond_resched();
-			goto repeat;
-		}
-		if (PageUptodate(page)) {
-			mark_page_accessed(page);
-			f2fs_put_page(page, 1);
-			continue;
-		}
-
-		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
-		mark_page_accessed(page);
-		f2fs_put_page(page, 0);
-	}
-
-	f2fs_submit_merged_bio(sbi, META, READ);
-	return blkno - start;
-}
-
 static void build_sit_entries(struct f2fs_sb_info *sbi)
 {
 	struct sit_info *sit_i = SIT_I(sbi);
@@ -1628,7 +1587,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
 	int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 
 	do {
-		readed = ra_sit_pages(sbi, start_blk, nrpages);
+		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);
 
 		start = start_blk * sit_i->sents_per_block;
 		end = (start_blk + readed) * sit_i->sents_per_block;