f2fs: merge flags in struct f2fs_sb_info
Currently, there are several state fields in struct f2fs_sb_info kept as separate Boolean-style variables:

struct f2fs_sb_info {
...
	int s_dirty;
	bool need_fsck;
	bool s_closing;
...
	bool por_doing;
...
}

This has a couple of problems:
1. some space in f2fs_sb_info is wasted, because the compiler pads the
   structure for alignment after the Boolean fields.
2. if we keep adding new flags to f2fs_sb_info, the structure gets messy.

So in this patch we:
1. treat s_dirty as a Boolean state, since it only ever holds 0 or 1.
2. merge the s_dirty/need_fsck/s_closing/por_doing variables into a single
   s_flag field.
3. introduce an enum that names the different states of the sbi.
4. use the newly introduced generic helpers
   is_sbi_flag_set/{set,clear}_sbi_flag to operate on the sbi flags.

This fixes the issues above.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 88dd893419
commit caf0047e7e
8 changed files with 46 additions and 35 deletions
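The net effect of the change is easiest to see in isolation: one int bit-field in struct f2fs_sb_info plus three small helpers replace the four separate Boolean-style fields. Below is a condensed, standalone sketch of that scheme, not the kernel code itself: the enum values and helper bodies mirror the f2fs.h hunk further down, while the trimmed struct f2fs_sb_info and main() are illustrative only.

#include <stdbool.h>
#include <stdio.h>

/* For s_flag in struct f2fs_sb_info (names mirror the patch) */
enum {
	SBI_IS_DIRTY,	/* dirty flag for checkpoint */
	SBI_IS_CLOSE,	/* specify unmounting */
	SBI_NEED_FSCK,	/* need fsck.f2fs to fix */
	SBI_POR_DOING,	/* recovery is doing or not */
};

/* trimmed to the one field relevant here; the real struct has many more */
struct f2fs_sb_info {
	int s_flag;	/* flags for sbi */
};

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return sbi->s_flag & (0x01 << type);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	sbi->s_flag |= (0x01 << type);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	sbi->s_flag &= ~(0x01 << type);
}

int main(void)
{
	struct f2fs_sb_info sbi = { .s_flag = 0 };

	/* e.g. mark/clear recovery in progress, as recover_fsync_data() does below */
	set_sbi_flag(&sbi, SBI_POR_DOING);
	printf("POR doing: %d\n", is_sbi_flag_set(&sbi, SBI_POR_DOING));
	clear_sbi_flag(&sbi, SBI_POR_DOING);
	printf("POR doing: %d\n", is_sbi_flag_set(&sbi, SBI_POR_DOING));
	return 0;
}

Because every state is just a bit in one word, adding a new state later only means adding an enum entry rather than another padded struct member, which is exactly the maintenance problem the commit message calls out.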
@@ -190,7 +190,7 @@ static int f2fs_write_meta_page(struct page *page,
 
 	trace_f2fs_writepage(page, META);
 
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
 		goto redirty_out;
@@ -485,7 +485,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
 		return;
 
-	sbi->por_doing = true;
+	set_sbi_flag(sbi, SBI_POR_DOING);
 
 	start_blk = __start_cp_addr(sbi) + 1 +
 		le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
@@ -506,7 +506,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
 	}
 	/* clear Orphan Flag */
 	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	return;
 }
 
@@ -973,7 +973,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	else
 		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 
-	if (sbi->need_fsck)
+	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
 		set_ckpt_flags(ckpt, CP_FSCK_FLAG);
 
 	/* update SIT/NAT bitmap */
@@ -1047,7 +1047,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 		return;
 
 	clear_prefree_segments(sbi);
-	F2FS_RESET_SB_DIRT(sbi);
+	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 
 /*
@@ -1062,7 +1062,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
 	mutex_lock(&sbi->cp_mutex);
 
-	if (!sbi->s_dirty &&
+	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
 		cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT)
 		goto out;
 	if (unlikely(f2fs_cp_error(sbi)))
@@ -818,7 +818,7 @@ static int f2fs_write_data_page(struct page *page,
 
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 write:
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (f2fs_is_drop_cache(inode))
 		goto out;
@@ -28,7 +28,7 @@
 	do { \
 		if (unlikely(condition)) { \
 			WARN_ON(1); \
-			sbi->need_fsck = true; \
+			set_sbi_flag(sbi, SBI_NEED_FSCK); \
 		} \
 	} while (0)
 #define f2fs_down_write(x, y) down_write(x)
@@ -519,14 +519,20 @@ struct inode_management {
 	unsigned long ino_num; /* number of entries */
 };
 
+/* For s_flag in struct f2fs_sb_info */
+enum {
+	SBI_IS_DIRTY, /* dirty flag for checkpoint */
+	SBI_IS_CLOSE, /* specify unmounting */
+	SBI_NEED_FSCK, /* need fsck.f2fs to fix */
+	SBI_POR_DOING, /* recovery is doing or not */
+};
+
 struct f2fs_sb_info {
 	struct super_block *sb; /* pointer to VFS super block */
 	struct proc_dir_entry *s_proc; /* proc entry */
 	struct buffer_head *raw_super_buf; /* buffer head of raw sb */
 	struct f2fs_super_block *raw_super; /* raw super block pointer */
-	int s_dirty; /* dirty flag for checkpoint */
-	bool need_fsck; /* need fsck.f2fs to fix */
-	bool s_closing; /* specify unmounting */
+	int s_flag; /* flags for sbi */
 
 	/* for node-related operations */
 	struct f2fs_nm_info *nm_info; /* node manager */
@@ -546,7 +552,6 @@ struct f2fs_sb_info {
 	struct rw_semaphore cp_rwsem; /* blocking FS operations */
 	struct rw_semaphore node_write; /* locking node writes */
 	struct mutex writepages; /* mutex for writepages() */
-	bool por_doing; /* recovery is doing or not */
 	wait_queue_head_t cp_wait;
 
 	struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
@@ -699,14 +704,19 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
 	return sbi->node_inode->i_mapping;
 }
 
-static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
 {
-	sbi->s_dirty = 1;
+	return sbi->s_flag & (0x01 << type);
 }
 
-static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
+static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
 {
-	sbi->s_dirty = 0;
+	sbi->s_flag |= (0x01 << type);
+}
+
+static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
+{
+	sbi->s_flag &= ~(0x01 << type);
 }
 
 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
@@ -818,7 +828,7 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
 {
 	atomic_inc(&sbi->nr_pages[count_type]);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 
 static inline void inode_inc_dirty_pages(struct inode *inode)
@@ -588,7 +588,7 @@ static void truncate_node(struct dnode_of_data *dn)
 	}
 invalidate:
 	clear_node_page_dirty(dn->node_page);
-	F2FS_SET_SB_DIRT(sbi);
+	set_sbi_flag(sbi, SBI_IS_DIRTY);
 
 	f2fs_put_page(dn->node_page, 1);
 
@@ -1284,7 +1284,7 @@ static int f2fs_write_node_page(struct page *page,
 
 	trace_f2fs_writepage(page, NODE);
 
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		goto redirty_out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto redirty_out;
@@ -508,7 +508,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 	INIT_LIST_HEAD(&inode_list);
 
 	/* step #1: find fsynced inode numbers */
-	sbi->por_doing = true;
+	set_sbi_flag(sbi, SBI_POR_DOING);
 
 	/* prevent checkpoint */
 	mutex_lock(&sbi->cp_mutex);
@@ -541,7 +541,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 		truncate_inode_pages_final(META_MAPPING(sbi));
 	}
 
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	if (err) {
 		discard_next_dnode(sbi, blkaddr);
 
@@ -460,7 +460,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
 	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
 	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 
-	if (unlikely(sbi->por_doing))
+	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
 		return false;
 
 	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
@@ -599,13 +599,13 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
 {
 	if (segno > TOTAL_SEGS(sbi) - 1)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 
 static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
 {
 	if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi))
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 
 /*
@@ -616,11 +616,11 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
 {
 	/* check segment usage */
 	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 
 	/* check boundary of a given segment number */
 	if (segno > TOTAL_SEGS(sbi) - 1)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 #endif
 
@@ -452,7 +452,7 @@ static void f2fs_put_super(struct super_block *sb)
 	 * But, the previous checkpoint was not done by umount, it needs to do
 	 * clean checkpoint again.
 	 */
-	if (sbi->s_dirty ||
+	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
 			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
 		struct cp_control cpc = {
 			.reason = CP_UMOUNT,
@@ -492,7 +492,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
 	if (sync) {
 		struct cp_control cpc;
 
-		cpc.reason = (test_opt(sbi, FASTBOOT) || sbi->s_closing) ?
+		cpc.reason = (test_opt(sbi, FASTBOOT) ||
+				is_sbi_flag_set(sbi, SBI_IS_CLOSE)) ?
 						CP_UMOUNT : CP_SYNC;
 		mutex_lock(&sbi->gc_mutex);
 		write_checkpoint(sbi, &cpc);
@@ -895,7 +896,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
 		atomic_set(&sbi->nr_pages[i], 0);
 
 	sbi->dir_level = DEF_DIR_LEVEL;
-	sbi->need_fsck = false;
+	clear_sbi_flag(sbi, SBI_NEED_FSCK);
 }
 
 /*
@@ -1006,7 +1007,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&sbi->writepages);
 	mutex_init(&sbi->cp_mutex);
 	init_rwsem(&sbi->node_write);
-	sbi->por_doing = false;
+	clear_sbi_flag(sbi, SBI_POR_DOING);
 	spin_lock_init(&sbi->stat_lock);
 
 	init_rwsem(&sbi->read_io.io_rwsem);
@@ -1130,7 +1131,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 		goto free_proc;
 
 	if (!retry)
-		sbi->need_fsck = true;
+		set_sbi_flag(sbi, SBI_NEED_FSCK);
 
 	/* recover fsynced data */
 	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
@@ -184,13 +184,13 @@ TRACE_EVENT(f2fs_sync_fs,
 
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
-		__field(int, dirty)
+		__field(bool, dirty)
 		__field(int, wait)
 	),
 
 	TP_fast_assign(
 		__entry->dev = sb->s_dev;
-		__entry->dirty = F2FS_SB(sb)->s_dirty;
+		__entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY);
 		__entry->wait = wait;
 	),
 