/*
 * f2fs debugging statistics
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2012 Linux Foundation
 * Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
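
/*
 * The statistics collected here are exported read-only through debugfs,
 * normally visible as /sys/kernel/debug/f2fs/status once debugfs is
 * mounted (see f2fs_create_root_stats() below).
 */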

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/f2fs_fs.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"

static LIST_HEAD(f2fs_stat_list);
static struct dentry *f2fs_debugfs_root;
static DEFINE_MUTEX(f2fs_stat_mutex);

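/*
 * Refresh the cached counters in sbi->stat_info from the live in-memory
 * state; called from stat_show() before each dump so the printed numbers
 * are reasonably current.
 */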
static void update_general_status(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	int i;

	/* validation check of the segment numbers */
	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
	si->total_ext = atomic64_read(&sbi->total_hit_ext);
	si->ext_tree = atomic_read(&sbi->total_ext_tree);
	si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
	si->ext_node = atomic_read(&sbi->total_ext_node);
	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
	si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
	si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
	si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
	si->wb_bios = atomic_read(&sbi->nr_wb_bios);
	si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
	si->rsvd_segs = reserved_segments(sbi);
	si->overp_segs = overprovision_segments(sbi);
	si->valid_count = valid_user_blocks(sbi);
	si->discard_blks = discard_blocks(sbi);
	si->valid_node_count = valid_node_count(sbi);
	si->valid_inode_count = valid_inode_count(sbi);
	si->inline_xattr = atomic_read(&sbi->inline_xattr);
	si->inline_inode = atomic_read(&sbi->inline_inode);
	si->inline_dir = atomic_read(&sbi->inline_dir);
	si->orphans = sbi->im[ORPHAN_INO].ino_num;
	si->utilization = utilization(sbi);

	si->free_segs = free_segments(sbi);
	si->free_secs = free_sections(sbi);
	si->prefree_count = prefree_segments(sbi);
	si->dirty_count = dirty_segments(sbi);
	si->node_pages = NODE_MAPPING(sbi)->nrpages;
	si->meta_pages = META_MAPPING(sbi)->nrpages;
	si->nats = NM_I(sbi)->nat_cnt;
	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
	si->sits = MAIN_SEGS(sbi);
	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
	si->fnids = NM_I(sbi)->fcnt;
	si->bg_gc = sbi->bg_gc;
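	/*
	 * The utilization figures below are halved so that
	 * util_free + util_invalid + util_valid adds up to 50, matching the
	 * roughly 50-character distribution bar drawn in stat_show().
	 */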
	si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg)
		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
		/ 2;
	si->util_valid = (int)(written_block_count(sbi) >>
				sbi->log_blocks_per_seg)
		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
		/ 2;
	si->util_invalid = 50 - si->util_free - si->util_valid;
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_NODE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		si->curseg[i] = curseg->segno;
		si->cursec[i] = curseg->segno / sbi->segs_per_sec;
		si->curzone[i] = si->cursec[i] / sbi->secs_per_zone;
	}

	for (i = 0; i < 2; i++) {
		si->segment_count[i] = sbi->segment_count[i];
		si->block_count[i] = sbi->block_count[i];
	}

	si->inplace_count = atomic_read(&sbi->inplace_count);
}

/*
 * This function calculates the BDF of every segment.
 */
static void update_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
	unsigned long long bimodal, dist;
	unsigned int segno, vblocks;
	int ndirty = 0;

	bimodal = 0;
	total_vblocks = 0;
	blks_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
	hblks_per_sec = blks_per_sec / 2;
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);
		dist = abs(vblocks - hblks_per_sec);
		bimodal += dist * dist;

		if (vblocks > 0 && vblocks < blks_per_sec) {
			total_vblocks += vblocks;
			ndirty++;
		}
	}
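	/*
	 * Normalize the accumulated squared distances so that the BDF score
	 * lands roughly in the 0..100 range: 0 when every section is exactly
	 * half full, 100 when every section is either completely full or
	 * completely empty (a perfectly bimodal distribution).
	 */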
	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
	si->bimodal = div64_u64(bimodal, dist);
	if (si->dirty_count)
		si->avg_vblocks = div_u64(total_vblocks, ndirty);
	else
		si->avg_vblocks = 0;
}

/*
 * This function calculates memory footprint.
 */
static void update_mem_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	unsigned npages;
	int i;

	if (si->base_mem)
		goto get_cache;

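	/*
	 * Static footprint: fixed-size metadata structures that exist for the
	 * whole lifetime of the mount.  Computed only once and reported as
	 * the "static" line of the Memory section in the status output.
	 */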
	si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
	si->base_mem += 2 * sizeof(struct f2fs_inode_info);
	si->base_mem += sizeof(*sbi->ckpt);
	si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE;

	/* build sm */
	si->base_mem += sizeof(struct f2fs_sm_info);

	/* build sit */
	si->base_mem += sizeof(struct sit_info);
	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	if (f2fs_discard_en(sbi))
		si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	si->base_mem += SIT_VBLOCK_MAP_SIZE;
	if (sbi->segs_per_sec > 1)
		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);

	/* build free segmap */
	si->base_mem += sizeof(struct free_segmap_info);
	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));

	/* build curseg */
	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;

	/* build dirty segmap */
	si->base_mem += sizeof(struct dirty_seglist_info);
	si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));

	/* build nm */
	si->base_mem += sizeof(struct f2fs_nm_info);
	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);

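	/*
	 * Cached footprint: dynamically sized caches (GC/flush threads,
	 * free nids, NAT entries, extent cache objects and so on).
	 * Recomputed on every read and reported as the "cached" line of
	 * the Memory section.
	 */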
get_cache:
	si->cache_mem = 0;

	/* build gc */
	if (sbi->gc_thread)
		si->cache_mem += sizeof(struct f2fs_gc_kthread);

	/* build merge flush thread */
	if (SM_I(sbi)->cmd_control_info)
		si->cache_mem += sizeof(struct flush_cmd_control);

	/* free nids */
	si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid);
	si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry);
	si->cache_mem += NM_I(sbi)->dirty_nat_cnt *
					sizeof(struct nat_entry_set);
	si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages);
	for (i = 0; i <= ORPHAN_INO; i++)
		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
					sizeof(struct extent_tree);
	si->cache_mem += atomic_read(&sbi->total_ext_node) *
					sizeof(struct extent_node);

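	/*
	 * Paged footprint: node and meta pages currently held in the page
	 * cache for this instance; reported as the "paged" line.
	 */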
	si->page_mem = 0;
	npages = NODE_MAPPING(sbi)->nrpages;
	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	npages = META_MAPPING(sbi)->nrpages;
	si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
}

static int stat_show(struct seq_file *s, void *v)
{
	struct f2fs_stat_info *si;
	int i = 0;
	int j;

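	/*
	 * One seq_file covers every mounted f2fs instance; f2fs_stat_mutex
	 * keeps f2fs_stat_list stable while the statistics are printed.
	 */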
	mutex_lock(&f2fs_stat_mutex);
	list_for_each_entry(si, &f2fs_stat_list, stat_list) {
		update_general_status(si->sbi);

		seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n",
			si->sbi->sb->s_bdev, i++,
			f2fs_readonly(si->sbi->sb) ? "RO": "RW");
		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
			si->sit_area_segs, si->nat_area_segs);
		seq_printf(s, "[SSA: %d] [MAIN: %d",
			si->ssa_area_segs, si->main_area_segs);
		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
			si->overp_segs, si->rsvd_segs);
		if (test_opt(si->sbi, DISCARD))
			seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
				si->utilization, si->valid_count,
				si->discard_blks);
		else
			seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
				si->utilization, si->valid_count);

		seq_printf(s, " - Node: %u (Inode: %u, ",
			si->valid_node_count, si->valid_inode_count);
		seq_printf(s, "Other: %u)\n - Data: %u\n",
			si->valid_node_count - si->valid_inode_count,
			si->valid_count - si->valid_node_count);
		seq_printf(s, " - Inline_xattr Inode: %u\n",
			si->inline_xattr);
		seq_printf(s, " - Inline_data Inode: %u\n",
			si->inline_inode);
		seq_printf(s, " - Inline_dentry Inode: %u\n",
			si->inline_dir);
		seq_printf(s, " - Orphan Inode: %u\n",
			si->orphans);
		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
			si->main_area_segs, si->main_area_sections,
			si->main_area_zones);
		seq_printf(s, " - COLD data: %d, %d, %d\n",
			si->curseg[CURSEG_COLD_DATA],
			si->cursec[CURSEG_COLD_DATA],
			si->curzone[CURSEG_COLD_DATA]);
		seq_printf(s, " - WARM data: %d, %d, %d\n",
			si->curseg[CURSEG_WARM_DATA],
			si->cursec[CURSEG_WARM_DATA],
			si->curzone[CURSEG_WARM_DATA]);
		seq_printf(s, " - HOT data: %d, %d, %d\n",
			si->curseg[CURSEG_HOT_DATA],
			si->cursec[CURSEG_HOT_DATA],
			si->curzone[CURSEG_HOT_DATA]);
		seq_printf(s, " - Dir dnode: %d, %d, %d\n",
			si->curseg[CURSEG_HOT_NODE],
			si->cursec[CURSEG_HOT_NODE],
			si->curzone[CURSEG_HOT_NODE]);
		seq_printf(s, " - File dnode: %d, %d, %d\n",
			si->curseg[CURSEG_WARM_NODE],
			si->cursec[CURSEG_WARM_NODE],
			si->curzone[CURSEG_WARM_NODE]);
		seq_printf(s, " - Indir nodes: %d, %d, %d\n",
			si->curseg[CURSEG_COLD_NODE],
			si->cursec[CURSEG_COLD_NODE],
			si->curzone[CURSEG_COLD_NODE]);
		seq_printf(s, "\n - Valid: %d\n - Dirty: %d\n",
			si->main_area_segs - si->dirty_count -
			si->prefree_count - si->free_segs,
			si->dirty_count);
		seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
			si->prefree_count, si->free_segs, si->free_secs);
		seq_printf(s, "CP calls: %d (BG: %d)\n",
			si->cp_count, si->bg_cp_count);
		seq_printf(s, "GC calls: %d (BG: %d)\n",
			si->call_count, si->bg_gc);
		seq_printf(s, " - data segments : %d (%d)\n",
			si->data_segs, si->bg_data_segs);
		seq_printf(s, " - node segments : %d (%d)\n",
			si->node_segs, si->bg_node_segs);
		seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
			si->bg_data_blks + si->bg_node_blks);
		seq_printf(s, " - data blocks : %d (%d)\n", si->data_blks,
			si->bg_data_blks);
		seq_printf(s, " - node blocks : %d (%d)\n", si->node_blks,
			si->bg_node_blks);
		seq_puts(s, "\nExtent Cache:\n");
		seq_printf(s, " - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
			si->hit_largest, si->hit_cached,
			si->hit_rbtree);
		seq_printf(s, " - Hit Ratio: %llu%% (%llu / %llu)\n",
			!si->total_ext ? 0 :
			div64_u64(si->hit_total * 100, si->total_ext),
			si->hit_total, si->total_ext);
		seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n",
			si->ext_tree, si->zombie_tree, si->ext_node);
		seq_puts(s, "\nBalancing F2FS Async:\n");
		seq_printf(s, " - inmem: %4lld, wb_bios: %4d\n",
			si->inmem_pages, si->wb_bios);
		seq_printf(s, " - nodes: %4lld in %4d\n",
			si->ndirty_node, si->node_pages);
		seq_printf(s, " - dents: %4lld in dirs:%4d (%4d)\n",
			si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
		seq_printf(s, " - datas: %4lld in files:%4d\n",
			si->ndirty_data, si->ndirty_files);
		seq_printf(s, " - meta: %4lld in %4d\n",
			si->ndirty_meta, si->meta_pages);
		seq_printf(s, " - imeta: %4lld\n",
			si->ndirty_imeta);
		seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n",
			si->dirty_nats, si->nats, si->dirty_sits, si->sits);
		seq_printf(s, " - free_nids: %9d\n",
			si->fnids);
		seq_puts(s, "\nDistribution of User Blocks:");
		seq_puts(s, " [ valid | invalid | free ]\n");
		seq_puts(s, " [");

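		/* Draw the ~50-character usage bar: valid | invalid | free. */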
		for (j = 0; j < si->util_valid; j++)
			seq_putc(s, '-');
		seq_putc(s, '|');

		for (j = 0; j < si->util_invalid; j++)
			seq_putc(s, '-');
		seq_putc(s, '|');

		for (j = 0; j < si->util_free; j++)
			seq_putc(s, '-');
		seq_puts(s, "]\n\n");
		seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
		seq_printf(s, "SSR: %u blocks in %u segments\n",
			si->block_count[SSR], si->segment_count[SSR]);
		seq_printf(s, "LFS: %u blocks in %u segments\n",
			si->block_count[LFS], si->segment_count[LFS]);

		/* segment usage info */
		update_sit_info(si->sbi);
		seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
			si->bimodal, si->avg_vblocks);

		/* memory footprint */
		update_mem_info(si->sbi);
		seq_printf(s, "\nMemory: %llu KB\n",
			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
		seq_printf(s, " - static: %llu KB\n",
			si->base_mem >> 10);
		seq_printf(s, " - cached: %llu KB\n",
			si->cache_mem >> 10);
		seq_printf(s, " - paged : %llu KB\n",
			si->page_mem >> 10);
	}
	mutex_unlock(&f2fs_stat_mutex);
	return 0;
}

static int stat_open(struct inode *inode, struct file *file)
{
	return single_open(file, stat_show, inode->i_private);
}

static const struct file_operations stat_fops = {
	.open = stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

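/*
 * Allocate the per-superblock f2fs_stat_info, reset the statistics counters
 * and register the instance on the global f2fs_stat_list so stat_show()
 * can find it.
 */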
int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_stat_info *si;

	si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
	if (!si)
		return -ENOMEM;

	si->all_area_segs = le32_to_cpu(raw_super->segment_count);
	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
	si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa);
	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
	si->main_area_sections = le32_to_cpu(raw_super->section_count);
	si->main_area_zones = si->main_area_sections /
				le32_to_cpu(raw_super->secs_per_zone);
	si->sbi = sbi;
	sbi->stat_info = si;

	atomic64_set(&sbi->total_hit_ext, 0);
	atomic64_set(&sbi->read_hit_rbtree, 0);
	atomic64_set(&sbi->read_hit_largest, 0);
	atomic64_set(&sbi->read_hit_cached, 0);

	atomic_set(&sbi->inline_xattr, 0);
	atomic_set(&sbi->inline_inode, 0);
	atomic_set(&sbi->inline_dir, 0);
	atomic_set(&sbi->inplace_count, 0);

	mutex_lock(&f2fs_stat_mutex);
	list_add_tail(&si->stat_list, &f2fs_stat_list);
	mutex_unlock(&f2fs_stat_mutex);

	return 0;
}

void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);

	mutex_lock(&f2fs_stat_mutex);
	list_del(&si->stat_list);
	mutex_unlock(&f2fs_stat_mutex);

	kfree(si);
}

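/*
 * Create the "f2fs" debugfs directory and its read-only "status" file.
 * With debugfs mounted in the usual place, the statistics can then be
 * inspected with something like:
 *
 *	cat /sys/kernel/debug/f2fs/status
 */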
int __init f2fs_create_root_stats(void)
{
	struct dentry *file;

	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);
	if (!f2fs_debugfs_root)
		return -ENOMEM;

	file = debugfs_create_file("status", S_IRUGO, f2fs_debugfs_root,
					NULL, &stat_fops);
	if (!file) {
		debugfs_remove(f2fs_debugfs_root);
		f2fs_debugfs_root = NULL;
		return -ENOMEM;
	}

	return 0;
}

void f2fs_destroy_root_stats(void)
{
	if (!f2fs_debugfs_root)
		return;

	debugfs_remove_recursive(f2fs_debugfs_root);
	f2fs_debugfs_root = NULL;
}