mm, memcg: avoid unnecessary function call when memcg is disabled
While profiling numa/core v16 with cgroup_disable=memory on the command line, I noticed mem_cgroup_count_vm_event() still showed up as high as 0.60% in perf top. This occurs because the function is called extremely often even when memcg is disabled. To fix this, inline the check for mem_cgroup_disabled() so we avoid the unnecessary function call if memcg is disabled.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
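For readers outside the kernel tree, here is a minimal userspace sketch of the pattern this commit applies: a static inline wrapper tests a cheap "disabled" flag and only then calls the out-of-line function, so a disabled configuration never pays the function-call overhead. The names feature_disabled, count_event() and __count_event() are illustrative only and are not part of the kernel API.

/* Sketch only (not kernel code): inline wrapper guards the out-of-line
 * function behind a cheap "disabled" check. */
#include <stdbool.h>
#include <stdio.h>

static bool feature_disabled = true;	/* e.g. set from a boot parameter */

/* Out-of-line slow path, only reached when the feature is enabled. */
static void __count_event(int idx)
{
	printf("counting event %d\n", idx);
}

/* Inline fast path; the disabled check is folded into every caller. */
static inline void count_event(int idx)
{
	if (feature_disabled)
		return;
	__count_event(idx);
}

int main(void)
{
	count_event(1);			/* returns immediately, no call made */
	feature_disabled = false;
	count_event(2);			/* now calls __count_event() */
	return 0;
}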
parent 05b0afd73d
commit 68ae564bba
2 changed files with 12 additions and 3 deletions
include/linux/memcontrol.h
@@ -181,7 +181,14 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask,
 						unsigned long *total_scanned);
 
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
+static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
+					     enum vm_event_item idx)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_count_vm_event(mm, idx);
+}
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void mem_cgroup_split_huge_fixup(struct page *head);
 #endif
mm/memcontrol.c
@@ -59,6 +59,8 @@
 #include <trace/events/vmscan.h>
 
 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
+EXPORT_SYMBOL(mem_cgroup_subsys);
+
 #define MEM_CGROUP_RECLAIM_RETRIES	5
 static struct mem_cgroup *root_mem_cgroup __read_mostly;
 
@@ -1015,7 +1017,7 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
 	     iter != NULL;				\
 	     iter = mem_cgroup_iter(NULL, iter, NULL))
 
-void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 {
 	struct mem_cgroup *memcg;
 
@@ -1040,7 +1042,7 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
 out:
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(mem_cgroup_count_vm_event);
+EXPORT_SYMBOL(__mem_cgroup_count_vm_event);
 
 /**
  * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg