33c3fc71c8
Knowing the portion of memory that is not used by a certain application or memory cgroup (idle memory) can be useful for partitioning the system efficiently, e.g. by setting memory cgroup limits appropriately.

Currently, the only means the kernel provides for estimating the amount of idle memory is /proc/PID/{clear_refs,smaps}: the user can clear the access bit for all pages mapped to a particular process by writing 1 to clear_refs, wait for some time, and then count smaps:Referenced. However, this method has two serious shortcomings:

 - it does not count unmapped file pages
 - it affects the reclaimer logic

To overcome these drawbacks, this patch introduces two new page flags, Idle and Young, and a new sysfs file, /sys/kernel/mm/page_idle/bitmap. A page's Idle flag can only be set from userspace, by setting the bit in /sys/kernel/mm/page_idle/bitmap at the offset corresponding to the page, and it is cleared whenever the page is accessed either through page tables (it is cleared in page_referenced() in this case) or using the read(2) system call (mark_page_accessed()). Thus, by setting the Idle flag for the pages of a particular workload, which can be found e.g. by reading /proc/PID/pagemap, waiting for some time to let the workload access its working set, and then reading the bitmap file, one can estimate the number of pages that are not used by the workload (see the sketch below).

The Young page flag is used to avoid interference with the memory reclaimer. A page's Young flag is set whenever the Access bit of a page table entry pointing to the page is cleared by writing to the bitmap file. If page_referenced() is called on a Young page, it will add 1 to its return value, thereby concealing the fact that the Access bit was cleared.

Note: since there is no room for extra page flags on 32 bit, this feature uses extended page flags when compiled on 32 bit.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: kpageidle requires an MMU]
[akpm@linux-foundation.org: decouple from page-flags rework]
Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Reviewed-by: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
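To make the workflow concrete, here is a minimal userspace sketch; it is not part of the patch. It assumes the documented pagemap layout (bit 63 = page present, bits 0-54 = PFN, readable with CAP_SYS_ADMIN) and that the bitmap file is read and written in 8-byte, 8-byte-aligned chunks in which bit N corresponds to PFN N. The helper names are illustrative and error handling is kept to a minimum.

/* Illustrative sketch only: helper names are not part of the kernel ABI. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Callers open this file O_RDWR and pass the fd to the helpers below. */
#define PAGE_IDLE_BITMAP "/sys/kernel/mm/page_idle/bitmap"

/* Mark the page backing @pfn idle by setting its bit in the bitmap file;
 * the file is accessed in 64-bit words, each covering 64 consecutive PFNs. */
static int mark_pfn_idle(int bitmap_fd, uint64_t pfn)
{
	uint64_t word = 1ULL << (pfn % 64);
	off_t off = (pfn / 64) * sizeof(word);

	return pwrite(bitmap_fd, &word, sizeof(word), off) == (ssize_t)sizeof(word) ? 0 : -1;
}

/* Return 1 if the page backing @pfn still has its Idle flag set. */
static int pfn_is_idle(int bitmap_fd, uint64_t pfn)
{
	uint64_t word;
	off_t off = (pfn / 64) * sizeof(word);

	if (pread(bitmap_fd, &word, sizeof(word), off) != (ssize_t)sizeof(word))
		return 0;
	return (word >> (pfn % 64)) & 1;
}

/* Translate a virtual address of @pid to a PFN via /proc/PID/pagemap;
 * returns 0 if the page is not present. */
static uint64_t vaddr_to_pfn(pid_t pid, uintptr_t vaddr)
{
	char path[64];
	uint64_t entry = 0;
	long psize = sysconf(_SC_PAGESIZE);
	int fd;

	snprintf(path, sizeof(path), "/proc/%d/pagemap", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;
	pread(fd, &entry, sizeof(entry), (off_t)(vaddr / psize) * sizeof(entry));
	close(fd);

	return (entry & (1ULL << 63)) ? (entry & ((1ULL << 55) - 1)) : 0;
}

A scan of a workload would walk its mappings, call mark_pfn_idle() for every present page, wait for the workload to touch its working set, and then count the pages for which pfn_is_idle() still returns 1.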
#ifndef __LINUX_PAGE_EXT_H
#define __LINUX_PAGE_EXT_H

#include <linux/types.h>
#include <linux/stacktrace.h>

struct pglist_data;
struct page_ext_operations {
	bool (*need)(void);
	void (*init)(void);
};

#ifdef CONFIG_PAGE_EXTENSION

/*
 * page_ext->flags bits:
 *
 * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
 * implement the generic debug pagealloc feature: pages are filled with
 * poison patterns and the flag is set after free_pages(); before
 * alloc_pages(), the poisoned pages are checked for corrupted patterns
 * and the flag is cleared.
 */

enum page_ext_flags {
	PAGE_EXT_DEBUG_POISON,	/* Page is poisoned */
	PAGE_EXT_DEBUG_GUARD,	/* Page is a debug_pagealloc guard page */
	PAGE_EXT_OWNER,		/* Page owner information is recorded */
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,		/* Access bit was cleared via the idle bitmap */
	PAGE_EXT_IDLE,		/* Page was marked idle and not accessed since */
#endif
};

/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_ext are allocated at boot or memory hotplug event,
 * then the page_ext for pfn always exists.
 */
struct page_ext {
	unsigned long flags;
#ifdef CONFIG_PAGE_OWNER
	unsigned int order;
	gfp_t gfp_mask;
	unsigned int nr_entries;
	unsigned long trace_entries[8];
#endif
};

extern void pgdat_page_ext_init(struct pglist_data *pgdat);

#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
#else
extern void page_ext_init_flatmem(void);
static inline void page_ext_init(void)
{
}
#endif

struct page_ext *lookup_page_ext(struct page *page);

#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline struct page_ext *lookup_page_ext(struct page *page)
{
	return NULL;
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
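For orientation, here is a rough sketch of how a client of this header might be wired up, assuming (as in mm/page_ext.c) that each feature contributes a page_ext_operations entry whose need() callback decides at boot whether the extension storage must be allocated, and that per-page state is then kept in page_ext->flags reached through lookup_page_ext(). The my_feature_* names are purely illustrative and not part of this file.

/* Illustrative only; in the real kernel such ops are listed in the static
 * page_ext_ops[] array in mm/page_ext.c rather than registered at runtime. */
#include <linux/bitops.h>
#include <linux/mm_types.h>
#include <linux/page_ext.h>

static bool my_feature_need(void)
{
	/* Return true only if this feature really needs page_ext storage,
	 * e.g. after checking a boot parameter or Kconfig option. */
	return true;
}

static void my_feature_init(void)
{
	/* One-time setup, called once the page_ext storage exists. */
}

struct page_ext_operations my_feature_ops = {
	.need = my_feature_need,
	.init = my_feature_init,
};

#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
/* On 32-bit, the idle-tracking state introduced by this patch lives in
 * page_ext->flags instead of page->flags, so a query looks roughly like: */
static inline bool my_page_is_idle(struct page *page)
{
	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
}
#endif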