#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

/**
|
|
|
|
* page_is_file_cache - should the page be on a file LRU or anon LRU?
|
|
|
|
* @page: the page to test
|
|
|
|
*
|
2008-10-18 21:26:32 -06:00
|
|
|
* Returns LRU_FILE if @page is page cache page backed by a regular filesystem,
|
2008-10-18 21:26:30 -06:00
|
|
|
* or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
|
|
|
|
* Used by functions that manipulate the LRU lists, to sort a page
|
|
|
|
* onto the right LRU list.
|
|
|
|
*
|
|
|
|
* We would like to get this info without a page flag, but the state
|
|
|
|
* needs to survive until the page is last deleted from the LRU, which
|
|
|
|
* could be as far down as __page_cache_release.
|
|
|
|
*/
|
|
|
|
static inline int page_is_file_cache(struct page *page)
|
|
|
|
{
|
|
|
|
if (PageSwapBacked(page))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* The page is page cache backed by a normal filesystem. */
|
2008-10-18 21:26:32 -06:00
|
|
|
return LRU_FILE;
|
2008-10-18 21:26:30 -06:00
|
|
|
}
static inline void
|
|
|
|
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
|
|
|
|
{
|
|
|
|
list_add(&page->lru, &zone->lru[l].list);
|
|
|
|
__inc_zone_state(zone, NR_LRU_BASE + l);
|
|
|
|
}
static inline void
|
|
|
|
del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
|
|
|
|
{
|
|
|
|
list_del(&page->lru);
|
|
|
|
__dec_zone_state(zone, NR_LRU_BASE + l);
|
|
|
|
}
static inline void
|
|
|
|
del_page_from_lru(struct zone *zone, struct page *page)
|
|
|
|
{
|
2008-10-18 21:26:32 -06:00
|
|
|
enum lru_list l = LRU_BASE;
|
2008-10-18 21:26:14 -06:00
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
list_del(&page->lru);
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-18 21:26:39 -06:00
|
|
|
if (PageUnevictable(page)) {
|
|
|
|
__ClearPageUnevictable(page);
|
|
|
|
l = LRU_UNEVICTABLE;
|
|
|
|
} else {
|
|
|
|
if (PageActive(page)) {
|
|
|
|
__ClearPageActive(page);
|
|
|
|
l += LRU_ACTIVE;
|
|
|
|
}
|
|
|
|
l += page_is_file_cache(page);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
2008-10-18 21:26:14 -06:00
|
|
|
__dec_zone_state(zone, NR_LRU_BASE + l);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
/**
|
|
|
|
* page_lru - which LRU list should a page be on?
|
|
|
|
* @page: the page to test
|
|
|
|
*
|
|
|
|
* Returns the LRU list a page should be on, as an index
|
|
|
|
* into the array of LRU lists.
|
|
|
|
*/
|
|
|
|
static inline enum lru_list page_lru(struct page *page)
|
|
|
|
{
|
|
|
|
enum lru_list lru = LRU_BASE;
|
|
|
|
|
Unevictable LRU Infrastructure
When the system contains lots of mlocked or otherwise unevictable pages,
the pageout code (kswapd) can spend lots of time scanning over these
pages. Worse still, the presence of lots of unevictable pages can confuse
kswapd into thinking that more aggressive pageout modes are required,
resulting in all kinds of bad behaviour.
Infrastructure to manage pages excluded from reclaim--i.e., hidden from
vmscan. Based on a patch by Larry Woodman of Red Hat. Reworked to
maintain "unevictable" pages on a separate per-zone LRU list, to "hide"
them from vmscan.
Kosaki Motohiro added the support for the memory controller unevictable
lru list.
Pages on the unevictable list have both PG_unevictable and PG_lru set.
Thus, PG_unevictable is analogous to and mutually exclusive with
PG_active--it specifies which LRU list the page is on.
The unevictable infrastructure is enabled by a new mm Kconfig option
[CONFIG_]UNEVICTABLE_LRU.
A new function 'page_evictable(page, vma)' in vmscan.c tests whether or
not a page may be evictable. Subsequent patches will add the various
!evictable tests. We'll want to keep these tests light-weight for use in
shrink_active_list() and, possibly, the fault path.
To avoid races between tasks putting pages [back] onto an LRU list and
tasks that might be moving the page from non-evictable to evictable state,
the new function 'putback_lru_page()' -- inverse to 'isolate_lru_page()'
-- tests the "evictability" of a page after placing it on the LRU, before
dropping the reference. If the page has become unevictable,
putback_lru_page() will redo the 'putback', thus moving the page to the
unevictable list. This way, we avoid "stranding" evictable pages on the
unevictable list.
[akpm@linux-foundation.org: fix fallout from out-of-order merge]
[riel@redhat.com: fix UNEVICTABLE_LRU and !PROC_PAGE_MONITOR build]
[nishimura@mxp.nes.nec.co.jp: remove redundant mapping check]
[kosaki.motohiro@jp.fujitsu.com: unevictable-lru-infrastructure: putback_lru_page()/unevictable page handling rework]
[kosaki.motohiro@jp.fujitsu.com: kill unnecessary lock_page() in vmscan.c]
[kosaki.motohiro@jp.fujitsu.com: revert migration change of unevictable lru infrastructure]
[kosaki.motohiro@jp.fujitsu.com: revert to unevictable-lru-infrastructure-kconfig-fix.patch]
[kosaki.motohiro@jp.fujitsu.com: restore patch failure of vmstat-unevictable-and-mlocked-pages-vm-events.patch]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Debugged-by: Benjamin Kidwell <benjkidwell@yahoo.com>
Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-18 21:26:39 -06:00
|
|
|
if (PageUnevictable(page))
|
|
|
|
lru = LRU_UNEVICTABLE;
|
|
|
|
else {
|
|
|
|
if (PageActive(page))
|
|
|
|
lru += LRU_ACTIVE;
|
|
|
|
lru += page_is_file_cache(page);
|
|
|
|
}
|
2008-10-18 21:26:14 -06:00
|
|
|
|
|
|
|
return lru;
|
|
|
|
}
/**
|
|
|
|
* inactive_anon_is_low - check if anonymous pages need to be deactivated
|
|
|
|
* @zone: zone to check
|
|
|
|
*
|
|
|
|
* Returns true if the zone does not have enough inactive anon pages,
|
|
|
|
* meaning some active anon pages need to be deactivated.
|
|
|
|
*/
|
|
|
|
static inline int inactive_anon_is_low(struct zone *zone)
|
|
|
|
{
|
|
|
|
unsigned long active, inactive;
|
|
|
|
|
|
|
|
active = zone_page_state(zone, NR_ACTIVE_ANON);
|
|
|
|
inactive = zone_page_state(zone, NR_INACTIVE_ANON);
|
|
|
|
|
|
|
|
if (inactive * zone->inactive_ratio < active)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}

#endif