kernel-fxtec-pro1x/include/linux/gfp.h
Christoph Lameter 4ae7c03943 [PATCH] Periodically drain non local pagesets
The pageset array can potentially acquire a huge amount of memory on large
NUMA systems.  For example, on a system with 512 processors and 256 nodes
there will be 256*512 pagesets.  If each pageset holds only 5 pages then we
are talking about 655360 pages.  With a 16K page size on IA64 this results
in potentially 10 Gigabytes of memory being trapped in pagesets.  The
typical numbers are much smaller on smaller systems, but there is still the
potential for memory to be trapped in off-node pagesets.  Off-node memory
may be rarely used if local memory is available, so without this patch
memory may sit in seldom-used pagesets.

The slab allocator flushes its per-CPU caches every 2 seconds.  This patch
flushes the off-node pageset caches in the same way by tying into the slab
flush.
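
A minimal sketch of the tie-in (illustrative only; cache_reap(), reap_work and
REAPTIMEOUT_CPUC follow the mm/slab.c conventions of this era and are
assumptions, not quotes from the patch):

	/* per-CPU slab reaper, already run on every CPU every ~2 seconds */
	static void cache_reap(void *unused)
	{
		/* ... trim this CPU's slab caches as before ... */

		/* new: push back pages held in this CPU's off-node pagesets */
		drain_remote_pages();

		/* re-arm the per-CPU reap work as before */
		schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
	}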

The patch also changes /proc/zoneinfo to include the number of pages
currently in each pageset.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-21 18:46:18 -07:00

#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H
#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/config.h>
struct vm_area_struct;
/*
 * GFP bitmasks.
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
#define __GFP_DMA 0x01
#define __GFP_HIGHMEM 0x02
/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT 0x10u /* Can wait and reschedule? */
#define __GFP_HIGH 0x20u /* Should access emergency pools? */
#define __GFP_IO 0x40u /* Can start physical IO? */
#define __GFP_FS 0x80u /* Can call down to low-level FS? */
#define __GFP_COLD 0x100u /* Cache-cold page required */
#define __GFP_NOWARN 0x200u /* Suppress page allocation failure warning */
#define __GFP_REPEAT 0x400u /* Retry the allocation. Might fail */
#define __GFP_NOFAIL 0x800u /* Retry for ever. Cannot fail */
#define __GFP_NORETRY 0x1000u /* Do not retry. Might fail */
#define __GFP_NO_GROW 0x2000u /* Slab internal usage */
#define __GFP_COMP 0x4000u /* Add compound page metadata */
#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
#define __GFP_NORECLAIM 0x20000u /* No early zone reclaim during allocation */
#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
/* if you forget to add the bitmask here the kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
			__GFP_NOMEMALLOC|__GFP_NORECLAIM)
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HIGHMEM)
/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */
#define GFP_DMA __GFP_DMA
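/*
 * Illustrative usage (a sketch, not part of the original header): the action
 * modifiers above are OR'ed onto one of the composite masks, e.g.
 *
 *	page = alloc_page(GFP_KERNEL | __GFP_ZERO);	zeroed page, may sleep
 *	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);	no sleeping, no failure warning
 *	addr = __get_free_pages(GFP_NOFS | __GFP_NOFAIL, 0);	retried until it succeeds
 */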
/*
 * There is only one page-allocator function, and two main namespaces to
 * it.  The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
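/*
 * A sketch of that difference (illustrative only, not part of the original
 * header); kmap()/kunmap() come from <linux/highmem.h>:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);	may be a highmem page,
 *	void *va = kmap(page);				so map it before use
 *	...
 *	kunmap(page);
 *	__free_page(page);
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);	4 contiguous pages,
 *	free_pages(addr, 2);					directly addressable
 */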
/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
static inline struct page *
alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)
#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))
extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
void page_alloc_init(void);
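/*
 * Illustrative note (not in the original header): drain_remote_pages()
 * pushes back any pages the calling CPU holds in pagesets belonging to
 * remote nodes, so off-node pagesets cannot pin memory indefinitely;
 * on !CONFIG_NUMA kernels it is a no-op.
 */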
#ifdef CONFIG_NUMA
void drain_remote_pages(void);
#else
static inline void drain_remote_pages(void) { }
#endif
#endif /* __LINUX_GFP_H */