remove PAGE_GROUP_BY_MOBILITY

Grouping pages by mobility can be disabled at compile-time. This was
considered undesirable by a number of people. However, in the current stack of
patches, it is not a simple case of just dropping the configurable patch as it
would cause merge conflicts.  This patch backs out the configuration option.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit ac0e5b7a6b
parent 56fd56b868
Author: Mel Gorman, 2007-10-16 01:25:58 -07:00
Committed by: Linus Torvalds
3 files changed, 2 insertions(+), 62 deletions(-)

include/linux/mmzone.h

@@ -33,21 +33,12 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
 #define MIGRATE_HIGHATOMIC    3
 #define MIGRATE_RESERVE       4
 #define MIGRATE_TYPES         5
-#else
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_UNRECLAIMABLE 0
-#define MIGRATE_MOVABLE       0
-#define MIGRATE_HIGHATOMIC    0
-#define MIGRATE_RESERVE       0
-#define MIGRATE_TYPES         1
-#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \

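Note: the for_each_migratetype_order() macro is truncated by the hunk above; only its order loop is visible. A minimal sketch of the iteration pattern it implies (the inner loop and the free_list access are assumptions for illustration, not quoted from this tree):

	/* Walk every (order, migratetype) free list pairing, e.g.: */
	for (order = 0; order < MAX_ORDER; order++)
		for (type = 0; type < MIGRATE_TYPES; type++)
			/* visit zone->free_area[order].free_list[type] */
			;
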
init/Kconfig

@@ -607,19 +607,6 @@ config BASE_SMALL
 	default 0 if BASE_FULL
 	default 1 if !BASE_FULL
 
-config PAGE_GROUP_BY_MOBILITY
-	bool "Group pages based on their mobility in the page allocator"
-	def_bool y
-	help
-	  The standard allocator will fragment memory over time which means
-	  that high order allocations will fail even if kswapd is running. If
-	  this option is set, the allocator will try and group page types
-	  based on their ability to migrate or reclaim. This is a best effort
-	  attempt at lowering fragmentation which a few workloads care about.
-	  The loss is a more complex allocator that may perform slower. If
-	  you are interested in working with large pages, say Y and set
-	  /proc/sys/vm/min_free_bytes to 16374. Otherwise say N
-
 menuconfig MODULES
 	bool "Enable loadable module support"
 	help

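Note: the removed help text describes grouping allocations by their ability to migrate or be reclaimed. In practice, callers carry that hint in the gfp mask; a hedged illustration using era-appropriate flags (not part of this diff):

	/* User page: migratable, so grouped into MIGRATE_MOVABLE blocks. */
	struct page *upage = alloc_page(GFP_HIGHUSER_MOVABLE);

	/* Cache-like data that can be freed under pressure: grouped as
	 * MIGRATE_RECLAIMABLE. */
	struct page *rpage = alloc_page(GFP_KERNEL | __GFP_RECLAIMABLE);
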
mm/page_alloc.c

@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
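
Note: only the return tail of the surviving allocflags_to_migratetype() is visible as context above. A hedged reconstruction of the mapping it implements, assuming the bit layout implied by the MIGRATE_* values in mmzone.h (UNMOVABLE=0, RECLAIMABLE=1, MOVABLE=2); the function name here is hypothetical:

	static inline int gfp_to_migratetype_sketch(gfp_t gfp_flags)
	{
		/* __GFP_MOVABLE selects bit 1 and __GFP_RECLAIMABLE bit 0,
		 * so the two flag tests compose into a MIGRATE_* index. */
		return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
			((gfp_flags & __GFP_RECLAIMABLE) != 0);
	}
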
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	return NULL;
 }
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
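
Note: the fallback table itself lies outside this hunk. A sketch of the shape the comment describes, with the array name and entry order assumed for illustration rather than quoted from this tree:

	static int fallbacks_sketch[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
		[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
					  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
		[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,
					  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
		[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,
					  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
		/* MIGRATE_RESERVE is terminal; it never falls back. */
		[MIGRATE_HIGHATOMIC]  = { MIGRATE_RESERVE,     MIGRATE_RESERVE,
					  MIGRATE_RESERVE,     MIGRATE_RESERVE },
		[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,
					  MIGRATE_RESERVE,     MIGRATE_RESERVE },
	};
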
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
 	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
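
Note: with the stub gone, __rmqueue_fallback() exists unconditionally as the second stage of the "hard work" mentioned above. A sketch of the combined flow, with the wrapper name and exact body assumed:

	static struct page *rmqueue_sketch(struct zone *zone,
					   unsigned int order, int migratetype)
	{
		/* Try the preferred migrate type's free lists first... */
		struct page *page = __rmqueue_smallest(zone, order, migratetype);

		/* ...then steal from other types, MIGRATE_RESERVE last. */
		if (unlikely(page == NULL))
			page = __rmqueue_fallback(zone, order, migratetype);

		return page;
	}
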
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1157,7 +1128,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 			goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
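
Note: the list_for_each_entry() search above matches on page_private(). That works because the free path caches each page's migrate type there when the page enters the per-cpu list; a hedged sketch of that free-side step, assumed from the era's free_hot_cold_page():

	/* Cache the migrate type so buffered_rmqueue() can match on it. */
	set_page_private(page, get_pageblock_migratetype(page));
	list_add(&page->lru, &pcp->list);
	pcp->count++;
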
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
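
Note: the setup_zone_migrate_reserve() comment above ties the number of reserved pageblocks to zone->pages_min. A hedged sketch of that sizing arithmetic (the exact rounding is assumed, not quoted from this hunk):

	/* A MAX_ORDER block holds (1 << (MAX_ORDER - 1)) pages, so reserve
	 * enough whole blocks to cover the zone's min watermark. */
	unsigned long reserve = roundup(zone->pages_min, MAX_ORDER_NR_PAGES)
					>> (MAX_ORDER - 1);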