mm, page_alloc: reduce number of alloc_pages* functions' parameters
Introduce struct alloc_context to accumulate the numerous parameters passed
between the alloc_pages* family of functions and get_page_from_freelist().
This excludes gfp_flags and alloc_info, which mutate too much along the way,
and allocation order, which is conceptually different.

The result is shorter function signatures, as well as overall code size and
stack usage reductions.

bloat-o-meter:

add/remove: 0/0 grow/shrink: 1/2 up/down: 127/-310 (-183)
function                                     old     new   delta
get_page_from_freelist                      2525    2652    +127
__alloc_pages_direct_compact                 329     283     -46
__alloc_pages_nodemask                      2564    2300    -264

checkstack.pl:

function                            old    new
__alloc_pages_nodemask              248    200
get_page_from_freelist              168    184
__alloc_pages_direct_compact         40     24

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 753791910e
commit a9263751e1

1 changed file with 108 additions and 121 deletions:

mm/page_alloc.c (229 lines changed)
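The pattern the patch applies can be sketched outside the kernel: gather the
parameters that stay fixed for the lifetime of one allocation into a context
struct, build it once in the entry point, and hand it down by const pointer,
while values that keep mutating (the gfp mask, alloc_flags) remain ordinary
arguments. The names in this sketch (struct alloc_ctx, find_page,
alloc_slowpath) are simplified stand-ins invented for illustration, not the
kernel's; the real struct alloc_context is defined in the diff below.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel types; only ever used through pointers here. */
struct zonelist;
typedef unsigned long nodemask_t;

/* Mostly-immutable inputs of one allocation, filled in once by the caller. */
struct alloc_ctx {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	int classzone_idx;
	int migratetype;
};

/*
 * Helpers take only what actually varies per call (gfp, order, alloc_flags)
 * plus a const pointer to the shared context, instead of repeating six or
 * seven scalar parameters at every level.
 */
static int find_page(unsigned int gfp, unsigned int order, int alloc_flags,
		     const struct alloc_ctx *ctx)
{
	(void)gfp; (void)order; (void)alloc_flags;
	/* a real implementation would walk ctx->zonelist / ctx->nodemask */
	return ctx->classzone_idx;
}

/* The slow path may adjust a few context fields, then passes it on whole. */
static int alloc_slowpath(unsigned int gfp, unsigned int order,
			  struct alloc_ctx *ctx)
{
	ctx->classzone_idx = 0;
	return find_page(gfp, order, 1, ctx);
}

int main(void)
{
	struct alloc_ctx ctx = {
		.zonelist = NULL,
		.nodemask = NULL,
		.classzone_idx = 2,
		.migratetype = 1,
	};

	printf("fast path: %d\n", find_page(0, 0, 0, &ctx));
	printf("slow path: %d\n", alloc_slowpath(0, 0, &ctx));
	return 0;
}

Built standalone (for example with cc -Wall), the toy shows a fast-path call
plus a slow-path call that tweaks one context field before retrying, which is
essentially the shape __alloc_pages_nodemask() and __alloc_pages_slowpath()
take with the real structure in the diff that follows.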
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -232,6 +232,27 @@ EXPORT_SYMBOL(nr_node_ids);
 EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
+/*
+ * Structure for holding the mostly immutable allocation parameters passed
+ * between alloc_pages* family of functions.
+ *
+ * nodemask, migratetype and high_zoneidx are initialized only once in
+ * __alloc_pages_nodemask() and then never change.
+ *
+ * zonelist, preferred_zone and classzone_idx are set first in
+ * __alloc_pages_nodemask() for the fast path, and might be later changed
+ * in __alloc_pages_slowpath(). All other functions pass the whole strucure
+ * by a const pointer.
+ */
+struct alloc_context {
+        struct zonelist *zonelist;
+        nodemask_t *nodemask;
+        struct zone *preferred_zone;
+        int classzone_idx;
+        int migratetype;
+        enum zone_type high_zoneidx;
+};
+
 int page_group_by_mobility_disabled __read_mostly;
 
 void set_pageblock_migratetype(struct page *page, int migratetype)
@@ -2037,10 +2058,10 @@ static void reset_alloc_batches(struct zone *preferred_zone)
  * a page.
  */
 static struct page *
-get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
-                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
-                struct zone *preferred_zone, int classzone_idx, int migratetype)
+get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
+                                        const struct alloc_context *ac)
 {
+        struct zonelist *zonelist = ac->zonelist;
         struct zoneref *z;
         struct page *page = NULL;
         struct zone *zone;
@@ -2059,8 +2080,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
          * Scan zonelist, looking for a zone with enough free.
          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
          */
-        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                                high_zoneidx, nodemask) {
+        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
+                                                        ac->nodemask) {
                 unsigned long mark;
 
                 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
@@ -2077,7 +2098,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                  * time the page has in memory before being reclaimed.
                  */
                 if (alloc_flags & ALLOC_FAIR) {
-                        if (!zone_local(preferred_zone, zone))
+                        if (!zone_local(ac->preferred_zone, zone))
                                 break;
                         if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
                                 nr_fair_skipped++;
@@ -2115,7 +2136,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                 if (!zone_watermark_ok(zone, order, mark,
-                                       classzone_idx, alloc_flags)) {
+                                       ac->classzone_idx, alloc_flags)) {
                         int ret;
 
                         /* Checked here to keep the fast path fast */
@@ -2136,7 +2157,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                         }
 
                         if (zone_reclaim_mode == 0 ||
-                            !zone_allows_reclaim(preferred_zone, zone))
+                            !zone_allows_reclaim(ac->preferred_zone, zone))
                                 goto this_zone_full;
 
                         /*
@@ -2158,7 +2179,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                         default:
                                 /* did we reclaim enough */
                                 if (zone_watermark_ok(zone, order, mark,
-                                                classzone_idx, alloc_flags))
+                                                ac->classzone_idx, alloc_flags))
                                         goto try_this_zone;
 
                                 /*
@@ -2179,8 +2200,8 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                 }
 
 try_this_zone:
-                page = buffered_rmqueue(preferred_zone, zone, order,
-                                                gfp_mask, migratetype);
+                page = buffered_rmqueue(ac->preferred_zone, zone, order,
+                                                gfp_mask, ac->migratetype);
                 if (page) {
                         if (prep_new_page(page, order, gfp_mask, alloc_flags))
                                 goto try_this_zone;
@@ -2203,7 +2224,7 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
                 alloc_flags &= ~ALLOC_FAIR;
                 if (nr_fair_skipped) {
                         zonelist_rescan = true;
-                        reset_alloc_batches(preferred_zone);
+                        reset_alloc_batches(ac->preferred_zone);
                 }
                 if (nr_online_nodes > 1)
                         zonelist_rescan = true;
@@ -2325,9 +2346,7 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 
 static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
-        struct zonelist *zonelist, enum zone_type high_zoneidx,
-        nodemask_t *nodemask, struct zone *preferred_zone,
-        int classzone_idx, int migratetype, unsigned long *did_some_progress)
+        const struct alloc_context *ac, unsigned long *did_some_progress)
 {
         struct page *page;
 
@@ -2340,7 +2359,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
          * Acquire the per-zone oom lock for each zone. If that
          * fails, somebody else is making progress for us.
          */
-        if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+        if (!oom_zonelist_trylock(ac->zonelist, gfp_mask)) {
                 *did_some_progress = 1;
                 schedule_timeout_uninterruptible(1);
                 return NULL;
@@ -2359,10 +2378,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
          * here, this is only to catch a parallel oom killing, we must fail if
          * we're still under heavy pressure.
          */
-        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
-                        order, zonelist, high_zoneidx,
-                        ALLOC_WMARK_HIGH|ALLOC_CPUSET,
-                        preferred_zone, classzone_idx, migratetype);
+        page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
+                                        ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
         if (page)
                 goto out;
 
@@ -2374,7 +2391,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         if (order > PAGE_ALLOC_COSTLY_ORDER)
                 goto out;
         /* The OOM killer does not needlessly kill tasks for lowmem */
-        if (high_zoneidx < ZONE_NORMAL)
+        if (ac->high_zoneidx < ZONE_NORMAL)
                 goto out;
         /* The OOM killer does not compensate for light reclaim */
         if (!(gfp_mask & __GFP_FS))
@@ -2390,10 +2407,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
                         goto out;
         }
         /* Exhausted what can be done so it's blamo time */
-        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
+        out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false);
         *did_some_progress = 1;
 out:
-        oom_zonelist_unlock(zonelist, gfp_mask);
+        oom_zonelist_unlock(ac->zonelist, gfp_mask);
         return page;
 }
 
@@ -2401,10 +2418,9 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 /* Try memory compaction for high-order allocations before reclaim */
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-                struct zonelist *zonelist, enum zone_type high_zoneidx,
-                nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-                int classzone_idx, int migratetype, enum migrate_mode mode,
-                int *contended_compaction, bool *deferred_compaction)
+                int alloc_flags, const struct alloc_context *ac,
+                enum migrate_mode mode, int *contended_compaction,
+                bool *deferred_compaction)
 {
         unsigned long compact_result;
         struct page *page;
@@ -2413,10 +2429,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 return NULL;
 
         current->flags |= PF_MEMALLOC;
-        compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
-                                                nodemask, mode,
+        compact_result = try_to_compact_pages(ac->zonelist, order, gfp_mask,
+                                                ac->nodemask, mode,
                                                 contended_compaction,
-                                                alloc_flags, classzone_idx);
+                                                alloc_flags, ac->classzone_idx);
         current->flags &= ~PF_MEMALLOC;
 
         switch (compact_result) {
@@ -2435,10 +2451,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
          */
         count_vm_event(COMPACTSTALL);
 
-        page = get_page_from_freelist(gfp_mask, nodemask,
-                        order, zonelist, high_zoneidx,
-                        alloc_flags & ~ALLOC_NO_WATERMARKS,
-                        preferred_zone, classzone_idx, migratetype);
+        page = get_page_from_freelist(gfp_mask, order,
+                        alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
 
         if (page) {
                 struct zone *zone = page_zone(page);
@@ -2462,10 +2476,9 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 #else
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
-                struct zonelist *zonelist, enum zone_type high_zoneidx,
-                nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-                int classzone_idx, int migratetype, enum migrate_mode mode,
-                int *contended_compaction, bool *deferred_compaction)
+                int alloc_flags, const struct alloc_context *ac,
+                enum migrate_mode mode, int *contended_compaction,
+                bool *deferred_compaction)
 {
         return NULL;
 }
@@ -2473,8 +2486,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 /* Perform direct synchronous page reclaim */
 static int
-__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
-                  nodemask_t *nodemask)
+__perform_reclaim(gfp_t gfp_mask, unsigned int order,
+                  const struct alloc_context *ac)
 {
         struct reclaim_state reclaim_state;
         int progress;
@@ -2488,7 +2501,8 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
         reclaim_state.reclaimed_slab = 0;
         current->reclaim_state = &reclaim_state;
 
-        progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+        progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
+                                                        ac->nodemask);
 
         current->reclaim_state = NULL;
         lockdep_clear_current_reclaim_state();
@@ -2502,28 +2516,23 @@ __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-                struct zonelist *zonelist, enum zone_type high_zoneidx,
-                nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-                int classzone_idx, int migratetype, unsigned long *did_some_progress)
+                int alloc_flags, const struct alloc_context *ac,
+                unsigned long *did_some_progress)
 {
         struct page *page = NULL;
         bool drained = false;
 
-        *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
-                                               nodemask);
+        *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
         if (unlikely(!(*did_some_progress)))
                 return NULL;
 
         /* After successful reclaim, reconsider all zones for allocation */
         if (IS_ENABLED(CONFIG_NUMA))
-                zlc_clear_zones_full(zonelist);
+                zlc_clear_zones_full(ac->zonelist);
 
 retry:
-        page = get_page_from_freelist(gfp_mask, nodemask, order,
-                                        zonelist, high_zoneidx,
-                                        alloc_flags & ~ALLOC_NO_WATERMARKS,
-                                        preferred_zone, classzone_idx,
-                                        migratetype);
+        page = get_page_from_freelist(gfp_mask, order,
+                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
 
         /*
          * If an allocation failed after direct reclaim, it could be because
@@ -2544,36 +2553,30 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  */
 static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
-                struct zonelist *zonelist, enum zone_type high_zoneidx,
-                nodemask_t *nodemask, struct zone *preferred_zone,
-                int classzone_idx, int migratetype)
+                                const struct alloc_context *ac)
 {
         struct page *page;
 
         do {
-                page = get_page_from_freelist(gfp_mask, nodemask, order,
-                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
-                        preferred_zone, classzone_idx, migratetype);
+                page = get_page_from_freelist(gfp_mask, order,
+                                                ALLOC_NO_WATERMARKS, ac);
 
                 if (!page && gfp_mask & __GFP_NOFAIL)
-                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+                        wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
+                                                                        HZ/50);
         } while (!page && (gfp_mask & __GFP_NOFAIL));
 
         return page;
 }
 
-static void wake_all_kswapds(unsigned int order,
-                             struct zonelist *zonelist,
-                             enum zone_type high_zoneidx,
-                             struct zone *preferred_zone,
-                             nodemask_t *nodemask)
+static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 {
         struct zoneref *z;
         struct zone *zone;
 
-        for_each_zone_zonelist_nodemask(zone, z, zonelist,
-                                                high_zoneidx, nodemask)
-                wakeup_kswapd(zone, order, zone_idx(preferred_zone));
+        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
+                                        ac->high_zoneidx, ac->nodemask)
+                wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
 }
 
 static inline int
@@ -2632,9 +2635,7 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
-        struct zonelist *zonelist, enum zone_type high_zoneidx,
-        nodemask_t *nodemask, struct zone *preferred_zone,
-        int classzone_idx, int migratetype)
+                                                struct alloc_context *ac)
 {
         const gfp_t wait = gfp_mask & __GFP_WAIT;
         struct page *page = NULL;
@@ -2670,8 +2671,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
         if (!(gfp_mask & __GFP_NO_KSWAPD))
-                wake_all_kswapds(order, zonelist, high_zoneidx,
-                                preferred_zone, nodemask);
+                wake_all_kswapds(order, ac);
 
         /*
          * OK, we're below the kswapd watermark and have kicked background
@@ -2684,17 +2684,16 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
          * Find the true preferred zone if the allocation is unconstrained by
          * cpusets.
          */
-        if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
+        if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
                 struct zoneref *preferred_zoneref;
-                preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
-                                NULL, &preferred_zone);
-                classzone_idx = zonelist_zone_idx(preferred_zoneref);
+                preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                ac->high_zoneidx, NULL, &ac->preferred_zone);
+                ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
         }
 
         /* This is the last chance, in general, before the goto nopage. */
-        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
-                        preferred_zone, classzone_idx, migratetype);
+        page = get_page_from_freelist(gfp_mask, order,
+                                alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
         if (page)
                 goto got_pg;
 
@@ -2705,11 +2704,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
          * the allocation is high priority and these type of
          * allocations are system rather than user orientated
          */
-        zonelist = node_zonelist(numa_node_id(), gfp_mask);
+        ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+
+        page = __alloc_pages_high_priority(gfp_mask, order, ac);
 
-        page = __alloc_pages_high_priority(gfp_mask, order,
-                        zonelist, high_zoneidx, nodemask,
-                        preferred_zone, classzone_idx, migratetype);
         if (page) {
                 goto got_pg;
         }
@@ -2738,11 +2736,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
          * Try direct compaction. The first pass is asynchronous. Subsequent
          * attempts after direct reclaim are synchronous
          */
-        page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
-                                        high_zoneidx, nodemask, alloc_flags,
-                                        preferred_zone,
-                                        classzone_idx, migratetype,
-                                        migration_mode, &contended_compaction,
+        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
+                                        migration_mode,
+                                        &contended_compaction,
                                         &deferred_compaction);
         if (page)
                 goto got_pg;
@@ -2788,12 +2784,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                 migration_mode = MIGRATE_SYNC_LIGHT;
 
         /* Try direct reclaim and then allocating */
-        page = __alloc_pages_direct_reclaim(gfp_mask, order,
-                                        zonelist, high_zoneidx,
-                                        nodemask,
-                                        alloc_flags, preferred_zone,
-                                        classzone_idx, migratetype,
-                                        &did_some_progress);
+        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
+                                                        &did_some_progress);
         if (page)
                 goto got_pg;
 
@@ -2807,17 +2799,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                  * start OOM killing tasks.
                  */
                 if (!did_some_progress) {
-                        page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
-                                                high_zoneidx, nodemask,
-                                                preferred_zone, classzone_idx,
-                                                migratetype,&did_some_progress);
+                        page = __alloc_pages_may_oom(gfp_mask, order, ac,
+                                                        &did_some_progress);
                         if (page)
                                 goto got_pg;
                         if (!did_some_progress)
                                 goto nopage;
                 }
                 /* Wait for some write requests to complete then retry */
-                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+                wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
                 goto retry;
         } else {
                 /*
@@ -2825,11 +2815,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                  * direct reclaim and reclaim/compaction depends on compaction
                  * being called after reclaim so call directly if necessary
                  */
-                page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
-                                        high_zoneidx, nodemask, alloc_flags,
-                                        preferred_zone,
-                                        classzone_idx, migratetype,
-                                        migration_mode, &contended_compaction,
+                page = __alloc_pages_direct_compact(gfp_mask, order,
+                                        alloc_flags, ac, migration_mode,
+                                        &contended_compaction,
                                         &deferred_compaction);
                 if (page)
                         goto got_pg;
@@ -2848,15 +2836,16 @@ struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                         struct zonelist *zonelist, nodemask_t *nodemask)
 {
-        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
-        struct zone *preferred_zone;
         struct zoneref *preferred_zoneref;
         struct page *page = NULL;
-        int migratetype = gfpflags_to_migratetype(gfp_mask);
         unsigned int cpuset_mems_cookie;
         int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
-        int classzone_idx;
         gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
+        struct alloc_context ac = {
+                .high_zoneidx = gfp_zone(gfp_mask),
+                .nodemask = nodemask,
+                .migratetype = gfpflags_to_migratetype(gfp_mask),
+        };
 
         gfp_mask &= gfp_allowed_mask;
 
@@ -2875,25 +2864,25 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
         if (unlikely(!zonelist->_zonerefs->zone))
                 return NULL;
 
-        if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
+        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                 alloc_flags |= ALLOC_CMA;
 
 retry_cpuset:
         cpuset_mems_cookie = read_mems_allowed_begin();
 
+        /* We set it here, as __alloc_pages_slowpath might have changed it */
+        ac.zonelist = zonelist;
         /* The preferred zone is used for statistics later */
-        preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
-                                nodemask ? : &cpuset_current_mems_allowed,
-                                &preferred_zone);
-        if (!preferred_zone)
+        preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
+                                ac.nodemask ? : &cpuset_current_mems_allowed,
+                                &ac.preferred_zone);
+        if (!ac.preferred_zone)
                 goto out;
-        classzone_idx = zonelist_zone_idx(preferred_zoneref);
+        ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
 
         /* First allocation attempt */
         alloc_mask = gfp_mask|__GFP_HARDWALL;
-        page = get_page_from_freelist(alloc_mask, nodemask, order, zonelist,
-                        high_zoneidx, alloc_flags, preferred_zone,
-                        classzone_idx, migratetype);
+        page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
         if (unlikely(!page)) {
                 /*
                  * Runtime PM, block IO and its error handling path
@@ -2902,15 +2891,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                  */
                 alloc_mask = memalloc_noio_flags(gfp_mask);
 
-                page = __alloc_pages_slowpath(alloc_mask, order,
-                                zonelist, high_zoneidx, nodemask,
-                                preferred_zone, classzone_idx, migratetype);
+                page = __alloc_pages_slowpath(alloc_mask, order, &ac);
         }
 
         if (kmemcheck_enabled && page)
                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 
-        trace_mm_page_alloc(page, order, alloc_mask, migratetype);
+        trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
 
 out:
         /*