UPSTREAM: mm: use alloc_flags to record if kswapd can wake
This is a preparation patch that copies the GFP flag __GFP_KSWAPD_RECLAIM
into alloc_flags, so that a future patch does not have to pass gfp_mask
through a long call chain.

Note that the setting in the fast path happens in alloc_flags_nofragment()
and it may be claimed that this has nothing to do with ALLOC_NOFRAGMENT.
That's true in this patch but is not true later, so it's done now for
easier review to show where the flag needs to be recorded.

No functional change.

[mgorman@techsingularity.net: ALLOC_KSWAPD flag needs to be applied in the !CONFIG_ZONE_DMA32 case]
Link: http://lkml.kernel.org/r/20181126143503.GO23260@techsingularity.net
Link: http://lkml.kernel.org/r/20181123114528.28802-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Change-Id: I3f54fbfd87f02bd9f926a3913d88ba3055dde33c
Git-commit: 0a79cdad5eb213b3a629e624565b1b3bf9192b7c
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
(cherry picked from commit 0a79cdad5eb213b3a629e624565b1b3bf9192b7c)
Signed-off-by: Mark Salyzyn <salyzyn@google.com>
Bug: 150378964
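For review context, the pattern the patch introduces can be shown in isolation with a minimal, self-contained C sketch. This is not kernel code: the bit values and the helper names (record_kswapd_flag, maybe_wake_kswapd) are illustrative stand-ins, not the kernel's. The idea is to translate the GFP bit into the allocator-internal flags word once, near the top of the call chain, so deeper code can test alloc_flags without ever seeing gfp_mask.

#include <assert.h>
#include <stdio.h>

typedef unsigned int gfp_t;

/* Illustrative stand-ins; the kernel's actual bit values differ. */
#define __GFP_KSWAPD_RECLAIM 0x400u /* caller allows kswapd to be woken */
#define ALLOC_KSWAPD         0x200u /* internal: kswapd may be woken */

/* Copy the GFP bit into the allocator-internal flags word once. */
static unsigned int record_kswapd_flag(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;
	return alloc_flags;
}

/* Deep in a hypothetical call chain: gfp_mask no longer needs to be
 * threaded through; the cheap alloc_flags test is enough. */
static void maybe_wake_kswapd(unsigned int alloc_flags)
{
	if (alloc_flags & ALLOC_KSWAPD)
		puts("would wake kswapd");
	else
		puts("kswapd left alone");
}

int main(void)
{
	/* "No functional change": the recorded bit must mirror the GFP bit. */
	assert(record_kswapd_flag(__GFP_KSWAPD_RECLAIM) & ALLOC_KSWAPD);
	assert(!(record_kswapd_flag(0) & ALLOC_KSWAPD));

	maybe_wake_kswapd(record_kswapd_flag(__GFP_KSWAPD_RECLAIM));
	maybe_wake_kswapd(record_kswapd_flag(0));
	return 0;
}

The payoff, visible in the diff below, is that the slow path tests alloc_flags & ALLOC_KSWAPD, a word it already carries everywhere, instead of re-deriving the answer from gfp_mask.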
This commit is contained in:
parent 8ad4b225e8
commit 112ced56ce
2 changed files with 19 additions and 15 deletions
mm/internal.h
@@ -499,6 +499,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT 0x0
 #endif
+#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
mm/page_alloc.c
@@ -3357,7 +3357,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif /* CONFIG_NUMA */
 
-#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3367,10 +3366,16 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
+alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 {
+	unsigned int alloc_flags = 0;
+
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
+#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		return 0;
+		goto out;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3379,17 +3384,12 @@ alloc_flags_nofragment(struct zone *zone)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		return 0;
+		goto out;
 
-	return ALLOC_NOFRAGMENT;
+out:
+#endif /* CONFIG_ZONE_DMA32 */
+	return alloc_flags;
 }
-#else
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	return 0;
-}
-#endif
 
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
@@ -4042,6 +4042,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -4273,7 +4276,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4331,7 +4334,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4554,7 +4557,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
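The bracketed fixup note in the changelog ("ALLOC_KSWAPD flag needs to be applied in the !CONFIG_ZONE_DMA32 case") is why the diff deletes the old #else stub of alloc_flags_nofragment() and moves the #ifdef inside a single function body. A minimal sketch of that placement, again with illustrative values rather than the kernel's real ones:

#include <stdio.h>

typedef unsigned int gfp_t;

/* Illustrative values only. */
#define __GFP_KSWAPD_RECLAIM 0x400u
#define ALLOC_KSWAPD         0x200u

/* One function body for both configurations: the gfp-to-alloc_flags copy
 * sits outside the #ifdef, so it runs even when CONFIG_ZONE_DMA32 is
 * not defined. */
static unsigned int nofragment_sketch(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;

#ifdef CONFIG_ZONE_DMA32
	/* The zone fragmentation checks (elided here) exist only in this
	 * configuration and may set additional bits before returning. */
#endif
	return alloc_flags;
}

int main(void)
{
	/* Prints 0x200 with or without -DCONFIG_ZONE_DMA32. */
	printf("alloc_flags = %#x\n", nofragment_sketch(__GFP_KSWAPD_RECLAIM));
	return 0;
}

Compiling this with and without -DCONFIG_ZONE_DMA32 yields the same output, which is the property the fixup preserves: ALLOC_KSWAPD is recorded regardless of the build configuration.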