diff --git a/mm/internal.h b/mm/internal.h
index 5b9734aff4a5..e47045878a1a 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -499,7 +499,6 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT 0x0
 #endif
-#define ALLOC_KSWAPD 0x200 /* allow waking of kswapd */
 
 enum ttu_flags;
 struct tlbflush_unmap_batch;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6b7d11de5e07..1b3bda7ad21f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3430,6 +3430,7 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3439,16 +3440,10 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
+alloc_flags_nofragment(struct zone *zone)
 {
-	unsigned int alloc_flags = 0;
-
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		alloc_flags |= ALLOC_KSWAPD;
-
-#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		goto out;
+		return 0;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3457,12 +3452,17 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		goto out;
+		return 0;
 
-out:
-#endif /* CONFIG_ZONE_DMA32 */
-	return alloc_flags;
+	return ALLOC_NOFRAGMENT;
 }
+#else
+static inline unsigned int
+alloc_flags_nofragment(struct zone *zone)
+{
+	return 0;
+}
+#endif
 
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
@@ -4115,9 +4115,6 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		alloc_flags |= ALLOC_KSWAPD;
-
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -4349,7 +4346,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4407,7 +4404,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (alloc_flags & ALLOC_KSWAPD)
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4630,7 +4627,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);