mm: check the return value of lookup_page_ext for all call sites
Per the discussion with Joonsoo Kim [1], we need to check the return value
of lookup_page_ext() for all call sites, since it might return NULL in some
cases, although it is unlikely, e.g. memory hotplug.

Tested with ltp with "page_owner=0".

[1] http://lkml.kernel.org/r/20160519002809.GA10245@js1304-P5Q-DELUXE

[akpm@linux-foundation.org: fix build-breaking typos]
[arnd@arndb.de: fix build problems from lookup_page_ext]
  Link: http://lkml.kernel.org/r/6285269.2CksypHdYp@wuerfel
[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/1464023768-31025-1-git-send-email-yang.shi@linaro.org
Signed-off-by: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d8bae33ddd
commit f86e427197

5 changed files with 77 additions and 8 deletions
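Every hunk below applies the same defensive pattern: look up the page_ext, bail out if lookup_page_ext() returned NULL, and only then touch page_ext->flags. The following is a minimal sketch of that pattern, assuming CONFIG_PAGE_EXTENSION is enabled; page_has_owner_info() is a hypothetical helper name used only for illustration, while lookup_page_ext(), struct page_ext and PAGE_EXT_OWNER are the real identifiers that appear in the diff.

	/* Sketch only: page_has_owner_info() is not part of this patch. */
	#include <linux/mm.h>
	#include <linux/page_ext.h>

	static bool page_has_owner_info(struct page *page)
	{
		struct page_ext *page_ext = lookup_page_ext(page);

		/*
		 * lookup_page_ext() can return NULL in rare cases such as
		 * memory hotplug (see the commit message), so check it
		 * before dereferencing.
		 */
		if (unlikely(!page_ext))
			return false;

		return test_bit(PAGE_EXT_OWNER, &page_ext->flags);
	}

Read-side helpers fall back to a conservative default when the extension is missing (false here, 0 in __get_page_owner_gfp() below), while write-side helpers simply return.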

include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
 
 static inline bool page_is_young(struct page *page)
 {
-	return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline void set_page_young(struct page *page)
 {
-	set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool test_and_clear_page_young(struct page *page)
 {
-	return test_and_clear_bit(PAGE_EXT_YOUNG,
-				  &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
 }
 
 static inline bool page_is_idle(struct page *page)
 {
-	return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return false;
+
+	return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void set_page_idle(struct page *page)
 {
-	set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	set_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 
 static inline void clear_page_idle(struct page *page)
 {
-	clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
+
+	clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
 }
 #endif /* CONFIG_64BIT */
 

mm/page_alloc.c
@@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);

mm/page_owner.c
@@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
@@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
 void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+
 	struct stack_trace trace = {
 		.nr_entries = 0,
 		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
@@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 		.skip = 3,
 	};
 
+	if (unlikely(!page_ext))
+		return;
+
 	save_stack_trace(&trace);
 
 	page_ext->order = order;
@@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
 void __set_page_owner_migrate_reason(struct page *page, int reason)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
 
 	page_ext->last_migrate_reason = reason;
 }
@@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 gfp_t __get_page_owner_gfp(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns 0 if no valid gfp
+		 * So return 0 here too.
+		 */
+		return 0;
 
 	return page_ext->gfp_mask;
 }
@@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	struct page_ext *new_ext = lookup_page_ext(newpage);
 	int i;
 
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
 	new_ext->order = old_ext->order;
 	new_ext->gfp_mask = old_ext->gfp_mask;
 	new_ext->nr_entries = old_ext->nr_entries;
@@ -193,6 +210,11 @@ void __dump_page_owner(struct page *page)
 	gfp_t gfp_mask = page_ext->gfp_mask;
 	int mt = gfpflags_to_migratetype(gfp_mask);
 
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+
 	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
 		pr_alert("page_owner info is not active (free page?)\n");
 		return;
@@ -251,6 +273,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -317,6 +341,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))

mm/page_poison.c
@@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
 }
 
@@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page)
 	struct page_ext *page_ext;
 
 	page_ext = lookup_page_ext(page);
-	if (!page_ext)
+	if (unlikely(!page_ext))
 		return false;
 
 	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);

mm/vmstat.c
@@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;