ksm: check for ERR_PTR from follow_page()
The follow_page() function can potentially return -EFAULT, so I added checks for this. Also, I silenced an uninitialized-variable warning on my version of gcc (version 4.3.2).

Signed-off-by: Dan Carpenter <error27@gmail.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
453dc65931
commit
22eccdd7d2
1 changed file with 6 additions and 6 deletions
12
mm/ksm.c
12
mm/ksm.c
|
@ -365,7 +365,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
|
||||||
do {
|
do {
|
||||||
cond_resched();
|
cond_resched();
|
||||||
page = follow_page(vma, addr, FOLL_GET);
|
page = follow_page(vma, addr, FOLL_GET);
|
||||||
if (!page)
|
if (IS_ERR_OR_NULL(page))
|
||||||
break;
|
break;
|
||||||
if (PageKsm(page))
|
if (PageKsm(page))
|
||||||
ret = handle_mm_fault(vma->vm_mm, vma, addr,
|
ret = handle_mm_fault(vma->vm_mm, vma, addr,
|
||||||
|
@ -447,7 +447,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
page = follow_page(vma, addr, FOLL_GET);
|
page = follow_page(vma, addr, FOLL_GET);
|
||||||
if (!page)
|
if (IS_ERR_OR_NULL(page))
|
||||||
goto out;
|
goto out;
|
||||||
if (PageAnon(page)) {
|
if (PageAnon(page)) {
|
||||||
flush_anon_page(vma, page, addr);
|
flush_anon_page(vma, page, addr);
|
||||||
|
@ -1086,7 +1086,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
|
||||||
cond_resched();
|
cond_resched();
|
||||||
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
|
tree_rmap_item = rb_entry(*new, struct rmap_item, node);
|
||||||
tree_page = get_mergeable_page(tree_rmap_item);
|
tree_page = get_mergeable_page(tree_rmap_item);
|
||||||
if (!tree_page)
|
if (IS_ERR_OR_NULL(tree_page))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1294,7 +1294,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
|
||||||
if (ksm_test_exit(mm))
|
if (ksm_test_exit(mm))
|
||||||
break;
|
break;
|
||||||
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
|
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
|
||||||
if (*page && PageAnon(*page)) {
|
if (!IS_ERR_OR_NULL(*page) && PageAnon(*page)) {
|
||||||
flush_anon_page(vma, *page, ksm_scan.address);
|
flush_anon_page(vma, *page, ksm_scan.address);
|
||||||
flush_dcache_page(*page);
|
flush_dcache_page(*page);
|
||||||
rmap_item = get_next_rmap_item(slot,
|
rmap_item = get_next_rmap_item(slot,
|
||||||
|
@ -1308,7 +1308,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
|
||||||
up_read(&mm->mmap_sem);
|
up_read(&mm->mmap_sem);
|
||||||
return rmap_item;
|
return rmap_item;
|
||||||
}
|
}
|
||||||
if (*page)
|
if (!IS_ERR_OR_NULL(*page))
|
||||||
put_page(*page);
|
put_page(*page);
|
||||||
ksm_scan.address += PAGE_SIZE;
|
ksm_scan.address += PAGE_SIZE;
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
@ -1367,7 +1367,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
|
||||||
static void ksm_do_scan(unsigned int scan_npages)
|
static void ksm_do_scan(unsigned int scan_npages)
|
||||||
{
|
{
|
||||||
struct rmap_item *rmap_item;
|
struct rmap_item *rmap_item;
|
||||||
struct page *page;
|
struct page *uninitialized_var(page);
|
||||||
|
|
||||||
while (scan_npages--) {
|
while (scan_npages--) {
|
||||||
cond_resched();
|
cond_resched();
|
||||||
|
|
Loading…
Reference in a new issue