diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c03273807182..bd031a4c738e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2515,21 +2515,18 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			hugepage_add_new_anon_rmap(page, vma, address);
 		}
 	} else {
+		/*
+		 * If a memory error occurs between mmap() and fault, some processes
+		 * may not have a hwpoisoned swap entry for the errored virtual
+		 * address, so block the hugepage fault with this PG_hwpoison check.
+		 */
+		if (unlikely(PageHWPoison(page))) {
+			ret = VM_FAULT_HWPOISON;
+			goto backout_unlocked;
+		}
 		page_dup_rmap(page);
 	}
 
-	/*
-	 * Since memory error handler replaces pte into hwpoison swap entry
-	 * at the time of error handling, a process which reserved but not have
-	 * the mapping to the error hugepage does not have hwpoison swap entry.
-	 * So we need to block accesses from such a process by checking
-	 * PG_hwpoison bit here.
-	 */
-	if (unlikely(PageHWPoison(page))) {
-		ret = VM_FAULT_HWPOISON;
-		goto backout_unlocked;
-	}
-
 	/*
 	 * If we are going to COW a private mapping later, we examine the
 	 * pending reservations for this page now. This will ensure that
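
The scenario described in the new comment can be exercised from userspace. The program below is a hypothetical reproducer sketch, not part of the patch: it assumes a hugetlbfs mount at /dev/hugepages, a 2MiB hugepage size, CAP_SYS_ADMIN, and a kernel with CONFIG_MEMORY_FAILURE and hwpoison injection support for hugepages via MADV_HWPOISON. The parent faults the shared hugepage in and injects poison; the child inherited the mapping at fork() but has not faulted yet, so error handling leaves it without a hwpoisoned swap entry, and only the PG_hwpoison check added here turns its late first fault into VM_FAULT_HWPOISON (seen as SIGBUS).

/*
 * Hypothetical reproducer sketch -- not part of the patch.  Assumes a
 * hugetlbfs mount at /dev/hugepages, a 2MiB hugepage size, CAP_SYS_ADMIN,
 * and a kernel with CONFIG_MEMORY_FAILURE and MADV_HWPOISON support for
 * hugepages.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed hugepage size */

int main(void)
{
	int fd, status;
	char *map;
	pid_t pid;

	fd = open("/dev/hugepages/poisontest", O_CREAT | O_RDWR, 0600);
	if (fd < 0 || ftruncate(fd, HPAGE_SIZE)) {
		perror("setup");
		return 1;
	}
	map = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	pid = fork();
	if (pid == 0) {
		/*
		 * The child inherited the mapping at fork() but has not
		 * faulted it in, so error handling below never installs a
		 * hwpoison swap entry in its page tables.
		 */
		sleep(1);			/* crude wait for injection */
		printf("child read %d\n", map[0]);	/* expect SIGBUS here */
		_exit(0);
	}

	map[0] = 1;				/* parent faults the page in */
	/* Simulate a memory error on the hugepage (needs CAP_SYS_ADMIN). */
	if (madvise(map, HPAGE_SIZE, MADV_HWPOISON))
		perror("madvise(MADV_HWPOISON)");

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status) && WTERMSIG(status) == SIGBUS)
		printf("child got SIGBUS on its late first fault\n");

	unlink("/dev/hugepages/poisontest");
	return 0;
}

Note also that the check now lives only in the else branch, i.e. the path where the page was found in the page cache; presumably a hugepage freshly allocated by this very fault cannot already be hwpoisoned, so checking it after the if/else was redundant for that path.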