hugetlb: redefine hugepage copy functions
This patch modifies hugepage copy functions to have only destination
and source hugepages as arguments for later use.
The old ones are renamed from copy_{gigantic,huge}_page() to
copy_user_{gigantic,huge}_page().
This naming convention is consistent with that between copy_highpage()
and copy_user_highpage().

ChangeLog since v4:
- add blank line between local declaration and code
- remove unnecessary might_sleep()

ChangeLog since v2:
- change copy_huge_page() from macro to inline dummy function
  to avoid compile warning when !CONFIG_HUGETLB_PAGE.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent bf50bab2b3
commit 0ebabb416f

2 changed files with 44 additions and 5 deletions
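Before the diff itself, a quick reference (an editorial sketch, not part
of the patch): the interface split introduced here amounts to the
following prototypes.

	/* Renamed user-context copy for CoW faults: still needs the faulting
	 * address and the VMA, mirroring copy_user_highpage().
	 */
	static void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma);

	/* New exported form: only destination and source hugepages, so it
	 * can be called from contexts that have no user mapping at hand.
	 */
	void copy_huge_page(struct page *dst, struct page *src);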
4 include/linux/hugetlb.h

@@ -44,6 +44,7 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 						int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 void __isolate_hwpoisoned_huge_page(struct page *page);
+void copy_huge_page(struct page *dst, struct page *src);
 
 extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -102,6 +103,9 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
 #define __isolate_hwpoisoned_huge_page(page)	0
+static inline void copy_huge_page(struct page *dst, struct page *src)
+{
+}
 
 #define hugetlb_change_protection(vma, address, end, newprot)
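Why the v2 ChangeLog switched the !CONFIG_HUGETLB_PAGE stub from a macro
to an inline dummy function: an empty macro discards its arguments
before the compiler sees them, which can provoke set-but-unused
warnings in callers, while the empty static inline above still
evaluates and type-checks both pointers. A minimal illustration
(hypothetical caller, not from the patch):

	/* With "#define copy_huge_page(dst, src)" the two parameters below
	 * would look unused when CONFIG_HUGETLB_PAGE is off; with the inline
	 * stub from the hunk above they are consumed and type-checked, and
	 * the call still compiles away to nothing.
	 */
	static void example_caller(struct page *dst, struct page *src)
	{
		copy_huge_page(dst, src);
	}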
45 mm/hugetlb.c

@@ -423,14 +423,14 @@ static void clear_huge_page(struct page *page,
 	}
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src,
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 	struct page *dst_base = dst;
 	struct page *src_base = src;
-	might_sleep();
+
 	for (i = 0; i < pages_per_huge_page(h); ) {
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
@@ -440,14 +440,15 @@ static void copy_gigantic_page(struct page *dst, struct page *src,
 		src = mem_map_next(src, src_base, i);
 	}
 }
-static void copy_huge_page(struct page *dst, struct page *src,
+
+static void copy_user_huge_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src, addr, vma);
+		copy_user_gigantic_page(dst, src, addr, vma);
 		return;
 	}
 
@@ -458,6 +459,40 @@ static void copy_huge_page(struct page *dst, struct page *src,
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
@@ -2412,7 +2447,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	copy_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	/*
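Two closing notes on the new code. First, the MAX_ORDER_NR_PAGES check
exists because the subpages of a gigantic page may not be contiguous in
the mem_map, so plain pointer arithmetic (dst + i) is not safe there;
copy_gigantic_page() walks the pages with mem_map_next() instead.
Second, a sketch of how the two variants are meant to be called
(hypothetical caller, not part of this patch):

	/* CoW fault path, as in hugetlb_cow() above: user context available. */
	copy_user_huge_page(new_page, old_page, address, vma);

	/* VMA-less path (e.g. the "later use" the changelog anticipates):
	 * only the two hugepages themselves are needed.
	 */
	copy_huge_page(new_page, old_page);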