mm: remove unused GUP flags
GUP_FLAGS_IGNORE_VMA_PERMISSIONS and GUP_FLAGS_IGNORE_SIGKILL were flags added solely to prevent __get_user_pages() from doing some of what it usually does, in the munlock case: we can now remove them. Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk> Acked-by: Rik van Riel <riel@redhat.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Nick Piggin <npiggin@suse.de> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Minchan Kim <minchan.kim@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
408e82b78b
commit
1c3aff1cee
3 changed files with 8 additions and 18 deletions
|
@@ -250,10 +250,8 @@ static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
|
|||
}
|
||||
#endif /* CONFIG_SPARSEMEM */
|
||||
|
||||
#define GUP_FLAGS_WRITE 0x1
|
||||
#define GUP_FLAGS_FORCE 0x2
|
||||
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4
|
||||
#define GUP_FLAGS_IGNORE_SIGKILL 0x8
|
||||
#define GUP_FLAGS_WRITE 0x01
|
||||
#define GUP_FLAGS_FORCE 0x02
|
||||
|
||||
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
||||
unsigned long start, int len, int flags,
|
||||
|
|
14
mm/memory.c
14
mm/memory.c
|
@@ -1217,8 +1217,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
unsigned int vm_flags = 0;
|
||||
int write = !!(flags & GUP_FLAGS_WRITE);
|
||||
int force = !!(flags & GUP_FLAGS_FORCE);
|
||||
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
|
||||
int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
|
||||
|
||||
if (nr_pages <= 0)
|
||||
return 0;
|
||||
|
@@ -1244,7 +1242,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
pte_t *pte;
|
||||
|
||||
/* user gate pages are read-only */
|
||||
if (!ignore && write)
|
||||
if (write)
|
||||
return i ? : -EFAULT;
|
||||
if (pg > TASK_SIZE)
|
||||
pgd = pgd_offset_k(pg);
|
||||
|
@@ -1278,7 +1276,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
|
||||
if (!vma ||
|
||||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
|
||||
(!ignore && !(vm_flags & vma->vm_flags)))
|
||||
!(vm_flags & vma->vm_flags))
|
||||
return i ? : -EFAULT;
|
||||
|
||||
if (is_vm_hugetlb_page(vma)) {
|
||||
|
@@ -1298,13 +1296,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
|
||||
/*
|
||||
* If we have a pending SIGKILL, don't keep faulting
|
||||
* pages and potentially allocating memory, unless
|
||||
* current is handling munlock--e.g., on exit. In
|
||||
* that case, we are not allocating memory. Rather,
|
||||
* we're only unlocking already resident/mapped pages.
|
||||
* pages and potentially allocating memory.
|
||||
*/
|
||||
if (unlikely(!ignore_sigkill &&
|
||||
fatal_signal_pending(current)))
|
||||
if (unlikely(fatal_signal_pending(current)))
|
||||
return i ? i : -ERESTARTSYS;
|
||||
|
||||
if (write)
|
||||
|
|
|
@@ -176,7 +176,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
int i;
|
||||
int write = !!(flags & GUP_FLAGS_WRITE);
|
||||
int force = !!(flags & GUP_FLAGS_FORCE);
|
||||
int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
|
||||
|
||||
/* calculate required read or write permissions.
|
||||
* - if 'force' is set, we only require the "MAY" flags.
|
||||
|
@@ -190,8 +189,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
goto finish_or_fault;
|
||||
|
||||
/* protect what we can, including chardevs */
|
||||
if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
|
||||
(!ignore && !(vm_flags & vma->vm_flags)))
|
||||
if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
|
||||
!(vm_flags & vma->vm_flags))
|
||||
goto finish_or_fault;
|
||||
|
||||
if (pages) {
|
||||
|
@@ -210,7 +209,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
|
|||
return i ? : -EFAULT;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* get a list of pages in an address range belonging to the specified process
|
||||
* and indicate the VMA that covers each page
|
||||
|
|
Loading…
Reference in a new issue