mm: remove write/force parameters from __get_user_pages_unlocked()
This removes the redundant 'write' and 'force' parameters from __get_user_pages_unlocked() to make the use of FOLL_FORCE explicit in callers, as use of this flag can result in surprising behaviour (and hence bugs) within the mm subsystem.

Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
859110d749
commit
d4944b0ece
6 changed files with 34 additions and 19 deletions
|
@@ -1285,8 +1285,7 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
|
|||
int write, int force, struct page **pages, int *locked);
|
||||
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages,
|
||||
unsigned int gup_flags);
|
||||
struct page **pages, unsigned int gup_flags);
|
||||
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages);
|
||||
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
|
||||
|
|
17
mm/gup.c
17
mm/gup.c
|
@@ -875,17 +875,11 @@ EXPORT_SYMBOL(get_user_pages_locked);
|
|||
*/
|
||||
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages,
|
||||
unsigned int gup_flags)
|
||||
struct page **pages, unsigned int gup_flags)
|
||||
{
|
||||
long ret;
|
||||
int locked = 1;
|
||||
|
||||
if (write)
|
||||
gup_flags |= FOLL_WRITE;
|
||||
if (force)
|
||||
gup_flags |= FOLL_FORCE;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
|
||||
&locked, false, gup_flags);
|
||||
|
@@ -915,8 +909,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
|
|||
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages)
|
||||
{
|
||||
unsigned int flags = FOLL_TOUCH;
|
||||
|
||||
if (write)
|
||||
flags |= FOLL_WRITE;
|
||||
if (force)
|
||||
flags |= FOLL_FORCE;
|
||||
|
||||
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
|
||||
write, force, pages, FOLL_TOUCH);
|
||||
pages, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(get_user_pages_unlocked);
|
||||
|
||||
|
|
12
mm/nommu.c
12
mm/nommu.c
|
@@ -185,8 +185,7 @@ EXPORT_SYMBOL(get_user_pages_locked);
|
|||
|
||||
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages,
|
||||
unsigned int gup_flags)
|
||||
struct page **pages, unsigned int gup_flags)
|
||||
{
|
||||
long ret;
|
||||
down_read(&mm->mmap_sem);
|
||||
|
@@ -200,8 +199,15 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
|
|||
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
|
||||
int write, int force, struct page **pages)
|
||||
{
|
||||
unsigned int flags = 0;
|
||||
|
||||
if (write)
|
||||
flags |= FOLL_WRITE;
|
||||
if (force)
|
||||
flags |= FOLL_FORCE;
|
||||
|
||||
return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
|
||||
write, force, pages, 0);
|
||||
pages, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(get_user_pages_unlocked);
|
||||
|
||||
|
|
|
@@ -88,12 +88,16 @@ static int process_vm_rw_single_vec(unsigned long addr,
|
|||
ssize_t rc = 0;
|
||||
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
|
||||
/ sizeof(struct pages *);
|
||||
unsigned int flags = FOLL_REMOTE;
|
||||
|
||||
/* Work out address and page range required */
|
||||
if (len == 0)
|
||||
return 0;
|
||||
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
|
||||
|
||||
if (vm_write)
|
||||
flags |= FOLL_WRITE;
|
||||
|
||||
while (!rc && nr_pages && iov_iter_count(iter)) {
|
||||
int pages = min(nr_pages, max_pages_per_loop);
|
||||
size_t bytes;
|
||||
|
@@ -104,8 +108,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
|
|||
* current/current->mm
|
||||
*/
|
||||
pages = __get_user_pages_unlocked(task, mm, pa, pages,
|
||||
vm_write, 0, process_pages,
|
||||
FOLL_REMOTE);
|
||||
process_pages, flags);
|
||||
if (pages <= 0)
|
||||
return -EFAULT;
|
||||
|
||||
|
|
|
@@ -84,7 +84,8 @@ static void async_pf_execute(struct work_struct *work)
|
|||
* mm and might be done in another context, so we must
|
||||
* use FOLL_REMOTE.
|
||||
*/
|
||||
__get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL, FOLL_REMOTE);
|
||||
__get_user_pages_unlocked(NULL, mm, addr, 1, NULL,
|
||||
FOLL_WRITE | FOLL_REMOTE);
|
||||
|
||||
kvm_async_page_present_sync(vcpu, apf);
|
||||
|
||||
|
|
|
@@ -1416,10 +1416,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
|
|||
down_read(&current->mm->mmap_sem);
|
||||
npages = get_user_page_nowait(addr, write_fault, page);
|
||||
up_read(&current->mm->mmap_sem);
|
||||
} else
|
||||
} else {
|
||||
unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
|
||||
|
||||
if (write_fault)
|
||||
flags |= FOLL_WRITE;
|
||||
|
||||
npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
|
||||
write_fault, 0, page,
|
||||
FOLL_TOUCH|FOLL_HWPOISON);
|
||||
page, flags);
|
||||
}
|
||||
if (npages != 1)
|
||||
return npages;
|
||||
|
||||
|
|
Loading…
Reference in a new issue