Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "Nothing particularly interesting here, but all important fixes
  nonetheless:

   - Add missing PAN toggling in the futex code

   - Fix missing #include that briefly caused issues in -next

   - Allow changing of vmalloc permissions with set_memory_* (used by
     bpf)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: asm: Explicitly include linux/personality.h in asm/page.h
  arm64: futex.h: Add missing PAN toggling
  arm64: allow vmalloc regions to be set with set_memory_*
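
For context on the last of these: callers such as the arm64 BPF JIT allocate their images in vmalloc space and then tighten permissions with set_memory_*(); before this fix, change_memory_common() refused any range outside the module area. A minimal sketch of the call pattern this enables, assuming a made-up jit_lock_image() wrapper rather than the real bpf_jit_comp.c code:

    /* Sketch only: jit_lock_image() is hypothetical; the set_memory_*
     * calls are the arm64 primitives this pull teaches to accept
     * vmalloc ranges. */
    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>	/* arm64 set_memory_* declarations (at the time) */

    static void *jit_lock_image(size_t len)
    {
    	int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
    	void *img = vmalloc(PAGE_ALIGN(len));	/* one VM_ALLOC area, page mappings only */

    	if (!img)
    		return NULL;

    	/* ...emit instructions into img... */

    	set_memory_ro((unsigned long)img, npages);	/* W^X: drop write first */
    	set_memory_x((unsigned long)img, npages);	/* then allow execution */
    	return img;
    }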
commit df48ab3c2f
Author: Linus Torvalds
Date:   2016-02-04 14:09:55 -08:00

3 changed files with 22 additions and 4 deletions

diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "	.align	3\n"
 "	.quad	1b, 4b, 2b, 4b\n"
 "	.popsection\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");

diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -39,6 +39,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
 #include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
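
The rule this enforces is ordinary header hygiene: asm/page.h expands READ_IMPLIES_EXEC from linux/personality.h, so it must include that header itself rather than rely on its includers having done so; a header reshuffle in -next exposed the latent dependency. A contrived sketch of the same failure mode, with hypothetical headers:

    /* colour.h (hypothetical) */
    #define COLOUR_MAX	255

    /* palette.h (hypothetical): it uses COLOUR_MAX, so it must include
     * colour.h itself. Drop the include below and palette.h still
     * compiles -- but only in files that happen to include colour.h
     * first, which is exactly the latent breakage the asm/page.h fix
     * removes. */
    #include "colour.h"
    #define PALETTE_SIZE	(COLOUR_MAX + 1)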

diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -44,6 +45,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 	unsigned long end = start + size;
 	int ret;
 	struct page_change_data data;
+	struct vm_struct *area;
 
 	if (!PAGE_ALIGNED(addr)) {
 		start &= PAGE_MASK;
@@ -51,10 +53,23 @@ static int change_memory_common(unsigned long addr, int numpages,
 		WARN_ON_ONCE(1);
 	}
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || end >= MODULES_END)
+	/*
+	 * Kernel VA mappings are always live, and splitting live section
+	 * mappings into page mappings may cause TLB conflicts. This means
+	 * we have to ensure that changing the permission bits of the range
+	 * we are operating on does not result in such splitting.
+	 *
+	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+	 * Those are guaranteed to consist entirely of page mappings, and
+	 * splitting is never needed.
+	 *
+	 * So check whether the [addr, addr + size) interval is entirely
+	 * covered by precisely one VM area that has the VM_ALLOC flag set.
+	 */
+	area = find_vm_area((void *)addr);
+	if (!area ||
+	    end > (unsigned long)area->addr + area->size ||
+	    !(area->flags & VM_ALLOC))
 		return -EINVAL;
 
 	if (!numpages)
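
Seen from the caller's side, the new check means a permission change is accepted only when find_vm_area() locates a single VM_ALLOC area covering the whole [addr, addr + size) interval. A small sketch of what now succeeds and what is rejected, assuming a hypothetical pageattr_demo() test function:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>	/* arm64 set_memory_* declarations (at the time) */

    /* Sketch only: exercising the new vmalloc-range rule. */
    static void pageattr_demo(void)
    {
    	void *buf = vmalloc(2 * PAGE_SIZE);	/* one VM_ALLOC area */

    	if (!buf)
    		return;

    	/* Entirely inside the area: passes the new check. */
    	set_memory_ro((unsigned long)buf, 2);
    	set_memory_rw((unsigned long)buf, 2);

    	/* Runs past the end of the area: find_vm_area() still finds it,
    	 * but end > area->addr + area->size, so the request is refused
    	 * with -EINVAL rather than touching whatever mapping follows. */
    	if (set_memory_ro((unsigned long)buf, 1024) == -EINVAL)
    		pr_info("out-of-area range rejected as expected\n");

    	vfree(buf);
    }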