x86: some lock annotations for user copy paths, v2
- introduce might_fault()
- handle the atomic user copy paths correctly

[ mingo@elte.hu: move might_sleep() outside of in_atomic(). ]

Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 3ee1afa308 (parent c10d38dda1)
7 changed files with 39 additions and 47 deletions
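Every user-copy path previously open-coded the same lockdep annotation; this commit factors it into a single helper. A minimal before/after sketch of the transformation, reconstructed from the hunks below (a fragment, not a complete copy routine):

    /* Before: each copy path carried the annotation inline. */
    might_sleep();                               /* a fault may schedule      */
    if (current->mm)                             /* kernel threads have no mm */
        might_lock_read(&current->mm->mmap_sem); /* fault path takes mmap_sem */

    /* After: one call, which (see mm/memory.c below) also skips the
     * mmap_sem assertion when running in_atomic(). */
    might_fault();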
arch/x86/lib/usercopy_32.c

@@ -32,9 +32,7 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
 #define __do_strncpy_from_user(dst, src, count, res)	\
 do {							\
 	int __d0, __d1, __d2;				\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"	testl %1,%1\n"			\
 		"	jz 2f\n"			\

@@ -121,9 +119,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 #define __do_clear_user(addr,size)			\
 do {							\
 	int __d0;					\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"0:	rep; stosl\n"			\
 		"	movl %2,%0\n"			\

@@ -193,9 +189,7 @@ long strnlen_user(const char __user *s, long n)
 	unsigned long mask = -__addr_ok(s);
 	unsigned long res, tmp;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 
 	__asm__ __volatile__(
 		"	testl %0, %0\n"
arch/x86/lib/usercopy_64.c

@@ -15,9 +15,7 @@
 #define __do_strncpy_from_user(dst,src,count,res)	\
 do {							\
 	long __d0, __d1, __d2;				\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__asm__ __volatile__(				\
 		"	testq %1,%1\n"			\
 		"	jz 2f\n"			\

@@ -66,9 +64,7 @@ EXPORT_SYMBOL(strncpy_from_user);
 unsigned long __clear_user(void __user *addr, unsigned long size)
 {
 	long __d0;
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
include/asm-x86/uaccess.h

@@ -8,8 +8,6 @@
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
-#include <linux/lockdep.h>
-#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 
@@ -159,9 +157,7 @@ extern int __get_user_bad(void);
 	int __ret_gu;					\
 	unsigned long __val_gu;				\
 	__chk_user_ptr(ptr);				\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	switch (sizeof(*(ptr))) {			\
 	case 1:						\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);\

@@ -246,9 +242,7 @@ extern void __put_user_8(void);
 	int __ret_pu;					\
 	__typeof__(*(ptr)) __pu_val;			\
 	__chk_user_ptr(ptr);				\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__pu_val = x;					\
 	switch (sizeof(*(ptr))) {			\
 	case 1:						\

@@ -273,9 +267,7 @@ extern void __put_user_8(void);
 #define __put_user_size(x, ptr, size, retval, errret)	\
 do {							\
 	retval = 0;					\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__chk_user_ptr(ptr);				\
 	switch (size) {					\
 	case 1:						\

@@ -328,9 +320,7 @@ do { \
 #define __get_user_size(x, ptr, size, retval, errret)	\
 do {							\
 	retval = 0;					\
-	might_sleep();					\
-	if (current->mm)				\
-		might_lock_read(&current->mm->mmap_sem);\
+	might_fault();					\
 	__chk_user_ptr(ptr);				\
 	switch (size) {					\
 	case 1:						\
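With get_user()/put_user() annotated through might_fault(), lockdep can flag fault-vs-lock inversions even on runs where no fault actually occurs. A hypothetical buggy caller, for illustration only (function and variable names invented, not part of this commit):

    static int set_user_flag(int __user *uptr, int val)
    {
        int err;

        down_write(&current->mm->mmap_sem);  /* bug: held across the copy  */
        err = put_user(val, uptr);           /* a fault here would re-take
                                              * mmap_sem for read: deadlock.
                                              * With CONFIG_PROVE_LOCKING the
                                              * annotation reports it now.  */
        up_write(&current->mm->mmap_sem);
        return err;
    }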
include/asm-x86/uaccess_32.h

@@ -82,9 +82,7 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	return __copy_to_user_inatomic(to, from, n);
 }
 
@@ -139,9 +137,7 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -163,9 +159,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
include/asm-x86/uaccess_64.h

@@ -29,9 +29,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {

@@ -75,9 +73,7 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {

@@ -121,9 +117,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
 
-	might_sleep();
-	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+	might_fault();
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
include/linux/kernel.h

@@ -140,6 +140,15 @@ extern int _cond_resched(void);
 	(__x < 0) ? -__x : __x;		\
 })
 
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+	might_sleep();
+}
+#endif
+
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(long time);
 NORET_TYPE void panic(const char * fmt, ...)
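When CONFIG_PROVE_LOCKING is off, might_fault() collapses to an inline might_sleep(), preserving the old debugging behavior without pulling lockdep.h and sched.h into uaccess.h (hence the include removals above). Any new helper that may dereference a user pointer would call it before the access; a hypothetical example (names invented for illustration):

    static inline int read_user_flag(const int __user *uptr, int *out)
    {
        might_fault();               /* assert: faulting is legal here */
        return get_user(*out, uptr); /* get_user() is annotated too    */
    }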
mm/memory.c

@@ -3016,3 +3016,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	}
 	up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+	might_sleep();
+	/*
+	 * it would be nicer only to annotate paths which are not under
+	 * pagefault_disable, however that requires a larger audit and
+	 * providing helpers like get_user_atomic.
+	 */
+	if (!in_atomic() && current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
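The in_atomic() test is what the changelog means by handling the atomic copy paths correctly: the *_inatomic copy variants stay unannotated, and any annotated helper that is nevertheless reached with page faults disabled is silenced at runtime rather than audited away (the comment's "helpers like get_user_atomic" would be the fuller fix). An illustrative atomic-copy section, not from this commit:

    /* pagefault_disable() raises the preempt count, so a faulting
     * access fails with -EFAULT instead of sleeping; in_atomic() is
     * true here, and might_fault() would skip the mmap_sem assertion. */
    pagefault_disable();
    left = __copy_from_user_inatomic(dst, usrc, len);
    pagefault_enable();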