a866374aec
Introduce pagefault_{disable,enable}() and use these where previously we did
manual preempt increments/decrements to make the pagefault handler do the
atomic thing.

Currently they still rely on the increased preempt count, but do not rely on
the disabled preemption; this might go away in the future.

(NOTE: the extra barrier() in pagefault_disable might fix some holes on
machines which have too many registers for their own good)

[heiko.carstens@de.ibm.com: s390 fix]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
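For context, a minimal caller sketch (not part of this commit; the helper name
copy_chunk_atomic() and its surroundings are hypothetical) of how
pagefault_disable()/pagefault_enable() replace the old manual preempt count
increments/decrements around an in-atomic user copy:

#include <linux/uaccess.h>

/*
 * Hypothetical helper: copy user memory while faults are forced down the
 * atomic path (straight to the fixup table, no mmap_sem, no sleeping).
 */
static unsigned long copy_chunk_atomic(void *dst, const void __user *src,
                                       unsigned long len)
{
        unsigned long uncopied;

        pagefault_disable();            /* was: inc_preempt_count(); */
        uncopied = __copy_from_user_inatomic(dst, src, len);
        pagefault_enable();             /* was: dec_preempt_count(); ... */

        return uncopied;                /* non-zero => the access faulted */
}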
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
 * These routines enable/disable the pagefault handler in that
 * it will not take any locks and go straight to the fixup table.
 *
 * They have great resemblance to the preempt_disable/enable calls
 * and in fact they are identical; this is because currently there is
 * no other way to make the pagefault handlers do this.  So we do
 * disable preemption but we don't necessarily care about that.
 */
static inline void pagefault_disable(void)
{
        inc_preempt_count();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        dec_preempt_count();
        /*
         * make sure we do..
         */
        barrier();
        preempt_check_resched();
}

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user_inatomic(to, from, n);
}

static inline unsigned long __copy_from_user_nocache(void *to,
                                const void __user *from, unsigned long n)
{
        return __copy_from_user(to, from, n);
}

#endif          /* ARCH_HAS_NOCACHE_UACCESS */

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from - its type is typeof(retval)*
 * @retval: read into this variable
 *
 * Safely read from address @addr into variable @retval.  If a kernel fault
 * happens, handle that and return -EFAULT.
 * We ensure that the __get_user() is executed in atomic context so that
 * do_page_fault() doesn't attempt to take mmap_sem.  This makes
 * probe_kernel_address() suitable for use within regions where the caller
 * already holds mmap_sem, or other locks which nest inside mmap_sem.
 */
#define probe_kernel_address(addr, retval)              \
        ({                                              \
                long ret;                               \
                                                        \
                pagefault_disable();                    \
                ret = __get_user(retval, addr);         \
                pagefault_enable();                     \
                ret;                                    \
        })

#endif          /* __LINUX_UACCESS_H__ */
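A hedged usage sketch for probe_kernel_address() (again not from this commit;
dump_word() is made up for illustration, and <linux/kernel.h> is assumed for
printk()): reading a word through a possibly bogus kernel pointer without
oopsing and without touching mmap_sem.

#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Hypothetical debugging helper built on probe_kernel_address(). */
static int dump_word(unsigned long *addr)
{
        unsigned long val;
        long err;

        err = probe_kernel_address(addr, val);
        if (err)
                return err;             /* -EFAULT: the read faulted */

        printk(KERN_DEBUG "%p: %08lx\n", addr, val);
        return 0;
}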