[SPARC64]: Simplify user fault fixup handling.
Instead of doing byte-at-a-time user accesses to figure out where the fault occurred, read the saved fault_address from the current thread structure.

For the sake of defensive programming, if the fault_address does not fall into the user buffer range, simply assume the whole area faulted. This will cause the fixup for copy_from_user() to clear the entire kernel side buffer.

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5fd29752f0
commit efdc1e2083
3 changed files with 35 additions and 46 deletions
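A minimal standalone sketch of the arithmetic the new compute_size() helper below performs; fault_addr is passed in explicitly here (there is no current_thread_info() outside the kernel), and demo_compute_size() and all addresses are made-up example values, not part of this commit:

#include <stdio.h>

/* Demo only: same math as the kernel helper, with fault_addr taken as a
 * parameter instead of current_thread_info()->fault_address.
 */
static unsigned long demo_compute_size(unsigned long start, unsigned long size,
				       unsigned long fault_addr,
				       unsigned long *offset)
{
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;			/* defensive: treat the whole area as faulted */
	} else {
		*offset = fault_addr - start;	/* bytes copied before the fault */
		size = end - fault_addr;	/* bytes left uncopied */
	}
	return size;
}

int main(void)
{
	unsigned long offset;
	/* A 256-byte user buffer at 0x1000, fault at 0x10a0. */
	unsigned long rem = demo_compute_size(0x1000, 256, 0x10a0, &offset);

	/* Prints "offset=160 remaining=96": copy_from_user_fixup() would
	 * memset() the last 96 bytes of the kernel-side buffer.
	 */
	printf("offset=%lu remaining=%lu\n", offset, rem);
	return 0;
}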
@@ -11,61 +11,56 @@
+/* Calculating the exact fault address when using
+ * block loads and stores can be very complicated.
+ *
+ * Instead of trying to be clever and handling all
+ * of the cases, just fix things up simply here.
+ */
+
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
+{
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long end = start + size;
+
+	if (fault_addr < start || fault_addr >= end) {
+		*offset = 0;
+	} else {
+		*offset = fault_addr - start;
+		size = end - fault_addr;
+	}
+	return size;
+}
+
 unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
 {
-	char *dst = to;
-	const char __user *src = from;
-
-	while (size) {
-		if (__get_user(*dst, src))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	unsigned long offset;
 
-	if (size)
-		memset(dst, 0, size);
+	size = compute_size((unsigned long) from, size, &offset);
+	if (likely(size))
+		memset(to + offset, 0, size);
 
 	return size;
 }
 
 unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
 {
-	char __user *dst = to;
-	const char *src = from;
-
-	while (size) {
-		if (__put_user(*src, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	unsigned long offset;
 
-	return size;
+	return compute_size((unsigned long) to, size, &offset);
 }
 
 unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
 {
-	char __user *dst = to;
-	char __user *src = from;
+	unsigned long fault_addr = current_thread_info()->fault_address;
+	unsigned long start = (unsigned long) to;
+	unsigned long end = start + size;
 
-	while (size) {
-		char tmp;
-
-		if (__get_user(tmp, src))
-			break;
-		if (__put_user(tmp, dst))
-			break;
-		dst++;
-		src++;
-		size--;
-	}
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
+
+	start = (unsigned long) from;
+	end = start + size;
+	if (fault_addr >= start && fault_addr < end)
+		return end - fault_addr;
 
 	return size;
 }
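For context (not part of this commit), the caller-visible contract these fixups preserve is the usual one: copy_from_user() returns the number of bytes it could not copy, and with this change the uncopied tail of the kernel buffer is zeroed rather than left holding stale data. A typical kernel caller, with hypothetical names fetch_from_user/kbuf/ubuf/len, looks like:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

/* Illustrative sketch only; none of these names come from this commit. */
static int fetch_from_user(void *kbuf, const void __user *ubuf, size_t len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* some bytes could not be copied */
	return 0;
}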
@@ -457,7 +457,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	}
 
 	up_read(&mm->mmap_sem);
-	goto fault_done;
+	return;
 
 	/*
 	 * Something tried to access memory that isn't in our memory map..
@@ -469,8 +469,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
-
-	goto fault_done;
+	return;
 
 	/*
 	 * We ran out of memory, or some other thing happened to us that made
@@ -501,9 +500,4 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	/* Kernel mode? Handle exceptions or die */
 	if (regs->tstate & TSTATE_PRIV)
 		goto handle_kernel_fault;
-
-fault_done:
-	/* These values are no longer needed, clear them. */
-	set_thread_fault_code(0);
-	current_thread_info()->fault_address = 0;
 }
@@ -251,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -267,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -283,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
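The only change to these wrappers is marking the fixup call as the cold path. unlikely() is the standard kernel annotation built on GCC's __builtin_expect(); roughly, as defined in include/linux/compiler.h:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

so the compiler lays out the straight-line code for the common case where the copy succeeded and the fixup is never reached.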