make 'user_access_begin()' do 'access_ok()'

commit 594cc251fdd0d231d342d88b2fdff4bc42fb0690 upstream.

Originally, the rule used to be that you'd have to do access_ok()
separately, and then user_access_begin() before actually doing the
direct (optimized) user access.

But experience has shown that people then decide not to do access_ok()
at all, and instead rely on it being implied by other operations or
similar.  Which makes it very hard to verify that the access has
actually been range-checked.

If you use the unsafe direct user accesses, hardware features (either
SMAP - Supervisor Mode Access Protection - on x86, or PAN - Privileged
Access Never - on ARM) do force you to use user_access_begin().  But
nothing really forces the range check.

By putting the range check into user_access_begin(), we actually force
people to do the right thing (tm), and the range check will be visible
near the actual accesses.  We have way too long a history of people
trying to avoid them.
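
As a concrete sketch of the calling convention this establishes (the function name, the variables and the Efault label below are made up for illustration; this stable backport keeps the VERIFY_* type argument that callers already pass to access_ok()):

static int put_one_value(int __user *uptr, int val)
{
	/* The range check and the "begin" are now a single call. */
	if (!user_access_begin(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;

	unsafe_put_user(val, uptr, Efault);
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}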

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Miles Chen <miles.chen@mediatek.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Author: Linus Torvalds, 2019-01-04 12:56:09 -08:00
Committer: Greg Kroah-Hartman
parent 6f89ad2e79
commit 216284c4a1
7 changed files with 40 additions and 20 deletions

arch/x86/include/asm/uaccess.h

@@ -711,7 +711,17 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-#define user_access_begin() __uaccess_begin()
+static __must_check inline bool user_access_begin(int type,
+						   const void __user *ptr,
+						   size_t len)
+{
+	if (unlikely(!access_ok(type, ptr, len)))
+		return 0;
+	__uaccess_begin();
+	return 1;
+}
+
+#define user_access_begin(a, b, c) user_access_begin(a, b, c)
 #define user_access_end() __uaccess_end()
 
 #define unsafe_put_user(x, ptr, err_label) \

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -1604,7 +1604,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
 			 * happened we would make the mistake of assuming that the
 			 * relocations were valid.
 			 */
-			user_access_begin();
+			if (!user_access_begin(VERIFY_WRITE, urelocs, size))
+				goto end_user;
+
 			for (copied = 0; copied < nreloc; copied++)
 				unsafe_put_user(-1,
 						&urelocs[copied].presumed_offset,
@@ -2649,7 +2651,17 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		unsigned int i;
 
 		/* Copy the new buffer offsets back to the user's exec list. */
-		user_access_begin();
+		/*
+		 * Note: count * sizeof(*user_exec_list) does not overflow,
+		 * because we checked 'count' in check_buffer_count().
+		 *
+		 * And this range already got effectively checked earlier
+		 * when we did the "copy_from_user()" above.
+		 */
+		if (!user_access_begin(VERIFY_WRITE, user_exec_list,
+				       count * sizeof(*user_exec_list)))
+			goto end_user;
+
 		for (i = 0; i < args->buffer_count; i++) {
 			if (!(exec2_list[i].offset & UPDATE))
 				continue;

include/linux/uaccess.h

@@ -267,7 +267,7 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
 	probe_kernel_read(&retval, addr, sizeof(retval))
 
 #ifndef user_access_begin
-#define user_access_begin() do { } while (0)
+#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
 #define user_access_end() do { } while (0)
 #define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
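
Hand-expanding the fallback above shows what the new convention means on an architecture that does not provide its own unsafe accessors; the variable names here are illustrative only:

	/* With the generic fallback macros, this sequence ... */
	if (!user_access_begin(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;
	unsafe_put_user(val, uptr, Efault);
	user_access_end();

	/* ... expands to roughly the following: a plain access_ok()
	 * range check followed by an ordinary __put_user().
	 */
	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
		return -EFAULT;
	do { if (unlikely(__put_user(val, uptr))) goto Efault; } while (0);
	do { } while (0);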

kernel/compat.c

@@ -354,10 +354,9 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
-	if (!access_ok(VERIFY_READ, umask, bitmap_size / 8))
+	if (!user_access_begin(VERIFY_READ, umask, bitmap_size / 8))
 		return -EFAULT;
 
-	user_access_begin();
 	while (nr_compat_longs > 1) {
 		compat_ulong_t l1, l2;
 		unsafe_get_user(l1, umask++, Efault);
@@ -384,10 +383,9 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
 	bitmap_size = ALIGN(bitmap_size, BITS_PER_COMPAT_LONG);
 	nr_compat_longs = BITS_TO_COMPAT_LONGS(bitmap_size);
 
-	if (!access_ok(VERIFY_WRITE, umask, bitmap_size / 8))
+	if (!user_access_begin(VERIFY_WRITE, umask, bitmap_size / 8))
 		return -EFAULT;
 
-	user_access_begin();
 	while (nr_compat_longs > 1) {
 		unsigned long m = *mask++;
 		unsafe_put_user((compat_ulong_t)m, umask++, Efault);

kernel/exit.c

@@ -1617,10 +1617,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
 	if (!infop)
 		return err;
 
-	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
 		return -EFAULT;
 
-	user_access_begin();
 	unsafe_put_user(signo, &infop->si_signo, Efault);
 	unsafe_put_user(0, &infop->si_errno, Efault);
 	unsafe_put_user(info.cause, &infop->si_code, Efault);
@@ -1745,10 +1744,9 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 	if (!infop)
 		return err;
 
-	if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
+	if (!user_access_begin(VERIFY_WRITE, infop, sizeof(*infop)))
 		return -EFAULT;
 
-	user_access_begin();
 	unsafe_put_user(signo, &infop->si_signo, Efault);
 	unsafe_put_user(0, &infop->si_errno, Efault);
 	unsafe_put_user(info.cause, &infop->si_code, Efault);

lib/strncpy_from_user.c

@@ -115,10 +115,11 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
 		kasan_check_write(dst, count);
 		check_object_size(dst, count, false);
-		user_access_begin();
-		retval = do_strncpy_from_user(dst, src, count, max);
-		user_access_end();
-		return retval;
+		if (user_access_begin(VERIFY_READ, src, max)) {
+			retval = do_strncpy_from_user(dst, src, count, max);
+			user_access_end();
+			return retval;
+		}
 	}
 
 	return -EFAULT;
 }

lib/strnlen_user.c

@@ -114,10 +114,11 @@ long strnlen_user(const char __user *str, long count)
 		unsigned long max = max_addr - src_addr;
 		long retval;
 
-		user_access_begin();
-		retval = do_strnlen_user(str, count, max);
-		user_access_end();
-		return retval;
+		if (user_access_begin(VERIFY_READ, str, max)) {
+			retval = do_strnlen_user(str, count, max);
+			user_access_end();
+			return retval;
+		}
 	}
 
 	return 0;
 }