Merge android-4.19.78 (75337a6) into msm-4.19

* refs/heads/tmp-75337a6:
  ANDROID: usb: gadget: Fix dependency for f_accessory
  ANDROID: properly export new symbols with _GPL tag
  UPSTREAM: mm/kasan: fix false positive invalid-free reports with CONFIG_KASAN_SW_TAGS=y
  UPSTREAM: kasan: initialize tag to 0xff in __kasan_kmalloc
  UPSTREAM: x86/boot: Provide KASAN compatible aliases for string routines
  UPSTREAM: x86/uaccess, kasan: Fix KASAN vs SMAP
  BACKPORT: x86/uaccess: Introduce user_access_{save,restore}()
  UPSTREAM: kasan: fix variable 'tag' set but not used warning
  UPSTREAM: Revert "x86_64: Increase stack size for KASAN_EXTRA"
  UPSTREAM: kasan: fix coccinelle warnings in kasan_p*_table
  UPSTREAM: kasan: fix kasan_check_read/write definitions
  BACKPORT: kasan: remove use after scope bugs detection.
  BACKPORT: kasan: turn off asan-stack for clang-8 and earlier
  UPSTREAM: slub: fix a crash with SLUB_DEBUG + KASAN_SW_TAGS
  UPSTREAM: kasan, slab: remove redundant kasan_slab_alloc hooks
  UPSTREAM: kasan, slab: make freelist stored without tags
  UPSTREAM: kasan, slab: fix conflicts with CONFIG_HARDENED_USERCOPY
  UPSTREAM: kasan: prevent tracing of tags.c
  UPSTREAM: kasan: fix random seed generation for tag-based mode
  UPSTREAM: slub: fix SLAB_CONSISTENCY_CHECKS + KASAN_SW_TAGS
  UPSTREAM: kasan, slub: fix more conflicts with CONFIG_SLAB_FREELIST_HARDENED
  UPSTREAM: kasan, slub: fix conflicts with CONFIG_SLAB_FREELIST_HARDENED
  UPSTREAM: kasan, slub: move kasan_poison_slab hook before page_address
  UPSTREAM: kasan, kmemleak: pass tagged pointers to kmemleak
  UPSTREAM: kasan: fix assigning tags twice
  UPSTREAM: kasan: mark file common so ftrace doesn't trace it
  UPSTREAM: kasan, arm64: remove redundant ARCH_SLAB_MINALIGN define
  UPSTREAM: kasan: fix krealloc handling for tag-based mode
  UPSTREAM: kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  UPSTREAM: kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  BACKPORT: mm/memblock.c: skip kmemleak for kasan_init()
  UPSTREAM: kasan: add SPDX-License-Identifier mark to source files
  UPSTREAM: kasan: update documentation
  UPSTREAM: kasan, arm64: select HAVE_ARCH_KASAN_SW_TAGS
  UPSTREAM: kasan: add __must_check annotations to kasan hooks
  UPSTREAM: kasan, mm, arm64: tag non slab memory allocated via pagealloc
  UPSTREAM: kasan, arm64: add brk handler for inline instrumentation
  UPSTREAM: kasan: add hooks implementation for tag-based mode
  UPSTREAM: mm: move obj_to_index to include/linux/slab_def.h
  UPSTREAM: kasan: add bug reporting routines for tag-based mode
  UPSTREAM: kasan: split out generic_report.c from report.c
  UPSTREAM: kasan, mm: perform untagged pointers comparison in krealloc
  BACKPORT: kasan, arm64: enable top byte ignore for the kernel
  BACKPORT: kasan, arm64: fix up fault handling logic
  UPSTREAM: kasan: preassign tags to objects with ctors or SLAB_TYPESAFE_BY_RCU
  UPSTREAM: kasan, arm64: untag address in _virt_addr_is_linear
  UPSTREAM: kasan: add tag related helper functions
  UPSTREAM: arm64: move untagged_addr macro from uaccess.h to memory.h
  BACKPORT: kasan: initialize shadow to 0xff for tag-based mode
  BACKPORT: kasan: rename kasan_zero_page to kasan_early_shadow_page
  UPSTREAM: kasan, arm64: adjust shadow size for tag-based mode
  BACKPORT: kasan: add CONFIG_KASAN_GENERIC and CONFIG_KASAN_SW_TAGS
  UPSTREAM: kasan: rename source files to reflect the new naming scheme
  UPSTREAM: kasan: move common generic and tag-based code to common.c
  UPSTREAM: kasan, slub: handle pointer tags in early_kmem_cache_node_alloc
  UPSTREAM: kasan, mm: change hooks signatures
  UPSTREAM: arm64: add EXPORT_SYMBOL_NOKASAN()
  BACKPORT: compiler: remove __no_sanitize_address_or_inline again
  UPSTREAM: mm/kasan/quarantine.c: make quarantine_lock a raw_spinlock_t
  UPSTREAM: lib/test_kasan.c: add tests for several string/memory API functions
  UPSTREAM: arm64: lib: use C string functions with KASAN enabled
  UPSTREAM: compiler: introduce __no_sanitize_address_or_inline
  UPSTREAM: arm64: Fix typo in a comment in arch/arm64/mm/kasan_init.c
  ANDROID: enable CONFIG_ION_SYSTEM_HEAP for GKI
  Update ABI definition after libabigail upgrade
  ANDROID: update abi due to 4.19.75 changes
  ANDROID: Remove CONFIG_USELIB from x86 gki config
  ANDROID: net: enable wireless core features with GKI_LEGACY_WEXT_ALLCONFIG
  ANDROID: arm64: bpf: implement arch_bpf_jit_check_func
  ANDROID: bpf: validate bpf_func when BPF_JIT is enabled with CFI
  UPSTREAM: kcm: use BPF_PROG_RUN
  ANDROID: gki_defconfig: CONFIG_MMC_BLOCK=m
  UPSTREAM: psi: get poll_work to run when calling poll syscall next time
  UPSTREAM: sched/psi: Do not require setsched permission from the trigger creator
  UPSTREAM: sched/psi: Reduce psimon FIFO priority
  ANDROID: gki_defconfig: Enable HiSilicon SoCs
  UPSTREAM: PCI: kirin: Fix section mismatch warning
  ANDROID: gki_defconfig: Enable SERIAL_DEV_BUS
  ANDROID: gki_defconfig: Add GKI_HACKS_to_FIX config
  ANDROID: init: GKI: enable hidden configs for GPIO
  ANDROID: init: GKI: enable hidden configs for SND_SOC
  ANDROID: init: GKI: enable hidden configs for regmap
  ANDROID: init: GKI: enable hidden configs for DRM
  ANDROID: init: GKI: add GKI_HACKS_TO_FIX
  ABI: Update ABI after fscrypto merge
  ANDROID: gki_defconfig: enable CONFIG_UIO
  UPSTREAM: ALSA: pcm: add support for 352.8KHz and 384KHz sample rate
  ANDROID: Log which device failed to suspend in dpm_suspend_start()
  UPSTREAM: arm64: Add support for relocating the kernel with RELR relocations
  ANDROID: update ABI after CONFIG_MMC=m
  CONFIG_MMC=m
  ABI: Update ABI for LTS, 8250 changes
  ANDROID: Removed extraneous serial 8250 configs
  Adding SERIAL_OF_PLATFORM module to gki
  fscrypt: document testing with xfstests
  fscrypt: remove selection of CONFIG_CRYPTO_SHA256
  fscrypt: remove unnecessary includes of ratelimit.h
  fscrypt: don't set policy for a dead directory
  fscrypt: decrypt only the needed blocks in __fscrypt_decrypt_bio()
  fscrypt: support decrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_decrypt_block_inplace()
  fscrypt: handle blocksize < PAGE_SIZE in fscrypt_zeroout_range()
  fscrypt: support encrypting multiple filesystem blocks per page
  fscrypt: introduce fscrypt_encrypt_block_inplace()
  fscrypt: clean up some BUG_ON()s in block encryption/decryption
  fscrypt: rename fscrypt_do_page_crypto() to fscrypt_crypt_block()
  fscrypt: remove the "write" part of struct fscrypt_ctx
  fscrypt: simplify bounce page handling

Conflicts:
	arch/Kconfig
	fs/crypto/bio.c
	fs/ext4/page-io.c
	fs/f2fs/data.c
	fs/f2fs/f2fs.h
	fs/f2fs/super.c
	include/linux/fscrypt.h
	sound/core/pcm_native.c

Change-Id: Ia94ba2ae85e04be9f69115e2da2d69d0dc76545f
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Author: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Date:   2020-02-24 07:45:48 -08:00
Commit: a025b6c910

83 changed files with 103485 additions and 111901 deletions


@ -4,15 +4,25 @@ The Kernel Address Sanitizer (KASAN)
Overview
--------
KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides
a fast and comprehensive solution for finding use-after-free and out-of-bounds
bugs.
KernelAddressSANitizer (KASAN) is a dynamic memory error detector designed to
find out-of-bound and use-after-free bugs. KASAN has two modes: generic KASAN
(similar to userspace ASan) and software tag-based KASAN (similar to userspace
HWASan).
KASAN uses compile-time instrumentation for checking every memory access,
therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
required for detection of out-of-bounds accesses to stack or global variables.
KASAN uses compile-time instrumentation to insert validity checks before every
memory access, and therefore requires a compiler version that supports that.
Currently KASAN is supported only for the x86_64 and arm64 architectures.
Generic KASAN is supported in both GCC and Clang. With GCC it requires version
4.9.2 or later for basic support and version 5.0 or later for detection of
out-of-bounds accesses for stack and global variables and for inline
instrumentation mode (see the Usage section). With Clang it requires version
7.0.0 or later and it doesn't support detection of out-of-bounds accesses for
global variables yet.
Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later.
Currently generic KASAN is supported for the x86_64, arm64, xtensa and s390
architectures, and tag-based KASAN is supported only for arm64.
Usage
-----
@ -21,12 +31,14 @@ To enable KASAN configure kernel with::
CONFIG_KASAN = y
and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and
inline are compiler instrumentation types. The former produces smaller binary
the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
version 5.0 or later.
and choose between CONFIG_KASAN_GENERIC (to enable generic KASAN) and
CONFIG_KASAN_SW_TAGS (to enable software tag-based KASAN).
KASAN works with both SLUB and SLAB memory allocators.
You also need to choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE.
Outline and inline are compiler instrumentation types. The former produces
smaller binary while the latter is 1.1 - 2 times faster.
Both KASAN modes work with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
To disable instrumentation for specific files or directories, add a line
@ -43,85 +55,85 @@ similar to the following to the respective kernel Makefile:
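(The example lines themselves fall in the context elided between hunks; per
the upstream kasan.rst they are ``KASAN_SANITIZE_main.o := n`` for a single
file such as main.o, and ``KASAN_SANITIZE := n`` for all files in one
directory.)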
Error reports
~~~~~~~~~~~~~
A typical out of bounds access report looks like this::
A typical out-of-bounds access generic KASAN report looks like this::
==================================================================
BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3
Write of size 1 by task modprobe/1689
=============================================================================
BUG kmalloc-128 (Not tainted): kasan error
-----------------------------------------------------------------------------
BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
Write of size 1 at addr ffff8801f44ec37b by task insmod/2760
Disabling lock debugging due to kernel taint
INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689
__slab_alloc+0x4b4/0x4f0
kmem_cache_alloc_trace+0x10b/0x190
kmalloc_oob_right+0x3d/0x75 [test_kasan]
init_module+0x9/0x47 [test_kasan]
do_one_initcall+0x99/0x200
load_module+0x2cb3/0x3b20
SyS_finit_module+0x76/0x80
system_call_fastpath+0x12/0x17
INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080
INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720
Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc ........
Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78
ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8
ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558
CPU: 1 PID: 2760 Comm: insmod Not tainted 4.19.0-rc3+ #698
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
Call Trace:
[<ffffffff81cc68ae>] dump_stack+0x46/0x58
[<ffffffff811fd848>] print_trailer+0xf8/0x160
[<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
[<ffffffff811ff0f5>] object_err+0x35/0x40
[<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
[<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0
[<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
[<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40
[<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
[<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
[<ffffffff8120a995>] __asan_store1+0x75/0xb0
[<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan]
[<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
[<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan]
[<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan]
[<ffffffff810002d9>] do_one_initcall+0x99/0x200
[<ffffffff811e4e5c>] ? __vunmap+0xec/0x160
[<ffffffff81114f63>] load_module+0x2cb3/0x3b20
[<ffffffff8110fd70>] ? m_show+0x240/0x240
[<ffffffff81115f06>] SyS_finit_module+0x76/0x80
[<ffffffff81cd3129>] system_call_fastpath+0x12/0x17
dump_stack+0x94/0xd8
print_address_description+0x73/0x280
kasan_report+0x144/0x187
__asan_report_store1_noabort+0x17/0x20
kmalloc_oob_right+0xa8/0xbc [test_kasan]
kmalloc_tests_init+0x16/0x700 [test_kasan]
do_one_initcall+0xa5/0x3ae
do_init_module+0x1b6/0x547
load_module+0x75df/0x8070
__do_sys_init_module+0x1c6/0x200
__x64_sys_init_module+0x6e/0xb0
do_syscall_64+0x9f/0x2c0
entry_SYSCALL_64_after_hwframe+0x44/0xa9
RIP: 0033:0x7f96443109da
RSP: 002b:00007ffcf0b51b08 EFLAGS: 00000202 ORIG_RAX: 00000000000000af
RAX: ffffffffffffffda RBX: 000055dc3ee521a0 RCX: 00007f96443109da
RDX: 00007f96445cff88 RSI: 0000000000057a50 RDI: 00007f9644992000
RBP: 000055dc3ee510b0 R08: 0000000000000003 R09: 0000000000000000
R10: 00007f964430cd0a R11: 0000000000000202 R12: 00007f96445cff88
R13: 000055dc3ee51090 R14: 0000000000000000 R15: 0000000000000000
Allocated by task 2760:
save_stack+0x43/0xd0
kasan_kmalloc+0xa7/0xd0
kmem_cache_alloc_trace+0xe1/0x1b0
kmalloc_oob_right+0x56/0xbc [test_kasan]
kmalloc_tests_init+0x16/0x700 [test_kasan]
do_one_initcall+0xa5/0x3ae
do_init_module+0x1b6/0x547
load_module+0x75df/0x8070
__do_sys_init_module+0x1c6/0x200
__x64_sys_init_module+0x6e/0xb0
do_syscall_64+0x9f/0x2c0
entry_SYSCALL_64_after_hwframe+0x44/0xa9
Freed by task 815:
save_stack+0x43/0xd0
__kasan_slab_free+0x135/0x190
kasan_slab_free+0xe/0x10
kfree+0x93/0x1a0
umh_complete+0x6a/0xa0
call_usermodehelper_exec_async+0x4c3/0x640
ret_from_fork+0x35/0x40
The buggy address belongs to the object at ffff8801f44ec300
which belongs to the cache kmalloc-128 of size 128
The buggy address is located 123 bytes inside of
128-byte region [ffff8801f44ec300, ffff8801f44ec380)
The buggy address belongs to the page:
page:ffffea0007d13b00 count:1 mapcount:0 mapping:ffff8801f7001640 index:0x0
flags: 0x200000000000100(slab)
raw: 0200000000000100 ffffea0007d11dc0 0000001a0000001a ffff8801f7001640
raw: 0000000000000000 0000000080150015 00000001ffffffff 0000000000000000
page dumped because: kasan: bad access detected
Memory state around the buggy address:
ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00
>ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc
^
ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb
ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
ffff8801f44ec200: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
ffff8801f44ec280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
>ffff8801f44ec300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03
^
ffff8801f44ec380: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
ffff8801f44ec400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
==================================================================
The header of the report discribe what kind of bug happened and what kind of
access caused it. It's followed by the description of the accessed slub object
(see 'SLUB Debug output' section in Documentation/vm/slub.rst for details) and
the description of the accessed memory page.
The header of the report provides a short summary of what kind of bug happened
and what kind of access caused it. It's followed by a stack trace of the bad
access, a stack trace of where the accessed memory was allocated (in case bad
access happens on a slab object), and a stack trace of where the object was
freed (in case of a use-after-free bug report). Next comes a description of
the accessed slab object and information about the accessed memory page.
In the last section the report shows memory state around the accessed address.
Reading this part requires some understanding of how KASAN works.
@ -138,18 +150,24 @@ inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
In the report above the arrows point to the shadow byte 03, which means that
the accessed address is partially accessible.
For tag-based KASAN this last report section shows the memory tags around the
accessed address (see Implementation details section).
Implementation details
----------------------
Generic KASAN
~~~~~~~~~~~~~
From a high level, our approach to memory error detection is similar to that
of kmemcheck: use shadow memory to record whether each byte of memory is safe
to access, and use compile-time instrumentation to check shadow memory on each
memory access.
to access, and use compile-time instrumentation to insert checks of shadow
memory on each memory access.
AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory
(e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and
offset to translate a memory address to its corresponding shadow address.
Generic KASAN dedicates 1/8th of kernel memory to its shadow memory (e.g. 16TB
to cover 128TB on x86_64) and uses direct mapping with a scale and offset to
translate a memory address to its corresponding shadow address.
Here is the function which translates an address to its corresponding shadow
address::
@ -162,12 +180,38 @@ address::
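(The function body falls in the context elided between hunks; per the
upstream definition it is)::

    static inline void *kasan_mem_to_shadow(const void *addr)
    {
        return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
            + KASAN_SHADOW_OFFSET;
    }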
where ``KASAN_SHADOW_SCALE_SHIFT = 3``.
Compile-time instrumentation used for checking memory accesses. Compiler inserts
function calls (__asan_load*(addr), __asan_store*(addr)) before each memory
access of size 1, 2, 4, 8 or 16. These functions check whether memory access is
valid or not by checking corresponding shadow memory.
Compile-time instrumentation is used to insert memory access checks. Compiler
inserts function calls (__asan_load*(addr), __asan_store*(addr)) before each
memory access of size 1, 2, 4, 8 or 16. These functions check whether memory
access is valid or not by checking corresponding shadow memory.
GCC 5.0 has possibility to perform inline instrumentation. Instead of making
function calls GCC directly inserts the code to check the shadow memory.
This option significantly enlarges kernel but it gives x1.1-x2 performance
boost over outline instrumented kernel.
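As a rough illustration (a simplified sketch mirroring the checks in
mm/kasan/generic.c, not the literal code the compiler emits), the inlined
check for a ``size``-byte write at ``addr`` behaves like::

    s8 shadow = *(s8 *)kasan_mem_to_shadow((void *)addr);

    if (unlikely(shadow != 0)) {
        /*
         * Shadow 0 means the whole 8-byte granule is accessible,
         * 1..7 means only the first 'shadow' bytes are, and negative
         * values mark redzones and freed memory.
         */
        s8 last = (addr & KASAN_SHADOW_MASK) + size - 1;  /* mask is 7 */

        if (unlikely(last >= shadow))
            kasan_report(addr, size, true, _RET_IP_);
    }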
Software tag-based KASAN
~~~~~~~~~~~~~~~~~~~~~~~~
Tag-based KASAN uses the Top Byte Ignore (TBI) feature of modern arm64 CPUs to
store a pointer tag in the top byte of kernel pointers. Like generic KASAN it
uses shadow memory to store memory tags associated with each 16-byte memory
cell (therefore it dedicates 1/16th of the kernel memory for shadow memory).
On each memory allocation tag-based KASAN generates a random tag, tags the
allocated memory with this tag, and embeds this tag into the returned pointer.
Software tag-based KASAN uses compile-time instrumentation to insert checks
before each memory access. These checks make sure that tag of the memory that
is being accessed is equal to tag of the pointer that is used to access this
memory. In case of a tag mismatch tag-based KASAN prints a bug report.
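Conceptually, each check behaves like the following simplified sketch
(mirroring mm/kasan/tags.c; a real check walks every shadow byte that the
access spans)::

    u8 ptr_tag = (u64)ptr >> 56;            /* the tag lives in bits 63:56 */
    u8 *shadow = kasan_mem_to_shadow(ptr);  /* one tag per 16-byte granule */

    if (ptr_tag != KASAN_TAG_KERNEL && ptr_tag != *shadow)
        kasan_report((unsigned long)ptr, size, write, _RET_IP_);

Here KASAN_TAG_KERNEL (0xff) is the native tag of untagged kernel pointers,
which is never reported against.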
Software tag-based KASAN also has two instrumentation modes (outline, that
emits callbacks to check memory accesses; and inline, that performs the shadow
memory checks inline). With outline instrumentation mode, a bug report is
simply printed from the function that performs the access check. With inline
instrumentation a brk instruction is emitted by the compiler, and a dedicated
brk handler is used to print bug reports.
A potential expansion of this mode is a hardware tag-based mode, which would
use hardware memory tagging support instead of compiler instrumentation and
manual shadow memory manipulation.


@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.
crypto accelerators such as CAAM or CESA that do not support XTS. To
use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
implementation) must be enabled so that ESSIV can be used.
Adiantum is a (primarily) stream cipher-based mode that is fast even
on CPUs without dedicated crypto instructions. It's also a true
@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
without the key is subject to change in the future. It is only meant
as a way to temporarily present valid filenames so that commands like
``rm -r`` work as expected on encrypted directories.
Tests
=====
To test fscrypt, use xfstests, which is Linux's de facto standard
filesystem test suite. First, run all the tests in the "encrypt"
group on the relevant filesystem(s). For example, to test ext4 and
f2fs encryption using `kvm-xfstests
<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::
kvm-xfstests -c ext4,f2fs -g encrypt
UBIFS encryption can also be tested this way, but it should be done in
a separate command, and it takes some time for kvm-xfstests to set up
emulated UBI volumes::
kvm-xfstests -c ubifs -g encrypt
No tests should fail. However, tests that use non-default encryption
modes (e.g. generic/549 and generic/550) will be skipped if the needed
algorithms were not built into the kernel's crypto API. Also, tests
that access the raw block device (e.g. generic/399, generic/548,
generic/549, generic/550) will be skipped on UBIFS.
Besides running the "encrypt" group tests, for ext4 and f2fs it's also
possible to run most xfstests with the "test_dummy_encryption" mount
option. This option causes all new files to be automatically
encrypted with a dummy key, without having to make any API calls.
This tests the encrypted I/O paths more thoroughly. To do this with
kvm-xfstests, use the "encrypt" filesystem configuration::
kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
Because this runs many more tests than "-g encrypt" does, it takes
much longer to run; so also consider using `gce-xfstests
<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
instead of kvm-xfstests::
gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto


@ -954,6 +954,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
endif
# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)

(File diff suppressed because it is too large.)


@ -939,6 +939,20 @@ config PANIC_ON_REFCOUNT_ERROR
or potential memory-leaks) with an object associated with that
reference counter.
# Select if the architecture has support for applying RELR relocations.
config ARCH_HAS_RELR
bool
config RELR
bool "Use RELR relocation packing"
depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
default y
help
Store the kernel's dynamic relocations in the RELR relocation packing
format. Requires a compatible linker (LLD supports this feature), as
well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
are compatible).
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"


@ -107,6 +107,7 @@ config ARM64
select HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@ -1287,6 +1288,7 @@ config ARM64_MODULE_PLTS
config RELOCATABLE
bool
select ARCH_HAS_RELR
help
This builds the kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the


@ -97,10 +97,19 @@ else
TEXT_OFFSET := 0x00080000
endif
ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
KASAN_SHADOW_SCALE_SHIFT := 3
endif
KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \


@ -36,9 +36,11 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_KIRIN=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_SECCOMP=y
@ -74,6 +76,7 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_GKI_HACKS_TO_FIX=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
@ -266,13 +269,10 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_OF_PLATFORM=m
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
@ -284,6 +284,7 @@ CONFIG_SPI=y
CONFIG_SPMI=y
CONFIG_PINCTRL_AMD=y
CONFIG_POWER_AVS=y
CONFIG_POWER_RESET_HISI=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@ -327,11 +328,11 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
CONFIG_MMC=m
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI=m
CONFIG_MMC_SDHCI_PLTFM=m
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
@ -341,6 +342,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_INPUT=y
@ -350,6 +352,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_COMMON_CLK_XGENE is not set
CONFIG_HWSPINLOCK=y


@ -23,6 +23,8 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
#include <asm-generic/export.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
@ -500,6 +502,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#else
#define NOKPROBE(x)
#endif
#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a


@ -16,10 +16,12 @@
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
* 0x800: kernel-mode BUG() and WARN() traps
* 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
#define KASAN_BRK_IMM 0x900
#endif


@ -49,6 +49,10 @@
*/
#define ARCH_DMA_MINALIGN (128)
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
#endif
#ifndef __ASSEMBLY__
#include <linux/bitops.h>


@ -4,12 +4,16 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_KASAN
#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/pgtable-types.h>
#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
#define arch_kasan_reset_tag(addr) __tag_reset(addr)
#define arch_kasan_get_tag(addr) __tag_get(addr)
#ifdef CONFIG_KASAN
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,


@ -74,19 +74,13 @@
#define KERNEL_END _end
/*
* KASAN requires 1/8th of the kernel virtual address space for the shadow
* region. KASAN can bloat the stack significantly, so double the (minimum)
* stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
* on.
* Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
* address space for the shadow region respectively. They can bloat the stack
* significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_THREAD_SHIFT 2
#else
#define KASAN_THREAD_SHIFT 1
#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
@ -220,6 +214,26 @@ static inline unsigned long kaslr_offset(void)
*/
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) \
((__typeof__(addr))sign_extend64((u64)(addr), 55))
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag) ((u64)(tag) << 56)
#define __tag_set(addr, tag) (__typeof__(addr))( \
((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
#define __tag_set(addr, tag) (addr)
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif
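/*
 * Illustrative only (not part of this header): how the helpers above
 * compose. Tags live in bits 63:56, and linear-map kernel addresses have
 * bit 55 set, so the sign extension in untagged_addr() restores the
 * native 0xff top byte:
 *
 *	void *p = __tag_set(kaddr, 0x5a);	// top byte now 0x5a
 *	u8 tag  = __tag_get(p);			// tag == 0x5a
 *	p       = (void *)__tag_reset(p);	// top byte 0xff again
 */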
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
@ -303,7 +317,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
__addr = __tag_set(__addr, page_kasan_tag(page)); \
((void *)__addr); \
})
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@ -311,9 +331,10 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif
#endif
#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
_virt_addr_valid(kaddr))
#define _virt_addr_is_linear(kaddr) \
(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr) \
(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
#include <asm-generic/memory_model.h>


@ -289,6 +289,7 @@
#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
#define TCR_NFD1 (UL(1) << 54)


@ -97,13 +97,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
return ret;
}
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
* up with a tagged userland pointer. Clear the tag to get a sane pointer to
* pass on to access_ok(), for instance.
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs


@ -112,6 +112,8 @@ pe_header:
* x23 stext() .. start_kernel() physical misalignment/KASLR offset
* x28 __create_page_tables() callee preserved temp register
* x19/x20 __primary_switch() callee preserved temp registers
* x24 __primary_switch() .. relocate_kernel()
* current RELR displacement
*/
ENTRY(stext)
bl preserve_boot_args
@ -830,14 +832,93 @@ __relocate_kernel:
0: cmp x9, x10
b.hs 1f
ldp x11, x12, [x9], #24
ldr x13, [x9, #-8]
cmp w12, #R_AARCH64_RELATIVE
ldp x12, x13, [x9], #24
ldr x14, [x9, #-8]
cmp w13, #R_AARCH64_RELATIVE
b.ne 0b
add x13, x13, x23 // relocate
str x13, [x11, x23]
add x14, x14, x23 // relocate
str x14, [x12, x23]
b 0b
1: ret
1:
#ifdef CONFIG_RELR
/*
* Apply RELR relocations.
*
* RELR is a compressed format for storing relative relocations. The
* encoded sequence of entries looks like:
* [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
*
* i.e. start with an address, followed by any number of bitmaps. The
* address entry encodes 1 relocation. The subsequent bitmap entries
* encode up to 63 relocations each, at subsequent offsets following
* the last address entry.
*
* The bitmap entries must have 1 in the least significant bit. The
* assumption here is that an address cannot have 1 in lsb. Odd
* addresses are not supported. Any odd addresses are stored in the RELA
* section, which is handled above.
*
* Excluding the least significant bit in the bitmap, each non-zero
* bit in the bitmap represents a relocation to be applied to
* a corresponding machine word that follows the base address
* word. The second least significant bit represents the machine
* word immediately following the initial address, and each bit
* that follows represents the next word, in linear order. As such,
* a single bitmap can encode up to 63 relocations in a 64-bit object.
*
* In this implementation we store the address of the next RELR table
* entry in x9, the address being relocated by the current address or
* bitmap entry in x13 and the address being relocated by the current
* bit in x14.
*
* Because addends are stored in place in the binary, RELR relocations
* cannot be applied idempotently. We use x24 to keep track of the
* currently applied displacement so that we can correctly relocate if
* __relocate_kernel is called twice with non-zero displacements (i.e.
* if there is both a physical misalignment and a KASLR displacement).
*/
ldr w9, =__relr_offset // offset to reloc table
ldr w10, =__relr_size // size of reloc table
add x9, x9, x11 // __va(.relr)
add x10, x9, x10 // __va(.relr) + sizeof(.relr)
sub x15, x23, x24 // delta from previous offset
cbz x15, 7f // nothing to do if unchanged
mov x24, x23 // save new offset
2: cmp x9, x10
b.hs 7f
ldr x11, [x9], #8
tbnz x11, #0, 3f // branch to handle bitmaps
add x13, x11, x23
ldr x12, [x13] // relocate address entry
add x12, x12, x15
str x12, [x13], #8 // adjust to start of bitmap
b 2b
3: mov x14, x13
4: lsr x11, x11, #1
cbz x11, 6f
tbz x11, #0, 5f // skip bit if not set
ldr x12, [x14] // relocate bit
add x12, x12, x15
str x12, [x14]
5: add x14, x14, #8 // move to next bit's address
b 4b
6: /*
* Move to the next bitmap's address. 8 is the word size, and 63 is the
* number of significant bits in a bitmap entry.
*/
add x13, x13, #(8 * 63)
b 2b
7:
#endif
ret
ENDPROC(__relocate_kernel)
#endif
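/*
 * For readers who find the assembly dense, the loop above is equivalent
 * to this hypothetical C sketch (names are illustrative, not kernel API;
 * 'disp' is the current total displacement held in x23 and 'delta' is
 * disp minus the previously applied displacement, i.e. x15):
 *
 *	u64 *entry = relr_start, *where = NULL;
 *
 *	while (entry < relr_end) {
 *		u64 e = *entry++;
 *
 *		if ((e & 1) == 0) {
 *			// Address entry: relocate the word it points at,
 *			// then leave 'where' at the word that follows.
 *			where = (u64 *)(e + disp);
 *			*where++ += delta;
 *		} else {
 *			// Bitmap entry: bit i (i = 1..63) relocates the
 *			// (i - 1)-th word starting at 'where'.
 *			u64 *p = where;
 *
 *			for (e >>= 1; e != 0; e >>= 1, p++)
 *				if (e & 1)
 *					*p += delta;
 *			where += 63;	// each bitmap entry covers 63 words
 *		}
 *	}
 */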
@ -849,6 +930,9 @@ __primary_switch:
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
mov x24, #0 // no RELR displacement yet
#endif
bl __relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
ldr x8, =__primary_switched


@ -352,6 +352,9 @@ void __init setup_arch(char **cmdline_p)
smp_init_cpus();
smp_build_mpidr_hash();
/* Init percpu seeds for random tags after cpus are set up. */
kasan_init_tags();
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Make sure init_thread_info.ttbr0 always generates translation


@ -35,6 +35,7 @@
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>
#include <asm/atomic.h>
#include <asm/barrier.h>
@ -947,6 +948,58 @@ static struct break_hook bug_break_hook = {
.fn = bug_handler,
};
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_ESR_RECOVER 0x20
#define KASAN_ESR_WRITE 0x10
#define KASAN_ESR_SIZE_MASK 0x0f
#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
bool recover = esr & KASAN_ESR_RECOVER;
bool write = esr & KASAN_ESR_WRITE;
size_t size = KASAN_ESR_SIZE(esr);
u64 addr = regs->regs[0];
u64 pc = regs->pc;
if (user_mode(regs))
return DBG_HOOK_ERROR;
kasan_report(addr, size, write, pc);
/*
* The instrumentation allows to control whether we can proceed after
* a crash was detected. This is done by passing the -recover flag to
* the compiler. Disabling recovery allows to generate more compact
* code.
*
* Unfortunately disabling recovery doesn't work for the kernel right
* now. KASAN reporting is disabled in some contexts (for example when
* the allocator accesses slab object metadata; this is controlled by
* current->kasan_depth). All these accesses are detected by the tool,
* even though the reports for them are not printed.
*
* This is something that might be fixed at some point in the future.
*/
if (!recover)
die("Oops - KASAN", regs, 0);
/* If thread survives, skip over the brk instruction and continue: */
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}
#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
#define KASAN_ESR_MASK 0xffffff00
static struct break_hook kasan_break_hook = {
.esr_val = KASAN_ESR_VAL,
.esr_mask = KASAN_ESR_MASK,
.fn = kasan_handler,
};
#endif
/*
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
@ -954,6 +1007,10 @@ static struct break_hook bug_break_hook = {
int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
@ -961,4 +1018,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
void __init trap_init(void)
{
register_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
register_break_hook(&kasan_break_hook);
#endif
}


@ -188,6 +188,15 @@ SECTIONS
__rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
__rela_size = SIZEOF(.rela.dyn);
#ifdef CONFIG_RELR
.relr.dyn : ALIGN(8) {
*(.relr.dyn)
}
__relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
__relr_size = SIZEOF(.relr.dyn);
#endif
. = ALIGN(SEGMENT_ALIGN);
__initdata_end = .;
__init_end = .;


@ -39,6 +39,7 @@
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@ -126,6 +127,18 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
static inline bool is_ttbr0_addr(unsigned long addr)
{
/* entry assembly clears tags for TTBR0 addrs */
return addr < TASK_SIZE;
}
static inline bool is_ttbr1_addr(unsigned long addr)
{
/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
return arch_kasan_reset_tag(addr) >= VA_START;
}
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@ -135,7 +148,7 @@ void show_pte(unsigned long addr)
pgd_t *pgdp;
pgd_t pgd;
if (addr < TASK_SIZE) {
if (is_ttbr0_addr(addr)) {
/* TTBR0 */
mm = current->active_mm;
if (mm == &init_mm) {
@ -143,7 +156,7 @@ void show_pte(unsigned long addr)
addr);
return;
}
} else if (addr >= VA_START) {
} else if (is_ttbr1_addr(addr)) {
/* TTBR1 */
mm = &init_mm;
} else {
@ -249,7 +262,7 @@ static inline bool is_el1_permission_fault(unsigned int esr,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
if (addr < TASK_SIZE && system_uses_ttbr0_pan())
if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@ -314,7 +327,7 @@ static void __do_user_fault(struct siginfo *info, unsigned int esr)
* type", so we ignore this wrinkle and just return the translation
* fault.)
*/
if (current->thread.fault_address >= TASK_SIZE) {
if (!is_ttbr0_addr(current->thread.fault_address)) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
/*
@ -454,7 +467,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) {
if (is_ttbr0_addr(addr) && is_el1_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
@ -641,7 +654,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
if (addr < TASK_SIZE)
if (is_ttbr0_addr(addr))
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
@ -805,7 +818,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
if (addr > TASK_SIZE)
if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_irq_enable();
@ -820,7 +833,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct siginfo info;
if (user_mode(regs)) {
if (instruction_pointer(regs) > TASK_SIZE)
if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_irq_enable();
}
@ -912,7 +925,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (interrupts_enabled(regs))
trace_hardirqs_off();
if (user_mode(regs) && pc > TASK_SIZE)
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
if (!inf->fn(addr_if_watchpoint, esr, regs)) {


@ -40,7 +40,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
MEMBLOCK_ALLOC_ACCESSIBLE, node);
MEMBLOCK_ALLOC_KASAN, node);
return __pa(p);
}
@ -48,8 +48,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
bool early)
{
if (pmd_none(READ_ONCE(*pmdp))) {
phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
: kasan_alloc_zeroed_page(node);
phys_addr_t pte_phys = early ?
__pa_symbol(kasan_early_shadow_pte)
: kasan_alloc_zeroed_page(node);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
}
@ -61,8 +62,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
bool early)
{
if (pud_none(READ_ONCE(*pudp))) {
phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
: kasan_alloc_zeroed_page(node);
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd)
: kasan_alloc_zeroed_page(node);
__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
}
@ -73,8 +75,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
bool early)
{
if (pgd_none(READ_ONCE(*pgdp))) {
phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
: kasan_alloc_zeroed_page(node);
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud)
: kasan_alloc_zeroed_page(node);
__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
}
@ -88,8 +91,11 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
do {
phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
: kasan_alloc_zeroed_page(node);
phys_addr_t page_phys = early ?
__pa_symbol(kasan_early_shadow_page)
: kasan_alloc_zeroed_page(node);
if (!early)
memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@ -192,7 +198,7 @@ void __init kasan_init(void)
/*
* We are going to perform proper setup of shadow memory.
* At first we should unmap early shadow (clear_pgds() call bellow).
* At first we should unmap early shadow (clear_pgds() call below).
* However, instrumented code couldn't execute without shadow memory.
* tmp_pg_dir used to keep early shadow mapped until full shadow
* setup will be finished.
@ -206,14 +212,14 @@ void __init kasan_init(void)
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
(void *)mod_shadow_start);
kasan_populate_zero_shadow((void *)kimg_shadow_end,
kasan_mem_to_shadow((void *)PAGE_OFFSET));
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
(void *)mod_shadow_start);
kasan_populate_early_shadow((void *)kimg_shadow_end,
kasan_mem_to_shadow((void *)PAGE_OFFSET));
if (kimg_shadow_start > mod_shadow_end)
kasan_populate_zero_shadow((void *)mod_shadow_end,
(void *)kimg_shadow_start);
kasan_populate_early_shadow((void *)mod_shadow_end,
(void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@ -228,14 +234,15 @@ void __init kasan_init(void)
}
/*
* KAsan may reuse the contents of kasan_zero_pte directly, so we
* should make sure that it maps the zero page read-only.
* KAsan may reuse the contents of kasan_early_shadow_pte directly,
* so we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_zero_pte[i],
pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
set_pte(&kasan_early_shadow_pte[i],
pfn_pte(sym_to_pfn(kasan_early_shadow_page),
PAGE_KERNEL_RO));
memset(kasan_zero_page, 0, PAGE_SIZE);
memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
/* At this point kasan is fully initialized. Enable error messages */


@ -47,6 +47,12 @@
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_FLAGS TCR_TBI1
#else
#define TCR_KASAN_FLAGS 0
#endif
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
@ -497,7 +503,7 @@ ENTRY(__cpu_setup)
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
tcr_set_idmap_t0sz x10, x9
/*


@ -11,7 +11,7 @@
#include "../string.c"
#ifdef CONFIG_X86_32
static void *__memcpy(void *dest, const void *src, size_t n)
static void *____memcpy(void *dest, const void *src, size_t n)
{
int d0, d1, d2;
asm volatile(
@ -25,7 +25,7 @@ static void *__memcpy(void *dest, const void *src, size_t n)
return dest;
}
#else
static void *__memcpy(void *dest, const void *src, size_t n)
static void *____memcpy(void *dest, const void *src, size_t n)
{
long d0, d1, d2;
asm volatile(
@ -56,7 +56,7 @@ void *memmove(void *dest, const void *src, size_t n)
const unsigned char *s = src;
if (d <= s || d - s >= n)
return __memcpy(dest, src, n);
return ____memcpy(dest, src, n);
while (n-- > 0)
d[n] = s[n];
@ -71,5 +71,11 @@ void *memcpy(void *dest, const void *src, size_t n)
warn("Avoiding potentially unsafe overlapping memcpy()!");
return memmove(dest, src, n);
}
return __memcpy(dest, src, n);
return ____memcpy(dest, src, n);
}
#ifdef CONFIG_KASAN
extern void *__memset(void *s, int c, size_t n) __alias(memset);
extern void *__memmove(void *dest, const void *src, size_t n) __alias(memmove);
extern void *__memcpy(void *dest, const void *src, size_t n) __alias(memcpy);
#endif


@ -1,3 +1,4 @@
# CONFIG_USELIB is not set
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@ -46,6 +47,7 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_GKI_HACKS_TO_FIX=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
@ -168,6 +170,7 @@ CONFIG_RFKILL=y
# CONFIG_UEVENT_HELPER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_OF=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
@ -236,10 +239,8 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=m
CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
# CONFIG_DEVPORT is not set
@ -285,14 +286,14 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
# CONFIG_MMC_BLOCK is not set
CONFIG_MMC=m
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_SYSTOHC is not set
CONFIG_RTC_DRV_TEST=m
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=m
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_BALLOON=m
@ -302,6 +303,7 @@ CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_PM_DEVFREQ=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y


@ -7,11 +7,7 @@
#endif
#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif


@ -58,6 +58,23 @@ static __always_inline void stac(void)
alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}
static __always_inline unsigned long smap_save(void)
{
unsigned long flags;
asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
return flags;
}
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
}
/* These macros can be used in asm() statements */
#define ASM_CLAC \
ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
@ -69,6 +86,9 @@ static __always_inline void stac(void)
static inline void clac(void) { }
static inline void stac(void) { }
static inline unsigned long smap_save(void) { return 0; }
static inline void smap_restore(unsigned long flags) { }
#define ASM_CLAC
#define ASM_STAC


@ -724,6 +724,9 @@ static __must_check inline bool user_access_begin(int type,
#define user_access_begin(a, b, c) user_access_begin(a, b, c)
#define user_access_end() __uaccess_end()
#define user_access_save() smap_save()
#define user_access_restore(x) smap_restore(x)
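/*
 * Hypothetical usage sketch (not part of this patch): code that can run
 * inside a user_access_begin()/user_access_end() window, such as an
 * instrumentation hook, brackets itself so it does not run with
 * EFLAGS.AC left set:
 *
 *	unsigned long flags = user_access_save();	// clac() under SMAP
 *	// ... e.g. a KASAN check that must not touch user memory ...
 *	user_access_restore(flags);			// restore EFLAGS.AC
 */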
#define unsafe_put_user(x, ptr, err_label) \
do { \
int __pu_err; \


@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
/*
* This is an optimization for KASAN=y case. Since all kasan page tables
* eventually point to the kasan_zero_page we could call note_page()
* eventually point to the kasan_early_shadow_page we could call note_page()
* right away without walking through lower level page tables. This saves
* us dozens of seconds (minutes for 5-level config) while checking for
* W+X mapping or reading kernel_page_tables debugfs file.
@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
void *pt)
{
if (__pa(pt) == __pa(kasan_zero_pmd) ||
(pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
__pa(pt) == __pa(kasan_zero_pud)) {
pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
(pgtable_l5_enabled() &&
__pa(pt) == __pa(kasan_early_shadow_p4d)) ||
__pa(pt) == __pa(kasan_early_shadow_pud)) {
pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
note_page(m, st, __pgprot(prot), 0, 5);
return true;
}


@ -212,7 +212,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
unsigned long next;
if (pgd_none(*pgd)) {
pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
pgd_entry = __pgd(_KERNPG_TABLE |
__pa_nodebug(kasan_early_shadow_p4d));
set_pgd(pgd, pgd_entry);
}
@ -223,7 +224,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
if (!p4d_none(*p4d))
continue;
p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
p4d_entry = __p4d(_KERNPG_TABLE |
__pa_nodebug(kasan_early_shadow_pud));
set_p4d(p4d, p4d_entry);
} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
@ -262,10 +264,11 @@ static struct notifier_block kasan_die_notifier = {
void __init kasan_early_init(void)
{
int i;
pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
__PAGE_KERNEL | _PAGE_ENC;
pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;
/* Mask out unsupported __PAGE_KERNEL bits: */
pte_val &= __default_kernel_pte_mask;
@ -274,16 +277,16 @@ void __init kasan_early_init(void)
p4d_val &= __default_kernel_pte_mask;
for (i = 0; i < PTRS_PER_PTE; i++)
kasan_zero_pte[i] = __pte(pte_val);
kasan_early_shadow_pte[i] = __pte(pte_val);
for (i = 0; i < PTRS_PER_PMD; i++)
kasan_zero_pmd[i] = __pmd(pmd_val);
kasan_early_shadow_pmd[i] = __pmd(pmd_val);
for (i = 0; i < PTRS_PER_PUD; i++)
kasan_zero_pud[i] = __pud(pud_val);
kasan_early_shadow_pud[i] = __pud(pud_val);
for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
kasan_zero_p4d[i] = __p4d(p4d_val);
kasan_early_shadow_p4d[i] = __p4d(p4d_val);
kasan_map_early_shadow(early_top_pgt);
kasan_map_early_shadow(init_top_pgt);
@ -327,7 +330,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
kasan_mem_to_shadow((void *)PAGE_OFFSET));
for (i = 0; i < E820_MAX_ENTRIES; i++) {
@ -339,41 +342,41 @@ void __init kasan_init(void)
shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
PAGE_SIZE);
shadow_cpu_entry_begin = (void *)round_down(
(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);
shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
CPU_ENTRY_AREA_MAP_SIZE);
shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
PAGE_SIZE);
shadow_cpu_entry_end = (void *)round_up(
(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);
kasan_populate_zero_shadow(
kasan_populate_early_shadow(
kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
shadow_cpu_entry_begin);
kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
(unsigned long)shadow_cpu_entry_end, 0);
kasan_populate_zero_shadow(shadow_cpu_entry_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));
kasan_populate_early_shadow(shadow_cpu_entry_end,
kasan_mem_to_shadow((void *)__START_KERNEL_map));
kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
(unsigned long)kasan_mem_to_shadow(_end),
early_pfn_to_nid(__pa(_stext)));
kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);
load_cr3(init_top_pgt);
__flush_tlb_all();
/*
* kasan_zero_page has been used as early shadow memory, thus it may
* contain some garbage. Now we can clear and write protect it, since
* after the TLB flush no one should write to it.
* kasan_early_shadow_page has been used as early shadow memory, thus
* it may contain some garbage. Now we can clear and write protect it,
* since after the TLB flush no one should write to it.
*/
memset(kasan_zero_page, 0, PAGE_SIZE);
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_t pte;
pgprot_t prot;
@ -381,8 +384,8 @@ void __init kasan_init(void)
prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
pgprot_val(prot) &= __default_kernel_pte_mask;
pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
set_pte(&kasan_zero_pte[i], pte);
pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
set_pte(&kasan_early_shadow_pte[i], pte);
}
/* Flush TLBs again to be sure that write protection applied. */
__flush_tlb_all();


@ -25,12 +25,13 @@ void __init kasan_early_init(void)
int i;
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_zero_pte + i,
mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
set_pte(kasan_early_shadow_pte + i,
mk_pte(virt_to_page(kasan_early_shadow_page),
PAGE_KERNEL));
for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
BUG_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
}
early_trap_init();
}
@ -81,13 +82,16 @@ void __init kasan_init(void)
populate(kasan_mem_to_shadow((void *)VMALLOC_START),
kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
/* Write protect kasan_zero_page and zero-initialize it again. */
/*
* Write protect kasan_early_shadow_page and zero-initialize it again.
*/
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_zero_pte + i,
mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
set_pte(kasan_early_shadow_pte + i,
mk_pte(virt_to_page(kasan_early_shadow_page),
PAGE_KERNEL_RO));
local_flush_tlb_all();
memset(kasan_zero_page, 0, PAGE_SIZE);
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
/* At this point kasan is fully initialized. Enable error messages. */
current->kasan_depth = 0;


@ -2025,6 +2025,7 @@ int dpm_prepare(pm_message_t state)
printk(KERN_INFO "PM: Device %s not prepared "
"for power transition: code %d\n",
dev_name(dev), error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}


@ -412,6 +412,7 @@ config USB_CONFIGFS_F_FS
config USB_CONFIGFS_F_ACC
bool "Accessory gadget"
depends on USB_CONFIGFS
depends on HID=y
select USB_F_ACC
help
USB gadget Accessory support


@ -6,7 +6,6 @@ config FS_ENCRYPTION
select CRYPTO_ECB
select CRYPTO_XTS
select CRYPTO_CTS
select CRYPTO_SHA256
select KEYS
help
Enable encryption of files and directories. This


@ -36,8 +36,9 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
if (fscrypt_using_hardware_encryption(page->mapping->host)) {
SetPageUptodate(page);
} else {
int ret = fscrypt_decrypt_page(page->mapping->host,
page, PAGE_SIZE, 0, page->index);
int ret = fscrypt_decrypt_pagecache_blocks(page,
bv->bv_len,
bv->bv_offset);
if (ret)
SetPageError(page);
else if (done)
@ -56,9 +57,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
static void completion_pages(struct work_struct *work)
{
struct fscrypt_ctx *ctx =
container_of(work, struct fscrypt_ctx, r.work);
struct bio *bio = ctx->r.bio;
struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
struct bio *bio = ctx->bio;
__fscrypt_decrypt_bio(bio, true);
fscrypt_release_ctx(ctx);
@ -67,57 +67,29 @@ static void completion_pages(struct work_struct *work)
void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
{
INIT_WORK(&ctx->r.work, completion_pages);
ctx->r.bio = bio;
fscrypt_enqueue_decrypt_work(&ctx->r.work);
INIT_WORK(&ctx->work, completion_pages);
ctx->bio = bio;
fscrypt_enqueue_decrypt_work(&ctx->work);
}
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
struct fscrypt_ctx *ctx;
struct page *bounce_page;
/* The bounce data pages are unmapped. */
if ((*page)->mapping)
return;
/* The bounce data page is unmapped. */
bounce_page = *page;
ctx = (struct fscrypt_ctx *)page_private(bounce_page);
/* restore control page */
*page = ctx->w.control_page;
if (restore)
fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
struct fscrypt_ctx *ctx;
struct page *ciphertext_page = NULL;
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
struct page *ciphertext_page;
struct bio *bio;
int ret, err = 0;
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
ctx = fscrypt_get_ctx(GFP_NOFS);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
if (IS_ERR(ciphertext_page)) {
err = PTR_ERR(ciphertext_page);
goto errout;
}
ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
if (!ciphertext_page)
return -ENOMEM;
while (len--) {
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
PAGE_SIZE, 0, GFP_NOFS);
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
ZERO_PAGE(0), ciphertext_page,
blocksize, 0, GFP_NOFS);
if (err)
goto errout;
@ -127,14 +99,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
goto errout;
}
bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
bio->bi_iter.bi_sector = pblk << (blockbits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT);
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
if (WARN_ON(ret != blocksize)) {
/* should never happen! */
WARN_ON(1);
bio_put(bio);
err = -EIO;
goto errout;
@ -150,7 +119,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
}
err = 0;
errout:
fscrypt_release_ctx(ctx);
fscrypt_free_bounce_page(ciphertext_page);
return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
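As a worked example of the sector arithmetic above (an annotation, not part of the patch): bi_sector counts 512-byte sectors, so pblk << (blockbits - 9) converts a filesystem block number into a sector number. With 4 KiB blocks (blockbits == 12), block 10 starts at sector 10 << 3 == 80.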


@ -58,23 +58,16 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
/**
* fscrypt_release_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
* fscrypt_release_ctx() - Release a decryption context
* @ctx: The decryption context to release.
*
* If the encryption context was allocated from the pre-allocated pool, returns
* it to that pool. Else, frees it.
*
* If there's a bounce page in the context, this frees that.
* If the decryption context was allocated from the pre-allocated pool, return
* it to that pool. Else, free it.
*/
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
unsigned long flags;
if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
ctx->w.bounce_page = NULL;
}
ctx->w.control_page = NULL;
if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
kmem_cache_free(fscrypt_ctx_cachep, ctx);
} else {
@ -86,12 +79,12 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
EXPORT_SYMBOL(fscrypt_release_ctx);
/**
* fscrypt_get_ctx() - Gets an encryption context
* fscrypt_get_ctx() - Get a decryption context
* @gfp_flags: The gfp flag for memory allocation
*
* Allocates and initializes an encryption context.
* Allocate and initialize a decryption context.
*
* Return: A new encryption context on success; an ERR_PTR() otherwise.
* Return: A new decryption context on success; an ERR_PTR() otherwise.
*/
struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
{
@ -99,14 +92,8 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
unsigned long flags;
/*
* We first try getting the ctx from a free list because in
* the common case the ctx will have an allocated and
* initialized crypto tfm, so it's probably a worthwhile
* optimization. For the bounce page, we first try getting it
* from the kernel allocator because that's just about as fast
* as getting it from a list and because a cache of free pages
* should generally be a "last resort" option for a filesystem
* to be able to do its job.
* First try getting a ctx from the free list so that we don't have to
* call into the slab allocator.
*/
spin_lock_irqsave(&fscrypt_ctx_lock, flags);
ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
@ -122,11 +109,31 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
} else {
ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
}
ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
{
return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
}
/**
* fscrypt_free_bounce_page() - free a ciphertext bounce page
*
* Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
* or by fscrypt_alloc_bounce_page() directly.
*/
void fscrypt_free_bounce_page(struct page *bounce_page)
{
if (!bounce_page)
return;
set_page_private(bounce_page, (unsigned long)NULL);
ClearPagePrivate(bounce_page);
mempool_free(bounce_page, fscrypt_bounce_page_pool);
}
EXPORT_SYMBOL(fscrypt_free_bounce_page);
void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
const struct fscrypt_info *ci)
{
@ -140,10 +147,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
}
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
unsigned int offs, gfp_t gfp_flags)
/* Encrypt or decrypt a single filesystem block of file contents */
int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
u64 lblk_num, struct page *src_page,
struct page *dest_page, unsigned int len,
unsigned int offs, gfp_t gfp_flags)
{
union fscrypt_iv iv;
struct skcipher_request *req = NULL;
@ -188,126 +196,158 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
return 0;
}
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
gfp_t gfp_flags)
{
ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
if (ctx->w.bounce_page == NULL)
return ERR_PTR(-ENOMEM);
ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
return ctx->w.bounce_page;
}
/**
* fscypt_encrypt_page() - Encrypts a page
* @inode: The inode for which the encryption should take place
* @page: The page to encrypt. Must be locked for bounce-page
* encryption.
* @len: Length of data to encrypt in @page and encrypted
* data in returned page.
* @offs: Offset of data within @page and returned
* page holding encrypted data.
* @lblk_num: Logical block number. This must be unique for multiple
* calls with same inode, except when overwriting
* previously written data.
* @gfp_flags: The gfp flag for memory allocation
* fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
* @page: The locked pagecache page containing the block(s) to encrypt
* @len: Total size of the block(s) to encrypt. Must be a nonzero
* multiple of the filesystem's block size.
* @offs: Byte offset within @page of the first block to encrypt. Must be
* a multiple of the filesystem's block size.
* @gfp_flags: Memory allocation flags
*
* Encrypts @page using the ctx encryption context. Performs encryption
* either in-place or into a newly allocated bounce page.
* Called on the page write path.
* A new bounce page is allocated, and the specified block(s) are encrypted into
* it. In the bounce page, the ciphertext block(s) will be located at the same
* offsets at which the plaintext block(s) were located in the source page; any
* other parts of the bounce page will be left uninitialized. However, normally
* blocksize == PAGE_SIZE and the whole page is encrypted at once.
*
* Bounce page allocation is the default.
* In this case, the contents of @page are encrypted and stored in an
* allocated bounce page. @page has to be locked and the caller must call
* fscrypt_restore_control_page() on the returned ciphertext page to
* release the bounce buffer and the encryption context.
* This is for use by the filesystem's ->writepages() method.
*
* In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
* fscrypt_operations. Here, the input-page is returned with its content
* encrypted.
*
* Return: A page with the encrypted content on success. Else, an
* error value or NULL.
* Return: the new encrypted bounce page on success; an ERR_PTR() on failure
*/
struct page *fscrypt_encrypt_page(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
gfp_t gfp_flags)
{
struct fscrypt_ctx *ctx;
struct page *ciphertext_page = page;
const struct inode *inode = page->mapping->host;
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
struct page *ciphertext_page;
u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
(offs >> blockbits);
unsigned int i;
int err;
if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
/* with inplace-encryption we just encrypt the page */
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
ciphertext_page, len, offs,
gfp_flags);
if (err)
return ERR_PTR(err);
return ciphertext_page;
}
if (WARN_ON_ONCE(!PageLocked(page)))
return ERR_PTR(-EINVAL);
ctx = fscrypt_get_ctx(gfp_flags);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
return ERR_PTR(-EINVAL);
/* The encryption operation will require a bounce page. */
ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
if (IS_ERR(ciphertext_page))
goto errout;
ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
if (!ciphertext_page)
return ERR_PTR(-ENOMEM);
ctx->w.control_page = page;
err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
page, ciphertext_page, len, offs,
gfp_flags);
if (err) {
ciphertext_page = ERR_PTR(err);
goto errout;
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
page, ciphertext_page,
blocksize, i, gfp_flags);
if (err) {
fscrypt_free_bounce_page(ciphertext_page);
return ERR_PTR(err);
}
}
SetPagePrivate(ciphertext_page);
set_page_private(ciphertext_page, (unsigned long)ctx);
lock_page(ciphertext_page);
return ciphertext_page;
errout:
fscrypt_release_ctx(ctx);
set_page_private(ciphertext_page, (unsigned long)page);
return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
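A minimal caller sketch for the new API (an annotation, not part of the patch; write_one_encrypted_page() and submit_page_for_io() are hypothetical names, only the fscrypt calls are real):

static int write_one_encrypted_page(struct page *page, gfp_t gfp_flags)
{
	struct page *bounce_page;

	/* Encrypt the whole page into a freshly allocated bounce page. */
	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       gfp_flags);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	submit_page_for_io(bounce_page);	/* fs-specific bio submission */

	/* On I/O completion: fscrypt_free_bounce_page(bounce_page); */
	return 0;
}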
/**
* fscrypt_decrypt_page() - Decrypts a page in-place
* @inode: The corresponding inode for the page to decrypt.
* @page: The page to decrypt. Must be locked in case
* it is a writeback page (FS_CFLG_OWN_PAGES unset).
* @len: Number of bytes in @page to be decrypted.
* @offs: Start of data in @page.
* @lblk_num: Logical block number.
* fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to encrypt
* @len: Size of block to encrypt. Doesn't need to be a multiple of the
* fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
* @offs: Byte offset within @page at which the block to encrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
* @gfp_flags: Memory allocation flags
*
* Decrypts page in-place using the ctx encryption context.
* Encrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
* Called from the read completion callback.
*
* Return: Zero on success, non-zero otherwise.
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs, u64 lblk_num)
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
{
if (WARN_ON_ONCE(!PageLocked(page) &&
!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
/**
* fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
* @page: The locked pagecache page containing the block(s) to decrypt
* @len: Total size of the block(s) to decrypt. Must be a nonzero
* multiple of the filesystem's block size.
* @offs: Byte offset within @page of the first block to decrypt. Must be
* a multiple of the filesystem's block size.
*
* The specified block(s) are decrypted in-place within the pagecache page,
* which must still be locked and not uptodate. Normally, blocksize ==
* PAGE_SIZE and the whole page is decrypted at once.
*
* This is for use by the filesystem's ->readpages() method.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
unsigned int offs)
{
const struct inode *inode = page->mapping->host;
const unsigned int blockbits = inode->i_blkbits;
const unsigned int blocksize = 1 << blockbits;
u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
(offs >> blockbits);
unsigned int i;
int err;
if (WARN_ON_ONCE(!PageLocked(page)))
return -EINVAL;
return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
return -EINVAL;
for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
page, blocksize, i, GFP_NOFS);
if (err)
return err;
}
return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
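As a worked example of the lblk_num computation shared by the encrypt and decrypt helpers (an annotation, not from the patch): with 4 KiB pages and 1 KiB filesystem blocks (PAGE_SHIFT == 12, blockbits == 10), page->index == 3 and offs == 2048 give lblk_num == (3 << 2) + (2048 >> 10) == 14, i.e. the third 1 KiB block of the fourth page.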
/**
* fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
* @inode: The inode to which this block belongs
* @page: The page containing the block to decrypt
* @len: Size of block to decrypt. Doesn't need to be a multiple of the
* fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
* @offs: Byte offset within @page at which the block to decrypt begins
* @lblk_num: Filesystem logical block number of the block, i.e. the 0-based
* number of the block within the file
*
* Decrypt a possibly-compressed filesystem block that is located in an
* arbitrary page, not necessarily in the original pagecache page. The @inode
* and @lblk_num must be specified, as they can't be determined from @page.
*
* Return: 0 on success; -errno on failure
*/
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
{
return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);
/*
* Validate dentries in encrypted directories to make sure we aren't potentially
@ -357,18 +397,6 @@ const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
void fscrypt_restore_control_page(struct page *page)
{
struct fscrypt_ctx *ctx;
ctx = (struct fscrypt_ctx *)page_private(page);
set_page_private(page, (unsigned long)NULL);
ClearPagePrivate(page);
unlock_page(page);
fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);
static void fscrypt_destroy(void)
{
struct fscrypt_ctx *pos, *n;


@ -12,7 +12,6 @@
*/
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"


@ -106,7 +106,6 @@ typedef enum {
} fscrypt_direction_t;
#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL 0x00000002
static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
u32 filenames_mode)
@ -133,14 +132,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
extern int fscrypt_do_page_crypto(const struct inode *inode,
fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page,
struct page *dest_page,
unsigned int len, unsigned int offs,
gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
gfp_t gfp_flags);
extern int fscrypt_crypt_block(const struct inode *inode,
fscrypt_direction_t rw, u64 lblk_num,
struct page *src_page, struct page *dest_page,
unsigned int len, unsigned int offs,
gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
extern const struct dentry_operations fscrypt_d_ops;
extern void __printf(3, 4) __cold


@ -4,7 +4,6 @@
* Encryption hooks for higher-level filesystem operations.
*/
#include <linux/ratelimit.h>
#include "fscrypt_private.h"
/**


@ -12,7 +12,6 @@
#include <keys/user-type.h>
#include <linux/hashtable.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>


@ -1254,8 +1254,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
err = fscrypt_decrypt_page(page->mapping->host, page,
PAGE_SIZE, 0, page->index);
err = fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0);
return err;
}
#endif
@ -4135,8 +4134,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
page, PAGE_SIZE, 0, page->index));
WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
page, PAGE_SIZE, 0));
}
}
if (ext4_should_journal_data(inode)) {


@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
#ifdef CONFIG_FS_ENCRYPTION
struct page *data_page = NULL;
#endif
struct page *bounce_page = NULL;
struct buffer_head *bh, *head;
unsigned bio_start = bvec->bv_offset;
unsigned bio_end = bio_start + bvec->bv_len;
@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
if (!page)
continue;
#ifdef CONFIG_FS_ENCRYPTION
if (!page->mapping) {
/* The bounce data pages are unmapped. */
data_page = page;
fscrypt_pullback_bio_page(&page, false);
if (fscrypt_is_bounce_page(page)) {
bounce_page = page;
page = fscrypt_pagecache_page(bounce_page);
}
#endif
if (bio->bi_status) {
SetPageError(page);
@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
local_irq_restore(flags);
if (!under_io) {
#ifdef CONFIG_FS_ENCRYPTION
if (data_page)
fscrypt_restore_control_page(data_page);
#endif
fscrypt_free_bounce_page(bounce_page);
end_page_writeback(page);
}
}
@ -418,7 +410,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
struct writeback_control *wbc,
bool keep_towrite)
{
struct page *data_page = NULL;
struct page *bounce_page = NULL;
struct inode *inode = page->mapping->host;
unsigned block_start;
struct buffer_head *bh, *head;
@ -485,11 +477,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
retry_encrypt:
if (!fscrypt_using_hardware_encryption(inode))
data_page = fscrypt_encrypt_page(inode,
page, PAGE_SIZE, 0,
page->index, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
bounce_page = fscrypt_encrypt_pagecache_blocks(page,
PAGE_SIZE, 0, gfp_flags);
if (IS_ERR(bounce_page)) {
ret = PTR_ERR(bounce_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
if (io->io_bio) {
ext4_io_submit(io);
@ -498,7 +489,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
data_page = NULL;
bounce_page = NULL;
goto out;
}
}
@ -507,10 +498,9 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
do {
if (!buffer_async_write(bh))
continue;
if (data_page)
if (bounce_page)
io->io_flags |= EXT4_IO_ENCRYPTED;
ret = io_submit_add_bh(io, inode,
data_page ? data_page : page, bh);
ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
if (ret) {
/*
* We only get here on ENOMEM. Not much else
@ -526,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
/* Error stopped previous loop? Clean up buffers... */
if (ret) {
out:
if (data_page)
fscrypt_restore_control_page(data_page);
fscrypt_free_bounce_page(bounce_page);
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
redirty_page_for_writepage(wbc, page);
do {


@ -196,7 +196,7 @@ static void f2fs_write_end_io(struct bio *bio)
continue;
}
fscrypt_pullback_bio_page(&page, true);
fscrypt_finalize_bounce_page(&page);
if (unlikely(bio->bi_status)) {
mapping_set_error(page->mapping, -EIO);
@ -399,10 +399,9 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,
bio_for_each_segment_all(bvec, bio, i) {
if (bvec->bv_page->mapping)
target = bvec->bv_page;
else
target = fscrypt_control_page(bvec->bv_page);
target = bvec->bv_page;
if (fscrypt_is_bounce_page(target))
target = fscrypt_pagecache_page(target);
if (inode && inode == target->mapping->host)
return true;
@ -1882,8 +1881,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
if (fscrypt_using_hardware_encryption(inode))
return 0;
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
PAGE_SIZE, 0,
gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
@ -2055,8 +2055,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
err = f2fs_inplace_write_data(fio);
if (err) {
if (f2fs_encrypted_file(inode))
fscrypt_pullback_bio_page(&fio->encrypted_page,
true);
fscrypt_finalize_bounce_page(&fio->encrypted_page);
if (PageWriteback(page))
end_page_writeback(page);
} else {


@ -2918,6 +2918,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
err = -EFSCORRUPTED;
brelse(bh);
continue;
}


@ -29,8 +29,8 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
{
struct ubifs_info *c = inode->i_sb->s_fs_info;
void *p = &dn->data;
struct page *ret;
unsigned int pad_len = round_up(in_len, UBIFS_CIPHER_BLOCK_SIZE);
int err;
ubifs_assert(c, pad_len <= *out_len);
dn->compr_size = cpu_to_le16(in_len);
@ -39,11 +39,11 @@ int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
if (pad_len != in_len)
memset(p + in_len, 0, pad_len - in_len);
ret = fscrypt_encrypt_page(inode, virt_to_page(&dn->data), pad_len,
offset_in_page(&dn->data), block, GFP_NOFS);
if (IS_ERR(ret)) {
ubifs_err(c, "fscrypt_encrypt_page failed: %ld", PTR_ERR(ret));
return PTR_ERR(ret);
err = fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
offset_in_page(p), block, GFP_NOFS);
if (err) {
ubifs_err(c, "fscrypt_encrypt_block_inplace() failed: %d", err);
return err;
}
*out_len = pad_len;
@ -64,10 +64,11 @@ int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
}
ubifs_assert(c, dlen <= UBIFS_BLOCK_SIZE);
err = fscrypt_decrypt_page(inode, virt_to_page(&dn->data), dlen,
offset_in_page(&dn->data), block);
err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),
dlen, offset_in_page(&dn->data),
block);
if (err) {
ubifs_err(c, "fscrypt_decrypt_page failed: %i", err);
ubifs_err(c, "fscrypt_decrypt_block_inplace() failed: %d", err);
return err;
}
*out_len = clen;


@ -15,12 +15,17 @@
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
/* emulate gcc's __SANITIZE_ADDRESS__ flag */
#if __has_feature(address_sanitizer)
#define __SANITIZE_ADDRESS__
#endif
/* __no_sanitize_address has been already defined compiler-gcc.h */
#undef __no_sanitize_address
#define __no_sanitize_address __attribute__((no_sanitize("address")))
#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
/* emulate gcc's __SANITIZE_ADDRESS__ flag */
#define __SANITIZE_ADDRESS__
#define __no_sanitize_address \
__attribute__((no_sanitize("address", "hwaddress")))
#else
#define __no_sanitize_address
#endif
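A usage sketch (an annotation, not part of this header; the function name is made up): the attribute excludes a single function from ASan/HWASan instrumentation, which is what helpers such as __no_kasan_or_inline build on:

static __no_sanitize_address void poke_unpoisoned(volatile char *p)
{
	*p = 0;	/* this store is not instrumented under KASAN */
}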
/*
* Not all versions of clang implement the type-generic versions


@ -183,13 +183,10 @@
#define KASAN_ABI_VERSION 3
#endif
#if GCC_VERSION >= 40902
/*
* Tell the compiler that address safety instrumentation (KASAN)
* should not be applied to that function.
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#if __has_attribute(__no_sanitize_address__)
#define __no_sanitize_address __attribute__((no_sanitize_address))
#else
#define __no_sanitize_address
#endif
#if GCC_VERSION >= 50100


@ -200,7 +200,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif


@ -69,16 +69,13 @@ struct fscrypt_operations {
bool (*is_encrypted)(struct inode *inode);
};
/* Decryption work */
struct fscrypt_ctx {
union {
struct {
struct page *bounce_page; /* Ciphertext page */
struct page *control_page; /* Original page */
} w;
struct {
struct bio *bio;
struct work_struct work;
} r;
};
struct list_head free_list; /* Free list */
};
u8 flags; /* Flags */
@ -112,18 +109,33 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
unsigned int, unsigned int,
u64, gfp_t);
extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int,
unsigned int, u64);
static inline struct page *fscrypt_control_page(struct page *page)
extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
gfp_t gfp_flags);
extern int fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
unsigned int offs, u64 lblk_num,
gfp_t gfp_flags);
extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
unsigned int offs);
extern int fscrypt_decrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
unsigned int offs, u64 lblk_num);
static inline bool fscrypt_is_bounce_page(struct page *page)
{
return ((struct fscrypt_ctx *)page_private(page))->w.control_page;
return page->mapping == NULL;
}
extern void fscrypt_restore_control_page(struct page *);
static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
{
return (struct page *)page_private(bounce_page);
}
extern void fscrypt_free_bounce_page(struct page *bounce_page);
/* policy.c */
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
@ -229,7 +241,6 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
extern void fscrypt_decrypt_bio(struct bio *);
extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
struct bio *bio);
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@ -284,32 +295,51 @@ static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
return;
}
static inline struct page *fscrypt_encrypt_page(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs,
u64 lblk_num, gfp_t gfp_flags)
static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline int fscrypt_decrypt_page(const struct inode *inode,
struct page *page,
unsigned int len, unsigned int offs,
u64 lblk_num)
static inline int fscrypt_encrypt_block_inplace(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs, u64 lblk_num,
gfp_t gfp_flags)
{
return -EOPNOTSUPP;
}
static inline struct page *fscrypt_control_page(struct page *page)
static inline int fscrypt_decrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs)
{
return -EOPNOTSUPP;
}
static inline int fscrypt_decrypt_block_inplace(const struct inode *inode,
struct page *page,
unsigned int len,
unsigned int offs, u64 lblk_num)
{
return -EOPNOTSUPP;
}
static inline bool fscrypt_is_bounce_page(struct page *page)
{
return false;
}
static inline struct page *fscrypt_pagecache_page(struct page *bounce_page)
{
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
static inline void fscrypt_restore_control_page(struct page *page)
static inline void fscrypt_free_bounce_page(struct page *bounce_page)
{
return;
}
/* policy.c */
@ -411,11 +441,6 @@ static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
{
}
static inline void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
return;
}
static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
sector_t pblk, unsigned int len)
{
@ -713,4 +738,16 @@ static inline bool fscrypt_mergeable_bio(struct bio *bio,
return true;
}
#endif
/* If *pagep is a bounce page, free it and set *pagep to the pagecache page */
static inline void fscrypt_finalize_bounce_page(struct page **pagep)
{
struct page *page = *pagep;
if (fscrypt_is_bounce_page(page)) {
*pagep = fscrypt_pagecache_page(page);
fscrypt_free_bounce_page(page);
}
}
#endif /* _LINUX_FSCRYPT_H */
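A minimal sketch of the matching write-completion side (an annotation; my_write_end_io() is a hypothetical name, and the pattern mirrors the f2fs_write_end_io change above):

static void my_write_end_io(struct page *page)
{
	/* Swap the pagecache page back in and free any bounce page. */
	fscrypt_finalize_bounce_page(&page);
	end_page_writeback(page);
}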


@ -2,7 +2,7 @@
#ifndef _LINUX_KASAN_CHECKS_H
#define _LINUX_KASAN_CHECKS_H
#ifdef CONFIG_KASAN
#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
void kasan_check_read(const volatile void *p, unsigned int size);
void kasan_check_write(const volatile void *p, unsigned int size);
#else


@ -14,13 +14,13 @@ struct task_struct;
#include <asm/kasan.h>
#include <asm/pgtable.h>
extern unsigned char kasan_zero_page[PAGE_SIZE];
extern pte_t kasan_zero_pte[PTRS_PER_PTE];
extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_zero_shadow(const void *shadow_start,
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
static inline void *kasan_mem_to_shadow(const void *addr)
@ -45,22 +45,24 @@ void kasan_free_pages(struct page *page, unsigned int order);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
slab_flags_t *flags);
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_poison_slab(struct page *page);
void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
const void *object);
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
gfp_t flags);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
void * __must_check kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags);
void * __must_check kasan_krealloc(const void *object, size_t new_size,
gfp_t flags);
void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
void * __must_check kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
struct kasan_cache {
@ -97,27 +99,40 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
void *object) {}
static inline void kasan_init_slab_obj(struct kmem_cache *cache,
const void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
return (void *)object;
}
static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void *kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags)
{
return ptr;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,
gfp_t flags) {}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
size_t size, gfp_t flags)
{
return (void *)object;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
gfp_t flags)
{
return (void *)object;
}
static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
{
return object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
unsigned long ip)
{
@ -140,4 +155,40 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
#endif /* CONFIG_KASAN */
#ifdef CONFIG_KASAN_GENERIC
#define KASAN_SHADOW_INIT 0
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
#else /* CONFIG_KASAN_GENERIC */
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
#endif /* CONFIG_KASAN_GENERIC */
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
void kasan_init_tags(void);
void *kasan_reset_tag(const void *addr);
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
#else /* CONFIG_KASAN_SW_TAGS */
static inline void kasan_init_tags(void) { }
static inline void *kasan_reset_tag(const void *addr)
{
return (void *)addr;
}
#endif /* CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */


@ -342,6 +342,7 @@ static inline bool memblock_bottom_up(void)
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
#define MEMBLOCK_ALLOC_KASAN 1
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,


@ -821,6 +821,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@ -831,6 +832,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
@ -853,6 +855,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
static inline enum zone_type page_zonenum(const struct page *page)
@ -1111,6 +1114,32 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
}
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_KASAN_SW_TAGS
static inline u8 page_kasan_tag(const struct page *page)
{
return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}
static inline void page_kasan_tag_reset(struct page *page)
{
page_kasan_tag_set(page, 0xff);
}
#else
static inline u8 page_kasan_tag(const struct page *page)
{
return 0xff;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
static inline void page_kasan_tag_reset(struct page *page) { }
#endif
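/*
 * Annotation (not part of the patch): the tag lives in spare page->flags
 * bits, so after page_kasan_tag_set(page, 0x2a), page_kasan_tag(page)
 * returns 0x2a. page_kasan_tag_reset() stores 0xff, the "match-all"
 * tag, which is also what the stub returns when CONFIG_KASAN_SW_TAGS
 * is disabled.
 */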
static inline struct zone *page_zone(const struct page *page)
{
return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];


@ -82,6 +82,16 @@
#define LAST_CPUPID_WIDTH 0
#endif
#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_TAG_WIDTH 8
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
> BITS_PER_LONG - NR_PAGEFLAGS
#error "KASAN: not enough bits in page flags for tag"
#endif
#else
#define KASAN_TAG_WIDTH 0
#endif
/*
* We are going to use the flags for the page to node mapping if its in
* there. This includes the case where there is no node, so it is implicit.


@ -415,7 +415,7 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc(s, flags);
kasan_kmalloc(s, ret, size, flags);
ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}
@ -426,7 +426,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
{
void *ret = kmem_cache_alloc_node(s, gfpflags, node);
kasan_kmalloc(s, ret, size, gfpflags);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
#endif /* CONFIG_TRACING */


@ -104,4 +104,17 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
return object;
}
/*
* We want to avoid an expensive divide: (offset / cache->size)
* Using the fact that size is a constant for a particular cache,
* we can replace (offset / cache->size) by
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct page *page, void *obj)
{
u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
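/*
 * Worked example (annotation, not part of the patch): with
 * reciprocal_buffer_size precomputed from cache->size by
 * reciprocal_value(), an object at byte offset 960 in a cache with
 * size == 192 yields reciprocal_divide(960, ...) == 960 / 192 == 5,
 * using a multiply-and-shift instead of a hardware divide.
 */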
#endif /* _LINUX_SLAB_DEF_H */


@ -271,6 +271,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifdef CONFIG_HARDENED_USERCOPY


@ -29,6 +29,9 @@ config CLANG_VERSION
config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
config TOOLS_SUPPORT_RELR
def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
config CONSTRUCTORS
bool
depends on !UML
@ -2141,3 +2144,5 @@ config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
# <asm/syscall_wrapper.h>.
config ARCH_HAS_SYSCALL_WRAPPER
def_bool n
source "init/Kconfig.gki"

init/Kconfig.gki (new file, 75 lines)

@ -0,0 +1,75 @@
config GKI_HIDDEN_DRM_CONFIGS
bool "Hidden DRM configs needed for GKI"
select DRM_KMS_HELPER if (HAS_IOMEM && DRM)
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
select DRM_MIPI_DSI
help
Dummy config option used to enable hidden DRM configs.
These are normally selected implicitly when including a
DRM module, but for GKI, the modules are built out-of-tree.
config GKI_HIDDEN_REGMAP_CONFIGS
bool "Hidden Regmap configs needed for GKI"
select REGMAP_IRQ
help
Dummy config option used to enable hidden regmap configs.
These are normally selected implicitly when a module
that relies on it is configured.
config GKI_HIDDEN_SND_SOC_CONFIGS
bool "Hidden SND_SOC configs needed for GKI"
select SND_SOC_GENERIC_DMAENGINE_PCM if (SND_SOC && SND)
help
Dummy config option used to enable hidden SND_SOC configs.
These are normally selected implicitly when a module
that relies on it is configured.
config GKI_HIDDEN_GPIO_CONFIGS
bool "Hidden GPIO configs needed for GKI"
select PINCTRL_SINGLE if (PINCTRL && OF && HAS_IOMEM)
select GPIO_PL061 if (HAS_IOMEM && ARM_AMBA && GPIOLIB)
help
Dummy config option used to enable hidden GPIO configs.
These are normally selected implicitly when a module
that relies on it is configured.
# LEGACY_WEXT_ALLCONFIG Discussed upstream, soundly rejected as a unique
# problem for GKI to solve. It should be noted that these extensions are
# in-effect deprecated and generally unsupported and we should pressure
# the SOC vendors to drop any modules that require these extensions.
config GKI_LEGACY_WEXT_ALLCONFIG
bool "Hidden wireless extension configs needed for GKI"
select WIRELESS_EXT
select WEXT_CORE
select WEXT_PROC
select WEXT_SPY
select WEXT_PRIV
help
Dummy config option used to enable all the hidden legacy wireless
extensions to the core wireless network functionality used by
add-in modules.
If you are not building a kernel to be used for a variety of
out-of-kernel built wireless modules, say N here.
# Atrocities needed for
# a) building GKI modules in separate tree, or
# b) building drivers that are not modularizable
#
# All of these should be reworked into an upstream solution
# if possible.
#
config GKI_HACKS_TO_FIX
bool "GKI Dummy config options"
select GKI_HIDDEN_DRM_CONFIGS
select GKI_HIDDEN_REGMAP_CONFIGS
select GKI_HIDDEN_SND_SOC_CONFIGS
select GKI_HIDDEN_GPIO_CONFIGS
select GKI_LEGACY_WEXT_ALLCONFIG
help
Dummy config option used to enable core functionality used by
modules that may not be selectable in this config.
Unless you are building a GKI kernel to be used with modules
built from a different config, say N here.


@ -496,7 +496,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
*/
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
int error, last_dev;
bool wakeup = false;
if (!sleep_state_supported(state))
@ -512,8 +512,11 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
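/*
 * Annotation, not in the patch: last_failed_dev points at the next
 * ring-buffer slot to fill, so the most recent failure sits one slot
 * back; e.g. with REC_FAILED_NUM == 2 and last_failed_dev == 1,
 * last_dev == (1 + 2 - 1) % 2 == 0.
 */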
pr_err("Some devices failed to suspend, or early wake event detected\n");
log_suspend_abort_reason("Some devices failed to suspend, or early wake event detected");
log_suspend_abort_reason("%s device failed to suspend, or early wake event detected",
suspend_stats.failed_devs[last_dev]);
goto Recover_platform;
}
suspend_test_finish("suspend devices");


@ -1167,7 +1167,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
if (!rcu_access_pointer(group->poll_kworker)) {
struct sched_param param = {
.sched_priority = MAX_RT_PRIO - 1,
.sched_priority = 1,
};
struct kthread_worker *kworker;
@ -1177,7 +1177,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
mutex_unlock(&group->trigger_lock);
return ERR_CAST(kworker);
}
sched_setscheduler(kworker->task, SCHED_FIFO, &param);
sched_setscheduler_nocheck(kworker->task, SCHED_FIFO, &param);
kthread_init_delayed_work(&group->poll_work,
psi_poll_work);
rcu_assign_pointer(group->poll_kworker, kworker);
@ -1248,7 +1248,15 @@ static void psi_trigger_destroy(struct kref *ref)
* deadlock while waiting for psi_poll_work to acquire trigger_lock
*/
if (kworker_to_destroy) {
/*
* After the RCU grace period has expired, the worker
* can no longer be found through group->poll_kworker.
* But it might have been already scheduled before
* that - deschedule it cleanly before destroying it.
*/
kthread_cancel_delayed_work_sync(&group->poll_work);
atomic_set(&group->poll_scheduled, 0);
kthread_destroy_worker(kworker_to_destroy);
}
kfree(t);


@ -243,7 +243,6 @@ config ENABLE_MUST_CHECK
config FRAME_WARN
int "Warn for stack frames larger than (needs gcc 4.4)"
range 0 8192
default 3072 if KASAN_EXTRA
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 1280 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)


@ -1,36 +1,82 @@
# This config refers to the generic KASAN mode.
config HAVE_ARCH_KASAN
bool
if HAVE_ARCH_KASAN
config HAVE_ARCH_KASAN_SW_TAGS
bool
config CC_HAS_KASAN_GENERIC
def_bool $(cc-option, -fsanitize=kernel-address)
config CC_HAS_KASAN_SW_TAGS
def_bool $(cc-option, -fsanitize=kernel-hwaddress)
config KASAN
bool "KASan: runtime memory debugger"
bool "KASAN: runtime memory debugger"
depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
help
Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
See Documentation/dev-tools/kasan.rst for details.
choice
prompt "KASAN mode"
depends on KASAN
default KASAN_GENERIC
help
KASAN has two modes: generic KASAN (similar to userspace ASan,
x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
software tag-based KASAN (a version based on software memory
tagging, arm64 only, similar to userspace HWASan, enabled with
CONFIG_KASAN_SW_TAGS).
Both generic and tag-based KASAN are strictly debugging features.
config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
Enables kernel address sanitizer - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
This is strictly a debugging feature and it requires a gcc version
of 4.9.2 or later. Detection of out of bounds accesses to stack or
global variables requires gcc 5.0 or later.
This feature consumes about 1/8 of available memory and brings about
~x3 performance slowdown.
Enables generic KASAN mode.
Supported in both GCC and Clang. With GCC it requires version 4.9.2
or later for basic support and version 5.0 or later for detection of
out-of-bounds accesses for stack and global variables and for inline
instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
version 3.7.0 or later and it doesn't support detection of
out-of-bounds accesses for global variables yet.
This mode consumes about 1/8th of available memory at kernel start
and introduces an overhead of ~x1.5 for the rest of the allocations.
The performance slowdown is ~x3.
For better error detection enable CONFIG_STACKTRACE.
Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
config KASAN_EXTRA
bool "KAsan: extra checks"
depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
This enables further checks in the kernel address sanitizer, for now
it only includes the address-use-after-scope check that can lead
to excessive kernel stack usage, frame size warnings and longer
compile time.
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more
Enables software tag-based KASAN mode.
This mode requires Top Byte Ignore support by the CPU and therefore
is only supported for arm64.
This mode requires Clang version 7.0.0 or later.
This mode consumes about 1/16th of available memory at kernel start
and introduces an overhead of ~20% for the rest of the allocations.
This mode may potentially introduce problems relating to pointer
casting and comparison, as it embeds tags into the top byte of each
pointer.
For better error detection enable CONFIG_STACKTRACE.
Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
(the resulting kernel does not boot).
endchoice
choice
prompt "Instrumentation type"
@ -53,16 +99,36 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
makes the kernel's .text size much bigger.
This requires a gcc version of 5.0 or later.
For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
default !(CLANG_VERSION < 90000)
depends on KASAN
help
The LLVM stack address sanitizer has a known problem that
causes excessive stack usage in a lot of functions, see
https://bugs.llvm.org/show_bug.cgi?id=38809
Disabling asan-stack makes it safe to run kernels built
with clang-8 with KASAN enabled, though it loses some of
the functionality.
This feature is always disabled when compile-testing with clang-8
or earlier to avoid cluttering the output in stack overflow
warnings, but clang-8 users can still enable it for builds without
CONFIG_COMPILE_TEST. On gcc and later clang versions it is
assumed to always be safe to use and enabled by default.
config KASAN_STACK
int
default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
default 0
config TEST_KASAN
tristate "Module for testing kasan for bug detection"
tristate "Module for testing KASAN for bug detection"
depends on m && KASAN
help
This is a test module doing various nasty things like
out of bounds accesses, use after free. It is useful for testing
kernel debugging features like kernel address sanitizer.
endif
kernel debugging features like KASAN.


@ -480,29 +480,6 @@ static noinline void __init copy_user_test(void)
kfree(kmem);
}
static noinline void __init use_after_scope_test(void)
{
volatile char *volatile p;
pr_info("use-after-scope on int\n");
{
int local = 0;
p = (char *)&local;
}
p[0] = 1;
p[3] = 1;
pr_info("use-after-scope on array\n");
{
char local[1024] = {0};
p = local;
}
p[0] = 1;
p[1023] = 1;
}
static noinline void __init kasan_alloca_oob_left(void)
{
volatile int i = 10;
@ -579,6 +556,73 @@ static noinline void __init kmem_cache_invalid_free(void)
kmem_cache_destroy(cache);
}
static noinline void __init kasan_memchr(void)
{
char *ptr;
size_t size = 24;
pr_info("out-of-bounds in memchr\n");
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
if (!ptr)
return;
memchr(ptr, '1', size + 1);
kfree(ptr);
}
static noinline void __init kasan_memcmp(void)
{
char *ptr;
size_t size = 24;
int arr[9];
pr_info("out-of-bounds in memcmp\n");
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
if (!ptr)
return;
memset(arr, 0, sizeof(arr));
memcmp(ptr, arr, size+1);
kfree(ptr);
}
static noinline void __init kasan_strings(void)
{
char *ptr;
size_t size = 24;
pr_info("use-after-free in strchr\n");
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
if (!ptr)
return;
kfree(ptr);
/*
* Try to cause only 1 invalid access (less spam in dmesg).
* For that we need ptr to point to a zeroed byte.
* Skip the metadata that could be stored in the freed object, so ptr
* will likely point to a zeroed byte.
*/
ptr += 16;
strchr(ptr, '1');
pr_info("use-after-free in strrchr\n");
strrchr(ptr, '1');
pr_info("use-after-free in strcmp\n");
strcmp(ptr, "2");
pr_info("use-after-free in strncmp\n");
strncmp(ptr, "2", 1);
pr_info("use-after-free in strlen\n");
strlen(ptr);
pr_info("use-after-free in strnlen\n");
strnlen(ptr, 1);
}
static int __init kmalloc_tests_init(void)
{
/*
@ -615,9 +659,11 @@ static int __init kmalloc_tests_init(void)
kasan_alloca_oob_right();
ksize_unpoisons_memory();
copy_user_test();
use_after_scope_test();
kmem_cache_double_free();
kmem_cache_invalid_free();
kasan_memchr();
kasan_memcmp();
kasan_strings();
kasan_restore_multi_shot(multishot);


@ -453,6 +453,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
unsigned long pfn = -1;
unsigned long start = 0;
unsigned long bitmap_maxno, bitmap_no, bitmap_count;
size_t i;
struct page *page = NULL;
int ret = -ENOMEM;
int retry_after_sleep = 0;
@ -545,6 +546,16 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
trace_cma_alloc(pfn, page, count, align);
/*
* CMA can allocate multiple page blocks, which results in different
* blocks being marked with different tags. Reset the tags to ignore
* those page blocks.
*/
if (page) {
for (i = 0; i < count; i++)
page_kasan_tag_reset(page + i);
}
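For context, page_kasan_tag_reset() itself is not part of this diff; under CONFIG_KASAN_SW_TAGS it stores the native 0xff tag in the page flags, roughly along these lines (a sketch of the helpers added elsewhere in this series, not guaranteed to match the exact mainline spelling):

	static inline void page_kasan_tag_set(struct page *page, u8 tag)
	{
		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
	}

	static inline void page_kasan_tag_reset(struct page *page)
	{
		page_kasan_tag_set(page, 0xff);	/* KASAN_TAG_KERNEL */
	}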
if (ret && !no_warn) {
pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
__func__, cma->name, cma->count, ret);


@ -1,11 +1,24 @@
# SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n
UBSAN_SANITIZE_kasan.o := n
UBSAN_SANITIZE_common.o := n
UBSAN_SANITIZE_generic.o := n
UBSAN_SANITIZE_generic_report.o := n
UBSAN_SANITIZE_tags.o := n
KCOV_INSTRUMENT := n
CFLAGS_REMOVE_kasan.o = -pg
CFLAGS_REMOVE_common.o = -pg
CFLAGS_REMOVE_generic.o = -pg
CFLAGS_REMOVE_generic_report.o = -pg
CFLAGS_REMOVE_tags.o = -pg
# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-y := kasan.o report.o kasan_init.o quarantine.o
CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
obj-$(CONFIG_KASAN) := common.o init.o report.o
obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o
obj-$(CONFIG_KASAN_SW_TAGS) += tags.o tags_report.o

File diff suppressed because it is too large.

mm/kasan/generic.c (new file, 325 lines)

@ -0,0 +1,325 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains core generic KASAN code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include "kasan.h"
#include "../slab.h"
/*
* All functions below are always inlined so the compiler can
* perform better optimizations in each of __asan_loadX/__asan_storeX
* depending on the memory access size X.
*/
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
if (unlikely(shadow_value)) {
s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
return unlikely(last_accessible_byte >= shadow_value);
}
return false;
}
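A worked example of the check above, with illustrative numbers: kmalloc(5) leaves the granule's shadow byte set to 5, meaning only the first 5 bytes are accessible.

	/* Access to byte 4 of the granule:
	 *   shadow_value = 5 (non-zero, so take the slow path);
	 *   last_accessible_byte = addr & 7 = 4; 4 >= 5 is false -> valid.
	 * Access to byte 6:
	 *   last_accessible_byte = 6; 6 >= 5 -> poisoned, report.
	 */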
static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
unsigned long size)
{
u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
/*
* Access crosses the 8-byte (shadow granule) boundary. Such an access
* maps into 2 shadow bytes, so we need to check them both.
*/
if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
return memory_is_poisoned_1(addr + size - 1);
}
static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
/* An unaligned 16-byte access maps into 3 shadow bytes. */
if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
return *shadow_addr || memory_is_poisoned_1(addr + 15);
return *shadow_addr;
}
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
size_t size)
{
while (size) {
if (unlikely(*start))
return (unsigned long)start;
start++;
size--;
}
return 0;
}
static __always_inline unsigned long memory_is_nonzero(const void *start,
const void *end)
{
unsigned int words;
unsigned long ret;
unsigned int prefix = (unsigned long)start % 8;
if (end - start <= 16)
return bytes_is_nonzero(start, end - start);
if (prefix) {
prefix = 8 - prefix;
ret = bytes_is_nonzero(start, prefix);
if (unlikely(ret))
return ret;
start += prefix;
}
words = (end - start) / 8;
while (words) {
if (unlikely(*(u64 *)start))
return bytes_is_nonzero(start, 8);
start += 8;
words--;
}
return bytes_is_nonzero(start, (end - start) % 8);
}
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
size_t size)
{
unsigned long ret;
ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
kasan_mem_to_shadow((void *)addr + size - 1) + 1);
if (unlikely(ret)) {
unsigned long last_byte = addr + size - 1;
s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
if (unlikely(ret != (unsigned long)last_shadow ||
((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
return true;
}
return false;
}
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
if (__builtin_constant_p(size)) {
switch (size) {
case 1:
return memory_is_poisoned_1(addr);
case 2:
case 4:
case 8:
return memory_is_poisoned_2_4_8(addr, size);
case 16:
return memory_is_poisoned_16(addr);
default:
BUILD_BUG();
}
}
return memory_is_poisoned_n(addr, size);
}
static __always_inline void check_memory_region_inline(unsigned long addr,
size_t size, bool write,
unsigned long ret_ip)
{
if (unlikely(size == 0))
return;
if (unlikely((void *)addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
kasan_report(addr, size, write, ret_ip);
return;
}
if (likely(!memory_is_poisoned(addr, size)))
return;
kasan_report(addr, size, write, ret_ip);
}
void check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip)
{
check_memory_region_inline(addr, size, write, ret_ip);
}
void kasan_cache_shrink(struct kmem_cache *cache)
{
quarantine_remove_cache(cache);
}
void kasan_cache_shutdown(struct kmem_cache *cache)
{
if (!__kmem_cache_empty(cache))
quarantine_remove_cache(cache);
}
static void register_global(struct kasan_global *global)
{
size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
kasan_unpoison_shadow(global->beg, global->size);
kasan_poison_shadow(global->beg + aligned_size,
global->size_with_redzone - aligned_size,
KASAN_GLOBAL_REDZONE);
}
void __asan_register_globals(struct kasan_global *globals, size_t size)
{
int i;
for (i = 0; i < size; i++)
register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);
void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
#define DEFINE_ASAN_LOAD_STORE(size) \
void __asan_load##size(unsigned long addr) \
{ \
check_memory_region_inline(addr, size, false, _RET_IP_);\
} \
EXPORT_SYMBOL(__asan_load##size); \
__alias(__asan_load##size) \
void __asan_load##size##_noabort(unsigned long); \
EXPORT_SYMBOL(__asan_load##size##_noabort); \
void __asan_store##size(unsigned long addr) \
{ \
check_memory_region_inline(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_store##size); \
__alias(__asan_store##size) \
void __asan_store##size##_noabort(unsigned long); \
EXPORT_SYMBOL(__asan_store##size##_noabort)
DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
void __asan_loadN(unsigned long addr, size_t size)
{
check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);
__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);
void __asan_storeN(unsigned long addr, size_t size)
{
check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);
__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);
/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
rounded_up_size;
size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
const void *left_redzone = (const void *)(addr -
KASAN_ALLOCA_REDZONE_SIZE);
const void *right_redzone = (const void *)(addr + rounded_up_size);
WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
size - rounded_down_size);
kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_LEFT);
kasan_poison_shadow(right_redzone,
padding_size + KASAN_ALLOCA_REDZONE_SIZE,
KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
if (unlikely(!stack_top || stack_top > stack_bottom))
return;
kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);
/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
void __asan_set_shadow_##byte(const void *addr, size_t size) \
{ \
__memset((void *)addr, 0x##byte, size); \
} \
EXPORT_SYMBOL(__asan_set_shadow_##byte)
DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

mm/kasan/generic_report.c (new file, 150 lines)

@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains generic KASAN specific error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <asm/sections.h>
#include "kasan.h"
#include "../slab.h"
void *find_first_bad_addr(void *addr, size_t size)
{
void *p = addr;
while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
p += KASAN_SHADOW_SCALE_SIZE;
return p;
}
static const char *get_shadow_bug_type(struct kasan_access_info *info)
{
const char *bug_type = "unknown-crash";
u8 *shadow_addr;
shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
/*
* If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
* at the next shadow byte to determine the type of the bad access.
*/
if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
shadow_addr++;
switch (*shadow_addr) {
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
/*
* In theory it's still possible to see these shadow values
* due to a data race in the kernel code.
*/
bug_type = "out-of-bounds";
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
bug_type = "slab-out-of-bounds";
break;
case KASAN_GLOBAL_REDZONE:
bug_type = "global-out-of-bounds";
break;
case KASAN_STACK_LEFT:
case KASAN_STACK_MID:
case KASAN_STACK_RIGHT:
case KASAN_STACK_PARTIAL:
bug_type = "stack-out-of-bounds";
break;
case KASAN_FREE_PAGE:
case KASAN_KMALLOC_FREE:
bug_type = "use-after-free";
break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
break;
}
return bug_type;
}
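As a concrete illustration (values assumed, not taken from a real report): kmalloc(35) encodes its shadow as 00 00 00 00 03 fc fc ..., so an out-of-bounds access at offset 38 lands on the partial 0x03 byte; 0x03 is in [0, 8), so the next shadow byte (0xfc, KASAN_KMALLOC_REDZONE) decides the type, and the report reads slab-out-of-bounds.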
static const char *get_wild_bug_type(struct kasan_access_info *info)
{
const char *bug_type = "unknown-crash";
if ((unsigned long)info->access_addr < PAGE_SIZE)
bug_type = "null-ptr-deref";
else if ((unsigned long)info->access_addr < TASK_SIZE)
bug_type = "user-memory-access";
else
bug_type = "wild-memory-access";
return bug_type;
}
const char *get_bug_type(struct kasan_access_info *info)
{
if (addr_has_shadow(info->access_addr))
return get_shadow_bug_type(info);
return get_wild_bug_type(info);
}
#define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, false, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)
#define DEFINE_ASAN_REPORT_STORE(size) \
void __asan_report_store##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)
DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);
void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);
void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains some kasan initialization code.
*
@ -31,54 +32,54 @@
* - Later it is reused as the zero shadow to cover large ranges of memory
* that are allowed to be accessed, but not handled by KASAN (vmalloc/vmemmap ...).
*/
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
return 0;
return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
return 0;
return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
return 0;
return false;
}
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
static inline bool kasan_pte_table(pmd_t pmd)
{
return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}
static inline bool kasan_zero_page_entry(pte_t pte)
static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}
static __init void *early_alloc(size_t size, int node)
@ -93,7 +94,8 @@ static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
pte_t *pte = pte_offset_kernel(pmd, addr);
pte_t zero_pte;
zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
PAGE_KERNEL);
zero_pte = pte_wrprotect(zero_pte);
while (addr + PAGE_SIZE <= end) {
@ -113,7 +115,8 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
next = pmd_addr_end(addr, end);
if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
pmd_populate_kernel(&init_mm, pmd,
lm_alias(kasan_early_shadow_pte));
continue;
}
@ -146,9 +149,11 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
pmd_t *pmd;
pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
pmd_populate_kernel(&init_mm, pmd,
lm_alias(kasan_early_shadow_pte));
continue;
}
@ -182,12 +187,14 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
pud_t *pud;
pmd_t *pmd;
p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
p4d_populate(&init_mm, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd,
lm_alias(kasan_zero_pte));
lm_alias(kasan_early_shadow_pte));
continue;
}
@ -210,13 +217,13 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
}
/**
* kasan_populate_zero_shadow - populate shadow memory region with
* kasan_zero_page
* kasan_populate_early_shadow - populate shadow memory region with
* kasan_early_shadow_page
* @shadow_start - start of the memory range to populate
* @shadow_end - end of the memory range to populate
*/
int __ref kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end)
int __ref kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end)
{
unsigned long addr = (unsigned long)shadow_start;
unsigned long end = (unsigned long)shadow_end;
@ -232,7 +239,7 @@ int __ref kasan_populate_zero_shadow(const void *shadow_start,
pmd_t *pmd;
/*
* kasan_zero_pud should be populated with pmds
* kasan_early_shadow_pud should be populated with pmds
* at this moment.
* [pud,pmd]_populate*() below needed only for
* 3,2 - level page tables where we don't have
@ -242,21 +249,25 @@ int __ref kasan_populate_zero_shadow(const void *shadow_start,
* The ifndef is required to avoid build breakage.
*
* With 5level-fixup.h, pgd_populate() is not nop and
* we reference kasan_zero_p4d. It's not defined
* we reference kasan_early_shadow_p4d. It's not defined
* unless 5-level paging enabled.
*
* The ifndef can be dropped once all KASAN-enabled
* architectures will switch to pgtable-nop4d.h.
*/
#ifndef __ARCH_HAS_5LEVEL_HACK
pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
pgd_populate(&init_mm, pgd,
lm_alias(kasan_early_shadow_p4d));
#endif
p4d = p4d_offset(pgd, addr);
p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
p4d_populate(&init_mm, p4d,
lm_alias(kasan_early_shadow_pud));
pud = pud_offset(p4d, addr);
pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
pud_populate(&init_mm, pud,
lm_alias(kasan_early_shadow_pmd));
pmd = pmd_offset(pud, addr);
pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
pmd_populate_kernel(&init_mm, pmd,
lm_alias(kasan_early_shadow_pte));
continue;
}
@ -351,7 +362,7 @@ static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
if (!pte_present(*pte))
continue;
if (WARN_ON(!kasan_zero_page_entry(*pte)))
if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
continue;
pte_clear(&init_mm, addr, pte);
}
@ -481,7 +492,7 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
return -EINVAL;
ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
ret = kasan_populate_early_shadow(shadow_start, shadow_end);
if (ret)
kasan_remove_zero_shadow(shadow_start,
size >> KASAN_SHADOW_SCALE_SHIFT);


@ -8,10 +8,22 @@
#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
#ifdef CONFIG_KASAN_GENERIC
#define KASAN_FREE_PAGE 0xFF /* page was freed */
#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
#else
#define KASAN_FREE_PAGE KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE KASAN_TAG_INVALID
#define KASAN_KMALLOC_REDZONE KASAN_TAG_INVALID
#define KASAN_KMALLOC_FREE KASAN_TAG_INVALID
#endif
#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
/*
@ -22,7 +34,6 @@
#define KASAN_STACK_MID 0xF2
#define KASAN_STACK_RIGHT 0xF3
#define KASAN_STACK_PARTIAL 0xF4
#define KASAN_USE_AFTER_SCOPE 0xF8
/*
* alloca redzone shadow values
@ -105,11 +116,25 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
<< KASAN_SHADOW_SCALE_SHIFT);
}
static inline bool addr_has_shadow(const void *addr)
{
return (addr >= kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}
void kasan_poison_shadow(const void *address, size_t size, u8 value);
void check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip);
void *find_first_bad_addr(void *addr, size_t size);
const char *get_bug_type(struct kasan_access_info *info);
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip);
#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB)
#if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
void quarantine_reduce(void);
void quarantine_remove_cache(struct kmem_cache *cache);
@ -120,6 +145,40 @@ static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
#endif
#ifdef CONFIG_KASAN_SW_TAGS
void print_tags(u8 addr_tag, const void *addr);
u8 random_tag(void);
#else
static inline void print_tags(u8 addr_tag, const void *addr) { }
static inline u8 random_tag(void)
{
return 0;
}
#endif
#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
return addr;
}
#endif
#ifndef arch_kasan_reset_tag
#define arch_kasan_reset_tag(addr) ((void *)(addr))
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr) 0
#endif
#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
#define get_tag(addr) arch_kasan_get_tag(addr)
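On arm64 these hooks resolve to top-byte manipulation. A sketch of what arch_kasan_set_tag amounts to there, assuming the tag lives in bits 63:56 under Top Byte Ignore (the mainline macro differs slightly in spelling):

	#define __tag_shifted(tag)	((u64)(tag) << 56)
	#define __tag_set(addr, tag) \
		((__typeof__(addr))(((u64)(addr) & ~__tag_shifted(0xff)) | \
				    __tag_shifted(tag)))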
/*
* Exported functions for interfaces called from assembly or from generated
* code. Declarations are here to avoid warnings about missing declarations.
@ -130,8 +189,6 @@ void __asan_unregister_globals(struct kasan_global *globals, size_t size);
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);
void __asan_handle_no_return(void);
void __asan_poison_stack_memory(const void *addr, size_t size);
void __asan_unpoison_stack_memory(const void *addr, size_t size);
void __asan_alloca_poison(unsigned long addr, size_t size);
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* KASAN quarantine.
*
@ -103,7 +104,7 @@ static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);
static DEFINE_RAW_SPINLOCK(quarantine_lock);
DEFINE_STATIC_SRCU(remove_cache_srcu);
/* Maximum size of the global queue. */
@ -190,7 +191,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
qlist_move_all(q, &temp);
spin_lock(&quarantine_lock);
raw_spin_lock(&quarantine_lock);
WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
if (global_quarantine[quarantine_tail].bytes >=
@ -203,7 +204,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
if (new_tail != quarantine_head)
quarantine_tail = new_tail;
}
spin_unlock(&quarantine_lock);
raw_spin_unlock(&quarantine_lock);
}
local_irq_restore(flags);
@ -230,7 +231,7 @@ void quarantine_reduce(void)
* expected case).
*/
srcu_idx = srcu_read_lock(&remove_cache_srcu);
spin_lock_irqsave(&quarantine_lock, flags);
raw_spin_lock_irqsave(&quarantine_lock, flags);
/*
* Update quarantine size in case of hotplug. Allocate a fraction of
@ -254,7 +255,7 @@ void quarantine_reduce(void)
quarantine_head = 0;
}
spin_unlock_irqrestore(&quarantine_lock, flags);
raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, NULL);
srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@ -310,17 +311,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
*/
on_each_cpu(per_cpu_remove_cache, cache, 1);
spin_lock_irqsave(&quarantine_lock, flags);
raw_spin_lock_irqsave(&quarantine_lock, flags);
for (i = 0; i < QUARANTINE_BATCHES; i++) {
if (qlist_empty(&global_quarantine[i]))
continue;
qlist_move_cache(&global_quarantine[i], &to_free, cache);
/* Scanning whole quarantine can take a while. */
spin_unlock_irqrestore(&quarantine_lock, flags);
raw_spin_unlock_irqrestore(&quarantine_lock, flags);
cond_resched();
spin_lock_irqsave(&quarantine_lock, flags);
raw_spin_lock_irqsave(&quarantine_lock, flags);
}
spin_unlock_irqrestore(&quarantine_lock, flags);
raw_spin_unlock_irqrestore(&quarantine_lock, flags);
qlist_free_all(&to_free, cache);


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains error reporting code.
* This file contains common generic and tag-based KASAN error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
@ -39,129 +40,43 @@
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2
static const void *find_first_bad_addr(const void *addr, size_t size)
static unsigned long kasan_flags;
#define KASAN_BIT_REPORTED 0
#define KASAN_BIT_MULTI_SHOT 1
bool kasan_save_enable_multi_shot(void)
{
u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr);
const void *first_bad_addr = addr;
while (!shadow_val && first_bad_addr < addr + size) {
first_bad_addr += KASAN_SHADOW_SCALE_SIZE;
shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr);
}
return first_bad_addr;
return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
static bool addr_has_shadow(struct kasan_access_info *info)
void kasan_restore_multi_shot(bool enabled)
{
return (info->access_addr >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
if (!enabled)
clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
static const char *get_shadow_bug_type(struct kasan_access_info *info)
static int __init kasan_set_multi_shot(char *str)
{
const char *bug_type = "unknown-crash";
u8 *shadow_addr;
info->first_bad_addr = find_first_bad_addr(info->access_addr,
info->access_size);
shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
/*
* If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
* at the next shadow byte to determine the type of the bad access.
*/
if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
shadow_addr++;
switch (*shadow_addr) {
case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
/*
* In theory it's still possible to see these shadow values
* due to a data race in the kernel code.
*/
bug_type = "out-of-bounds";
break;
case KASAN_PAGE_REDZONE:
case KASAN_KMALLOC_REDZONE:
bug_type = "slab-out-of-bounds";
break;
case KASAN_GLOBAL_REDZONE:
bug_type = "global-out-of-bounds";
break;
case KASAN_STACK_LEFT:
case KASAN_STACK_MID:
case KASAN_STACK_RIGHT:
case KASAN_STACK_PARTIAL:
bug_type = "stack-out-of-bounds";
break;
case KASAN_FREE_PAGE:
case KASAN_KMALLOC_FREE:
bug_type = "use-after-free";
break;
case KASAN_USE_AFTER_SCOPE:
bug_type = "use-after-scope";
break;
case KASAN_ALLOCA_LEFT:
case KASAN_ALLOCA_RIGHT:
bug_type = "alloca-out-of-bounds";
break;
}
return bug_type;
}
static const char *get_wild_bug_type(struct kasan_access_info *info)
{
const char *bug_type = "unknown-crash";
if ((unsigned long)info->access_addr < PAGE_SIZE)
bug_type = "null-ptr-deref";
else if ((unsigned long)info->access_addr < TASK_SIZE)
bug_type = "user-memory-access";
else
bug_type = "wild-memory-access";
return bug_type;
}
static const char *get_bug_type(struct kasan_access_info *info)
{
if (addr_has_shadow(info))
return get_shadow_bug_type(info);
return get_wild_bug_type(info);
set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
static void print_error_description(struct kasan_access_info *info)
{
const char *bug_type = get_bug_type(info);
pr_err("BUG: KASAN: %s in %pS\n",
bug_type, (void *)info->ip);
get_bug_type(info), (void *)info->ip);
pr_err("%s of size %zu at addr %px by task %s/%d\n",
info->is_write ? "Write" : "Read", info->access_size,
info->access_addr, current->comm, task_pid_nr(current));
}
static inline bool kernel_or_module_addr(const void *addr)
{
if (addr >= (void *)_stext && addr < (void *)_end)
return true;
if (is_module_address((unsigned long)addr))
return true;
return false;
}
static inline bool init_task_stack_addr(const void *addr)
{
return addr >= (void *)&init_thread_union.stack &&
(addr <= (void *)&init_thread_union.stack +
sizeof(init_thread_union.stack));
}
static DEFINE_SPINLOCK(report_lock);
static void kasan_start_report(unsigned long *flags)
static void start_report(unsigned long *flags)
{
/*
* Make sure we don't end up in a loop.
@ -171,7 +86,7 @@ static void kasan_start_report(unsigned long *flags)
pr_err("==================================================================\n");
}
static void kasan_end_report(unsigned long *flags)
static void end_report(unsigned long *flags)
{
pr_err("==================================================================\n");
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
@ -249,6 +164,22 @@ static void describe_object(struct kmem_cache *cache, void *object,
describe_object_addr(cache, object, addr);
}
static inline bool kernel_or_module_addr(const void *addr)
{
if (addr >= (void *)_stext && addr < (void *)_end)
return true;
if (is_module_address((unsigned long)addr))
return true;
return false;
}
static inline bool init_task_stack_addr(const void *addr)
{
return addr >= (void *)&init_thread_union.stack &&
(addr <= (void *)&init_thread_union.stack +
sizeof(init_thread_union.stack));
}
static void print_address_description(void *addr)
{
struct page *page = addr_to_page(addr);
@ -326,65 +257,7 @@ static void print_shadow_for_address(const void *addr)
}
}
void kasan_report_invalid_free(void *object, unsigned long ip)
{
unsigned long flags;
kasan_start_report(&flags);
pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
pr_err("\n");
print_address_description(object);
pr_err("\n");
print_shadow_for_address(object);
kasan_end_report(&flags);
}
static void kasan_report_error(struct kasan_access_info *info)
{
unsigned long flags;
kasan_start_report(&flags);
print_error_description(info);
pr_err("\n");
if (!addr_has_shadow(info)) {
dump_stack();
} else {
print_address_description((void *)info->access_addr);
pr_err("\n");
print_shadow_for_address(info->first_bad_addr);
}
kasan_end_report(&flags);
}
static unsigned long kasan_flags;
#define KASAN_BIT_REPORTED 0
#define KASAN_BIT_MULTI_SHOT 1
bool kasan_save_enable_multi_shot(void)
{
return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
void kasan_restore_multi_shot(bool enabled)
{
if (!enabled)
clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
static int __init kasan_set_multi_shot(char *str)
{
set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);
static inline bool kasan_report_enabled(void)
static bool report_enabled(void)
{
if (current->kasan_depth)
return false;
@ -393,59 +266,59 @@ static inline bool kasan_report_enabled(void)
return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}
void kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip)
void kasan_report_invalid_free(void *object, unsigned long ip)
{
unsigned long flags;
start_report(&flags);
pr_err("BUG: KASAN: double-free or invalid-free in %pS\n", (void *)ip);
print_tags(get_tag(object), reset_tag(object));
object = reset_tag(object);
pr_err("\n");
print_address_description(object);
pr_err("\n");
print_shadow_for_address(object);
end_report(&flags);
}
void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
struct kasan_access_info info;
void *tagged_addr;
void *untagged_addr;
unsigned long flags;
if (likely(!kasan_report_enabled()))
if (likely(!report_enabled()))
return;
disable_trace_on_warning();
info.access_addr = (void *)addr;
info.first_bad_addr = (void *)addr;
tagged_addr = (void *)addr;
untagged_addr = reset_tag(tagged_addr);
info.access_addr = tagged_addr;
if (addr_has_shadow(untagged_addr))
info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
else
info.first_bad_addr = untagged_addr;
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
kasan_report_error(&info);
start_report(&flags);
print_error_description(&info);
if (addr_has_shadow(untagged_addr))
print_tags(get_tag(tagged_addr), info.first_bad_addr);
pr_err("\n");
if (addr_has_shadow(untagged_addr)) {
print_address_description(untagged_addr);
pr_err("\n");
print_shadow_for_address(info.first_bad_addr);
} else {
dump_stack();
}
end_report(&flags);
}
#define DEFINE_ASAN_REPORT_LOAD(size) \
void __asan_report_load##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, false, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)
#define DEFINE_ASAN_REPORT_STORE(size) \
void __asan_report_store##size##_noabort(unsigned long addr) \
{ \
kasan_report(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)
DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);
void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);
void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);

mm/kasan/tags.c (new file, 161 lines)

@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains core tag-based KASAN code.
*
* Copyright (c) 2018 Google, Inc.
* Author: Andrey Konovalov <andreyknvl@google.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include "kasan.h"
#include "../slab.h"
static DEFINE_PER_CPU(u32, prng_state);
void kasan_init_tags(void)
{
int cpu;
for_each_possible_cpu(cpu)
per_cpu(prng_state, cpu) = (u32)get_cycles();
}
/*
* If a preemption happens between this_cpu_read and this_cpu_write, the only
* side effect is that we'll give a few objects allocated in different contexts
* the same tag. Since tag-based KASAN is meant to be used as a probabilistic
* bug-detection debug feature, this doesn't have a significant negative impact.
*
* Ideally the tags would use strong randomness to prevent any attempts to
* predict them during explicit exploit attempts. But strong randomness is
* expensive, and we made an intentional trade-off to use a PRNG. This
* non-atomic RMW sequence in fact has a positive effect, since interrupts
* that randomly skew the PRNG at unpredictable points only do good.
*/
u8 random_tag(void)
{
u32 state = this_cpu_read(prng_state);
state = 1664525 * state + 1013904223;
this_cpu_write(prng_state, state);
return (u8)(state % (KASAN_TAG_MAX + 1));
}
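The multiplier/increment pair is the well-known Numerical Recipes LCG. A minimal user-space harness (seed value assumed; the kernel seeds from get_cycles() per CPU) shows the kind of tag stream this produces:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t state = 12345;	/* stand-in for the get_cycles() seed */

		for (int i = 0; i < 4; i++) {
			state = 1664525 * state + 1013904223;	/* same LCG as above */
			/* KASAN_TAG_MAX is 0xFD, so generated tags never
			 * collide with the reserved 0xFE/0xFF values. */
			printf("tag %d: 0x%02x\n", i, (uint8_t)(state % (0xFD + 1)));
		}
		return 0;
	}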
void *kasan_reset_tag(const void *addr)
{
return reset_tag(addr);
}
void check_memory_region(unsigned long addr, size_t size, bool write,
unsigned long ret_ip)
{
u8 tag;
u8 *shadow_first, *shadow_last, *shadow;
void *untagged_addr;
if (unlikely(size == 0))
return;
tag = get_tag((const void *)addr);
/*
* Ignore accesses for pointers tagged with 0xff (native kernel
* pointer tag) to suppress false positives caused by kmap.
*
* Some kernel code was written to account for archs that don't keep
* high memory mapped all the time, but rather map and unmap particular
* pages when needed. Instead of storing a pointer to the kernel memory,
* this code saves the address of the page structure and the offset within
* that page for later use. Those pages are then mapped and unmapped
* with kmap/kunmap when necessary, and virt_to_page is used to get the
* virtual address of the page. For arm64 (which keeps high memory
* mapped all the time), kmap is turned into a page_address call.
* The issue is that with the virt_to_page + page_address sequence
* the top byte value of the original pointer gets lost (it gets
* set to KASAN_TAG_KERNEL (0xFF)).
*/
if (tag == KASAN_TAG_KERNEL)
return;
untagged_addr = reset_tag((const void *)addr);
if (unlikely(untagged_addr <
kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
kasan_report(addr, size, write, ret_ip);
return;
}
shadow_first = kasan_mem_to_shadow(untagged_addr);
shadow_last = kasan_mem_to_shadow(untagged_addr + size - 1);
for (shadow = shadow_first; shadow <= shadow_last; shadow++) {
if (*shadow != tag) {
kasan_report(addr, size, write, ret_ip);
return;
}
}
}
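The kmap scenario described in the comment above boils down to this pattern (an illustrative fragment, not code from this diff):

	/* Save: the tag in the pointer's top byte is not captured. */
	struct page *page = virt_to_page(tagged_ptr);
	unsigned long offset = offset_in_page(tagged_ptr);

	/* Restore: on arm64, kmap() of an always-mapped page is just
	 * page_address(), which returns a native 0xff-tagged pointer. */
	void *p = page_address(page) + offset;

Accesses through p carry KASAN_TAG_KERNEL instead of the allocation's tag, which is why 0xff-tagged accesses are whitelisted above.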
#define DEFINE_HWASAN_LOAD_STORE(size) \
void __hwasan_load##size##_noabort(unsigned long addr) \
{ \
check_memory_region(addr, size, false, _RET_IP_); \
} \
EXPORT_SYMBOL(__hwasan_load##size##_noabort); \
void __hwasan_store##size##_noabort(unsigned long addr) \
{ \
check_memory_region(addr, size, true, _RET_IP_); \
} \
EXPORT_SYMBOL(__hwasan_store##size##_noabort)
DEFINE_HWASAN_LOAD_STORE(1);
DEFINE_HWASAN_LOAD_STORE(2);
DEFINE_HWASAN_LOAD_STORE(4);
DEFINE_HWASAN_LOAD_STORE(8);
DEFINE_HWASAN_LOAD_STORE(16);
void __hwasan_loadN_noabort(unsigned long addr, unsigned long size)
{
check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_loadN_noabort);
void __hwasan_storeN_noabort(unsigned long addr, unsigned long size)
{
check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__hwasan_storeN_noabort);
void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size)
{
kasan_poison_shadow((void *)addr, size, tag);
}
EXPORT_SYMBOL(__hwasan_tag_memory);

mm/kasan/tags_report.c (new file, 58 lines)

@ -0,0 +1,58 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This file contains tag-based KASAN specific error reporting code.
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
*
* Some code borrowed from https://github.com/xairy/kasan-prototype by
* Andrey Konovalov <andreyknvl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <asm/sections.h>
#include "kasan.h"
#include "../slab.h"
const char *get_bug_type(struct kasan_access_info *info)
{
return "invalid-access";
}
void *find_first_bad_addr(void *addr, size_t size)
{
u8 tag = get_tag(addr);
void *p = reset_tag(addr);
void *end = p + size;
while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
p += KASAN_SHADOW_SCALE_SIZE;
return p;
}
void print_tags(u8 addr_tag, const void *addr)
{
u8 *shadow = (u8 *)kasan_mem_to_shadow(addr);
pr_err("Pointer tag: [%02x], memory tag: [%02x]\n", addr_tag, *shadow);
}
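In a report this renders as, for example (values illustrative), "Pointer tag: [3a], memory tag: [fe]": a 0x3a-tagged pointer touching memory whose shadow holds KASAN_TAG_INVALID (0xFE).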


@ -253,7 +253,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
phys_addr_t kernel_end, ret;
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
end == MEMBLOCK_ALLOC_KASAN)
end = memblock.current_limit;
/* avoid allocating the first page */
@ -1410,13 +1411,15 @@ static void * __init memblock_virt_alloc_internal(
done:
ptr = phys_to_virt(alloc);
/*
* The min_count is set to 0 so that bootmem allocated blocks
* are never reported as leaks. This is because many of these blocks
* are only referred via the physical address which is not
* looked up by kmemleak.
*/
kmemleak_alloc(ptr, size, 0, 0);
/* Skip kmemleak for kasan_init() due to high volume. */
if (max_addr != MEMBLOCK_ALLOC_KASAN)
/*
* The min_count is set to 0 so that bootmem allocated
* blocks are never reported as leaks. This is because many
* of these blocks are only referred via the physical
* address which is not looked up by kmemleak.
*/
kmemleak_alloc(ptr, size, 0, 0);
return ptr;
}


@ -1349,6 +1349,7 @@ static void __meminit __init_single_page(struct page *page, unsigned long pfn,
init_page_count(page);
page_mapcount_reset(page);
page_cpupid_reset_last(page);
page_kasan_tag_reset(page);
INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL


@ -406,19 +406,6 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
return page->s_mem + cache->size * idx;
}
/*
* We want to avoid an expensive divide : (offset / cache->size)
* Using the fact that size is a constant for a particular cache,
* we can replace (offset / cache->size) by
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct page *page, void *obj)
{
u32 offset = (obj - page->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
#define BOOT_CPUCACHE_ENTRIES 1
/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
@ -2392,6 +2379,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep,
/* Slab management obj is off-slab. */
freelist = kmem_cache_alloc_node(cachep->freelist_cache,
local_flags, nodeid);
freelist = kasan_reset_tag(freelist);
if (!freelist)
return NULL;
} else {
@ -2587,7 +2575,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
for (i = 0; i < cachep->num; i++) {
objp = index_to_obj(cachep, page, i);
kasan_init_slab_obj(cachep, objp);
objp = kasan_init_slab_obj(cachep, objp);
/* constructor could break poison info */
if (DEBUG == 0 && cachep->ctor) {
@ -2705,6 +2693,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
offset *= cachep->colour_off;
/*
* Call kasan_poison_slab() before calling alloc_slabmgmt(), so
* page_address() in the latter returns a non-tagged pointer,
* as it should be for slab pages.
*/
kasan_poison_slab(page);
/* Get slab management. */
freelist = alloc_slabmgmt(cachep, page, offset,
local_flags & ~GFP_CONSTRAINT_MASK, page_node);
@ -2713,7 +2708,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep,
slab_map_pages(cachep, page, freelist);
kasan_poison_slab(page);
cache_init_objs(cachep, page);
if (gfpflags_allow_blocking(local_flags))
@ -3566,7 +3560,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = slab_alloc(cachep, flags, _RET_IP_);
kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@ -3632,7 +3625,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
ret = slab_alloc(cachep, flags, _RET_IP_);
kasan_kmalloc(cachep, ret, size, flags);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
return ret;
@ -3656,7 +3649,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
kasan_slab_alloc(cachep, ret, flags);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
flags, nodeid);
@ -3675,7 +3667,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
kasan_kmalloc(cachep, ret, size, flags);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
flags, nodeid);
@ -3696,7 +3688,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = kmem_cache_alloc_node_trace(cachep, flags, node, size);
kasan_kmalloc(cachep, ret, size, flags);
ret = kasan_kmalloc(cachep, ret, size, flags);
return ret;
}
@ -3734,7 +3726,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
return cachep;
ret = slab_alloc(cachep, flags, caller);
kasan_kmalloc(cachep, ret, size, flags);
ret = kasan_kmalloc(cachep, ret, size, flags);
trace_kmalloc(caller, ret,
size, cachep->size, flags);
@ -4439,6 +4431,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
unsigned int objnr;
unsigned long offset;
ptr = kasan_reset_tag(ptr);
/* Find and validate object. */
cachep = page->slab_cache;
objnr = obj_to_index(cachep, page, (void *)ptr);


@ -438,11 +438,9 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
flags &= gfp_allowed_mask;
for (i = 0; i < size; i++) {
void *object = p[i];
kmemleak_alloc_recursive(object, s->object_size, 1,
p[i] = kasan_slab_alloc(s, p[i], flags);
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
kasan_slab_alloc(s, object, flags);
}
if (memcg_kmem_enabled())


@ -1182,8 +1182,8 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
flags |= __GFP_COMP;
page = alloc_pages(flags, order);
ret = page ? page_address(page) : NULL;
ret = kasan_kmalloc_large(ret, size, flags);
kmemleak_alloc(ret, size, 1, flags);
kasan_kmalloc_large(ret, size, flags);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
@ -1461,7 +1461,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
ks = ksize(p);
if (ks >= new_size) {
kasan_krealloc((void *)p, new_size, flags);
p = kasan_krealloc((void *)p, new_size, flags);
return (void *)p;
}
@ -1513,7 +1513,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
}
ret = __do_krealloc(p, new_size, flags);
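/*
 * Under CONFIG_KASAN_SW_TAGS, __do_krealloc() may return the same
 * underlying object with a new tag; compare untagged pointers so a
 * still-live object is not handed to kfree() below.
 */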
if (ret && p != ret)
if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
kfree(p);
return ret;


@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
unsigned long ptr_addr)
{
#ifdef CONFIG_SLAB_FREELIST_HARDENED
return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr);
/*
* When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged.
* Normally, this doesn't cause any issues, as both set_freepointer()
* and get_freepointer() are called with a pointer with the same tag.
* However, there are some issues with CONFIG_SLUB_DEBUG code. For
* example, when __free_slab() iterates over objects in a cache, it
* passes untagged pointers to check_object(). check_object() in turn
* calls get_freepointer() with an untagged pointer, which causes the
* freepointer to be restored incorrectly.
*/
return (void *)((unsigned long)ptr ^ s->random ^
(unsigned long)kasan_reset_tag((void *)ptr_addr));
#else
return ptr;
#endif
@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
__p < (__addr) + (__objects) * (__s)->size; \
__p += (__s)->size)
#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
for (__p = fixup_red_left(__s, __addr), __idx = 1; \
__idx <= __objects; \
__p += (__s)->size, __idx++)
/* Determine object index from a given position */
static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr)
{
return (p - addr) / s->size;
return (kasan_reset_tag(p) - addr) / s->size;
}
static inline unsigned int order_objects(unsigned int order, unsigned int size)
@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
base = page_address(page);
object = kasan_reset_tag(object);
object = restore_red_left(s, object);
if (object < base || object >= base + page->objects * s->size ||
(object - base) % s->size) {
@ -1087,6 +1094,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
init_tracking(s, object);
}
static void setup_page_debug(struct kmem_cache *s, void *addr, int order)
{
if (!(s->flags & SLAB_POISON))
return;
metadata_access_enable();
memset(addr, POISON_INUSE, PAGE_SIZE << order);
metadata_access_disable();
}
static inline int alloc_consistency_checks(struct kmem_cache *s,
struct page *page,
void *object, unsigned long addr)
@ -1308,6 +1325,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
struct page *page, void *object) {}
static inline void setup_page_debug(struct kmem_cache *s,
void *addr, int order) {}
static inline int alloc_debug_processing(struct kmem_cache *s,
struct page *page, void *object, unsigned long addr) { return 0; }
@ -1350,10 +1369,11 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
*/
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
ptr = kasan_kmalloc_large(ptr, size, flags);
kmemleak_alloc(ptr, size, 1, flags);
kasan_kmalloc_large(ptr, size, flags);
return ptr;
}
static __always_inline void kfree_hook(void *x)
@ -1453,16 +1473,17 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
#endif
}
static void setup_object(struct kmem_cache *s, struct page *page,
static void *setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
kasan_init_slab_obj(s, object);
object = kasan_init_slab_obj(s, object);
if (unlikely(s->ctor)) {
kasan_unpoison_object_data(s, object);
s->ctor(object);
kasan_poison_object_data(s, object);
}
return object;
}
/*
@ -1570,16 +1591,16 @@ static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
/* First entry is used as the base of the freelist */
cur = next_freelist_entry(s, page, &pos, start, page_limit,
freelist_count);
cur = setup_object(s, page, cur);
page->freelist = cur;
for (idx = 1; idx < page->objects; idx++) {
setup_object(s, page, cur);
next = next_freelist_entry(s, page, &pos, start, page_limit,
freelist_count);
next = setup_object(s, page, next);
set_freepointer(s, cur, next);
cur = next;
}
setup_object(s, page, cur);
set_freepointer(s, cur, NULL);
return true;
@ -1601,7 +1622,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
struct kmem_cache_order_objects oo = s->oo;
gfp_t alloc_gfp;
void *start, *p;
void *start, *p, *next;
int idx, order;
bool shuffle;
@ -1642,24 +1663,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (page_is_pfmemalloc(page))
SetPageSlabPfmemalloc(page);
kasan_poison_slab(page);
start = page_address(page);
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << order);
kasan_poison_slab(page);
setup_page_debug(s, start, order);
shuffle = shuffle_freelist(s, page);
if (!shuffle) {
for_each_object_idx(p, idx, s, start, page->objects) {
setup_object(s, page, p);
if (likely(idx < page->objects))
set_freepointer(s, p, p + s->size);
else
set_freepointer(s, p, NULL);
start = fixup_red_left(s, start);
start = setup_object(s, page, start);
page->freelist = start;
for (idx = 0, p = start; idx < page->objects - 1; idx++) {
next = p + s->size;
next = setup_object(s, page, next);
set_freepointer(s, p, next);
p = next;
}
page->freelist = fixup_red_left(s, start);
set_freepointer(s, p, NULL);
}
page->inuse = page->objects;
@ -2777,7 +2799,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
kasan_kmalloc(s, ret, size, gfpflags);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@ -2805,7 +2827,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
kasan_kmalloc(s, ret, size, gfpflags);
ret = kasan_kmalloc(s, ret, size, gfpflags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@ -3001,7 +3023,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
do_slab_free(s, page, head, tail, cnt, addr);
}
#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_GENERIC
void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
{
do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
@ -3373,16 +3395,16 @@ static void early_kmem_cache_node_alloc(int node)
n = page->freelist;
BUG_ON(!n);
page->freelist = get_freepointer(kmem_cache_node, n);
page->inuse = 1;
page->frozen = 0;
kmem_cache_node->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
n = kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
GFP_KERNEL);
page->freelist = get_freepointer(kmem_cache_node, n);
page->inuse = 1;
page->frozen = 0;
kmem_cache_node->node[node] = n;
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
@ -3798,7 +3820,7 @@ void *__kmalloc(size_t size, gfp_t flags)
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
kasan_kmalloc(s, ret, size, flags);
ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}
@ -3815,8 +3837,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
if (page)
ptr = page_address(page);
kmalloc_large_node_hook(ptr, size, flags);
return ptr;
return kmalloc_large_node_hook(ptr, size, flags);
}
void *__kmalloc_node(size_t size, gfp_t flags, int node)
@ -3843,7 +3864,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
kasan_kmalloc(s, ret, size, flags);
ret = kasan_kmalloc(s, ret, size, flags);
return ret;
}
@ -3866,6 +3887,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
unsigned int offset;
size_t object_size;
ptr = kasan_reset_tag(ptr);
/* Find object and usable object size. */
s = page->slab_cache;


@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ifdef CONFIG_KASAN
ifdef CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_INLINE
call_threshold := 10000
else
@ -12,36 +13,39 @@ CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)
ifneq ($(CONFIG_COMPILE_TEST),y)
$(warning Cannot use CONFIG_KASAN: \
-fsanitize=kernel-address is not supported by compiler)
endif
else
# -fasan-shadow-offset fails without -fsanitize
CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
# -fasan-shadow-offset fails without -fsanitize
CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
-fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
$(call cc-option, -fsanitize=kernel-address \
-mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),)
CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
else
# Now add all the compiler specific options that are valid standalone
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
$(call cc-param,asan-stack=1) \
$(call cc-param,asan-use-after-scope=1) \
$(call cc-param,asan-instrument-allocas=1)
endif
ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),)
CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
else
# Now add all the compiler specific options that are valid standalone
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
$(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
$(call cc-param,asan-instrument-allocas=1)
endif
ifdef CONFIG_KASAN_EXTRA
CFLAGS_KASAN += $(call cc-option, -fsanitize-address-use-after-scope)
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
ifdef CONFIG_KASAN_INLINE
instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
else
instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
endif
CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
-mllvm -hwasan-instrument-stack=0 \
$(instrumentation_flags)
endif # CONFIG_KASAN_SW_TAGS
ifdef CONFIG_KASAN
CFLAGS_KASAN_NOSANITIZE := -fno-builtin
endif

scripts/tools-support-relr.sh (new executable file, 16 lines)

@ -0,0 +1,16 @@
#!/bin/sh -eu
# SPDX-License-Identifier: GPL-2.0
tmp_file=$(mktemp)
trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
"$OBJCOPY" -O binary $tmp_file $tmp_file.bin


@ -3,10 +3,6 @@ menu "Kernel hardening options"
config GCC_PLUGIN_STRUCTLEAK
bool "Force initialization of variables containing userspace addresses"
# Currently STRUCTLEAK inserts initialization out of live scope of
# variables from KASAN point of view. This leads to KASAN false
# positive reports. Prohibit this combination for now.
depends on !KASAN_EXTRA
help
This plugin zero-initializes any structures containing a
__user attribute. This can prevent some classes of information