Merge "Merge android-4.19.78 (75337a6
) into msm-4.19"
This commit is contained in:
commit
1f946a6129
168 changed files with 107431 additions and 112957 deletions
|
@@ -1638,6 +1638,15 @@

	initrd=		[BOOT] Specify the location of the initial ramdisk

	init_on_alloc=	[MM] Fill newly allocated pages and heap objects with
			zeroes.
			Format: 0 | 1
			Default set by CONFIG_INIT_ON_ALLOC_DEFAULT_ON.

	init_on_free=	[MM] Fill freed pages and heap objects with zeroes.
			Format: 0 | 1
			Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON.

	init_pkru=	[x86] Specify the default memory protection keys rights
			register contents for all processes. 0x55555554 by
			default (disallow access to all but pkey 0). Can
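As a usage illustration (not part of the diff), a hardening-oriented build
would enable both of the new parameters on the boot command line:

	init_on_alloc=1 init_on_free=1

with the CONFIG_INIT_ON_*_DEFAULT_ON options supplying the defaults when the
parameters are absent.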
@@ -4,15 +4,25 @@ The Kernel Address Sanitizer (KASAN)
Overview
--------

KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It provides
a fast and comprehensive solution for finding use-after-free and out-of-bounds
bugs.
KernelAddressSANitizer (KASAN) is a dynamic memory error detector designed to
find out-of-bound and use-after-free bugs. KASAN has two modes: generic KASAN
(similar to userspace ASan) and software tag-based KASAN (similar to userspace
HWASan).

KASAN uses compile-time instrumentation for checking every memory access,
therefore you will need a GCC version 4.9.2 or later. GCC 5.0 or later is
required for detection of out-of-bounds accesses to stack or global variables.
KASAN uses compile-time instrumentation to insert validity checks before every
memory access, and therefore requires a compiler version that supports that.

Currently KASAN is supported only for the x86_64 and arm64 architectures.
Generic KASAN is supported in both GCC and Clang. With GCC it requires version
4.9.2 or later for basic support and version 5.0 or later for detection of
out-of-bounds accesses for stack and global variables and for inline
instrumentation mode (see the Usage section). With Clang it requires version
7.0.0 or later and it doesn't support detection of out-of-bounds accesses for
global variables yet.

Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later.

Currently generic KASAN is supported for the x86_64, arm64, xtensa and s390
architectures, and tag-based KASAN is supported only for arm64.

Usage
-----
@@ -21,12 +31,14 @@ To enable KASAN configure kernel with::

	CONFIG_KASAN = y

and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline and
inline are compiler instrumentation types. The former produces smaller binary
the latter is 1.1 - 2 times faster. Inline instrumentation requires a GCC
version 5.0 or later.
and choose between CONFIG_KASAN_GENERIC (to enable generic KASAN) and
CONFIG_KASAN_SW_TAGS (to enable software tag-based KASAN).

KASAN works with both SLUB and SLAB memory allocators.
You also need to choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE.
Outline and inline are compiler instrumentation types. The former produces
smaller binary while the latter is 1.1 - 2 times faster.

Both KASAN modes work with both SLUB and SLAB memory allocators.
For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.

To disable instrumentation for specific files or directories, add a line
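The example Makefile line itself is unchanged context that falls outside this
hunk; in the upstream document it reads roughly as follows (reproduced here
for readability):

	KASAN_SANITIZE_main.o := n	# for a single file
	KASAN_SANITIZE := n		# for all files in one directory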
@@ -43,85 +55,85 @@ similar to the following to the respective kernel Makefile:
Error reports
~~~~~~~~~~~~~

A typical out of bounds access report looks like this::
A typical out-of-bounds access generic KASAN report looks like this::

    ==================================================================
    BUG: AddressSanitizer: out of bounds access in kmalloc_oob_right+0x65/0x75 [test_kasan] at addr ffff8800693bc5d3
    Write of size 1 by task modprobe/1689
    =============================================================================
    BUG kmalloc-128 (Not tainted): kasan error
    -----------------------------------------------------------------------------
    BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
    Write of size 1 at addr ffff8801f44ec37b by task insmod/2760

    Disabling lock debugging due to kernel taint
    INFO: Allocated in kmalloc_oob_right+0x3d/0x75 [test_kasan] age=0 cpu=0 pid=1689
     __slab_alloc+0x4b4/0x4f0
     kmem_cache_alloc_trace+0x10b/0x190
     kmalloc_oob_right+0x3d/0x75 [test_kasan]
     init_module+0x9/0x47 [test_kasan]
     do_one_initcall+0x99/0x200
     load_module+0x2cb3/0x3b20
     SyS_finit_module+0x76/0x80
     system_call_fastpath+0x12/0x17
    INFO: Slab 0xffffea0001a4ef00 objects=17 used=7 fp=0xffff8800693bd728 flags=0x100000000004080
    INFO: Object 0xffff8800693bc558 @offset=1368 fp=0xffff8800693bc720

    Bytes b4 ffff8800693bc548: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a  ........ZZZZZZZZ
    Object ffff8800693bc558: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc568: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc578: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc588: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc598: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc5a8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc5b8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b  kkkkkkkkkkkkkkkk
    Object ffff8800693bc5c8: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5  kkkkkkkkkkkkkkk.
    Redzone ffff8800693bc5d8: cc cc cc cc cc cc cc cc  ........
    Padding ffff8800693bc718: 5a 5a 5a 5a 5a 5a 5a 5a  ZZZZZZZZ
    CPU: 0 PID: 1689 Comm: modprobe Tainted: G B 3.18.0-rc1-mm1+ #98
    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
     ffff8800693bc000 0000000000000000 ffff8800693bc558 ffff88006923bb78
     ffffffff81cc68ae 00000000000000f3 ffff88006d407600 ffff88006923bba8
     ffffffff811fd848 ffff88006d407600 ffffea0001a4ef00 ffff8800693bc558
    CPU: 1 PID: 2760 Comm: insmod Not tainted 4.19.0-rc3+ #698
    Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1 04/01/2014
    Call Trace:
    [<ffffffff81cc68ae>] dump_stack+0x46/0x58
    [<ffffffff811fd848>] print_trailer+0xf8/0x160
    [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
    [<ffffffff811ff0f5>] object_err+0x35/0x40
    [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
    [<ffffffff8120b9fa>] kasan_report_error+0x38a/0x3f0
    [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
    [<ffffffff8120b344>] ? kasan_unpoison_shadow+0x14/0x40
    [<ffffffff8120a79f>] ? kasan_poison_shadow+0x2f/0x40
    [<ffffffffa00026a7>] ? kmem_cache_oob+0xc3/0xc3 [test_kasan]
    [<ffffffff8120a995>] __asan_store1+0x75/0xb0
    [<ffffffffa0002601>] ? kmem_cache_oob+0x1d/0xc3 [test_kasan]
    [<ffffffffa0002065>] ? kmalloc_oob_right+0x65/0x75 [test_kasan]
    [<ffffffffa0002065>] kmalloc_oob_right+0x65/0x75 [test_kasan]
    [<ffffffffa00026b0>] init_module+0x9/0x47 [test_kasan]
    [<ffffffff810002d9>] do_one_initcall+0x99/0x200
    [<ffffffff811e4e5c>] ? __vunmap+0xec/0x160
    [<ffffffff81114f63>] load_module+0x2cb3/0x3b20
    [<ffffffff8110fd70>] ? m_show+0x240/0x240
    [<ffffffff81115f06>] SyS_finit_module+0x76/0x80
    [<ffffffff81cd3129>] system_call_fastpath+0x12/0x17
     dump_stack+0x94/0xd8
     print_address_description+0x73/0x280
     kasan_report+0x144/0x187
     __asan_report_store1_noabort+0x17/0x20
     kmalloc_oob_right+0xa8/0xbc [test_kasan]
     kmalloc_tests_init+0x16/0x700 [test_kasan]
     do_one_initcall+0xa5/0x3ae
     do_init_module+0x1b6/0x547
     load_module+0x75df/0x8070
     __do_sys_init_module+0x1c6/0x200
     __x64_sys_init_module+0x6e/0xb0
     do_syscall_64+0x9f/0x2c0
     entry_SYSCALL_64_after_hwframe+0x44/0xa9
    RIP: 0033:0x7f96443109da
    RSP: 002b:00007ffcf0b51b08 EFLAGS: 00000202 ORIG_RAX: 00000000000000af
    RAX: ffffffffffffffda RBX: 000055dc3ee521a0 RCX: 00007f96443109da
    RDX: 00007f96445cff88 RSI: 0000000000057a50 RDI: 00007f9644992000
    RBP: 000055dc3ee510b0 R08: 0000000000000003 R09: 0000000000000000
    R10: 00007f964430cd0a R11: 0000000000000202 R12: 00007f96445cff88
    R13: 000055dc3ee51090 R14: 0000000000000000 R15: 0000000000000000

    Allocated by task 2760:
     save_stack+0x43/0xd0
     kasan_kmalloc+0xa7/0xd0
     kmem_cache_alloc_trace+0xe1/0x1b0
     kmalloc_oob_right+0x56/0xbc [test_kasan]
     kmalloc_tests_init+0x16/0x700 [test_kasan]
     do_one_initcall+0xa5/0x3ae
     do_init_module+0x1b6/0x547
     load_module+0x75df/0x8070
     __do_sys_init_module+0x1c6/0x200
     __x64_sys_init_module+0x6e/0xb0
     do_syscall_64+0x9f/0x2c0
     entry_SYSCALL_64_after_hwframe+0x44/0xa9

    Freed by task 815:
     save_stack+0x43/0xd0
     __kasan_slab_free+0x135/0x190
     kasan_slab_free+0xe/0x10
     kfree+0x93/0x1a0
     umh_complete+0x6a/0xa0
     call_usermodehelper_exec_async+0x4c3/0x640
     ret_from_fork+0x35/0x40

    The buggy address belongs to the object at ffff8801f44ec300
     which belongs to the cache kmalloc-128 of size 128
    The buggy address is located 123 bytes inside of
     128-byte region [ffff8801f44ec300, ffff8801f44ec380)
    The buggy address belongs to the page:
    page:ffffea0007d13b00 count:1 mapcount:0 mapping:ffff8801f7001640 index:0x0
    flags: 0x200000000000100(slab)
    raw: 0200000000000100 ffffea0007d11dc0 0000001a0000001a ffff8801f7001640
    raw: 0000000000000000 0000000080150015 00000001ffffffff 0000000000000000
    page dumped because: kasan: bad access detected

    Memory state around the buggy address:
     ffff8800693bc300: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
     ffff8800693bc380: fc fc 00 00 00 00 00 00 00 00 00 00 00 00 00 fc
     ffff8800693bc400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
     ffff8800693bc480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
     ffff8800693bc500: fc fc fc fc fc fc fc fc fc fc fc 00 00 00 00 00
    >ffff8800693bc580: 00 00 00 00 00 00 00 00 00 00 03 fc fc fc fc fc
                                                     ^
     ffff8800693bc600: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
     ffff8800693bc680: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
     ffff8800693bc700: fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb fb
     ffff8800693bc780: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
     ffff8800693bc800: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
     ffff8801f44ec200: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
     ffff8801f44ec280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
    >ffff8801f44ec300: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 03
                                                                    ^
     ffff8801f44ec380: fc fc fc fc fc fc fc fc fb fb fb fb fb fb fb fb
     ffff8801f44ec400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
    ==================================================================

The header of the report discribe what kind of bug happened and what kind of
access caused it. It's followed by the description of the accessed slub object
(see 'SLUB Debug output' section in Documentation/vm/slub.rst for details) and
the description of the accessed memory page.
The header of the report provides a short summary of what kind of bug happened
and what kind of access caused it. It's followed by a stack trace of the bad
access, a stack trace of where the accessed memory was allocated (in case bad
access happens on a slab object), and a stack trace of where the object was
freed (in case of a use-after-free bug report). Next comes a description of
the accessed slab object and information about the accessed memory page.

In the last section the report shows memory state around the accessed address.
Reading this part requires some understanding of how KASAN works.
@@ -138,18 +150,24 @@ inaccessible memory like redzones or freed memory (see mm/kasan/kasan.h).
In the report above the arrows point to the shadow byte 03, which means that
the accessed address is partially accessible.

For tag-based KASAN this last report section shows the memory tags around the
accessed address (see Implementation details section).


Implementation details
----------------------

Generic KASAN
~~~~~~~~~~~~~

From a high level, our approach to memory error detection is similar to that
of kmemcheck: use shadow memory to record whether each byte of memory is safe
to access, and use compile-time instrumentation to check shadow memory on each
memory access.
to access, and use compile-time instrumentation to insert checks of shadow
memory on each memory access.

AddressSanitizer dedicates 1/8 of kernel memory to its shadow memory
(e.g. 16TB to cover 128TB on x86_64) and uses direct mapping with a scale and
offset to translate a memory address to its corresponding shadow address.
Generic KASAN dedicates 1/8th of kernel memory to its shadow memory (e.g. 16TB
to cover 128TB on x86_64) and uses direct mapping with a scale and offset to
translate a memory address to its corresponding shadow address.

Here is the function which translates an address to its corresponding shadow
address::
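The translation function itself is unchanged context elided by the hunk
boundary. For reference, it is essentially this helper (a sketch of the
mm/kasan implementation):

	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}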
@@ -162,12 +180,38 @@ address::

where ``KASAN_SHADOW_SCALE_SHIFT = 3``.

Compile-time instrumentation used for checking memory accesses. Compiler inserts
function calls (__asan_load*(addr), __asan_store*(addr)) before each memory
access of size 1, 2, 4, 8 or 16. These functions check whether memory access is
valid or not by checking corresponding shadow memory.
Compile-time instrumentation is used to insert memory access checks. Compiler
inserts function calls (__asan_load*(addr), __asan_store*(addr)) before each
memory access of size 1, 2, 4, 8 or 16. These functions check whether memory
access is valid or not by checking corresponding shadow memory.

GCC 5.0 has possibility to perform inline instrumentation. Instead of making
function calls GCC directly inserts the code to check the shadow memory.
This option significantly enlarges kernel but it gives x1.1-x2 performance
boost over outline instrumented kernel.
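To illustrate the outline mode described above, a one-byte store is compiled
as if a callback preceded it (a conceptual sketch, not actual compiler
output):

	extern void __asan_store1(unsigned long addr);

	void set_flag(char *p)
	{
		__asan_store1((unsigned long)p);  /* check shadow for [p, p+1) */
		*p = 1;                           /* the original store */
	}

Inline mode emits the equivalent shadow-memory comparison directly instead of
the call, which is why it is larger but faster.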
Software tag-based KASAN
~~~~~~~~~~~~~~~~~~~~~~~~

Tag-based KASAN uses the Top Byte Ignore (TBI) feature of modern arm64 CPUs to
store a pointer tag in the top byte of kernel pointers. Like generic KASAN it
uses shadow memory to store memory tags associated with each 16-byte memory
cell (therefore it dedicates 1/16th of the kernel memory for shadow memory).

On each memory allocation tag-based KASAN generates a random tag, tags the
allocated memory with this tag, and embeds this tag into the returned pointer.
Software tag-based KASAN uses compile-time instrumentation to insert checks
before each memory access. These checks make sure that tag of the memory that
is being accessed is equal to tag of the pointer that is used to access this
memory. In case of a tag mismatch tag-based KASAN prints a bug report.

Software tag-based KASAN also has two instrumentation modes (outline, that
emits callbacks to check memory accesses; and inline, that performs the shadow
memory checks inline). With outline instrumentation mode, a bug report is
simply printed from the function that performs the access check. With inline
instrumentation a brk instruction is emitted by the compiler, and a dedicated
brk handler is used to print bug reports.

A potential expansion of this mode is a hardware tag-based mode, which would
use hardware memory tagging support instead of compiler instrumentation and
manual shadow memory manipulation.
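Conceptually, the inserted check compares the pointer's top-byte tag with the
tag stored in shadow memory for the 16-byte granule being accessed. A sketch
(the helper name is illustrative, and kasan_mem_to_shadow() is assumed to
return the shadow byte holding the granule's tag):

	static bool kasan_tag_matches(const void *ptr)
	{
		unsigned char ptr_tag = (unsigned long long)ptr >> 56;
		unsigned char mem_tag = *(unsigned char *)kasan_mem_to_shadow(ptr);

		return ptr_tag == mem_tag;  /* mismatch => bug report */
	}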
@@ -191,7 +191,9 @@ Currently, the following pairs of encryption modes are supported:
If unsure, you should use the (AES-256-XTS, AES-256-CTS-CBC) pair.

AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.
crypto accelerators such as CAAM or CESA that do not support XTS. To
use AES-128-CBC, CONFIG_CRYPTO_SHA256 (or another SHA-256
implementation) must be enabled so that ESSIV can be used.

Adiantum is a (primarily) stream cipher-based mode that is fast even
on CPUs without dedicated crypto instructions. It's also a true
@@ -647,3 +649,42 @@ Note that the precise way that filenames are presented to userspace
without the key is subject to change in the future. It is only meant
as a way to temporarily present valid filenames so that commands like
``rm -r`` work as expected on encrypted directories.

Tests
=====

To test fscrypt, use xfstests, which is Linux's de facto standard
filesystem test suite. First, run all the tests in the "encrypt"
group on the relevant filesystem(s). For example, to test ext4 and
f2fs encryption using `kvm-xfstests
<https://github.com/tytso/xfstests-bld/blob/master/Documentation/kvm-quickstart.md>`_::

    kvm-xfstests -c ext4,f2fs -g encrypt

UBIFS encryption can also be tested this way, but it should be done in
a separate command, and it takes some time for kvm-xfstests to set up
emulated UBI volumes::

    kvm-xfstests -c ubifs -g encrypt

No tests should fail. However, tests that use non-default encryption
modes (e.g. generic/549 and generic/550) will be skipped if the needed
algorithms were not built into the kernel's crypto API. Also, tests
that access the raw block device (e.g. generic/399, generic/548,
generic/549, generic/550) will be skipped on UBIFS.

Besides running the "encrypt" group tests, for ext4 and f2fs it's also
possible to run most xfstests with the "test_dummy_encryption" mount
option. This option causes all new files to be automatically
encrypted with a dummy key, without having to make any API calls.
This tests the encrypted I/O paths more thoroughly. To do this with
kvm-xfstests, use the "encrypt" filesystem configuration::

    kvm-xfstests -c ext4/encrypt,f2fs/encrypt -g auto

Because this runs many more tests than "-g encrypt" does, it takes
much longer to run; so also consider using `gce-xfstests
<https://github.com/tytso/xfstests-bld/blob/master/Documentation/gce-xfstests.md>`_
instead of kvm-xfstests::

    gce-xfstests -c ext4/encrypt,f2fs/encrypt -g auto
Makefile | 4
@@ -956,6 +956,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
LDFLAGS_vmlinux	+= $(call ld-option, -X,)
endif

ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux	+= --pack-dyn-relocs=relr
endif

# insure the checker run with the right endianness
CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
abi_gki_aarch64.xml | 212150
File diff suppressed because it is too large
arch/Kconfig | 14
@@ -939,6 +939,20 @@ config PANIC_ON_REFCOUNT_ERROR
	  or potential memory-leaks) with an object associated with that
	  reference counter.

# Select if the architecture has support for applying RELR relocations.
config ARCH_HAS_RELR
	bool

config RELR
	bool "Use RELR relocation packing"
	depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
	default y
	help
	  Store the kernel's dynamic relocations in the RELR relocation packing
	  format. Requires a compatible linker (LLD supports this feature), as
	  well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
	  are compatible).

source "kernel/gcov/Kconfig"

source "scripts/gcc-plugins/Kconfig"
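TOOLS_SUPPORT_RELR is satisfied by an LLVM-based toolchain; an illustrative
invocation (not part of the diff) would be:

	make LD=ld.lld NM=llvm-nm OBJCOPY=llvm-objcopy

matching the linker, NM and OBJCOPY requirements named in the help text.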
@@ -22,7 +22,6 @@ config KVM
	bool "Kernel-based Virtual Machine (KVM) support"
	depends on MMU && OF
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select ARM_GIC
	select ARM_GIC_V3
	select ARM_GIC_V3_ITS
@@ -414,3 +414,5 @@
397	common	statx			sys_statx
398	common	rseq			sys_rseq
399	common	io_pgetevents		sys_io_pgetevents
424	common	pidfd_send_signal	sys_pidfd_send_signal
434	common	pidfd_open		sys_pidfd_open
@@ -107,6 +107,7 @@ config ARM64
	select HAVE_ARCH_HUGE_VMAP
	select HAVE_ARCH_JUMP_LABEL
	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
	select HAVE_ARCH_KGDB
	select HAVE_ARCH_MMAP_RND_BITS
	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -1303,6 +1304,7 @@ config ARM64_MODULE_PLTS

config RELOCATABLE
	bool
	select ARCH_HAS_RELR
	help
	  This builds the kernel as a Position Independent Executable (PIE),
	  which retains all relocation metadata required to relocate the
@@ -97,10 +97,19 @@ else
TEXT_OFFSET := 0x00080000
endif

ifeq ($(CONFIG_KASAN_SW_TAGS), y)
KASAN_SHADOW_SCALE_SHIFT := 4
else
KASAN_SHADOW_SCALE_SHIFT := 3
endif

KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)

# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#				 - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
	(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
	+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
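Worked example of the computation above, using the full formula from the
comment (the subtracted (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) term falls
outside this hunk): with CONFIG_ARM64_VA_BITS=48 and KASAN_SHADOW_SCALE_SHIFT=3,

	0xffff0000 + (1 << 13) - (1 << 29) = 0xdfff2000

so KASAN_SHADOW_OFFSET evaluates to 0xdfff200000000000.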
@@ -1,455 +0,0 @@
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_SGETMASK_SYSCALL=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
# CONFIG_RSEQ is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_PROFILING=y
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_HZ_100=y
CONFIG_SECCOMP=y
CONFIG_PARAVIRT=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_EFI is not set
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_ENERGY_MODEL=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=y
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
CONFIG_ARM_DT_BL_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_SCMI_CPUFREQ=y
CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
CONFIG_KPROBES=y
CONFIG_LTO_CLANG=y
CONFIG_CFI_CLANG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_KSM=y
CONFIG_ZSMALLOC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_L2TP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_BPF_JIT=y
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_MAC80211=y
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_RFKILL=y
# CONFIG_UEVENT_HELPER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_OF_UNITTEST=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_MQ_DEFAULT is not set
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_AVB=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
# CONFIG_ETHERNET is not set
CONFIG_PHYLIB=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
# CONFIG_USB_NET_CDCETHER is not set
# CONFIG_USB_NET_CDC_NCM is not set
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
# CONFIG_WLAN_VENDOR_BROADCOM is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
# CONFIG_WLAN_VENDOR_INTEL is not set
# CONFIG_WLAN_VENDOR_INTERSIL is not set
# CONFIG_WLAN_VENDOR_MARVELL is not set
# CONFIG_WLAN_VENDOR_MEDIATEK is not set
# CONFIG_WLAN_VENDOR_RALINK is not set
# CONFIG_WLAN_VENDOR_REALTEK is not set
# CONFIG_WLAN_VENDOR_RSI is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_HW_RANDOM_CAVIUM is not set
# CONFIG_DEVPORT is not set
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_MEDIA_SUPPORT=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_HRTIMER=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_INTEL8X0=y
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_PRODIKEYS=y
CONFIG_HID_CYPRESS=y
CONFIG_HID_DRAGONRISE=y
CONFIG_DRAGONRISE_FF=y
CONFIG_HID_EMS_FF=y
CONFIG_HID_ELECOM=y
CONFIG_HID_EZKEY=y
CONFIG_HID_HOLTEK=y
CONFIG_HID_KEYTOUCH=y
CONFIG_HID_KYE=y
CONFIG_HID_UCLOGIC=y
CONFIG_HID_WALTOP=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
CONFIG_HID_KENSINGTON=y
CONFIG_HID_LCPOWER=y
CONFIG_HID_LOGITECH=y
CONFIG_HID_LOGITECH_DJ=y
CONFIG_LOGITECH_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
CONFIG_LOGIG940_FF=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NTRIG=y
CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_PANTHERLORD_FF=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_PICOLCD=y
CONFIG_HID_PRIMAX=y
CONFIG_HID_ROCCAT=y
CONFIG_HID_SAITEK=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_GREENASIA_FF=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_HID_TIVO=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_WACOM=y
CONFIG_HID_WIIMOTE=y
CONFIG_HID_ZEROPLUS=y
CONFIG_HID_ZYDACRON=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
# CONFIG_MMC_BLOCK is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_SYSTOHC is not set
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_COMMON_CLK_XGENE is not set
CONFIG_MAILBOX=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_LEGACY_ENERGY_MODEL_DT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
# CONFIG_DNOTIFY is not set
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_RAM=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_LSM_MMAP_MIN_ADDR=65536
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SOFTLOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ENABLE_DEFAULT_TRACERS=y
# CONFIG_RUNTIME_TESTING_MENU is not set
@@ -17,6 +17,7 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@@ -35,10 +36,13 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_PROFILING=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y
CONFIG_PCI=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_KIRIN=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=256
CONFIG_NR_CPUS=32
CONFIG_SECCOMP=y
CONFIG_PARAVIRT=y
CONFIG_ARMV8_DEPRECATED=y
@@ -46,7 +50,7 @@ CONFIG_SWP_EMULATION=y
CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_EFI is not set
# CONFIG_DMI is not set
CONFIG_COMPAT=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
@@ -65,14 +69,15 @@ CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ARM_SCPI_PROTOCOL=y
# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_AES_ARM64=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_GKI_HACKS_TO_FIX=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
CONFIG_CMA_AREAS=16
@@ -171,6 +176,7 @@ CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_TIPC=y
CONFIG_L2TP=y
CONFIG_BRIDGE=y
CONFIG_NET_SCHED=y
@@ -263,13 +269,10 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_OF_PLATFORM=m
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
@@ -279,9 +282,9 @@ CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_SPI=y
CONFIG_SPMI=y
CONFIG_PINCTRL=y
CONFIG_PINCTRL_AMD=y
CONFIG_POWER_AVS=y
CONFIG_POWER_RESET_HISI=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_THERMAL_GOV_USER_SPACE=y
@@ -290,7 +293,6 @@ CONFIG_DEVFREQ_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_MFD_ACT8945A=y
CONFIG_MFD_SYSCON=y
CONFIG_REGULATOR=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
@@ -326,11 +328,11 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
CONFIG_MMC=m
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI=m
CONFIG_MMC_SDHCI_PLTFM=m
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
@@ -340,6 +342,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=y
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_INPUT=y
@@ -349,17 +352,21 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_COMMON_CLK_SCPI=y
# CONFIG_COMMON_CLK_XGENE is not set
CONFIG_HWSPINLOCK=y
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_RPMH=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_GOV_PASSIVE=y
CONFIG_EXTCON=y
CONFIG_PWM=y
CONFIG_QCOM_PDC=y
CONFIG_GENERIC_PHY=y
CONFIG_RAS=y
CONFIG_ANDROID=y
@@ -376,8 +383,8 @@ CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_EFIVAR_FS is not set
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
@@ -23,6 +23,8 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
@@ -500,6 +502,13 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
@@ -16,10 +16,12 @@
 * 0x400: for dynamic BRK instruction
 * 0x401: for compile time BRK instruction
 * 0x800: kernel-mode BUG() and WARN() traps
 * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
 */
#define FAULT_BRK_IMM			0x100
#define KGDB_DYN_DBG_BRK_IMM		0x400
#define KGDB_COMPILED_DBG_BRK_IMM	0x401
#define BUG_BRK_IMM			0x800
#define KASAN_BRK_IMM			0x900

#endif
@@ -49,6 +49,10 @@
 */
#define ARCH_DMA_MINALIGN	(128)

#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
#endif

#ifndef __ASSEMBLY__

#include <linux/bitops.h>
@@ -4,12 +4,16 @@

#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/pgtable-types.h>

#define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)
#define arch_kasan_reset_tag(addr)	__tag_reset(addr)
#define arch_kasan_get_tag(addr)	__tag_get(addr)

#ifdef CONFIG_KASAN

/*
 * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
 * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
@@ -74,19 +74,13 @@
#define KERNEL_END	_end

/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
 * on.
 * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
 * address space for the shadow region respectively. They can bloat the stack
 * significantly, so double the (minimum) stack size when they are in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_THREAD_SHIFT	2
#else
#define KASAN_THREAD_SHIFT	1
#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
@@ -220,6 +214,26 @@ static inline unsigned long kaslr_offset(void)
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)	\
	((__typeof__(addr))sign_extend64((u64)(addr), 55))

#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_set(addr, tag)	(__typeof__(addr))( \
		((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
#define __tag_reset(addr)	untagged_addr(addr)
#define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
#else
#define __tag_set(addr, tag)	(addr)
#define __tag_reset(addr)	(addr)
#define __tag_get(addr)		0
#endif

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
@@ -303,7 +317,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr)	(((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define page_to_virt(page)	({					\
	unsigned long __addr =						\
		((__page_to_voff(page)) | PAGE_OFFSET);			\
	__addr = __tag_set(__addr, page_kasan_tag(page));		\
	((void *)__addr);						\
})

#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -311,9 +331,10 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif
#endif

#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))
#define _virt_addr_is_linear(kaddr)	\
	(__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		\
	(_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))

#include <asm-generic/memory_model.h>
@@ -289,6 +289,7 @@
#define TCR_A1			(UL(1) << 22)
#define TCR_ASID16		(UL(1) << 36)
#define TCR_TBI0		(UL(1) << 37)
#define TCR_TBI1		(UL(1) << 38)
#define TCR_HA			(UL(1) << 39)
#define TCR_HD			(UL(1) << 40)
#define TCR_NFD1		(UL(1) << 54)
@@ -97,13 +97,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
	return ret;
}

/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_set_tls	(__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END	(__ARM_NR_COMPAT_BASE + 0x800)

#define __NR_compat_syscalls	399
#define __NR_compat_syscalls	435
#endif

#define __ARCH_WANT_SYS_CLONE
@@ -819,6 +819,10 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_rseq 398
__SYSCALL(__NR_rseq, sys_rseq)
#define __NR_pidfd_send_signal 424
__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
#define __NR_pidfd_open 434
__SYSCALL(__NR_pidfd_open, sys_pidfd_open)

/*
 * Please add new compat syscalls above this comment and update
@@ -112,6 +112,8 @@ pe_header:
 * x23		stext() .. start_kernel()	physical misalignment/KASLR offset
 * x28		__create_page_tables()		callee preserved temp register
 * x19/x20	__primary_switch()		callee preserved temp registers
 * x24		__primary_switch() .. relocate_kernel()
 *						current RELR displacement
 */
ENTRY(stext)
	bl	preserve_boot_args
@@ -830,14 +832,93 @@ __relocate_kernel:

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23		// relocate
	str	x13, [x11, x23]
	add	x14, x14, x23		// relocate
	str	x14, [x12, x23]
	b	0b
1:	ret

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x15
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x15
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

ENDPROC(__relocate_kernel)
#endif
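For readability, the RELR walk that __relocate_kernel implements above can be
rendered in C roughly as follows (an illustrative sketch, not kernel code; the
incremental x24 bookkeeping for repeated application is folded into a single
delta):

	#include <stdint.h>

	static void apply_relr(const uint64_t *relr, const uint64_t *relr_end,
			       uint64_t delta)
	{
		uint64_t *where = 0;

		for (; relr < relr_end; relr++) {
			uint64_t entry = *relr;

			if (!(entry & 1)) {
				/* Address entry: relocate one word, remember base. */
				where = (uint64_t *)(entry + delta);
				*where++ += delta;
			} else {
				/* Bitmap entry: bit i+1 set => relocate where[i]. */
				for (int i = 0; entry >>= 1; i++)
					if (entry & 1)
						where[i] += delta;
				where += 63;	/* 63 significant bits per entry */
			}
		}
	}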
@@ -849,6 +930,9 @@ __primary_switch:

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
	mov	x24, #0				// no RELR displacement yet
#endif
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
@@ -301,6 +301,11 @@ void __init setup_arch(char **cmdline_p)

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
@@ -347,6 +352,9 @@ void __init setup_arch(char **cmdline_p)
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after cpus are set up. */
	kasan_init_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
@@ -418,11 +418,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();
}
@@ -35,6 +35,7 @@
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -947,6 +948,58 @@ static struct break_hook bug_break_hook = {
	.fn = bug_handler,
};

#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE		0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	if (user_mode(regs))
		return DBG_HOOK_ERROR;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation allows to control whether we can proceed after
	 * a crash was detected. This is done by passing the -recover flag to
	 * the compiler. Disabling recovery allows to generate more compact
	 * code.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

#define KASAN_ESR_VAL	(0xf2000000 | KASAN_BRK_IMM)
#define KASAN_ESR_MASK	0xffffff00

static struct break_hook kasan_break_hook = {
	.esr_val = KASAN_ESR_VAL,
	.esr_mask = KASAN_ESR_MASK,
	.fn = kasan_handler,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions
 * This handler only used until debug_traps_init().
@ -954,6 +1007,10 @@ static struct break_hook bug_break_hook = {
|
|||
int __init early_brk64(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_KASAN_SW_TAGS
|
||||
if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
|
||||
return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
|
||||
#endif
|
||||
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
|
||||
}
|
||||
|
||||
|
@ -961,4 +1018,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
|
|||
void __init trap_init(void)
|
||||
{
|
||||
register_break_hook(&bug_break_hook);
|
||||
#ifdef CONFIG_KASAN_SW_TAGS
|
||||
register_break_hook(&kasan_break_hook);
|
||||
#endif
|
||||
}
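
As a worked example of the decoding in kasan_handler() above, here is a standalone sketch using the same macros; the immediate value 0x35 is hypothetical, chosen only to exercise all three fields::

    #include <stdbool.h>
    #include <stdio.h>

    #define KASAN_ESR_RECOVER	0x20
    #define KASAN_ESR_WRITE		0x10
    #define KASAN_ESR_SIZE_MASK	0x0f
    #define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

    int main(void)
    {
    	unsigned int esr = 0x35;	/* example brk immediate */

    	bool recover = esr & KASAN_ESR_RECOVER;
    	bool write = esr & KASAN_ESR_WRITE;
    	unsigned int size = KASAN_ESR_SIZE(esr);

    	/* prints: recover=1 write=1 size=32 */
    	printf("recover=%d write=%d size=%u\n", recover, write, size);
    	return 0;
    }
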

@@ -188,6 +188,15 @@ SECTIONS
	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

#ifdef CONFIG_RELR
	.relr.dyn : ALIGN(8) {
		*(.relr.dyn)
	}

	__relr_offset	= ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
	__relr_size	= SIZEOF(.relr.dyn);
#endif

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;
	__init_end = .;

@@ -23,7 +23,6 @@ config KVM
	depends on OF
	select MMU_NOTIFIER
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
	select KVM_MMIO

@@ -39,6 +39,7 @@
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -126,6 +127,18 @@ static void mem_abort_decode(unsigned int esr)
	data_abort_decode(esr);
}

static inline bool is_ttbr0_addr(unsigned long addr)
{
	/* entry assembly clears tags for TTBR0 addrs */
	return addr < TASK_SIZE;
}

static inline bool is_ttbr1_addr(unsigned long addr)
{
	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
	return arch_kasan_reset_tag(addr) >= VA_START;
}
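
The tag reset matters because software tag-based KASAN stores a tag in the top byte of TTBR1 pointers, so a raw ``>= VA_START`` comparison would wrongly reject tagged kernel addresses. A standalone sketch of the untagging idea; the helper name is illustrative, the kernel's own helper is arch_kasan_reset_tag()::

    /* Sign-extend from bit 55 so a tagged TTBR1 pointer gets its
     * canonical 0xff top byte back before range checks (sketch only). */
    static inline unsigned long untag_kernel_addr(unsigned long addr)
    {
    	return (unsigned long)((long)(addr << 8) >> 8);
    }
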

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
@@ -135,7 +148,7 @@ void show_pte(unsigned long addr)
	pgd_t *pgdp;
	pgd_t pgd;

	if (addr < TASK_SIZE) {
	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
@@ -143,7 +156,7 @@ void show_pte(unsigned long addr)
				 addr);
			return;
		}
	} else if (addr >= VA_START) {
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
@@ -249,7 +262,7 @@ static inline bool is_el1_permission_fault(unsigned int esr,
	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

	if (addr < TASK_SIZE && system_uses_ttbr0_pan())
	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);
@@ -314,7 +327,7 @@ static void __do_user_fault(struct siginfo *info, unsigned int esr)
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (current->thread.fault_address >= TASK_SIZE) {
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
@@ -454,7 +467,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
		mm_flags |= FAULT_FLAG_WRITE;
	}

	if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) {
	if (is_ttbr0_addr(addr) && is_el1_permission_fault(esr, regs, addr)) {
		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
		if (regs->orig_addr_limit == KERNEL_DS)
			die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -641,7 +654,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
					  unsigned int esr,
					  struct pt_regs *regs)
{
	if (addr < TASK_SIZE)
	if (is_ttbr0_addr(addr))
		return do_page_fault(addr, esr, regs);

	do_bad_area(addr, esr, regs);
@@ -805,7 +818,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (addr > TASK_SIZE)
	if (!is_ttbr0_addr(addr))
		arm64_apply_bp_hardening();

	local_irq_enable();
@@ -820,7 +833,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
	struct siginfo info;

	if (user_mode(regs)) {
		if (instruction_pointer(regs) > TASK_SIZE)
		if (!is_ttbr0_addr(instruction_pointer(regs)))
			arm64_apply_bp_hardening();
		local_irq_enable();
	}
@@ -912,7 +925,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
	if (interrupts_enabled(regs))
		trace_hardirqs_off();

	if (user_mode(regs) && pc > TASK_SIZE)
	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (!inf->fn(addr_if_watchpoint, esr, regs)) {

@@ -40,7 +40,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_ACCESSIBLE, node);
					      MEMBLOCK_ALLOC_KASAN, node);
	return __pa(p);
}

@@ -48,8 +49,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
					     : kasan_alloc_zeroed_page(node);
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

@@ -61,8 +62,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
					     : kasan_alloc_zeroed_page(node);
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

@@ -73,8 +75,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (pgd_none(READ_ONCE(*pgdp))) {
		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
					     : kasan_alloc_zeroed_page(node);
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
	}

@@ -88,8 +91,11 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
					      : kasan_alloc_zeroed_page(node);
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_zeroed_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ -192,7 +198,7 @@ void __init kasan_init(void)

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call bellow).
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
@@ -206,14 +212,14 @@ void __init kasan_init(void)
	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
@@ -228,14 +234,15 @@ void __init kasan_init(void)
	}

	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_zero_page, 0, PAGE_SIZE);
	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
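
The memset change just above follows from the shadow encodings of the two KASAN modes; for reference, the values come from the upstream kasan definitions::

    /* For reference (upstream kasan definitions):
     *   generic KASAN:   #define KASAN_SHADOW_INIT 0     (fully addressable)
     *   tag-based KASAN: #define KASAN_SHADOW_INIT 0xFF  (the match-all tag,
     *                    so freshly mapped shadow permits any pointer tag)
     */
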

@@ -47,6 +47,12 @@
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_FLAGS TCR_TBI1
#else
#define TCR_KASAN_FLAGS 0
#endif

#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
@@ -497,7 +503,7 @@ ENTRY(__cpu_setup)
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
			TCR_TBI0 | TCR_A1
			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
	tcr_set_idmap_t0sz	x10, x9

	/*

@@ -20,7 +20,6 @@ config KVM
	depends on HAVE_KVM
	select EXPORT_UASM
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
	select HAVE_KVM_VCPU_ASYNC_IOCTL
	select KVM_MMIO

@@ -20,7 +20,6 @@ if VIRTUALIZATION
config KVM
	bool
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select HAVE_KVM_EVENTFD
	select HAVE_KVM_VCPU_ASYNC_IOCTL
	select SRCU

@@ -391,3 +391,5 @@
381  common	kexec_file_load		sys_kexec_file_load		compat_sys_kexec_file_load
382  common	io_pgetevents		sys_io_pgetevents		compat_sys_io_pgetevents
383  common	rseq			sys_rseq			compat_sys_rseq
424  common	pidfd_send_signal	sys_pidfd_send_signal		sys_pidfd_send_signal
434  common	pidfd_open		sys_pidfd_open			sys_pidfd_open

@@ -21,7 +21,6 @@ config KVM
	prompt "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select HAVE_KVM_CPU_RELAX_INTERCEPT
	select HAVE_KVM_VCPU_ASYNC_IOCTL
	select HAVE_KVM_EVENTFD

@@ -46,7 +46,6 @@ config X86
	#
	select ACPI_LEGACY_TABLES_LOOKUP	if ACPI
	select ACPI_SYSTEM_POWER_STATES_SUPPORT	if ACPI
	select ANON_INODES
	select ARCH_CLOCKSOURCE_DATA
	select ARCH_DISCARD_MEMBLOCK
	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI

@@ -11,7 +11,7 @@
#include "../string.c"

#ifdef CONFIG_X86_32
static void *__memcpy(void *dest, const void *src, size_t n)
static void *____memcpy(void *dest, const void *src, size_t n)
{
	int d0, d1, d2;
	asm volatile(
@@ -25,7 +25,7 @@ static void *__memcpy(void *dest, const void *src, size_t n)
	return dest;
}
#else
static void *__memcpy(void *dest, const void *src, size_t n)
static void *____memcpy(void *dest, const void *src, size_t n)
{
	long d0, d1, d2;
	asm volatile(
@@ -56,7 +56,7 @@ void *memmove(void *dest, const void *src, size_t n)
	const unsigned char *s = src;

	if (d <= s || d - s >= n)
		return __memcpy(dest, src, n);
		return ____memcpy(dest, src, n);

	while (n-- > 0)
		d[n] = s[n];
@@ -71,5 +71,11 @@ void *memcpy(void *dest, const void *src, size_t n)
		warn("Avoiding potentially unsafe overlapping memcpy()!");
		return memmove(dest, src, n);
	}
	return __memcpy(dest, src, n);
	return ____memcpy(dest, src, n);
}

#ifdef CONFIG_KASAN
extern void *__memset(void *s, int c, size_t n) __alias(memset);
extern void *__memmove(void *dest, const void *src, size_t n) __alias(memmove);
extern void *__memcpy(void *dest, const void *src, size_t n) __alias(memcpy);
#endif
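
The rename to ____memcpy() frees the __memcpy name so that, under CONFIG_KASAN, it can become an alias of the checked memcpy(), as the hunk above shows. The __alias helper expands to the compiler's alias attribute; a compilable sketch of the pattern, with illustrative function names::

    /* Two symbols, one implementation: the attribute makes my_memcpy2
     * another name for my_memcpy, as __memcpy aliases memcpy above. */
    #include <stddef.h>
    #include <string.h>

    #define __alias(symbol)	__attribute__((__alias__(#symbol)))

    void *my_memcpy(void *dest, const void *src, size_t n)
    {
    	return memcpy(dest, src, n);
    }

    void *my_memcpy2(void *dest, const void *src, size_t n) __alias(my_memcpy);
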

@@ -1,3 +1,4 @@
# CONFIG_USELIB is not set
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -15,6 +16,7 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
@@ -31,10 +33,12 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_PROFILING=y
CONFIG_SMP=y
CONFIG_NR_CPUS=32
CONFIG_EFI=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -43,8 +47,8 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_GKI_HACKS_TO_FIX=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SPARSEMEM_VMEMMAP is not set
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_ZSMALLOC=y
CONFIG_NET=y
@@ -141,6 +145,7 @@ CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_TIPC=y
CONFIG_L2TP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
@@ -150,8 +155,12 @@ CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_CAN=m
# CONFIG_CAN_BCM is not set
# CONFIG_CAN_GW is not set
CONFIG_CAN_VCAN=m
CONFIG_CFG80211=y
# CONFIG_CFG80211_DEFAULT_PS is not set
# CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -161,17 +170,17 @@ CONFIG_RFKILL=y
# CONFIG_UEVENT_HELPER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DEBUG_DEVRES=y
CONFIG_OF=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_BLK=m
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
# CONFIG_SCSI_MQ_DEFAULT is not set
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -182,7 +191,7 @@ CONFIG_DM_VERITY_FEC=y
CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
CONFIG_VIRTIO_NET=m
# CONFIG_ETHERNET is not set
CONFIG_PHYLIB=y
CONFIG_PPP=y
@@ -216,7 +225,7 @@ CONFIG_USB_USBNET=y
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_VIRT_WIFI=y
CONFIG_VIRT_WIFI=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -230,26 +239,26 @@ CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=m
CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HW_RANDOM_VIRTIO=m
# CONFIG_DEVPORT is not set
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_SPI=y
CONFIG_GPIOLIB=y
# CONFIG_HWMON is not set
CONFIG_DEVFREQ_THERMAL=y
# CONFIG_X86_PKG_TEMP_THERMAL is not set
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_SOUND=y
CONFIG_SND=y
@@ -258,7 +267,7 @@ CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_INTEL8X0=y
CONFIG_SND_INTEL8X0=m
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
@@ -277,23 +286,24 @@ CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
# CONFIG_MMC_BLOCK is not set
CONFIG_MMC=m
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_SYSTOHC is not set
CONFIG_VIRTIO_PCI=y
CONFIG_RTC_DRV_TEST=m
CONFIG_UIO=y
CONFIG_VIRTIO_PCI=m
# CONFIG_VIRTIO_PCI_LEGACY is not set
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ION=y
CONFIG_MAILBOX=y
CONFIG_ION_SYSTEM_HEAP=y
CONFIG_PM_DEVFREQ=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
@@ -307,10 +317,12 @@ CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_EFIVAR_FS is not set
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
@@ -325,7 +337,6 @@ CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_CRC8=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y

@@ -1,485 +0,0 @@
CONFIG_POSIX_MQUEUE=y
# CONFIG_USELIB is not set
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_PSI=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZ4 is not set
# CONFIG_FHANDLE is not set
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_SMP=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_PARAVIRT_SPINLOCKS=y
CONFIG_MCORE2=y
CONFIG_PROCESSOR_SELECT=y
# CONFIG_CPU_SUP_CENTAUR is not set
CONFIG_NR_CPUS=8
# CONFIG_MICROCODE is not set
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
# CONFIG_MTRR is not set
CONFIG_HZ_100=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
CONFIG_PHYSICAL_START=0x200000
CONFIG_PHYSICAL_ALIGN=0x1000000
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0 reboot=p"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
CONFIG_PM_DEBUG=y
CONFIG_ACPI_PROCFS_POWER=y
# CONFIG_ACPI_FAN is not set
# CONFIG_ACPI_THERMAL is not set
# CONFIG_X86_PM_TIMER is not set
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MSI=y
CONFIG_IA32_EMULATION=y
# CONFIG_FIRMWARE_MEMMAP is not set
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_LTO_CLANG=y
CONFIG_CFI_CLANG=y
CONFIG_REFCOUNT_FULL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_ZSMALLOC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_NET_IPGRE_DEMUX=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
# CONFIG_TCP_CONG_WESTWOOD is not set
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_IPV6_MIP6=y
CONFIG_IPV6_VTI=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=y
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_AMANDA=y
CONFIG_NF_CONNTRACK_FTP=y
CONFIG_NF_CONNTRACK_H323=y
CONFIG_NF_CONNTRACK_IRC=y
CONFIG_NF_CONNTRACK_NETBIOS_NS=y
CONFIG_NF_CONNTRACK_PPTP=y
CONFIG_NF_CONNTRACK_SANE=y
CONFIG_NF_CONNTRACK_TFTP=y
CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_TRACE=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
CONFIG_NETFILTER_XT_MATCH_STRING=y
CONFIG_NETFILTER_XT_MATCH_TIME=y
CONFIG_NETFILTER_XT_MATCH_U32=y
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_MATCH_AH=y
CONFIG_IP_NF_MATCH_ECN=y
CONFIG_IP_NF_MATCH_TTL=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_NAT=y
CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_TARGET_NETMAP=y
CONFIG_IP_NF_TARGET_REDIRECT=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=y
CONFIG_IP_NF_SECURITY=y
CONFIG_IP_NF_ARPTABLES=y
CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_IP6_NF_IPTABLES=y
CONFIG_IP6_NF_MATCH_IPV6HEADER=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
CONFIG_L2TP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_NETEM=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_VSOCKETS=y
CONFIG_VIRTIO_VSOCKETS=y
CONFIG_BPF_JIT=y
CONFIG_CFG80211=y
CONFIG_MAC80211=y
CONFIG_RFKILL=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEBUG_DEVRES=y
CONFIG_OF=y
CONFIG_OF_UNITTEST=y
# CONFIG_PNP_DEBUG_MESSAGES is not set
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_VIRTIO=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_MIRROR=y
CONFIG_DM_ZERO=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_AVB=y
CONFIG_DM_VERITY_FEC=y
CONFIG_DM_BOW=y
CONFIG_NETDEVICES=y
CONFIG_NETCONSOLE=y
CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_TUN=y
CONFIG_VIRTIO_NET=y
# CONFIG_ETHERNET is not set
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
# CONFIG_USB_NET_CDCETHER is not set
# CONFIG_USB_NET_CDC_NCM is not set
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set
# CONFIG_WLAN_VENDOR_BROADCOM is not set
# CONFIG_WLAN_VENDOR_CISCO is not set
# CONFIG_WLAN_VENDOR_INTEL is not set
# CONFIG_WLAN_VENDOR_INTERSIL is not set
# CONFIG_WLAN_VENDOR_MARVELL is not set
# CONFIG_WLAN_VENDOR_MEDIATEK is not set
# CONFIG_WLAN_VENDOR_RALINK is not set
# CONFIG_WLAN_VENDOR_REALTEK is not set
# CONFIG_WLAN_VENDOR_RSI is not set
# CONFIG_WLAN_VENDOR_ST is not set
# CONFIG_WLAN_VENDOR_TI is not set
# CONFIG_WLAN_VENDOR_ZYDAS is not set
# CONFIG_WLAN_VENDOR_QUANTENNA is not set
CONFIG_MAC80211_HWSIM=y
CONFIG_VIRT_WIFI=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_JOYSTICK=y
CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=y
CONFIG_TABLET_USB_AIPTEK=y
CONFIG_TABLET_USB_GTCO=y
CONFIG_TABLET_USB_HANWANG=y
CONFIG_TABLET_USB_KBTAB=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_EXAR is not set
CONFIG_SERIAL_8250_NR_UARTS=48
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HW_RANDOM_INTEL is not set
# CONFIG_HW_RANDOM_AMD is not set
# CONFIG_HW_RANDOM_VIA is not set
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_HPET=y
# CONFIG_HPET_MMAP_DEFAULT is not set
# CONFIG_DEVPORT is not set
# CONFIG_ACPI_I2C_OPREGION is not set
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_PTP_1588_CLOCK=y
# CONFIG_HWMON is not set
# CONFIG_X86_PKG_TEMP_THERMAL is not set
CONFIG_WATCHDOG=y
CONFIG_SOFT_WATCHDOG=y
CONFIG_MEDIA_SUPPORT=y
# CONFIG_VGA_ARB is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_HRTIMER=y
# CONFIG_SND_SUPPORT_OLD_API is not set
# CONFIG_SND_VERBOSE_PROCFS is not set
# CONFIG_SND_DRIVERS is not set
CONFIG_SND_INTEL8X0=y
# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y
CONFIG_HID_ACRUX=y
CONFIG_HID_ACRUX_FF=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
CONFIG_HID_CHERRY=y
CONFIG_HID_CHICONY=y
CONFIG_HID_PRODIKEYS=y
CONFIG_HID_CYPRESS=y
CONFIG_HID_DRAGONRISE=y
CONFIG_DRAGONRISE_FF=y
CONFIG_HID_EMS_FF=y
CONFIG_HID_ELECOM=y
CONFIG_HID_EZKEY=y
CONFIG_HID_HOLTEK=y
CONFIG_HID_KEYTOUCH=y
CONFIG_HID_KYE=y
CONFIG_HID_UCLOGIC=y
CONFIG_HID_WALTOP=y
CONFIG_HID_GYRATION=y
CONFIG_HID_TWINHAN=y
CONFIG_HID_KENSINGTON=y
CONFIG_HID_LCPOWER=y
CONFIG_HID_LOGITECH=y
CONFIG_HID_LOGITECH_DJ=y
CONFIG_LOGITECH_FF=y
CONFIG_LOGIRUMBLEPAD2_FF=y
CONFIG_LOGIG940_FF=y
CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
CONFIG_HID_MONTEREY=y
CONFIG_HID_MULTITOUCH=y
CONFIG_HID_NTRIG=y
CONFIG_HID_ORTEK=y
CONFIG_HID_PANTHERLORD=y
CONFIG_PANTHERLORD_FF=y
CONFIG_HID_PETALYNX=y
CONFIG_HID_PICOLCD=y
CONFIG_HID_PRIMAX=y
CONFIG_HID_ROCCAT=y
CONFIG_HID_SAITEK=y
CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SPEEDLINK=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_GREENASIA=y
CONFIG_GREENASIA_FF=y
CONFIG_HID_SMARTJOYPLUS=y
CONFIG_SMARTJOYPLUS_FF=y
CONFIG_HID_TIVO=y
CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_WACOM=y
CONFIG_HID_WIIMOTE=y
CONFIG_HID_ZEROPLUS=y
CONFIG_HID_ZYDACRON=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_GADGET=y
CONFIG_USB_DUMMY_HCD=y
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_MMC=y
# CONFIG_PWRSEQ_EMMC is not set
# CONFIG_PWRSEQ_SIMPLE is not set
# CONFIG_MMC_BLOCK is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_TEST=y
CONFIG_SW_SYNC=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_VSOC=y
CONFIG_ION=y
CONFIG_ION_SYSTEM_HEAP=y
# CONFIG_X86_PLATFORM_DEVICES is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
CONFIG_F2FS_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_SDCARD_FS=y
CONFIG_PSTORE=y
CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_RAM=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_PATH=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
# CONFIG_UNUSED_SYMBOLS is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_ENABLE_DEFAULT_TRACERS=y
CONFIG_IO_DELAY_NONE=y
CONFIG_DEBUG_BOOT_PARAMS=y
CONFIG_OPTIMIZE_INLINING=y
CONFIG_UNWINDER_FRAME_POINTER=y

@@ -398,3 +398,5 @@
384	i386	arch_prctl		sys_arch_prctl			__ia32_compat_sys_arch_prctl
385	i386	io_pgetevents		sys_io_pgetevents		__ia32_compat_sys_io_pgetevents
386	i386	rseq			sys_rseq			__ia32_sys_rseq
424	i386	pidfd_send_signal	sys_pidfd_send_signal		__ia32_sys_pidfd_send_signal
434	i386	pidfd_open		sys_pidfd_open			__ia32_sys_pidfd_open

@@ -339,6 +339,8 @@
330	common	pkey_alloc		sys_pkey_alloc
331	common	pkey_free		sys_pkey_free
332	common	statx			sys_statx
424	common	pidfd_send_signal	sys_pidfd_send_signal
434	common	pidfd_open		sys_pidfd_open

#
# x32-specific system call numbers start at 512 to avoid cache impact

@@ -7,11 +7,7 @@
#endif

#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif

@@ -58,6 +58,23 @@ static __always_inline void stac(void)
	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
}

static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
				  X86_FEATURE_SMAP)
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP)
		      : : "g" (flags) : "memory", "cc");
}

/* These macros can be used in asm() statements */
#define ASM_CLAC \
	ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
@@ -69,6 +86,9 @@ static __always_inline void stac(void)
static inline void clac(void) { }
static inline void stac(void) { }

static inline unsigned long smap_save(void) { return 0; }
static inline void smap_restore(unsigned long flags) { }

#define ASM_CLAC
#define ASM_STAC

@@ -711,9 +711,22 @@ extern struct movsl_mask {
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
static __must_check inline bool user_access_begin(int type,
						  const void __user *ptr,
						  size_t len)
{
	if (unlikely(!access_ok(type, ptr, len)))
		return 0;
	__uaccess_begin();
	return 1;
}

#define user_access_begin(a, b, c)	user_access_begin(a, b, c)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
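
With this change the access_ok() check moves inside user_access_begin(), so callers must test its result before any unsafe_*_user() access. A hedged sketch of the caller shape implied by the new signature; the function name and label are illustrative, not from this patch::

    /* Kernel-style sketch only; relies on the macros defined above. */
    static int put_user_checked(int val, int __user *uptr)
    {
    	if (!user_access_begin(VERIFY_WRITE, uptr, sizeof(*uptr)))
    		return -EFAULT;
    	unsafe_put_user(val, uptr, efault);	/* branches to efault on fault */
    	user_access_end();
    	return 0;

    efault:
    	user_access_end();
    	return -EFAULT;
    }
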

@@ -27,7 +27,6 @@ config KVM
	depends on X86_LOCAL_APIC
	select PREEMPT_NOTIFIERS
	select MMU_NOTIFIER
	select ANON_INODES
	select HAVE_KVM_IRQCHIP
	select HAVE_KVM_IRQFD
	select IRQ_BYPASS_MANAGER

@@ -377,7 +377,7 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,

	/*
	 * This is an optimization for KASAN=y case. Since all kasan page tables
	 * eventually point to the kasan_zero_page we could call note_page()
	 * eventually point to the kasan_early_shadow_page we could call note_page()
	 * right away without walking through lower level page tables. This saves
	 * us dozens of seconds (minutes for 5-level config) while checking for
	 * W+X mapping or reading kernel_page_tables debugfs file.
@@ -385,10 +385,11 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
				    void *pt)
{
	if (__pa(pt) == __pa(kasan_zero_pmd) ||
	    (pgtable_l5_enabled() && __pa(pt) == __pa(kasan_zero_p4d)) ||
	    __pa(pt) == __pa(kasan_zero_pud)) {
		pgprotval_t prot = pte_flags(kasan_zero_pte[0]);
	if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
	    (pgtable_l5_enabled() &&
	     __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
	    __pa(pt) == __pa(kasan_early_shadow_pud)) {
		pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
		note_page(m, st, __pgprot(prot), 0, 5);
		return true;
	}

@@ -212,7 +212,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		pgd_entry = __pgd(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

@@ -223,7 +224,8 @@ static void __init kasan_early_p4d_populate(pgd_t *pgd,
		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		p4d_entry = __p4d(_KERNPG_TABLE |
				  __pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
@@ -262,10 +264,11 @@ static struct notifier_block kasan_die_notifier = {
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
@@ -274,16 +277,16 @@ void __init kasan_early_init(void)
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
@@ -327,7 +330,7 @@ void __init kasan_init(void)

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
@@ -339,41 +342,41 @@ void __init kasan_init(void)

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
			PAGE_SIZE);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
			PAGE_SIZE);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_zero_shadow(
	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));
	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;
@@ -381,8 +384,8 @@ void __init kasan_init(void)
		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_zero_page) | pgprot_val(prot));
		set_pte(&kasan_zero_pte[i], pte);
		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

@@ -25,12 +25,13 @@ void __init kasan_early_init(void)
	int i;

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_zero_pte + i,
			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
	early_trap_init();
}
@@ -81,13 +82,16 @@ void __init kasan_init(void)
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/* Write protect kasan_zero_page and zero-initialize it again. */
	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_zero_pte + i,
			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_zero_page, 0, PAGE_SIZE);
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;

@@ -1,5 +0,0 @@
. ${ROOT_DIR}/common/build.config.common
. ${ROOT_DIR}/common/build.config.aarch64

DEFCONFIG=cuttlefish_defconfig
POST_DEFCONFIG_CMDS="check_defconfig"

@@ -1,5 +0,0 @@
. ${ROOT_DIR}/common/build.config.common
. ${ROOT_DIR}/common/build.config.x86_64

DEFCONFIG=x86_64_cuttlefish_defconfig
POST_DEFCONFIG_CMDS="check_defconfig"

@@ -16,3 +16,4 @@ System.map
"
STOP_SHIP_TRACEPRINTK=1
ABI_DEFINITION=abi_gki_aarch64.xml
BUILD_INITRAMFS=1

@@ -15,3 +15,4 @@ vmlinux
System.map
"
STOP_SHIP_TRACEPRINTK=1
BUILD_INITRAMFS=1

@@ -179,7 +179,6 @@ source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
	bool
	default n
	select ANON_INODES
	select IRQ_WORK
	help
	  This option enables the framework for buffer-sharing between

@@ -2025,6 +2025,7 @@ int dpm_prepare(pm_message_t state)
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}

@@ -157,7 +157,6 @@ config TCG_CRB
config TCG_VTPM_PROXY
	tristate "VTPM Proxy Interface"
	depends on TCG_TPM
	select ANON_INODES
	---help---
	  This driver proxies for an emulated TPM (vTPM) running in userspace.
	  A device /dev/vtpmx is provided that creates a device pair

@@ -3,7 +3,6 @@ menu "DMABUF options"
config SYNC_FILE
	bool "Explicit Synchronization Framework"
	default n
	select ANON_INODES
	select DMA_SHARED_BUFFER
	---help---
	  The Sync File Framework adds explicit syncronization via

@@ -40,7 +40,10 @@
#include <linux/fdtable.h>
#include <linux/list_sort.h>
#include <linux/hashtable.h>
#include <linux/mount.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static atomic_long_t name_counter;

@@ -66,6 +69,41 @@ struct dma_proc {

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	mutex_unlock(&dmabuf->lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;

static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
		int flags, const char *name, void *data)
{
	return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
			DMA_BUF_MAGIC);
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.mount = dma_buf_fs_mount,
	.kill_sb = kill_anon_super,
};

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;
@@ -314,6 +352,43 @@ static int dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf,
static int dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf,
					  enum dma_data_direction direction);

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoritically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purpose between different devices.
 *
 * @dmabuf [in]	dmabuf buffer that will be renamed.
 * @buf:   [in]	A piece of userspace memory that contains the name of
 *		the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
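
From userspace the new naming hook is reached through the DMA_BUF_SET_NAME ioctl wired up in the next hunk. A minimal usage sketch; it assumes a dma-buf file descriptor already obtained from some exporter, and that the installed kernel headers carry the DMA_BUF_SET_NAME definition::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/dma-buf.h>

    /* fd is assumed to be a dma-buf file descriptor obtained elsewhere. */
    static int label_dmabuf(int fd, const char *name)
    {
    	/* Fails with EBUSY once the buffer has device attachments,
    	 * matching dma_buf_set_name() above. */
    	if (ioctl(fd, DMA_BUF_SET_NAME, name) < 0) {
    		perror("DMA_BUF_SET_NAME");
    		return -1;
    	}
    	return 0;
    }
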
|
||||
|
@ -360,11 +435,29 @@ static long dma_buf_ioctl(struct file *file,
		ret = dma_buf_begin_cpu_access(dmabuf, dir);

		return ret;

	case DMA_BUF_SET_NAME:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	mutex_unlock(&dmabuf->lock);
}

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_release,
	.mmap = dma_buf_mmap_internal,

@@ -374,6 +467,7 @@ static const struct file_operations dma_buf_fops = {
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_buf_ioctl,
#endif
	.show_fdinfo = dma_buf_show_fdinfo,
};

/*

@@ -384,6 +478,32 @@ static inline int is_dma_buf_file(struct file *file)
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *

@@ -491,8 +611,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile(bufname, &dma_buf_fops, dmabuf,
				  exp_info->flags);
	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;

@@ -1178,8 +1297,9 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-12s\t%-s\n",
		   "size", "flags", "mode", "count", "exp_name", "buf name");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\t%-12s\t%-s\t%-8s\n",
		   "size", "flags", "mode", "count", "exp_name",
		   "buf name", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

@@ -1190,11 +1310,13 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-12s\t%-s\n",
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%-12s\t%-s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name, buf_obj->buf_name);
			   buf_obj->exp_name, buf_obj->buf_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {

@@ -1449,6 +1571,10 @@ static inline void dma_buf_uninit_debugfs(void)

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();

@@ -1459,5 +1585,6 @@ subsys_initcall(dma_buf_init);
static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);
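
The new DMA_BUF_SET_NAME path above lets userspace label an exported buffer so it can be told apart in /proc/<pid>/fdinfo/<fd> and in the dma_buf debugfs listing. A minimal userspace sketch, assuming the uapi header exposes DMA_BUF_SET_NAME and that dmabuf_fd is an already-exported dma-buf::

	#include <sys/ioctl.h>
	#include <linux/dma-buf.h>

	/* Attach a human-readable label to a dma-buf fd; it shows up
	 * as "name:" in fdinfo and in the debugfs buffer list. */
	static int label_dmabuf(int dmabuf_fd, const char *name)
	{
		return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
	}
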
@@ -12,7 +12,6 @@ config ARCH_HAVE_CUSTOM_GPIO_H

menuconfig GPIOLIB
	bool "GPIO Support"
	select ANON_INODES
	help
	  This enables GPIO support through the generic GPIO library.
	  You only need to enable this, if you also want to enable
@@ -256,9 +256,7 @@ int drm_connector_init(struct drm_device *dev,

	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
	    connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
		drm_object_attach_property(&connector->base,
					   config->edid_property,
					   0);
	drm_connector_attach_edid_property(connector);

	drm_object_attach_property(&connector->base,
				   config->dpms_property, 0);

@@ -290,6 +288,25 @@ int drm_connector_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_connector_init);

/**
 * drm_connector_attach_edid_property - attach edid property.
 * @connector: the connector
 *
 * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get an
 * edid property attached by default. This function can be used to
 * explicitly enable the edid property in these cases.
 */
void drm_connector_attach_edid_property(struct drm_connector *connector)
{
	struct drm_mode_config *config = &connector->dev->mode_config;

	drm_object_attach_property(&connector->base,
				   config->edid_property,
				   0);
}
EXPORT_SYMBOL(drm_connector_attach_edid_property);

/**
 * drm_connector_attach_encoder - attach a connector to an encoder
 * @connector: connector to attach
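
A virtual connector that wants the property back opts in with the new helper right after drm_connector_init(); a minimal sketch of the pattern (my_connector_funcs and device_has_edid are illustrative)::

	drm_connector_init(dev, connector, &my_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	/* VIRTUAL connectors get no EDID property by default;
	 * attach one only when the device can actually supply EDID. */
	if (device_has_edid)
		drm_connector_attach_edid_property(connector);
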
@@ -678,6 +678,43 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	/* Used by drm_gem_mmap() to lookup the GEM object */
	struct drm_file priv = {
		.minor = obj->dev->primary,
	};
	struct file fil = {
		.private_data = &priv,
	};
	int ret;

	ret = drm_vma_node_allow(&obj->vma_node, &priv);
	if (ret)
		return ret;

	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	ret = obj->dev->driver->fops->mmap(&fil, vma);

	drm_vma_node_revoke(&obj->vma_node, &priv);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
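
As the kernel-doc above notes, drivers can plug the helper straight into their driver struct; a minimal sketch (the driver name is illustrative)::

	static struct drm_driver example_driver = {
		/* ... other callbacks ... */
		.gem_prime_mmap = drm_gem_prime_mmap,
	};
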
@@ -1604,7 +1604,9 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		user_access_begin();
		if (!user_access_begin(VERIFY_WRITE, urelocs, size))
			goto end_user;

		for (copied = 0; copied < nreloc; copied++)
			unsafe_put_user(-1,
					&urelocs[copied].presumed_offset,

@@ -2649,7 +2651,17 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		user_access_begin();
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(VERIFY_WRITE, user_exec_list,
				       count * sizeof(*user_exec_list)))
			goto end_user;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;
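
Both hunks move i915 onto the range-checked user_access_begin(), which in this tree still takes a VERIFY_* type and fails cleanly instead of opening an unchecked access window. A minimal sketch of the resulting pattern, with the helper name and destination illustrative::

	/* Write one value back to userspace inside an access window. */
	static int put_user_offset(u64 __user *slot, u64 value)
	{
		if (!user_access_begin(VERIFY_WRITE, slot, sizeof(*slot)))
			return -EFAULT;
		unsafe_put_user(value, slot, efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}
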
@@ -6,6 +6,6 @@
virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_drm_bus.o virtgpu_gem.o \
	virtgpu_fb.o virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \
	virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
	virtgpu_ioctl.o virtgpu_prime.o
	virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o

obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o
@@ -28,6 +28,30 @@

#include "virtgpu_drv.h"

static void virtio_add_bool(struct seq_file *m, const char *name,
			    bool value)
{
	seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
}

static void virtio_add_int(struct seq_file *m, const char *name,
			   int value)
{
	seq_printf(m, "%-16s : %d\n", name, value);
}

static int virtio_gpu_features(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;

	virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
	virtio_add_bool(m, "edid", vgdev->has_edid);
	virtio_add_int(m, "cap sets", vgdev->num_capsets);
	virtio_add_int(m, "scanouts", vgdev->num_scanouts);
	return 0;
}

static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{

@@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
}

static struct drm_info_list virtio_gpu_debugfs_list[] = {
	{ "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
	{ "virtio-gpu-features", virtio_gpu_features },
	{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
};

#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
@@ -75,12 +75,9 @@ virtio_gpu_framebuffer_init(struct drm_device *dev,
			    struct drm_gem_object *obj)
{
	int ret;
	struct virtio_gpu_object *bo;

	vgfb->base.obj[0] = obj;

	bo = gem_to_virtio_gpu_obj(obj);

	drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd);

	ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs);

@@ -109,6 +106,9 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
{
	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

	output->enabled = true;
}

static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,

@@ -119,6 +119,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);

	virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
	output->enabled = false;
}

static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,

@@ -168,6 +169,12 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
	struct drm_display_mode *mode = NULL;
	int count, width, height;

	if (output->edid) {
		count = drm_add_edid_modes(connector, output->edid);
		if (count)
			return count;
	}

	width = le32_to_cpu(output->info.r.width);
	height = le32_to_cpu(output->info.r.height);
	count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);

@@ -236,12 +243,8 @@ static enum drm_connector_status virtio_gpu_conn_detect(

static void virtio_gpu_conn_destroy(struct drm_connector *connector)
{
	struct virtio_gpu_output *virtio_gpu_output =
		drm_connector_to_virtio_gpu_output(connector);

	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(virtio_gpu_output);
}

static const struct drm_connector_funcs virtio_gpu_connector_funcs = {

@@ -286,6 +289,8 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
	drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
	if (vgdev->has_edid)
		drm_connector_attach_edid_property(connector);

	drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
			 DRM_MODE_ENCODER_VIRTUAL, NULL);

@@ -372,6 +377,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)

void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
	virtio_gpu_fbdev_fini(vgdev);
	int i;

	for (i = 0 ; i < vgdev->num_scanouts; ++i)
		kfree(vgdev->outputs[i].edid);
	drm_atomic_helper_shutdown(vgdev->ddev);
	drm_mode_config_cleanup(vgdev->ddev);
}
@@ -71,6 +71,37 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
	if (vga)
		virtio_pci_kick_out_firmware_fb(pdev);

	/*
	 * Normally the drm_dev_set_unique() call is done by core DRM.
	 * The following comment covers why virtio cannot rely on it.
	 *
	 * Unlike the other virtual GPU drivers, virtio abstracts the
	 * underlying bus type by using struct virtio_device.
	 *
	 * Hence the dev_is_pci() check, used in core DRM, will fail
	 * and the unique returned will be the virtio_device "virtio0",
	 * while a "pci:..." one is required.
	 *
	 * A few other ideas were considered:
	 * - Extend the dev_is_pci() check [in drm_set_busid] to
	 *   consider virtio.
	 *   Seems like a bigger hack than what we have already.
	 *
	 * - Point drm_device::dev to the parent of the virtio_device
	 *   Semantic changes:
	 *   * Using the wrong device for i2c, framebuffer_alloc and
	 *     prime import.
	 *   Visual changes:
	 *   * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
	 *     will print the wrong information.
	 *
	 * We could address the latter issues by introducing
	 * drm_device::bus_dev, ... which would be used solely for this.
	 *
	 * So for the moment keep things as-is, with a bulky comment
	 * for the next person who feels like removing this
	 * drm_dev_set_unique() quirk.
	 */
	snprintf(unique, sizeof(unique), "pci:%s", pname);
	ret = drm_dev_set_unique(dev, unique);
	if (ret)

@@ -85,6 +116,6 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
	return 0;

err_free:
	drm_dev_unref(dev);
	drm_dev_put(dev);
	return ret;
}
@@ -42,13 +42,20 @@ module_param_named(modeset, virtio_gpu_modeset, int, 0400);

static int virtio_gpu_probe(struct virtio_device *vdev)
{
	int ret;

	if (vgacon_text_force() && virtio_gpu_modeset == -1)
		return -EINVAL;

	if (virtio_gpu_modeset == 0)
		return -EINVAL;

	return drm_virtio_init(&driver, vdev);
	ret = drm_virtio_init(&driver, vdev);
	if (ret)
		return ret;

	drm_fbdev_generic_setup(vdev->priv, 32);
	return 0;
}

static void virtio_gpu_remove(struct virtio_device *vdev)

@@ -80,6 +87,7 @@ static unsigned int features[] = {
	 */
	VIRTIO_GPU_F_VIRGL,
#endif
	VIRTIO_GPU_F_EDID,
};
static struct virtio_driver virtio_gpu_driver = {
	.feature_table = features,

@@ -130,8 +138,6 @@ static struct drm_driver driver = {
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = virtgpu_gem_prime_pin,
	.gem_prime_unpin = virtgpu_gem_prime_unpin,
	.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
	.gem_prime_vmap = virtgpu_gem_prime_vmap,
@@ -36,6 +36,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

@@ -46,23 +47,42 @@
#define DRIVER_DATE "0"

#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 1
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

/* virtgpu_drm_bus.c */
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);

struct virtio_gpu_object_params {
	uint32_t format;
	uint32_t width;
	uint32_t height;
	unsigned long size;
	bool dumb;
	/* 3d */
	bool virgl;
	uint32_t target;
	uint32_t bind;
	uint32_t depth;
	uint32_t array_size;
	uint32_t last_level;
	uint32_t nr_samples;
	uint32_t flags;
};

struct virtio_gpu_object {
	struct drm_gem_object gem_base;
	uint32_t hw_res_handle;

	struct sg_table *pages;
	uint32_t mapped;
	void *vmap;
	bool dumb;
	struct ttm_place placement_code;
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
	container_of((gobj), struct virtio_gpu_object, gem_base)

@@ -85,7 +105,6 @@ struct virtio_gpu_fence {
	struct dma_fence f;
	struct virtio_gpu_fence_driver *drv;
	struct list_head node;
	uint64_t seq;
};
#define to_virtio_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)

@@ -112,8 +131,10 @@ struct virtio_gpu_output {
	struct drm_encoder enc;
	struct virtio_gpu_display_one info;
	struct virtio_gpu_update_cursor cursor;
	struct edid *edid;
	int cur_x;
	int cur_y;
	bool enabled;
};
#define drm_crtc_to_virtio_gpu_output(x) \
	container_of(x, struct virtio_gpu_output, crtc)

@@ -127,6 +148,7 @@ struct virtio_gpu_framebuffer {
	int x1, y1, x2, y2; /* dirty rect */
	spinlock_t dirty_lock;
	uint32_t hw_res_handle;
	struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
	container_of(x, struct virtio_gpu_framebuffer, base)

@@ -138,8 +160,6 @@ struct virtio_gpu_mman {
	struct ttm_bo_device bdev;
};

struct virtio_gpu_fbdev;

struct virtio_gpu_queue {
	struct virtqueue *vq;
	spinlock_t qlock;

@@ -170,8 +190,6 @@ struct virtio_gpu_device {

	struct virtio_gpu_mman mman;

	/* pointer to fbdev info structure */
	struct virtio_gpu_fbdev *vgfbdev;
	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
	uint32_t num_scanouts;

@@ -180,8 +198,7 @@ struct virtio_gpu_device {
	struct kmem_cache *vbufs;
	bool vqs_ready;

	struct idr resource_idr;
	spinlock_t resource_idr_lock;
	struct ida resource_ida;

	wait_queue_head_t resp_wq;
	/* current display info */

@@ -190,10 +207,10 @@ struct virtio_gpu_device {

	struct virtio_gpu_fence_driver fence_drv;

	struct idr ctx_id_idr;
	spinlock_t ctx_id_idr_lock;
	struct ida ctx_id_ida;

	bool has_virgl_3d;
	bool has_edid;

	struct work_struct config_changed_work;

@@ -209,6 +226,9 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head);
void virtio_gpu_unref_list(struct list_head *head);

/* virtio_kms.c */
int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags);

@@ -222,16 +242,17 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  uint64_t size,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file);
struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
						  size_t size, bool kernel,
						  bool pinned);
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args);

@@ -240,30 +261,24 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      uint32_t handle, uint64_t *offset_p);

/* virtio_fb */
#define VIRTIO_GPUFB_CONN_LIMIT 1
int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
			     struct drm_clip_rect *clips,
			     unsigned int num_clips);
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				uint32_t *resid);
void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height);
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint64_t offset,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence **fence);
					struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,

@@ -274,19 +289,19 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence);
			     struct virtio_gpu_fence *fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
					   uint32_t resource_id);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,

@@ -299,21 +314,23 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t resource_id);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence **fence);
			   uint32_t ctx_id, struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence **fence);
					  struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t resource_id, uint32_t ctx_id,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence **fence);
					struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_resource_create_3d *rc_3d,
				  struct virtio_gpu_fence **fence);
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);

@@ -341,25 +358,28 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);

/* virtio_gpu_fence.c */
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
	struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence **fence);
			   struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
				    u64 last_seq);

/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr);
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
				   struct virtio_gpu_object *bo);
void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);

/* virtgpu_prime.c */
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,

@@ -372,7 +392,7 @@ int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
static inline struct virtio_gpu_object*
virtio_gpu_object_ref(struct virtio_gpu_object *bo)
{
	ttm_bo_reference(&bo->tbo);
	ttm_bo_get(&bo->tbo);
	return bo;
}

@@ -383,9 +403,8 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
	ttm_bo_put(tbo);
	*bo = NULL;
}

static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
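
The virtio_gpu_object_params struct introduced above replaces the old scatter of size/kernel/pinned arguments through the allocation paths. A minimal sketch of filling it for a plain 2D buffer, mirroring the dumb-create hunk later in this merge (the dimensions are illustrative)::

	struct virtio_gpu_object_params params = { 0 };
	struct virtio_gpu_object *obj;

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = 1024;
	params.height = 768;
	params.size = PAGE_ALIGN(params.width * 4 * params.height);
	params.dumb = true;	/* plain 2D buffer: the virgl-only fields stay zero */

	obj = virtio_gpu_alloc_object(dev, &params, NULL);
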
@@ -27,15 +27,6 @@
#include <drm/drm_fb_helper.h>
#include "virtgpu_drv.h"

#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)

struct virtio_gpu_fbdev {
	struct drm_fb_helper helper;
	struct virtio_gpu_framebuffer vgfb;
	struct virtio_gpu_device *vgdev;
	struct delayed_work work;
};

static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
				   bool store, int x, int y,
				   int width, int height)

@@ -102,7 +93,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,

	offset = (y * fb->base.pitches[0]) + x * bpp;

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj->hw_res_handle,
	virtio_gpu_cmd_transfer_to_host_2d(vgdev, obj,
					   offset,
					   cpu_to_le32(w),
					   cpu_to_le32(h),

@@ -157,199 +148,3 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
				 left, top, right - left, bottom - top);
	return 0;
}

static void virtio_gpu_fb_dirty_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct virtio_gpu_fbdev *vfbdev =
		container_of(delayed_work, struct virtio_gpu_fbdev, work);
	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;

	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
}

static void virtio_gpu_3d_fillrect(struct fb_info *info,
				   const struct fb_fillrect *rect)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_fillrect(info, rect);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
				rect->width, rect->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_copyarea(struct fb_info *info,
				   const struct fb_copyarea *area)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_copyarea(info, area);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
				area->width, area->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static void virtio_gpu_3d_imageblit(struct fb_info *info,
				    const struct fb_image *image)
{
	struct virtio_gpu_fbdev *vfbdev = info->par;

	drm_fb_helper_sys_imageblit(info, image);
	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
				image->width, image->height);
	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
}

static struct fb_ops virtio_gpufb_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_fillrect = virtio_gpu_3d_fillrect,
	.fb_copyarea = virtio_gpu_3d_copyarea,
	.fb_imageblit = virtio_gpu_3d_imageblit,
};

static int virtio_gpu_vmap_fb(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	return virtio_gpu_object_kmap(obj, NULL);
}

static int virtio_gpufb_create(struct drm_fb_helper *helper,
			       struct drm_fb_helper_surface_size *sizes)
{
	struct virtio_gpu_fbdev *vfbdev =
		container_of(helper, struct virtio_gpu_fbdev, helper);
	struct drm_device *dev = helper->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct virtio_gpu_object *obj;
	uint32_t resid, format, size;
	int ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * 4;
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);

	format = virtio_gpu_translate_format(mode_cmd.pixel_format);
	if (format == 0)
		return -EINVAL;

	size = mode_cmd.pitches[0] * mode_cmd.height;
	obj = virtio_gpu_alloc_object(dev, size, false, true);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       mode_cmd.width, mode_cmd.height);

	ret = virtio_gpu_vmap_fb(vgdev, obj);
	if (ret) {
		DRM_ERROR("failed to vmap fb %d\n", ret);
		goto err_obj_vmap;
	}

	/* attach the object to the resource */
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto err_obj_attach;

	info = drm_fb_helper_alloc_fbi(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto err_fb_alloc;
	}

	info->par = helper;

	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
					  &mode_cmd, &obj->gem_base);
	if (ret)
		goto err_fb_alloc;

	fb = &vfbdev->vgfb.base;

	vfbdev->helper.fb = fb;

	strcpy(info->fix.id, "virtiodrmfb");
	info->fbops = &virtio_gpufb_ops;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;

	info->screen_buffer = obj->vmap;
	info->screen_size = obj->gem_base.size;
	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(info, &vfbdev->helper,
			       sizes->fb_width, sizes->fb_height);

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	return 0;

err_fb_alloc:
	virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
err_obj_attach:
err_obj_vmap:
	virtio_gpu_gem_free_object(&obj->gem_base);
	return ret;
}

static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
				    struct virtio_gpu_fbdev *vgfbdev)
{
	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;

	drm_fb_helper_unregister_fbi(&vgfbdev->helper);

	if (vgfb->base.obj[0])
		vgfb->base.obj[0] = NULL;
	drm_fb_helper_fini(&vgfbdev->helper);
	drm_framebuffer_cleanup(&vgfb->base);

	return 0;
}
static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
	.fb_probe = virtio_gpufb_create,
};

int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fbdev *vgfbdev;
	int bpp_sel = 32; /* TODO: parameter from somewhere? */
	int ret;

	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
	if (!vgfbdev)
		return -ENOMEM;

	vgfbdev->vgdev = vgdev;
	vgdev->vgfbdev = vgfbdev;
	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);

	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
			      &virtio_gpu_fb_helper_funcs);
	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
				 VIRTIO_GPUFB_CONN_LIMIT);
	if (ret) {
		kfree(vgfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
	return 0;
}

void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
{
	if (!vgdev->vgfbdev)
		return;

	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
	kfree(vgdev->vgfbdev);
	vgdev->vgfbdev = NULL;
}
@@ -24,6 +24,7 @@
 */

#include <drm/drmP.h>
#include <trace/events/dma_fence.h>
#include "virtgpu_drv.h"

static const char *virtio_get_driver_name(struct dma_fence *f)

@@ -36,20 +37,18 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
	return "controlq";
}

static bool virtio_signaled(struct dma_fence *f)
bool virtio_fence_signaled(struct dma_fence *f)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	if (atomic64_read(&fence->drv->last_seq) >= fence->seq)
	if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
		return true;
	return false;
}

static void virtio_fence_value_str(struct dma_fence *f, char *str, int size)
{
	struct virtio_gpu_fence *fence = to_virtio_fence(f);

	snprintf(str, size, "%llu", fence->seq);
	snprintf(str, size, "%llu", (long long unsigned int) f->seqno);
}

static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)

@@ -62,34 +61,47 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
	.get_driver_name = virtio_get_driver_name,
	.get_timeline_name = virtio_get_timeline_name,
	.signaled = virtio_signaled,
	.signaled = virtio_fence_signaled,
	.fence_value_str = virtio_fence_value_str,
	.timeline_value_str = virtio_timeline_value_str,
};

int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
						 GFP_KERNEL);
	if (!fence)
		return fence;

	fence->drv = drv;

	/* This only partially initializes the fence because the seqno is
	 * unknown yet. The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);

	return fence;
}

void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			   struct virtio_gpu_ctrl_hdr *cmd_hdr,
			   struct virtio_gpu_fence **fence)
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
	if ((*fence) == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&drv->lock, irq_flags);
	(*fence)->drv = drv;
	(*fence)->seq = ++drv->sync_seq;
	dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
		       drv->context, (*fence)->seq);
	dma_fence_get(&(*fence)->f);
	list_add_tail(&(*fence)->node, &drv->fences);
	fence->f.seqno = ++drv->sync_seq;
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
	return 0;
	cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno);
}

void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,

@@ -102,7 +114,7 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
	spin_lock_irqsave(&drv->lock, irq_flags);
	atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
	list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
		if (last_seq < fence->seq)
		if (last_seq < fence->f.seqno)
			continue;
		dma_fence_signal_locked(&fence->f);
		list_del(&fence->node);
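
The rewrite above splits fence creation in two: allocation happens up front where GFP_KERNEL is allowed and can fail, and the seqno is only assigned under the driver lock when the command is actually emitted. A minimal sketch of a caller, with cmd_hdr standing in for an already-prepared command header::

	struct virtio_gpu_fence *fence;

	fence = virtio_gpu_fence_alloc(vgdev);	/* may sleep, may fail */
	if (!fence)
		return -ENOMEM;

	/* ... build the command described by cmd_hdr ... */

	/* Assigns fence->f.seqno and tags the command as fenced. */
	virtio_gpu_fence_emit(vgdev, cmd_hdr, fence);
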
@@ -34,15 +34,16 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
		virtio_gpu_object_unref(&obj);
}

struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
						  size_t size, bool kernel,
						  bool pinned)
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
	if (ret)
		return ERR_PTR(ret);

@@ -51,7 +52,7 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,

int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  uint64_t size,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{

@@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, size, false, false);
	obj = virtio_gpu_alloc_object(dev, params, NULL);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

@@ -82,35 +83,25 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;
	uint32_t resid;
	uint32_t format;

	pitch = args->width * ((args->bpp + 1) / 8);
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	format = virtio_gpu_translate_format(DRM_FORMAT_XRGB8888);
	virtio_gpu_resource_id_get(vgdev, &resid);
	virtio_gpu_cmd_create_resource(vgdev, resid, format,
				       args->width, args->height);

	/* attach the object to the resource */
	obj = gem_to_virtio_gpu_obj(gobj);
	ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
	if (ret)
		goto fail;

	obj->dumb = true;
	args->pitch = pitch;
	return ret;
@ -28,6 +28,7 @@
|
|||
#include <drm/drmP.h>
|
||||
#include <drm/virtgpu_drm.h>
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
#include <linux/sync_file.h>
|
||||
|
||||
#include "virtgpu_drv.h"
|
||||
|
||||
|
@ -53,8 +54,8 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
|
|||
&virtio_gpu_map->offset);
|
||||
}
|
||||
|
||||
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *head)
|
||||
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = { false, false };
|
||||
struct ttm_validate_buffer *buf;
|
||||
|
@ -78,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void virtio_gpu_unref_list(struct list_head *head)
|
||||
void virtio_gpu_unref_list(struct list_head *head)
|
||||
{
|
||||
struct ttm_validate_buffer *buf;
|
||||
struct ttm_buffer_object *bo;
|
||||
|
@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
|
||||
struct drm_gem_object *gobj;
|
||||
struct virtio_gpu_fence *fence;
|
||||
struct virtio_gpu_fence *out_fence;
|
||||
struct virtio_gpu_object *qobj;
|
||||
int ret;
|
||||
uint32_t *bo_handles = NULL;
|
||||
|
@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
struct ttm_validate_buffer *buflist = NULL;
|
||||
int i;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct sync_file *sync_file;
|
||||
int in_fence_fd = exbuf->fence_fd;
|
||||
int out_fence_fd = -1;
|
||||
void *buf;
|
||||
|
||||
if (vgdev->has_virgl_3d == false)
|
||||
return -ENOSYS;
|
||||
|
||||
if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
|
||||
return -EINVAL;
|
||||
|
||||
exbuf->fence_fd = -1;
|
||||
|
||||
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
|
||||
struct dma_fence *in_fence;
|
||||
|
||||
in_fence = sync_file_get_fence(in_fence_fd);
|
||||
|
||||
if (!in_fence)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Wait if the fence is from a foreign context, or if the fence
|
||||
* array contains any fence from a foreign context.
|
||||
*/
|
||||
ret = 0;
|
||||
if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
|
||||
ret = dma_fence_wait(in_fence, true);
|
||||
|
||||
dma_fence_put(in_fence);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
|
||||
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
|
||||
if (out_fence_fd < 0)
|
||||
return out_fence_fd;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&validate_list);
|
||||
if (exbuf->num_bo_handles) {
|
||||
|
||||
|
@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
sizeof(struct ttm_validate_buffer),
|
||||
GFP_KERNEL | __GFP_ZERO);
|
||||
if (!bo_handles || !buflist) {
|
||||
kvfree(bo_handles);
|
||||
kvfree(buflist);
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto out_unused_fd;
|
||||
}
|
||||
|
||||
user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
|
||||
user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
|
||||
if (copy_from_user(bo_handles, user_bo_handles,
|
||||
exbuf->num_bo_handles * sizeof(uint32_t))) {
|
||||
ret = -EFAULT;
|
||||
kvfree(bo_handles);
|
||||
kvfree(buflist);
|
||||
return ret;
|
||||
goto out_unused_fd;
|
||||
}
|
||||
|
||||
for (i = 0; i < exbuf->num_bo_handles; i++) {
|
||||
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
|
||||
if (!gobj) {
|
||||
kvfree(bo_handles);
|
||||
kvfree(buflist);
|
||||
return -ENOENT;
|
||||
ret = -ENOENT;
|
||||
goto out_unused_fd;
|
||||
}
|
||||
|
||||
qobj = gem_to_virtio_gpu_obj(gobj);
|
||||
|
@ -156,34 +188,60 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
|
|||
list_add(&buflist[i].head, &validate_list);
|
||||
}
|
||||
kvfree(bo_handles);
|
||||
bo_handles = NULL;
|
||||
}
|
||||
|
||||
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
|
||||
exbuf->size);
|
||||
buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
|
||||
if (IS_ERR(buf)) {
|
||||
ret = PTR_ERR(buf);
|
||||
goto out_unresv;
|
||||
}
|
||||
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
|
||||
vfpriv->ctx_id, &fence);
|
||||
|
||||
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
|
||||
out_fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if(!out_fence) {
|
||||
ret = -ENOMEM;
|
||||
goto out_memdup;
|
||||
}
|
||||
|
||||
if (out_fence_fd >= 0) {
|
||||
sync_file = sync_file_create(&out_fence->f);
|
||||
if (!sync_file) {
|
||||
dma_fence_put(&out_fence->f);
|
||||
ret = -ENOMEM;
|
||||
goto out_memdup;
|
||||
}
|
||||
|
||||
exbuf->fence_fd = out_fence_fd;
|
||||
fd_install(out_fence_fd, sync_file->file);
|
||||
}
|
||||
|
||||
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
|
||||
vfpriv->ctx_id, out_fence);
|
||||
|
||||
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
|
||||
|
||||
/* fence the command bo */
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
kvfree(buflist);
|
||||
dma_fence_put(&fence->f);
|
||||
return 0;
|
||||
|
||||
out_memdup:
|
||||
kfree(buf);
|
||||
out_unresv:
|
||||
ttm_eu_backoff_reservation(&ticket, &validate_list);
|
||||
out_free:
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
out_unused_fd:
|
||||
kvfree(bo_handles);
|
||||
kvfree(buflist);
|
||||
|
||||
if (out_fence_fd >= 0)
|
||||
put_unused_fd(out_fence_fd);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -204,10 +262,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
|
|||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if (copy_to_user((void __user *)(unsigned long)param->value,
|
||||
&value, sizeof(int))) {
|
||||
if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -216,17 +273,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
|
|||
{
|
||||
struct virtio_gpu_device *vgdev = dev->dev_private;
|
||||
struct drm_virtgpu_resource_create *rc = data;
|
||||
struct virtio_gpu_fence *fence;
|
||||
int ret;
|
||||
uint32_t res_id;
|
||||
struct virtio_gpu_object *qobj;
|
||||
struct drm_gem_object *obj;
|
||||
uint32_t handle = 0;
|
||||
uint32_t size;
|
||||
struct list_head validate_list;
|
||||
struct ttm_validate_buffer mainbuf;
|
||||
struct virtio_gpu_fence *fence = NULL;
|
||||
struct ww_acquire_ctx ticket;
|
||||
struct virtio_gpu_resource_create_3d rc_3d;
|
||||
struct virtio_gpu_object_params params = { 0 };
|
||||
|
||||
if (vgdev->has_virgl_3d == false) {
|
||||
if (rc->depth > 1)
|
||||
|
@ -241,94 +293,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&validate_list);
|
||||
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
|
||||
|
||||
virtio_gpu_resource_id_get(vgdev, &res_id);
|
||||
|
||||
size = rc->size;
|
||||
|
||||
params.format = rc->format;
|
||||
params.width = rc->width;
|
||||
params.height = rc->height;
|
||||
params.size = rc->size;
|
||||
if (vgdev->has_virgl_3d) {
|
||||
params.virgl = true;
|
||||
params.target = rc->target;
|
||||
params.bind = rc->bind;
|
||||
params.depth = rc->depth;
|
||||
params.array_size = rc->array_size;
|
||||
params.last_level = rc->last_level;
|
||||
params.nr_samples = rc->nr_samples;
|
||||
params.flags = rc->flags;
|
||||
}
|
||||
/* allocate a single page size object */
|
||||
if (size == 0)
|
||||
size = PAGE_SIZE;
|
||||
if (params.size == 0)
|
||||
params.size = PAGE_SIZE;
|
||||
|
||||
qobj = virtio_gpu_alloc_object(dev, size, false, false);
|
||||
if (IS_ERR(qobj)) {
|
||||
ret = PTR_ERR(qobj);
|
||||
goto fail_id;
|
||||
}
|
||||
fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if (!fence)
|
||||
return -ENOMEM;
|
||||
qobj = virtio_gpu_alloc_object(dev, ¶ms, fence);
|
||||
dma_fence_put(&fence->f);
|
||||
if (IS_ERR(qobj))
|
||||
return PTR_ERR(qobj);
|
||||
obj = &qobj->gem_base;
|
||||
|
||||
if (!vgdev->has_virgl_3d) {
|
||||
virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
|
||||
rc->width, rc->height);
|
||||
|
||||
ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
|
||||
} else {
|
||||
/* use a gem reference since unref list undoes them */
|
||||
drm_gem_object_get(&qobj->gem_base);
|
||||
mainbuf.bo = &qobj->tbo;
|
||||
list_add(&mainbuf.head, &validate_list);
|
||||
|
||||
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
|
||||
if (ret) {
|
||||
DRM_DEBUG("failed to validate\n");
|
||||
goto fail_unref;
|
||||
}
|
||||
|
||||
rc_3d.resource_id = cpu_to_le32(res_id);
|
||||
rc_3d.target = cpu_to_le32(rc->target);
|
||||
rc_3d.format = cpu_to_le32(rc->format);
|
||||
rc_3d.bind = cpu_to_le32(rc->bind);
|
||||
rc_3d.width = cpu_to_le32(rc->width);
|
||||
rc_3d.height = cpu_to_le32(rc->height);
|
||||
rc_3d.depth = cpu_to_le32(rc->depth);
|
||||
rc_3d.array_size = cpu_to_le32(rc->array_size);
|
||||
rc_3d.last_level = cpu_to_le32(rc->last_level);
|
||||
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
|
||||
rc_3d.flags = cpu_to_le32(rc->flags);
|
||||
|
||||
virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
|
||||
ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
|
||||
if (ret) {
|
||||
ttm_eu_backoff_reservation(&ticket, &validate_list);
|
||||
goto fail_unref;
|
||||
}
|
||||
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
|
||||
}
|
||||
|
||||
qobj->hw_res_handle = res_id;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, obj, &handle);
|
||||
if (ret) {
|
||||
|
||||
drm_gem_object_release(obj);
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
drm_gem_object_put_unlocked(obj);
|
||||
|
||||
rc->res_handle = res_id; /* similiar to a VM address */
|
||||
rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
|
||||
rc->bo_handle = handle;
|
||||
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
return 0;
|
||||
fail_unref:
|
||||
if (vgdev->has_virgl_3d) {
|
||||
virtio_gpu_unref_list(&validate_list);
|
||||
dma_fence_put(&fence->f);
|
||||
}
|
||||
//fail_obj:
|
||||
// drm_gem_object_handle_unreference_unlocked(obj);
|
||||
fail_id:
|
||||
virtio_gpu_resource_id_put(vgdev, res_id);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
|
||||
|
@ -383,10 +384,16 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
|
|||
goto out_unres;
|
||||
|
||||
convert_to_hw_box(&box, &args->box);
|
||||
|
||||
fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if (!fence) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unres;
|
||||
}
|
||||
virtio_gpu_cmd_transfer_from_host_3d
|
||||
(vgdev, qobj->hw_res_handle,
|
||||
vfpriv->ctx_id, offset, args->level,
|
||||
&box, &fence);
|
||||
&box, fence);
|
||||
reservation_object_add_excl_fence(qobj->tbo.resv,
|
||||
&fence->f);
|
||||
|
||||
|
@ -429,13 +436,18 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
|
|||
convert_to_hw_box(&box, &args->box);
|
||||
if (!vgdev->has_virgl_3d) {
|
||||
virtio_gpu_cmd_transfer_to_host_2d
|
||||
(vgdev, qobj->hw_res_handle, offset,
|
||||
(vgdev, qobj, offset,
|
||||
box.w, box.h, box.x, box.y, NULL);
|
||||
} else {
|
||||
fence = virtio_gpu_fence_alloc(vgdev);
|
||||
if (!fence) {
|
||||
ret = -ENOMEM;
|
||||
goto out_unres;
|
||||
}
|
||||
virtio_gpu_cmd_transfer_to_host_3d
|
||||
(vgdev, qobj->hw_res_handle,
|
||||
(vgdev, qobj,
|
||||
vfpriv ? vfpriv->ctx_id : 0, offset,
|
||||
args->level, &box, &fence);
|
||||
args->level, &box, fence);
|
||||
reservation_object_add_excl_fence(qobj->tbo.resv,
|
||||
&fence->f);
|
||||
dma_fence_put(&fence->f);
|
||||
|
@ -512,7 +524,6 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
|
|||
list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
|
||||
if (cache_ent->id == args->cap_set_id &&
|
||||
cache_ent->version == args->cap_set_ver) {
|
||||
ptr = cache_ent->caps_cache;
|
||||
spin_unlock(&vgdev->display_info_lock);
|
||||
goto copy_exit;
|
||||
}
|
||||
|
@ -523,6 +534,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
|
|||
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
|
||||
&cache_ent);
|
||||
|
||||
copy_exit:
|
||||
ret = wait_event_timeout(vgdev->resp_wq,
|
||||
atomic_read(&cache_ent->is_valid), 5 * HZ);
|
||||
if (!ret)
|
||||
|
@ -533,8 +545,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
|
|||
|
||||
ptr = cache_ent->caps_cache;
|
||||
|
||||
copy_exit:
|
||||
if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
|
||||
if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
|
@ -542,34 +553,34 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
|
|||
|
||||
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
|
||||
virtio_gpu_resource_create_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
/* make transfer async to the main ring? - no sure, can we
|
||||
* thread these in the underlying GL
|
||||
*/
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
|
||||
virtio_gpu_transfer_from_host_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
|
||||
virtio_gpu_transfer_to_host_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
|
||||
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
|
||||
DRM_AUTH | DRM_RENDER_ALLOW),
|
||||
};
|
||||
|
|
|
@@ -28,11 +28,6 @@
 #include <drm/drmP.h>
 #include "virtgpu_drv.h"
 
-static int virtio_gpu_fbdev = 1;
-
-MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
-module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);
-
 static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 {
 	struct virtio_gpu_device *vgdev =
@@ -44,6 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 	virtio_cread(vgdev->vdev, struct virtio_gpu_config,
		     events_read, &events_read);
 	if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+		if (vgdev->has_edid)
+			virtio_gpu_cmd_get_edids(vgdev);
 		virtio_gpu_cmd_get_display_info(vgdev);
 		drm_helper_hpd_irq_event(vgdev->ddev);
 		events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
@@ -52,39 +49,23 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
		      events_clear, &events_clear);
 }
 
-static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
-				  uint32_t *resid)
+static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+				     uint32_t nlen, const char *name)
 {
-	int handle;
+	int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
 
-	idr_preload(GFP_KERNEL);
-	spin_lock(&vgdev->ctx_id_idr_lock);
-	handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
-	spin_unlock(&vgdev->ctx_id_idr_lock);
-	idr_preload_end();
-	*resid = handle;
-}
-
-static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
-	spin_lock(&vgdev->ctx_id_idr_lock);
-	idr_remove(&vgdev->ctx_id_idr, id);
-	spin_unlock(&vgdev->ctx_id_idr_lock);
-}
-
-static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
-				      uint32_t nlen, const char *name,
-				      uint32_t *ctx_id)
-{
-	virtio_gpu_ctx_id_get(vgdev, ctx_id);
-	virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+	if (handle < 0)
+		return handle;
+	handle += 1;
+	virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
+	return handle;
 }
 
 static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
				       uint32_t ctx_id)
 {
 	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-	virtio_gpu_ctx_id_put(vgdev, ctx_id);
+	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
 }
 
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
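The hunk above swaps the driver's IDR-based context-ID allocator for the simpler IDA API, which only hands out unique integers and does its own internal locking (so the removed spinlock is no longer needed). A minimal, hedged sketch of the pattern, with hypothetical example_* names that are not part of the patch; ida_alloc() returns the smallest free ID starting at 0, hence the +1/-1 shift for virtio's 1-based context IDs:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_id_get(void)
{
	/* ida_alloc() hands out the smallest free ID, starting at 0 */
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */
	return id + 1;		/* shift into the 1-based ID space */
}

static void example_id_put(int id)
{
	ida_free(&example_ida, id - 1);	/* undo the shift before freeing */
}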
@@ -151,10 +132,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	vgdev->dev = dev->dev;
 
 	spin_lock_init(&vgdev->display_info_lock);
-	spin_lock_init(&vgdev->ctx_id_idr_lock);
-	idr_init(&vgdev->ctx_id_idr);
-	spin_lock_init(&vgdev->resource_idr_lock);
-	idr_init(&vgdev->resource_idr);
+	ida_init(&vgdev->ctx_id_ida);
+	ida_init(&vgdev->resource_ida);
 	init_waitqueue_head(&vgdev->resp_wq);
 	virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
 	virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
@@ -174,6 +153,10 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 #else
 	DRM_INFO("virgl 3d acceleration not supported by guest\n");
 #endif
+	if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+		vgdev->has_edid = true;
+		DRM_INFO("EDID support available.\n");
+	}
 
 	ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
 	if (ret) {
@@ -219,12 +202,11 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 
 	if (num_capsets)
 		virtio_gpu_get_capsets(vgdev, num_capsets);
+	if (vgdev->has_edid)
+		virtio_gpu_cmd_get_edids(vgdev);
 	virtio_gpu_cmd_get_display_info(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
			   5 * HZ);
-	if (virtio_gpu_fbdev)
-		virtio_gpu_fbdev_init(vgdev);
 
 	return 0;
 
 err_modeset:
@@ -257,6 +239,7 @@ void virtio_gpu_driver_unload(struct drm_device *dev)
 	flush_work(&vgdev->ctrlq.dequeue_work);
 	flush_work(&vgdev->cursorq.dequeue_work);
+	flush_work(&vgdev->config_changed_work);
 	vgdev->vdev->config->reset(vgdev->vdev);
 	vgdev->vdev->config->del_vqs(vgdev->vdev);
 
 	virtio_gpu_modeset_fini(vgdev);
@@ -271,7 +254,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv;
-	uint32_t id;
+	int id;
 	char dbgname[TASK_COMM_LEN];
 
 	/* can't create contexts without 3d renderer */
@@ -284,7 +267,11 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 		return -ENOMEM;
 
 	get_task_comm(dbgname, current);
-	virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+	id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
+	if (id < 0) {
+		kfree(vfpriv);
+		return id;
+	}
 
 	vfpriv->ctx_id = id;
 	file->driver_priv = vfpriv;

@@ -23,8 +23,40 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <drm/ttm/ttm_execbuf_util.h>
+
 #include "virtgpu_drv.h"
 
+static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+				      uint32_t *resid)
+{
+#if 0
+	int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
+	if (handle < 0)
+		return handle;
+#else
+	static int handle;
+
+	/*
+	 * FIXME: dirty hack to avoid re-using IDs, virglrenderer
+	 * can't deal with that.  Needs fixing in virglrenderer, also
+	 * should figure a better way to handle that in the guest.
+	 */
+	handle++;
+#endif
+
+	*resid = handle + 1;
+	return 0;
+}
+
+static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+#if 0
+	ida_free(&vgdev->resource_ida, id - 1);
+#endif
+}
+
 static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
 	struct virtio_gpu_object *bo;
@@ -33,88 +65,130 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 	bo = container_of(tbo, struct virtio_gpu_object, tbo);
 	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
 
-	if (bo->hw_res_handle)
+	if (bo->created)
 		virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
 	if (bo->pages)
 		virtio_gpu_object_free_sg_table(bo);
 	drm_gem_object_release(&bo->gem_base);
+	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 	kfree(bo);
 }
 
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
-					  bool pinned)
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
 {
 	u32 c = 1;
-	u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
 
 	vgbo->placement.placement = &vgbo->placement_code;
 	vgbo->placement.busy_placement = &vgbo->placement_code;
 	vgbo->placement_code.fpfn = 0;
 	vgbo->placement_code.lpfn = 0;
 	vgbo->placement_code.flags =
-		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+		TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
+		TTM_PL_FLAG_NO_EVICT;
 	vgbo->placement.num_placement = c;
 	vgbo->placement.num_busy_placement = c;
 
 }
 
 int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
-			     unsigned long size, bool kernel, bool pinned,
-			     struct virtio_gpu_object **bo_ptr)
+			     struct virtio_gpu_object_params *params,
+			     struct virtio_gpu_object **bo_ptr,
+			     struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_object *bo;
-	enum ttm_bo_type type;
 	size_t acc_size;
 	int ret;
 
-	if (kernel)
-		type = ttm_bo_type_kernel;
-	else
-		type = ttm_bo_type_device;
 	*bo_ptr = NULL;
 
-	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
				       sizeof(struct virtio_gpu_object));
 
 	bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
-	size = roundup(size, PAGE_SIZE);
-	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
-	if (ret != 0) {
+	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+	if (ret < 0) {
 		kfree(bo);
 		return ret;
 	}
-	bo->dumb = false;
-	virtio_gpu_init_ttm_placement(bo, pinned);
+	params->size = roundup(params->size, PAGE_SIZE);
+	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
+	if (ret != 0) {
+		virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+		kfree(bo);
+		return ret;
+	}
+	bo->dumb = params->dumb;
 
-	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
-			  &bo->placement, 0, !kernel, acc_size,
-			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+	if (params->virgl) {
+		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+	} else {
+		virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+	}
+
+	virtio_gpu_init_ttm_placement(bo);
+	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
+			  ttm_bo_type_device, &bo->placement, 0,
+			  true, acc_size, NULL, NULL,
+			  &virtio_gpu_ttm_bo_destroy);
 	/* ttm_bo_init failure will call the destroy */
 	if (ret != 0)
 		return ret;
 
+	if (fence) {
+		struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+		struct list_head validate_list;
+		struct ttm_validate_buffer mainbuf;
+		struct ww_acquire_ctx ticket;
+		unsigned long irq_flags;
+		bool signaled;
+
+		INIT_LIST_HEAD(&validate_list);
+		memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+		/* use a gem reference since unref list undoes them */
+		drm_gem_object_get(&bo->gem_base);
+		mainbuf.bo = &bo->tbo;
+		list_add(&mainbuf.head, &validate_list);
+
+		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+		if (ret == 0) {
+			spin_lock_irqsave(&drv->lock, irq_flags);
+			signaled = virtio_fence_signaled(&fence->f);
+			if (!signaled)
+				/* virtio create command still in flight */
+				ttm_eu_fence_buffer_objects(&ticket, &validate_list,
+							    &fence->f);
+			spin_unlock_irqrestore(&drv->lock, irq_flags);
+			if (signaled)
+				/* virtio create command finished */
+				ttm_eu_backoff_reservation(&ticket, &validate_list);
+		}
+		virtio_gpu_unref_list(&validate_list);
+	}
+
 	*bo_ptr = bo;
 	return 0;
 }
 
-int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
+void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo)
 {
+	bo->vmap = NULL;
+	ttm_bo_kunmap(&bo->kmap);
+}
+
+int virtio_gpu_object_kmap(struct virtio_gpu_object *bo)
+{
 	bool is_iomem;
 	int r;
 
-	if (bo->vmap) {
-		if (ptr)
-			*ptr = bo->vmap;
-		return 0;
-	}
+	WARN_ON(bo->vmap);
 
 	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r)
 		return r;
 	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
-	if (ptr)
-		*ptr = bo->vmap;
 	return 0;
 }

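The struct virtio_gpu_object_params type used throughout the hunks above is declared in a header that is not part of this excerpt. The following reconstruction is an assumption inferred purely from the fields these hunks read (size, dumb, virgl, and the 2D/3D resource fields filled into the create commands in virtgpu_vq.c below), not a verbatim copy of the header change:

/* Assumed shape only, inferred from the usage visible in this diff. */
struct virtio_gpu_object_params {
	uint32_t format;
	uint32_t width;
	uint32_t height;
	unsigned long size;
	bool dumb;
	/* 3d resources only (params->virgl == true) */
	bool virgl;
	uint32_t target;
	uint32_t bind;
	uint32_t depth;
	uint32_t array_size;
	uint32_t last_level;
	uint32_t nr_samples;
	uint32_t flags;
};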
@@ -152,13 +152,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
 	if (WARN_ON(!output))
 		return;
 
-	if (plane->state->fb) {
+	if (plane->state->fb && output->enabled) {
 		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
 		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
 		handle = bo->hw_res_handle;
 		if (bo->dumb) {
 			virtio_gpu_cmd_transfer_to_host_2d
-				(vgdev, handle, 0,
+				(vgdev, bo, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 cpu_to_le32(plane->state->src_x >> 16),
@@ -180,11 +180,49 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
				      plane->state->src_h >> 16,
				      plane->state->src_x >> 16,
				      plane->state->src_y >> 16);
-	virtio_gpu_cmd_resource_flush(vgdev, handle,
-				      plane->state->src_x >> 16,
-				      plane->state->src_y >> 16,
-				      plane->state->src_w >> 16,
-				      plane->state->src_h >> 16);
+	if (handle)
+		virtio_gpu_cmd_resource_flush(vgdev, handle,
+					      plane->state->src_x >> 16,
+					      plane->state->src_y >> 16,
+					      plane->state->src_w >> 16,
+					      plane->state->src_h >> 16);
 }
 
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+					struct drm_plane_state *new_state)
+{
+	struct drm_device *dev = plane->dev;
+	struct virtio_gpu_device *vgdev = dev->dev_private;
+	struct virtio_gpu_framebuffer *vgfb;
+	struct virtio_gpu_object *bo;
+
+	if (!new_state->fb)
+		return 0;
+
+	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+		if (!vgfb->fence)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+					 struct drm_plane_state *old_state)
+{
+	struct virtio_gpu_framebuffer *vgfb;
+
+	if (!plane->state->fb)
+		return;
+
+	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+	if (vgfb->fence) {
+		dma_fence_put(&vgfb->fence->f);
+		vgfb->fence = NULL;
+	}
+}
+
 static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
@@ -194,7 +232,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_output *output = NULL;
 	struct virtio_gpu_framebuffer *vgfb;
-	struct virtio_gpu_fence *fence = NULL;
 	struct virtio_gpu_object *bo = NULL;
 	uint32_t handle;
 	int ret = 0;
@@ -217,16 +254,16 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
 		/* new cursor -- update & wait */
 		virtio_gpu_cmd_transfer_to_host_2d
-			(vgdev, handle, 0,
+			(vgdev, bo, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
-			 0, 0, &fence);
+			 0, 0, vgfb->fence);
 		ret = virtio_gpu_object_reserve(bo, false);
 		if (!ret) {
 			reservation_object_add_excl_fence(bo->tbo.resv,
-							  &fence->f);
-			dma_fence_put(&fence->f);
-			fence = NULL;
+							  &vgfb->fence->f);
+			dma_fence_put(&vgfb->fence->f);
+			vgfb->fence = NULL;
 			virtio_gpu_object_unreserve(bo);
 			virtio_gpu_object_wait(bo, false);
 		}
@@ -268,6 +305,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
 };
 
 static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+	.prepare_fb = virtio_gpu_cursor_prepare_fb,
+	.cleanup_fb = virtio_gpu_cursor_cleanup_fb,
 	.atomic_check = virtio_gpu_plane_atomic_check,
 	.atomic_update = virtio_gpu_cursor_plane_update,
 };

@@ -28,21 +28,16 @@
  * device that might share buffers with virtgpu
  */
 
 int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
 {
 	WARN_ONCE(1, "not implemented");
 	return -ENODEV;
 }
 
 void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
 {
 	WARN_ONCE(1, "not implemented");
 }
 
 struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENODEV);
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
+		/* should not happen */
+		return ERR_PTR(-EINVAL);
+
+	return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+				     bo->tbo.ttm->num_pages);
 }
 
 struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
@@ -55,17 +50,25 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENODEV);
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+	int ret;
+
+	ret = virtio_gpu_object_kmap(bo);
+	if (ret)
+		return NULL;
+	return bo->vmap;
 }
 
 void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-	WARN_ONCE(1, "not implemented");
+	virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj));
 }
 
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			   struct vm_area_struct *area)
+			   struct vm_area_struct *vma)
 {
-	return -ENODEV;
+	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+	bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
+	return drm_gem_prime_mmap(obj, vma);
 }

drivers/gpu/drm/virtio/virtgpu_trace.h (new file, 52 lines)

@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_VIRTGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VIRTGPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM virtio_gpu
+#define TRACE_INCLUDE_FILE virtgpu_trace
+
+DECLARE_EVENT_CLASS(virtio_gpu_cmd,
+	TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+	TP_ARGS(vq, hdr),
+	TP_STRUCT__entry(
+			 __field(int, dev)
+			 __field(unsigned int, vq)
+			 __field(const char *, name)
+			 __field(u32, type)
+			 __field(u32, flags)
+			 __field(u64, fence_id)
+			 __field(u32, ctx_id)
+			 ),
+	TP_fast_assign(
+		       __entry->dev = vq->vdev->index;
+		       __entry->vq = vq->index;
+		       __entry->name = vq->name;
+		       __entry->type = le32_to_cpu(hdr->type);
+		       __entry->flags = le32_to_cpu(hdr->flags);
+		       __entry->fence_id = le64_to_cpu(hdr->fence_id);
+		       __entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+		       ),
+	TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
+		  __entry->dev, __entry->vq, __entry->name,
+		  __entry->type, __entry->flags, __entry->fence_id,
+		  __entry->ctx_id)
+);
+
+DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
+	TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+	TP_ARGS(vq, hdr)
+);
+
+DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
+	TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
+	TP_ARGS(vq, hdr)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/virtio
+#include <trace/define_trace.h>
drivers/gpu/drm/virtio/virtgpu_trace_points.c (new file, 5 lines)

@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+#define CREATE_TRACE_POINTS
+#include "virtgpu_trace.h"

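The two new files above define a virtio_gpu trace event class and stamp out its tracepoints. Consuming a DEFINE_EVENT from driver code is a single generated trace_<event>() call, as the virtgpu_vq.c hunks below do; a short sketch with a hypothetical helper name:

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

/* Hypothetical helper, not in the patch: shows where the generated
 * trace_virtio_gpu_cmd_queue() call sits relative to the virtqueue
 * kick.  The call compiles down to a static-branch no-op unless the
 * event is enabled at runtime.
 */
static void example_queue_and_trace(struct virtqueue *vq,
				    struct virtio_gpu_ctrl_hdr *hdr)
{
	trace_virtio_gpu_cmd_queue(vq, hdr);
	virtqueue_kick(vq);
}

With TRACE_SYSTEM set to virtio_gpu, the events typically appear under events/virtio_gpu/ in tracefs once the module is loaded.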
@@ -106,29 +106,6 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
 	}
 }
 
-#if 0
-/*
- * Hmm, seems to not do anything useful.  Leftover debug hack?
- * Something like printing pagefaults to kernel log?
- */
-static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
-{
-	struct ttm_buffer_object *bo;
-	struct virtio_gpu_device *vgdev;
-	int r;
-
-	bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
-	if (bo == NULL)
-		return VM_FAULT_NOPAGE;
-	vgdev = virtio_gpu_get_vgdev(bo->bdev);
-	r = ttm_vm_ops->fault(vmf);
-	return r;
-}
-#endif
-
 int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv;
@@ -143,19 +120,8 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 	r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-#if 0
-	if (unlikely(r != 0))
-		return r;
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
-		virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
-	}
-	vma->vm_ops = &virtio_gpu_ttm_vm_ops;
-	return 0;
-#else
 
 	return r;
-#endif
 }
 
 static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
@@ -206,10 +172,6 @@ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
 static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				    struct ttm_mem_type_manager *man)
 {
-	struct virtio_gpu_device *vgdev;
-
-	vgdev = virtio_gpu_get_vgdev(bdev);
-
 	switch (type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
@@ -284,42 +246,45 @@ static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
  */
 struct virtio_gpu_ttm_tt {
 	struct ttm_dma_tt		ttm;
-	struct virtio_gpu_device	*vgdev;
-	u64				offset;
+	struct virtio_gpu_object	*obj;
 };
 
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
-				       struct ttm_mem_reg *bo_mem)
+static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
+				  struct ttm_mem_reg *bo_mem)
 {
-	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+	struct virtio_gpu_device *vgdev =
+		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
 
-	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
-	if (!ttm->num_pages)
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
-		     ttm->num_pages, bo_mem, ttm);
-
-	/* Not implemented */
+	virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
 	return 0;
 }
 
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
 {
-	/* Not implemented */
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+	struct virtio_gpu_device *vgdev =
+		virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
+
+	virtio_gpu_object_detach(vgdev, gtt->obj);
 	return 0;
 }
 
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
 {
-	struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+	struct virtio_gpu_ttm_tt *gtt =
+		container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
 
 	ttm_dma_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
 
-static struct ttm_backend_func virtio_gpu_backend_func = {
-	.bind = &virtio_gpu_ttm_backend_bind,
-	.unbind = &virtio_gpu_ttm_backend_unbind,
-	.destroy = &virtio_gpu_ttm_backend_destroy,
+static struct ttm_backend_func virtio_gpu_tt_func = {
+	.bind = &virtio_gpu_ttm_tt_bind,
+	.unbind = &virtio_gpu_ttm_tt_unbind,
+	.destroy = &virtio_gpu_ttm_tt_destroy,
 };
 
 static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -332,8 +297,8 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
 	if (gtt == NULL)
 		return NULL;
-	gtt->ttm.ttm.func = &virtio_gpu_backend_func;
-	gtt->vgdev = vgdev;
+	gtt->ttm.ttm.func = &virtio_gpu_tt_func;
+	gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
 	if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
 		kfree(gtt);
 		return NULL;
@@ -341,60 +306,11 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	return &gtt->ttm.ttm;
 }
 
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
-				 struct ttm_mem_reg *new_mem)
-{
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	BUG_ON(old_mem->mm_node != NULL);
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
-			      struct ttm_operation_ctx *ctx,
-			      struct ttm_mem_reg *new_mem)
-{
-	int ret;
-
-	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-	if (ret)
-		return ret;
-
-	virtio_gpu_move_null(bo, new_mem);
-	return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
-				      bool evict,
-				      struct ttm_mem_reg *new_mem)
-{
-	struct virtio_gpu_object *bo;
-	struct virtio_gpu_device *vgdev;
-
-	bo = container_of(tbo, struct virtio_gpu_object, tbo);
-	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
-	if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
-		if (bo->hw_res_handle)
-			virtio_gpu_cmd_resource_inval_backing(vgdev,
-							      bo->hw_res_handle);
-
-	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
-		if (bo->hw_res_handle) {
-			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
-						 NULL);
-		}
-	}
-}
-
 static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
 {
 	struct virtio_gpu_object *bo;
 	struct virtio_gpu_device *vgdev;
 
 	bo = container_of(tbo, struct virtio_gpu_object, tbo);
 	vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
 
 	if (bo->pages)
 		virtio_gpu_object_free_sg_table(bo);
@@ -406,11 +322,9 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
 	.init_mem_type = &virtio_gpu_init_mem_type,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = &virtio_gpu_evict_flags,
-	.move = &virtio_gpu_bo_move,
 	.verify_access = &virtio_gpu_verify_access,
 	.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
 	.io_mem_free = &virtio_gpu_ttm_io_mem_free,
-	.move_notify = &virtio_gpu_bo_move_notify,
 	.swap_notify = &virtio_gpu_bo_swap_notify,
 };

@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
@@ -38,26 +39,6 @@
		       + MAX_INLINE_CMD_SIZE \
		       + MAX_INLINE_RESP_SIZE)
 
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
-				uint32_t *resid)
-{
-	int handle;
-
-	idr_preload(GFP_KERNEL);
-	spin_lock(&vgdev->resource_idr_lock);
-	handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
-	spin_unlock(&vgdev->resource_idr_lock);
-	idr_preload_end();
-	*resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
-	spin_lock(&vgdev->resource_idr_lock);
-	idr_remove(&vgdev->resource_idr, id);
-	spin_unlock(&vgdev->resource_idr_lock);
-}
-
 void virtio_gpu_ctrl_ack(struct virtqueue *vq)
 {
 	struct drm_device *dev = vq->vdev->priv;
@@ -98,10 +79,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
-	vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
 	if (!vbuf)
 		return ERR_PTR(-ENOMEM);
-	memset(vbuf, 0, VBUFFER_SIZE);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
 	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -213,8 +193,19 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
 
 	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
 		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
-		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA))
-			DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+
+		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+
+		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
+			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+				struct virtio_gpu_ctrl_hdr *cmd;
+				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
+				DRM_ERROR("response 0x%x (command 0x%x)\n",
+					  le32_to_cpu(resp->type),
+					  le32_to_cpu(cmd->type));
+			} else
+				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
+		}
 		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
 			u64 f = le64_to_cpu(resp->fence_id);
 
@@ -297,6 +288,9 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
 		spin_lock(&vgdev->ctrlq.qlock);
 		goto retry;
 	} else {
+		trace_virtio_gpu_cmd_queue(vq,
+			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
 		virtqueue_kick(vq);
 	}
 
@@ -319,7 +313,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
 static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
-					       struct virtio_gpu_fence **fence)
+					       struct virtio_gpu_fence *fence)
 {
 	struct virtqueue *vq = vgdev->ctrlq.vq;
 	int rc;
@@ -372,6 +366,9 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 		spin_lock(&vgdev->cursorq.qlock);
 		goto retry;
 	} else {
+		trace_virtio_gpu_cmd_queue(vq,
+			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+
 		virtqueue_kick(vq);
 	}
 
@@ -388,10 +385,9 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
 
 /* create a basic resource */
 void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
-				    uint32_t resource_id,
-				    uint32_t format,
-				    uint32_t width,
-				    uint32_t height)
+				    struct virtio_gpu_object *bo,
+				    struct virtio_gpu_object_params *params,
+				    struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -400,12 +396,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-	cmd_p->format = cpu_to_le32(format);
-	cmd_p->width = cpu_to_le32(width);
-	cmd_p->height = cpu_to_le32(height);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->format = cpu_to_le32(params->format);
+	cmd_p->width = cpu_to_le32(params->width);
+	cmd_p->height = cpu_to_le32(params->height);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	bo->created = true;
 }
 
 void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -423,8 +420,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-					   uint32_t resource_id)
+static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+						  uint32_t resource_id,
+						  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_detach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +433,7 @@ void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
 	cmd_p->resource_id = cpu_to_le32(resource_id);
 
-	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -482,19 +480,26 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
 }
 
 void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint64_t offset,
+					struct virtio_gpu_object *bo,
+					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_to_host_2d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+	if (use_dma_api)
+		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+				       bo->pages->sgl, bo->pages->nents,
+				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->r.width = width;
 	cmd_p->r.height = height;
@@ -509,7 +514,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
-				       struct virtio_gpu_fence **fence)
+				       struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_attach_backing *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -595,6 +600,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 		}
 	}
 	spin_unlock(&vgdev->display_info_lock);
 	wake_up_all(&vgdev->resp_wq);
 }
 
+static int virtio_get_edid_block(void *data, u8 *buf,
+				 unsigned int block, size_t len)
+{
+	struct virtio_gpu_resp_edid *resp = data;
+	size_t start = block * EDID_LENGTH;
+
+	if (start + len > le32_to_cpu(resp->size))
+		return -1;
+	memcpy(buf, resp->edid + start, len);
+	return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+				       struct virtio_gpu_vbuffer *vbuf)
+{
+	struct virtio_gpu_cmd_get_edid *cmd =
+		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+	struct virtio_gpu_resp_edid *resp =
+		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+	uint32_t scanout = le32_to_cpu(cmd->scanout);
+	struct virtio_gpu_output *output;
+	struct edid *new_edid, *old_edid;
+
+	if (scanout >= vgdev->num_scanouts)
+		return;
+	output = vgdev->outputs + scanout;
+
+	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+	drm_connector_update_edid_property(&output->conn, new_edid);
+
+	spin_lock(&vgdev->display_info_lock);
+	old_edid = output->edid;
+	output->edid = new_edid;
+	spin_unlock(&vgdev->display_info_lock);
+
+	kfree(old_edid);
+	wake_up(&vgdev->resp_wq);
+}
+
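virtio_get_edid_block() above follows the drm_do_get_edid() read-callback contract: copy len bytes of EDID block number block into buf and return 0, or non-zero on failure; the DRM core then takes care of extension blocks and checksum retries. A hedged sketch of another callback satisfying the same contract, for a plain buffer-backed EDID (hypothetical example_* names):

struct example_edid_blob {
	const u8 *data;
	size_t size;
};

/* Same contract as virtio_get_edid_block() above. */
static int example_get_edid_block(void *data, u8 *buf,
				  unsigned int block, size_t len)
{
	struct example_edid_blob *blob = data;
	size_t start = block * EDID_LENGTH;	/* EDID blocks are 128 bytes */

	if (start + len > blob->size)
		return -1;
	memcpy(buf, blob->data + start, len);
	return 0;
}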
@@ -650,11 +694,14 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_get_capset *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	int max_size = vgdev->capsets[idx].max_size;
+	int max_size;
 	struct virtio_gpu_drv_cap_cache *cache_ent;
+	struct virtio_gpu_drv_cap_cache *search_ent;
 	void *resp_buf;
 
-	if (idx > vgdev->num_capsets)
+	*cache_p = NULL;
+
+	if (idx >= vgdev->num_capsets)
 		return -EINVAL;
 
 	if (version > vgdev->capsets[idx].max_version)
@@ -664,6 +711,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	if (!cache_ent)
 		return -ENOMEM;
 
+	max_size = vgdev->capsets[idx].max_size;
 	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 	if (!cache_ent->caps_cache) {
 		kfree(cache_ent);
@@ -683,9 +731,26 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	atomic_set(&cache_ent->is_valid, 0);
 	cache_ent->size = max_size;
 	spin_lock(&vgdev->display_info_lock);
-	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
+	/* Search while under lock in case it was added by another task. */
+	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
+		if (search_ent->id == vgdev->capsets[idx].id &&
+		    search_ent->version == version) {
+			*cache_p = search_ent;
+			break;
+		}
+	}
+	if (!*cache_p)
+		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
 	spin_unlock(&vgdev->display_info_lock);
 
+	if (*cache_p) {
+		/* Entry was found, so free everything that was just created. */
+		kfree(resp_buf);
+		kfree(cache_ent->caps_cache);
+		kfree(cache_ent);
+		return 0;
+	}
+
 	cmd_p = virtio_gpu_alloc_cmd_resp
 		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
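The capset hunk above closes a duplicate-insert race: the cache entry is allocated outside the lock, so a second task may have inserted the same capset meanwhile, and the list is now re-searched under the lock before inserting. A hedged, generic sketch of that allocate-then-check-under-lock pattern (hypothetical example_* names, not from the patch):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ent {
	struct list_head head;
	int id;
};

static LIST_HEAD(example_cache);
static DEFINE_SPINLOCK(example_lock);

/* Returns the cached entry for @id, inserting @new_ent if absent. */
static struct example_ent *example_get_or_insert(struct example_ent *new_ent,
						 int id)
{
	struct example_ent *ent, *found = NULL;

	spin_lock(&example_lock);
	/* Re-check under the lock: another task may have inserted @id
	 * between our allocation and this point.
	 */
	list_for_each_entry(ent, &example_cache, head) {
		if (ent->id == id) {
			found = ent;
			break;
		}
	}
	if (!found)
		list_add_tail(&new_ent->head, &example_cache);
	spin_unlock(&example_lock);

	if (found)
		kfree(new_ent);	/* lost the race; free the unused copy */
	return found ?: new_ent;
}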
@@ -699,6 +764,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	return 0;
 }
 
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+	struct virtio_gpu_cmd_get_edid *cmd_p;
+	struct virtio_gpu_vbuffer *vbuf;
+	void *resp_buf;
+	int scanout;
+
+	if (WARN_ON(!vgdev->has_edid))
+		return -EINVAL;
+
+	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+				   GFP_KERNEL);
+		if (!resp_buf)
+			return -ENOMEM;
+
+		cmd_p = virtio_gpu_alloc_cmd_resp
+			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+			 resp_buf);
+		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+		cmd_p->scanout = cpu_to_le32(scanout);
+		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+	}
+
+	return 0;
+}
+
 void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
 {
@@ -765,8 +858,9 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
 
 void
 virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
-				  struct virtio_gpu_resource_create_3d *rc_3d,
-				  struct virtio_gpu_fence **fence)
+				  struct virtio_gpu_object *bo,
+				  struct virtio_gpu_object_params *params,
+				  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_resource_create_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -774,28 +868,46 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
-	*cmd_p = *rc_3d;
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
-	cmd_p->hdr.flags = 0;
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+	cmd_p->format = cpu_to_le32(params->format);
+	cmd_p->width = cpu_to_le32(params->width);
+	cmd_p->height = cpu_to_le32(params->height);
+
+	cmd_p->target = cpu_to_le32(params->target);
+	cmd_p->bind = cpu_to_le32(params->bind);
+	cmd_p->depth = cpu_to_le32(params->depth);
+	cmd_p->array_size = cpu_to_le32(params->array_size);
+	cmd_p->last_level = cpu_to_le32(params->last_level);
+	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+	cmd_p->flags = cpu_to_le32(params->flags);
 
 	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+	bo->created = true;
 }
 
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
-					uint32_t resource_id, uint32_t ctx_id,
+					struct virtio_gpu_object *bo,
+					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
-					struct virtio_gpu_fence **fence)
+					struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+	if (use_dma_api)
+		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
+				       bo->pages->sgl, bo->pages->nents,
+				       DMA_TO_DEVICE);
 
 	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
 	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
 	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
 	cmd_p->box = *box;
 	cmd_p->offset = cpu_to_le64(offset);
 	cmd_p->level = cpu_to_le32(level);
@@ -807,7 +919,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
-					  struct virtio_gpu_fence **fence)
+					  struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_transfer_host_3d *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -827,7 +939,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
 
 void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
-			   uint32_t ctx_id, struct virtio_gpu_fence **fence)
+			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
 {
 	struct virtio_gpu_cmd_submit *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
@@ -847,12 +959,15 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
 
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
-			     uint32_t resource_id,
-			     struct virtio_gpu_fence **fence)
+			     struct virtio_gpu_fence *fence)
 {
 	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 	struct virtio_gpu_mem_entry *ents;
 	struct scatterlist *sg;
-	int si;
+	int si, nents;
+
+	if (WARN_ON_ONCE(!obj->created))
+		return -EINVAL;
 
 	if (!obj->pages) {
 		int ret;
@@ -862,28 +977,59 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 		return ret;
 	}
 
+	if (use_dma_api) {
+		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+					 obj->pages->sgl, obj->pages->nents,
+					 DMA_TO_DEVICE);
+		nents = obj->mapped;
+	} else {
+		nents = obj->pages->nents;
+	}
+
 	/* gets freed when the ring has consumed it */
-	ents = kmalloc_array(obj->pages->nents,
-			     sizeof(struct virtio_gpu_mem_entry),
+	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
 	if (!ents) {
 		DRM_ERROR("failed to allocate ent list\n");
 		return -ENOMEM;
 	}
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
-		ents[si].addr = cpu_to_le64(sg_phys(sg));
+	for_each_sg(obj->pages->sgl, sg, nents, si) {
+		ents[si].addr = cpu_to_le64(use_dma_api
+					    ? sg_dma_address(sg)
+					    : sg_phys(sg));
 		ents[si].length = cpu_to_le32(sg->length);
 		ents[si].padding = 0;
 	}
 
-	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
-					       ents, obj->pages->nents,
+	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
+					       ents, nents,
					       fence);
-	obj->hw_res_handle = resource_id;
 	return 0;
 }
 
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+			      struct virtio_gpu_object *obj)
+{
+	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+
+	if (use_dma_api && obj->mapped) {
+		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
+		/* detach backing and wait for the host process it ... */
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
+		dma_fence_wait(&fence->f, true);
+		dma_fence_put(&fence->f);
+
+		/* ... then tear down iommu mappings */
+		dma_unmap_sg(vgdev->vdev->dev.parent,
+			     obj->pages->sgl, obj->mapped,
+			     DMA_TO_DEVICE);
+		obj->mapped = 0;
+	} else {
+		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+	}
+}
+
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
 {

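One detail in the attach/detach hunks above: dma_map_sg() may return fewer entries than it was given when an IOMMU coalesces contiguous pages, which is why the patch threads nents (the mapped count) through to the command instead of obj->pages->nents. A hedged sketch of the general map/iterate/unmap shape (hypothetical example_* names; note that, per the DMA API, dma_unmap_sg() wants the original nents, not the returned count):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: map a scatter list for device access, walk the
 * coalesced entries, then unmap.
 */
static int example_map_for_device(struct device *dev, struct sg_table *pages)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, pages->sgl, pages->nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	/* iterate only over the entries the mapping actually produced */
	for_each_sg(pages->sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		pr_debug("ent %d: addr %pad len %u\n", i, &addr, len);
	}

	/* unmap with the original nents, as the DMA API requires */
	dma_unmap_sg(dev, pages->sgl, pages->nents, DMA_TO_DEVICE);
	return 0;
}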
@@ -4,7 +4,6 @@
 
 menuconfig IIO
 	tristate "Industrial I/O support"
-	select ANON_INODES
 	help
	  The industrial I/O subsystem provides a unified framework for
	  drivers for many different types of embedded sensors using a

@@ -25,7 +25,6 @@ config INFINIBAND_USER_MAD
 
 config INFINIBAND_USER_ACCESS
 	tristate "InfiniBand userspace access (verbs and CM)"
-	select ANON_INODES
 	---help---
	  Userspace InfiniBand access support.  This enables the
	  kernel side of userspace verbs and the userspace

@@ -126,7 +126,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
 	res = (void *)pbundle->internal_buffer + pbundle->internal_used;
 	pbundle->internal_used =
		ALIGN(new_used, sizeof(*pbundle->internal_buffer));
-	if (flags & __GFP_ZERO)
+	if (want_init_on_alloc(flags))
		memset(res, 0, size);
 	return res;
 }
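want_init_on_alloc() widens the old `flags & __GFP_ZERO` test so that the init_on_alloc=1 hardening mode zeroes these buffers as well. A simplified sketch of its semantics only; the real helper lives in include/linux/mm.h and uses a static branch, and init_on_alloc_enabled here is a hypothetical stand-in for that static key (on by default with CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y, overridable by the init_on_alloc= boot parameter):

/* Simplified sketch, not the exact kernel implementation. */
static bool init_on_alloc_enabled;

static inline bool want_init_on_alloc(gfp_t flags)
{
	if (init_on_alloc_enabled)	/* init_on_alloc= hardening mode */
		return true;
	return flags & __GFP_ZERO;	/* legacy behaviour */
}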
@@ -412,6 +412,7 @@ config USB_CONFIGFS_F_FS
 config USB_CONFIGFS_F_ACC
 	bool "Accessory gadget"
 	depends on USB_CONFIGFS
+	depends on HID=y
 	select USB_F_ACC
 	help
	  USB gadget Accessory support

@@ -22,7 +22,6 @@ menuconfig VFIO
 	tristate "VFIO Non-Privileged userspace driver framework"
 	depends on IOMMU_API
 	select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM_SMMU || ARM_SMMU_V3)
-	select ANON_INODES
 	help
	  VFIO provides a framework for secure userspace device drivers.
	  See Documentation/vfio.txt for more details.

@@ -24,7 +24,7 @@ obj-$(CONFIG_PROC_FS)	+= proc_namespace.o
 
 obj-y				+= notify/
 obj-$(CONFIG_EPOLL)		+= eventpoll.o
-obj-$(CONFIG_ANON_INODES)	+= anon_inodes.o
+obj-y				+= anon_inodes.o
 obj-$(CONFIG_SIGNALFD)		+= signalfd.o
 obj-$(CONFIG_TIMERFD)		+= timerfd.o
 obj-$(CONFIG_EVENTFD)		+= eventfd.o

@@ -6,7 +6,6 @@ config FS_ENCRYPTION
 	select CRYPTO_ECB
 	select CRYPTO_XTS
 	select CRYPTO_CTS
-	select CRYPTO_SHA256
 	select KEYS
 	help
	  Enable encryption of files and directories.  This

@@ -36,8 +36,9 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
 		if (fscrypt_using_hardware_encryption(page->mapping->host)) {
 			SetPageUptodate(page);
 		} else {
-			int ret = fscrypt_decrypt_page(page->mapping->host,
-					page, PAGE_SIZE, 0, page->index);
+			int ret = fscrypt_decrypt_pagecache_blocks(page,
+								   bv->bv_len,
+								   bv->bv_offset);
 			if (ret)
 				SetPageError(page);
 			else if (done)
@@ -56,9 +57,8 @@ EXPORT_SYMBOL(fscrypt_decrypt_bio);
 
 static void completion_pages(struct work_struct *work)
 {
-	struct fscrypt_ctx *ctx =
-		container_of(work, struct fscrypt_ctx, r.work);
-	struct bio *bio = ctx->r.bio;
+	struct fscrypt_ctx *ctx = container_of(work, struct fscrypt_ctx, work);
+	struct bio *bio = ctx->bio;
 
 	__fscrypt_decrypt_bio(bio, true);
 	fscrypt_release_ctx(ctx);
@@ -67,57 +67,29 @@ static void completion_pages(struct work_struct *work)
 
 void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, struct bio *bio)
 {
-	INIT_WORK(&ctx->r.work, completion_pages);
-	ctx->r.bio = bio;
-	fscrypt_enqueue_decrypt_work(&ctx->r.work);
+	INIT_WORK(&ctx->work, completion_pages);
+	ctx->bio = bio;
+	fscrypt_enqueue_decrypt_work(&ctx->work);
 }
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_bio);
 
-void fscrypt_pullback_bio_page(struct page **page, bool restore)
-{
-	struct fscrypt_ctx *ctx;
-	struct page *bounce_page;
-
-	/* The bounce data pages are unmapped. */
-	if ((*page)->mapping)
-		return;
-
-	/* The bounce data page is unmapped. */
-	bounce_page = *page;
-	ctx = (struct fscrypt_ctx *)page_private(bounce_page);
-
-	/* restore control page */
-	*page = ctx->w.control_page;
-
-	if (restore)
-		fscrypt_restore_control_page(bounce_page);
-}
-EXPORT_SYMBOL(fscrypt_pullback_bio_page);
-
 int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = NULL;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
 	struct bio *bio;
 	int ret, err = 0;
 
-	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
-	ctx = fscrypt_get_ctx(GFP_NOFS);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
-	if (IS_ERR(ciphertext_page)) {
-		err = PTR_ERR(ciphertext_page);
-		goto errout;
-	}
+	ciphertext_page = fscrypt_alloc_bounce_page(GFP_NOWAIT);
+	if (!ciphertext_page)
		return -ENOMEM;
 
 	while (len--) {
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
-					     ZERO_PAGE(0), ciphertext_page,
-					     PAGE_SIZE, 0, GFP_NOFS);
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
+					  ZERO_PAGE(0), ciphertext_page,
+					  blocksize, 0, GFP_NOFS);
 		if (err)
 			goto errout;
 
@@ -127,14 +99,11 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 			goto errout;
 		}
 		bio_set_dev(bio, inode->i_sb->s_bdev);
-		bio->bi_iter.bi_sector =
-			pblk << (inode->i_sb->s_blocksize_bits - 9);
+		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
 		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT);
-		ret = bio_add_page(bio, ciphertext_page,
-				   inode->i_sb->s_blocksize, 0);
-		if (ret != inode->i_sb->s_blocksize) {
+		ret = bio_add_page(bio, ciphertext_page, blocksize, 0);
+		if (WARN_ON(ret != blocksize)) {
 			/* should never happen! */
-			WARN_ON(1);
 			bio_put(bio);
 			err = -EIO;
 			goto errout;
@@ -150,7 +119,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
 	}
 	err = 0;
 errout:
-	fscrypt_release_ctx(ctx);
+	fscrypt_free_bounce_page(ciphertext_page);
 	return err;
 }
 EXPORT_SYMBOL(fscrypt_zeroout_range);

@@ -58,23 +58,16 @@ void fscrypt_enqueue_decrypt_work(struct work_struct *work)
 EXPORT_SYMBOL(fscrypt_enqueue_decrypt_work);
 
 /**
- * fscrypt_release_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
+ * fscrypt_release_ctx() - Release a decryption context
+ * @ctx: The decryption context to release.
  *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
+ * If the decryption context was allocated from the pre-allocated pool, return
+ * it to that pool.  Else, free it.
  */
 void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 {
 	unsigned long flags;
 
-	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
-		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
-		ctx->w.bounce_page = NULL;
-	}
-	ctx->w.control_page = NULL;
 	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
 		kmem_cache_free(fscrypt_ctx_cachep, ctx);
 	} else {
@@ -86,12 +79,12 @@ void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
 EXPORT_SYMBOL(fscrypt_release_ctx);
 
 /**
- * fscrypt_get_ctx() - Gets an encryption context
+ * fscrypt_get_ctx() - Get a decryption context
  * @gfp_flags:   The gfp flag for memory allocation
  *
- * Allocates and initializes an encryption context.
+ * Allocate and initialize a decryption context.
  *
- * Return: A new encryption context on success; an ERR_PTR() otherwise.
+ * Return: A new decryption context on success; an ERR_PTR() otherwise.
  */
 struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 {
@@ -99,14 +92,8 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 	unsigned long flags;
 
 	/*
-	 * We first try getting the ctx from a free list because in
-	 * the common case the ctx will have an allocated and
-	 * initialized crypto tfm, so it's probably a worthwhile
-	 * optimization. For the bounce page, we first try getting it
-	 * from the kernel allocator because that's just about as fast
-	 * as getting it from a list and because a cache of free pages
-	 * should generally be a "last resort" option for a filesystem
-	 * to be able to do its job.
+	 * First try getting a ctx from the free list so that we don't have to
+	 * call into the slab allocator.
	 */
 	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
 	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
@@ -122,11 +109,31 @@ struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
 	} else {
 		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
 	}
-	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
 	return ctx;
 }
 EXPORT_SYMBOL(fscrypt_get_ctx);
 
+struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags)
+{
+	return mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
+}
+
+/**
+ * fscrypt_free_bounce_page() - free a ciphertext bounce page
+ *
+ * Free a bounce page that was allocated by fscrypt_encrypt_pagecache_blocks(),
+ * or by fscrypt_alloc_bounce_page() directly.
+ */
+void fscrypt_free_bounce_page(struct page *bounce_page)
+{
+	if (!bounce_page)
+		return;
+	set_page_private(bounce_page, (unsigned long)NULL);
+	ClearPagePrivate(bounce_page);
+	mempool_free(bounce_page, fscrypt_bounce_page_pool);
+}
+EXPORT_SYMBOL(fscrypt_free_bounce_page);
+
 void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
			 const struct fscrypt_info *ci)
 {
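fscrypt_alloc_bounce_page() and fscrypt_free_bounce_page() above replace the old ctx-owned bounce page, so callers now pair the two directly, as the reworked fscrypt_zeroout_range() in the bio.c hunk does. A minimal sketch of that pairing in a hypothetical caller (fscrypt_free_bounce_page() tolerates NULL, so it is safe on error paths)::

/* Sketch only: the new alloc/free pairing, with no fscrypt_ctx involved. */
static int example_use_bounce_page(void)
{
	struct page *bounce_page = fscrypt_alloc_bounce_page(GFP_NOFS);

	if (!bounce_page)
		return -ENOMEM;

	/* ... encrypt into bounce_page, submit I/O, wait for it ... */

	fscrypt_free_bounce_page(bounce_page);	/* NULL-safe */
	return 0;
}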
@@ -140,10 +147,11 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
 		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, iv->raw, iv->raw);
 }
 
-int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
-			   u64 lblk_num, struct page *src_page,
-			   struct page *dest_page, unsigned int len,
-			   unsigned int offs, gfp_t gfp_flags)
+/* Encrypt or decrypt a single filesystem block of file contents */
+int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,
+			u64 lblk_num, struct page *src_page,
+			struct page *dest_page, unsigned int len,
+			unsigned int offs, gfp_t gfp_flags)
 {
 	union fscrypt_iv iv;
 	struct skcipher_request *req = NULL;
@@ -188,126 +196,158 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
 	return 0;
 }
 
-struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
-				       gfp_t gfp_flags)
-{
-	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
-	if (ctx->w.bounce_page == NULL)
-		return ERR_PTR(-ENOMEM);
-	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
-	return ctx->w.bounce_page;
-}
-
 /**
- * fscypt_encrypt_page() - Encrypts a page
- * @inode:   The inode for which the encryption should take place
- * @page:    The page to encrypt. Must be locked for bounce-page
- *           encryption.
- * @len:     Length of data to encrypt in @page and encrypted
- *           data in returned page.
- * @offs:    Offset of data within @page and returned
- *           page holding encrypted data.
- * @lblk_num: Logical block number. This must be unique for multiple
- *            calls with same inode, except when overwriting
- *            previously written data.
- * @gfp_flags: The gfp flag for memory allocation
+ * fscrypt_encrypt_pagecache_blocks() - Encrypt filesystem blocks from a pagecache page
+ * @page:      The locked pagecache page containing the block(s) to encrypt
+ * @len:       Total size of the block(s) to encrypt.  Must be a nonzero
+ *		multiple of the filesystem's block size.
+ * @offs:      Byte offset within @page of the first block to encrypt.  Must be
+ *		a multiple of the filesystem's block size.
+ * @gfp_flags: Memory allocation flags
  *
- * Encrypts @page using the ctx encryption context. Performs encryption
- * either in-place or into a newly allocated bounce page.
- * Called on the page write path.
+ * A new bounce page is allocated, and the specified block(s) are encrypted into
+ * it.  In the bounce page, the ciphertext block(s) will be located at the same
+ * offsets at which the plaintext block(s) were located in the source page; any
+ * other parts of the bounce page will be left uninitialized.  However, normally
+ * blocksize == PAGE_SIZE and the whole page is encrypted at once.
  *
- * Bounce page allocation is the default.
- * In this case, the contents of @page are encrypted and stored in an
- * allocated bounce page. @page has to be locked and the caller must call
- * fscrypt_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
+ * This is for use by the filesystem's ->writepages() method.
  *
- * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
- * fscrypt_operations. Here, the input-page is returned with its content
- * encrypted.
- *
- * Return: A page with the encrypted content on success. Else, an
- * error value or NULL.
+ * Return: the new encrypted bounce page on success; an ERR_PTR() on failure
  */
-struct page *fscrypt_encrypt_page(const struct inode *inode,
-				  struct page *page,
-				  unsigned int len,
-				  unsigned int offs,
-				  u64 lblk_num, gfp_t gfp_flags)
+struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
+					      unsigned int len,
+					      unsigned int offs,
+					      gfp_t gfp_flags)
 
 {
-	struct fscrypt_ctx *ctx;
-	struct page *ciphertext_page = page;
+	const struct inode *inode = page->mapping->host;
+	const unsigned int blockbits = inode->i_blkbits;
+	const unsigned int blocksize = 1 << blockbits;
+	struct page *ciphertext_page;
+	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
+		       (offs >> blockbits);
+	unsigned int i;
 	int err;
 
-	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
-		/* with inplace-encryption we just encrypt the page */
-		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
-					     ciphertext_page, len, offs,
-					     gfp_flags);
-		if (err)
-			return ERR_PTR(err);
-
-		return ciphertext_page;
-	}
-
 	if (WARN_ON_ONCE(!PageLocked(page)))
 		return ERR_PTR(-EINVAL);
 
-	ctx = fscrypt_get_ctx(gfp_flags);
-	if (IS_ERR(ctx))
-		return ERR_CAST(ctx);
+	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
+		return ERR_PTR(-EINVAL);
 
-	/* The encryption operation will require a bounce page. */
-	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
-	if (IS_ERR(ciphertext_page))
-		goto errout;
+	ciphertext_page = fscrypt_alloc_bounce_page(gfp_flags);
+	if (!ciphertext_page)
+		return ERR_PTR(-ENOMEM);
 
-	ctx->w.control_page = page;
-	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
-				     page, ciphertext_page, len, offs,
-				     gfp_flags);
-	if (err) {
-		ciphertext_page = ERR_PTR(err);
-		goto errout;
+	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
+		err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num,
+					  page, ciphertext_page,
					  blocksize, i, gfp_flags);
+		if (err) {
+			fscrypt_free_bounce_page(ciphertext_page);
||||
return ERR_PTR(err);
|
||||
}
|
||||
}
|
||||
SetPagePrivate(ciphertext_page);
|
||||
set_page_private(ciphertext_page, (unsigned long)ctx);
|
||||
lock_page(ciphertext_page);
|
||||
return ciphertext_page;
|
||||
|
||||
errout:
|
||||
fscrypt_release_ctx(ctx);
|
||||
set_page_private(ciphertext_page, (unsigned long)page);
|
||||
return ciphertext_page;
|
||||
}
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_page);
|
||||
EXPORT_SYMBOL(fscrypt_encrypt_pagecache_blocks);
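
Putting the new API together: a hedged ->writepage-style sketch, assuming blocksize == PAGE_SIZE. submit_encrypted_bio() is a stand-in for the filesystem's real bio submission, and on success the bounce page is expected to be freed later by the completion handler (as in the ext4 hunks below):

#include <linux/fscrypt.h>

int submit_encrypted_bio(struct page *bounce_page);	/* stand-in */

static int example_writepage(struct page *page, gfp_t gfp_flags)
{
	struct page *bounce_page;
	int err;

	/* Encrypt the whole locked pagecache page into a fresh bounce page. */
	bounce_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0,
						       gfp_flags);
	if (IS_ERR(bounce_page))
		return PTR_ERR(bounce_page);

	err = submit_encrypted_bio(bounce_page);
	if (err)
		fscrypt_free_bounce_page(bounce_page);	/* error path frees now */
	return err;
}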

/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 * fscrypt_encrypt_block_inplace() - Encrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to encrypt
 * @len:       Size of block to encrypt.  Doesn't need to be a multiple of the
 *             fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to encrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *             number of the block within the file
 * @gfp_flags: Memory allocation flags
 *
 * Decrypts page in-place using the ctx encryption context.
 * Encrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	if (WARN_ON_ONCE(!PageLocked(page) &&
			 !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
	return fscrypt_crypt_block(inode, FS_ENCRYPT, lblk_num, page, page,
				   len, offs, gfp_flags);
}
EXPORT_SYMBOL(fscrypt_encrypt_block_inplace);
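
The in-place variant exists for data that does not sit in the pagecache at the expected offset, so the caller passes @inode and @lblk_num explicitly. A sketch loosely modeled on how ubifs encrypts a padded (possibly compressed) data node; everything except the fscrypt call itself is illustrative:

/* Illustrative: encrypt pad_len bytes at kernel address p (which must be
 * in the linear mapping for virt_to_page()), belonging to logical block
 * 'block' of 'inode'.  pad_len must be a multiple of FS_CRYPTO_BLOCK_SIZE. */
static int example_encrypt_inplace(const struct inode *inode, void *p,
				   unsigned int pad_len, u64 block)
{
	return fscrypt_encrypt_block_inplace(inode, virt_to_page(p), pad_len,
					     offset_in_page(p), block,
					     GFP_NOFS);
}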

/**
 * fscrypt_decrypt_pagecache_blocks() - Decrypt filesystem blocks in a pagecache page
 * @page:  The locked pagecache page containing the block(s) to decrypt
 * @len:   Total size of the block(s) to decrypt.  Must be a nonzero
 *         multiple of the filesystem's block size.
 * @offs:  Byte offset within @page of the first block to decrypt.  Must be
 *         a multiple of the filesystem's block size.
 *
 * The specified block(s) are decrypted in-place within the pagecache page,
 * which must still be locked and not uptodate.  Normally, blocksize ==
 * PAGE_SIZE and the whole page is decrypted at once.
 *
 * This is for use by the filesystem's ->readpages() method.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
				     unsigned int offs)
{
	const struct inode *inode = page->mapping->host;
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	u64 lblk_num = ((u64)page->index << (PAGE_SHIFT - blockbits)) +
		       (offs >> blockbits);
	unsigned int i;
	int err;

	if (WARN_ON_ONCE(!PageLocked(page)))
		return -EINVAL;

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
	if (WARN_ON_ONCE(len <= 0 || !IS_ALIGNED(len | offs, blocksize)))
		return -EINVAL;

	for (i = offs; i < offs + len; i += blocksize, lblk_num++) {
		err = fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page,
					  page, blocksize, i, GFP_NOFS);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
EXPORT_SYMBOL(fscrypt_decrypt_pagecache_blocks);
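
One worked case of the lblk_num arithmetic above: with 1 KiB filesystem blocks (blockbits = 10) on 4 KiB pages, page->index = 3 and offs = 2048 give lblk_num = (3 << 2) + 2 = 14. On the read side the call is symmetric to the encrypt path; below is a sketch of a read-completion loop in the style of fs/crypto/bio.c, illustrative rather than a quote of this tree:

/* Sketch: decrypt each bio segment in place within its pagecache page,
 * then mark the page uptodate (or record the error). */
static void example_decrypt_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		int err = fscrypt_decrypt_pagecache_blocks(page, bvec->bv_len,
							   bvec->bv_offset);

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);
		unlock_page(page);
	}
}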

/**
 * fscrypt_decrypt_block_inplace() - Decrypt a filesystem block in-place
 * @inode:     The inode to which this block belongs
 * @page:      The page containing the block to decrypt
 * @len:       Size of block to decrypt.  Doesn't need to be a multiple of the
 *             fs block size, but must be a multiple of FS_CRYPTO_BLOCK_SIZE.
 * @offs:      Byte offset within @page at which the block to decrypt begins
 * @lblk_num:  Filesystem logical block number of the block, i.e. the 0-based
 *             number of the block within the file
 *
 * Decrypt a possibly-compressed filesystem block that is located in an
 * arbitrary page, not necessarily in the original pagecache page.  The @inode
 * and @lblk_num must be specified, as they can't be determined from @page.
 *
 * Return: 0 on success; -errno on failure
 */
int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page,
				  unsigned int len, unsigned int offs,
				  u64 lblk_num)
{
	return fscrypt_crypt_block(inode, FS_DECRYPT, lblk_num, page, page,
				   len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_block_inplace);

/*
 * Validate dentries in encrypted directories to make sure we aren't potentially

@@ -357,18 +397,6 @@ const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};

void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);

static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;
@@ -12,7 +12,6 @@
 */

#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"

@@ -106,7 +106,6 @@ typedef enum {
} fscrypt_direction_t;

#define FS_CTX_REQUIRES_FREE_ENCRYPT_FL	0x00000001
#define FS_CTX_HAS_BOUNCE_BUFFER_FL	0x00000002

static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
					   u32 filenames_mode)

@@ -133,14 +132,12 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
/* crypto.c */
extern struct kmem_cache *fscrypt_info_cachep;
extern int fscrypt_initialize(unsigned int cop_flags);
extern int fscrypt_do_page_crypto(const struct inode *inode,
				  fscrypt_direction_t rw, u64 lblk_num,
				  struct page *src_page,
				  struct page *dest_page,
				  unsigned int len, unsigned int offs,
				  gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
					      gfp_t gfp_flags);
extern int fscrypt_crypt_block(const struct inode *inode,
			       fscrypt_direction_t rw, u64 lblk_num,
			       struct page *src_page, struct page *dest_page,
			       unsigned int len, unsigned int offs,
			       gfp_t gfp_flags);
extern struct page *fscrypt_alloc_bounce_page(gfp_t gfp_flags);
extern const struct dentry_operations fscrypt_d_ops;

extern void __printf(3, 4) __cold
@@ -4,7 +4,6 @@
 * Encryption hooks for higher-level filesystem operations.
 */

#include <linux/ratelimit.h>
#include "fscrypt_private.h"

/**

@@ -12,7 +12,6 @@
#include <keys/user-type.h>
#include <linux/hashtable.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
@@ -1254,8 +1254,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	else if (decrypt)
		err = fscrypt_decrypt_page(page->mapping->host, page,
					   PAGE_SIZE, 0, page->index);
		err = fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0);
	return err;
}
#endif

@@ -4135,8 +4134,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
			/* We expect the key to be set. */
			BUG_ON(!fscrypt_has_encryption_key(inode));
			BUG_ON(blocksize != PAGE_SIZE);
			WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
						page, PAGE_SIZE, 0, page->index));
			WARN_ON_ONCE(fscrypt_decrypt_pagecache_blocks(
						page, PAGE_SIZE, 0));
		}
	}
	if (ext4_should_journal_data(inode)) {
@@ -66,9 +66,7 @@ static void ext4_finish_bio(struct bio *bio)

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_FS_ENCRYPTION
		struct page *data_page = NULL;
#endif
		struct page *bounce_page = NULL;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;

@@ -78,13 +76,10 @@ static void ext4_finish_bio(struct bio *bio)
		if (!page)
			continue;

#ifdef CONFIG_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			fscrypt_pullback_bio_page(&page, false);
		if (fscrypt_is_bounce_page(page)) {
			bounce_page = page;
			page = fscrypt_pagecache_page(bounce_page);
		}
#endif

		if (bio->bi_status) {
			SetPageError(page);

@@ -111,10 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_FS_ENCRYPTION
			if (data_page)
				fscrypt_restore_control_page(data_page);
#endif
			fscrypt_free_bounce_page(bounce_page);
			end_page_writeback(page);
		}
	}
@@ -418,7 +410,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *data_page = NULL;
	struct page *bounce_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;

@@ -485,11 +477,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,

	retry_encrypt:
		if (!fscrypt_using_hardware_encryption(inode))
			data_page = fscrypt_encrypt_page(inode,
					page, PAGE_SIZE, 0,
					page->index, gfp_flags);
		if (IS_ERR(data_page)) {
			ret = PTR_ERR(data_page);
			bounce_page = fscrypt_encrypt_pagecache_blocks(page,
					PAGE_SIZE, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
				if (io->io_bio) {
					ext4_io_submit(io);

@@ -498,7 +489,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
				gfp_flags |= __GFP_NOFAIL;
				goto retry_encrypt;
			}
			data_page = NULL;
			bounce_page = NULL;
			goto out;
		}
	}

@@ -507,10 +498,9 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
	do {
		if (!buffer_async_write(bh))
			continue;
		if (data_page)
		if (bounce_page)
			io->io_flags |= EXT4_IO_ENCRYPTED;
		ret = io_submit_add_bh(io, inode,
				data_page ? data_page : page, bh);
		ret = io_submit_add_bh(io, inode, bounce_page ?: page, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else

@@ -526,8 +516,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
	out:
		if (data_page)
			fscrypt_restore_control_page(data_page);
		fscrypt_free_bounce_page(bounce_page);
		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
		redirty_page_for_writepage(wbc, page);
		do {
@@ -196,7 +196,7 @@ static void f2fs_write_end_io(struct bio *bio)
			continue;
		}

		fscrypt_pullback_bio_page(&page, true);
		fscrypt_finalize_bounce_page(&page);

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);

@@ -399,10 +399,9 @@ static bool __has_merged_page(struct bio *bio, struct inode *inode,

	bio_for_each_segment_all(bvec, bio, i) {

		if (bvec->bv_page->mapping)
			target = bvec->bv_page;
		else
			target = fscrypt_control_page(bvec->bv_page);
		target = bvec->bv_page;
		if (fscrypt_is_bounce_page(target))
			target = fscrypt_pagecache_page(target);

		if (inode && inode == target->mapping->host)
			return true;

@@ -1882,8 +1881,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
	if (fscrypt_using_hardware_encryption(inode))
		return 0;

	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
			PAGE_SIZE, 0, fio->page->index, gfp_flags);
	fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(fio->page,
							       PAGE_SIZE, 0,
							       gfp_flags);
	if (IS_ERR(fio->encrypted_page)) {
		/* flush pending IOs and wait for a while in the ENOMEM case */
		if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {

@@ -2055,8 +2055,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
		err = f2fs_inplace_write_data(fio);
		if (err) {
			if (f2fs_encrypted_file(inode))
				fscrypt_pullback_bio_page(&fio->encrypted_page,
							  true);
			fscrypt_finalize_bounce_page(&fio->encrypted_page);
			if (PageWriteback(page))
				end_page_writeback(page);
		} else {
@@ -2918,6 +2918,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
		if (err) {
			f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
				 block + 1);
			err = -EFSCORRUPTED;
			brelse(bh);
			continue;
		}
@@ -39,6 +39,8 @@ EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_start);
EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_end);

/*
 * I/O completion handler for multipage BIOs.
fs/namei.c
@@ -44,6 +44,9 @@
#include "internal.h"
#include "mount.h"

#define CREATE_TRACE_POINTS
#include <trace/events/namei.h>

/* [Feb-1997 T. Schoebel-Theuer]
 * Fundamental changes in the pathname lookup mechanisms (namei)
 * were necessary because of omirr. The reason is that omirr needs
@@ -779,6 +782,81 @@ static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
	return 1;
}

#define INIT_PATH_SIZE 64

static void success_walk_trace(struct nameidata *nd)
{
	struct path *pt = &nd->path;
	struct inode *i = nd->inode;
	char buf[INIT_PATH_SIZE], *try_buf;
	int cur_path_size;
	char *p;

	/* When the eBPF/tracepoint is disabled, keep overhead low. */
	if (!trace_inodepath_enabled())
		return;

	/* First try the stack-allocated buffer. */
	try_buf = buf;
	cur_path_size = INIT_PATH_SIZE;

	while (cur_path_size <= PATH_MAX) {
		/* Free the previous heap allocation if we are now trying
		 * a second or later heap allocation.
		 */
		if (try_buf != buf)
			kfree(try_buf);

		/* All but the first alloc are on the heap. */
		if (cur_path_size != INIT_PATH_SIZE) {
			try_buf = kmalloc(cur_path_size, GFP_KERNEL);
			if (!try_buf) {
				try_buf = buf;
				sprintf(try_buf, "error:buf_alloc_failed");
				break;
			}
		}

		p = d_path(pt, try_buf, cur_path_size);

		if (!IS_ERR(p)) {
			char *end = mangle_path(try_buf, p, "\n");

			if (end) {
				try_buf[end - try_buf] = 0;
				break;
			} else {
				/* On mangle errors, double the path size
				 * up to PATH_MAX.
				 */
				cur_path_size = cur_path_size << 1;
				continue;
			}
		}

		if (PTR_ERR(p) == -ENAMETOOLONG) {
			/* If d_path complains that the name is too long,
			 * then double the path size up to PATH_MAX.
			 */
			cur_path_size = cur_path_size << 1;
			continue;
		}

		sprintf(try_buf, "error:d_path_failed_%lu",
			-1 * PTR_ERR(p));
		break;
	}

	if (cur_path_size > PATH_MAX)
		sprintf(try_buf, "error:d_path_name_too_long");

	trace_inodepath(i, try_buf);

	if (try_buf != buf)
		kfree(try_buf);
	return;
}
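
Since PATH_MAX is 4096 on Linux, the doubling loop above makes at most seven d_path() attempts (buffer sizes 64, 128, 256, 512, 1024, 2048, 4096) before falling through to the error:d_path_name_too_long case.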

/**
 * complete_walk - successful completion of path walk
 * @nd: pointer nameidata

@@ -801,15 +879,21 @@ static int complete_walk(struct nameidata *nd)
		return -ECHILD;
	}

	if (likely(!(nd->flags & LOOKUP_JUMPED)))
	if (likely(!(nd->flags & LOOKUP_JUMPED))) {
		success_walk_trace(nd);
		return 0;
	}

	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
		success_walk_trace(nd);
		return 0;
	}

	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
	if (status > 0)
	if (status > 0) {
		success_walk_trace(nd);
		return 0;
	}

	if (!status)
		status = -ESTALE;
@@ -1,7 +1,6 @@
config FANOTIFY
	bool "Filesystem wide access notification"
	select FSNOTIFY
	select ANON_INODES
	default n
	---help---
	  Say Y here to enable fanotify support. fanotify is a file access

@@ -1,6 +1,5 @@
config INOTIFY_USER
	bool "Inotify support for userspace"
	select ANON_INODES
	select FSNOTIFY
	default y
	---help---
@@ -3432,6 +3432,15 @@ static const struct file_operations proc_tgid_base_operations = {
	.llseek = generic_file_llseek,
};

struct pid *tgid_pidfd_to_pid(const struct file *file)
{
	if (!d_is_dir(file->f_path.dentry) ||
	    (file->f_op != &proc_tgid_base_operations))
		return ERR_PTR(-EBADF);

	return proc_pid(file_inode(file));
}
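
tgid_pidfd_to_pid() lets a /proc/<pid> directory fd be resolved to a struct pid, which is what the pidfd_send_signal() plumbing needs. A hedged sketch of a caller: kill_pid(), fdget() and fdput() are real kernel APIs, while the function itself is purely illustrative:

/* Illustrative: signal the thread group behind a /proc/<pid> dir fd. */
static int example_signal_procfd(int fd, int sig)
{
	struct fd f = fdget(fd);
	struct pid *pid;
	int ret;

	if (!f.file)
		return -EBADF;

	pid = tgid_pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto out;
	}
	ret = kill_pid(pid, sig, 1);
out:
	fdput(f);
	return ret;
}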

static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	return proc_pident_lookup(dir, dentry,