Merge android-4.19.45 (50f9143) into msm-4.19

* refs/heads/tmp-50f9143:
  Linux 4.19.45
  ext4: don't update s_rev_level if not required
  ext4: fix compile error when using BUFFER_TRACE
  pstore: Refactor compression initialization
  pstore: Allocate compression during late_initcall()
  pstore: Centralize init/exit routines
  iov_iter: optimize page_copy_sane()
  libnvdimm/namespace: Fix label tracking error
  xen/pvh: set xen_domain_type to HVM in xen_pvh_init
  kbuild: turn auto.conf.cmd into a mandatory include file
  KVM: lapic: Busy wait for timer to expire when using hv_timer
  KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes
  jbd2: fix potential double free
  ALSA: hda/realtek - Fix for Lenovo B50-70 inverted internal microphone bug
  ALSA: hda/realtek - Fixup headphone noise via runtime suspend
  ALSA: hda/realtek - Corrected fixup for System76 Gazelle (gaze14)
  ext4: avoid panic during forced reboot due to aborted journal
  ext4: fix use-after-free in dx_release()
  ext4: fix data corruption caused by overlapping unaligned and aligned IO
  ext4: zero out the unused memory region in the extent tree block
  tty: Don't force RISCV SBI console as preferred console
  fs/writeback.c: use rcu_barrier() to wait for inflight wb switches going into workqueue when umount
  crypto: ccm - fix incompatibility between "ccm" and "ccm_base"
  ipmi:ssif: compare block number correctly for multi-part return messages
  bcache: never set KEY_PTRS of journal key to 0 in journal_reclaim()
  bcache: fix a race between cache register and cacheset unregister
  Btrfs: do not start a transaction at iterate_extent_inodes()
  Btrfs: do not start a transaction during fiemap
  Btrfs: send, flush dellaloc in order to avoid data loss
  btrfs: Honour FITRIM range constraints during free space trim
  btrfs: Correctly free extent buffer in case btree_read_extent_buffer_pages fails
  btrfs: Check the first key and level for cached extent buffer
  ext4: fix ext4_show_options for file systems w/o journal
  ext4: actually request zeroing of inode table after grow
  ext4: fix use-after-free race with debug_want_extra_isize
  ext4: avoid drop reference to iloc.bh twice
  ext4: ignore e_value_offs for xattrs with value-in-ea-inode
  ext4: make sanity check in mballoc more strict
  jbd2: check superblock mapped prior to committing
  tty/vt: fix write/write race in ioctl(KDSKBSENT) handler
  tty: vt.c: Fix TIOCL_BLANKSCREEN console blanking if blankinterval == 0
  mtd: spi-nor: intel-spi: Avoid crossing 4K address boundary on read/write
  mfd: max77620: Fix swapped FPS_PERIOD_MAX_US values
  mfd: da9063: Fix OTP control register names to match datasheets for DA9063/63L
  ACPI: PM: Set enable_for_wake for wakeup GPEs during suspend-to-idle
  userfaultfd: use RCU to free the task struct when fork fails
  ocfs2: fix ocfs2 read inode data panic in ocfs2_iget
  hugetlb: use same fault hash key for shared and private mappings
  mm/hugetlb.c: don't put_page in lock of hugetlb_lock
  mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses
  mm/mincore.c: make mincore() more conservative
  crypto: ccree - handle tee fips error during power management resume
  crypto: ccree - add function to handle cryptocell tee fips error
  crypto: ccree - HOST_POWER_DOWN_EN should be the last CC access during suspend
  crypto: ccree - pm resume first enable the source clk
  crypto: ccree - don't map AEAD key and IV on stack
  crypto: ccree - use correct internal state sizes for export
  crypto: ccree - don't map MAC key on stack
  crypto: ccree - fix mem leak on error path
  crypto: ccree - remove special handling of chained sg
  bpf, arm64: remove prefetch insn in xadd mapping
  ASoC: codec: hdac_hdmi add device_link to card device
  ASoC: fsl_esai: Fix missing break in switch statement
  ASoC: RT5677-SPI: Disable 16Bit SPI Transfers
  ASoC: max98090: Fix restore of DAPM Muxes
  ALSA: hdea/realtek - Headset fixup for System76 Gazelle (gaze14)
  ALSA: hda/realtek - EAPD turn on later
  ALSA: hda/hdmi - Consider eld_valid when reporting jack event
  ALSA: hda/hdmi - Read the pin sense from register when repolling
  ALSA: usb-audio: Fix a memory leak bug
  ALSA: line6: toneport: Fix broken usage of timer for delayed execution
  mmc: core: Fix tag set memory leak
  crypto: arm64/aes-neonbs - don't access already-freed walk.iv
  crypto: arm/aes-neonbs - don't access already-freed walk.iv
  crypto: rockchip - update IV buffer to contain the next IV
  crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
  crypto: arm64/gcm-aes-ce - fix no-NEON fallback code
  crypto: x86/crct10dif-pcl - fix use via crypto_shash_digest()
  crypto: crct10dif-generic - fix use via crypto_shash_digest()
  crypto: skcipher - don't WARN on unprocessed data after slow walk step
  crypto: vmx - fix copy-paste error in CTR mode
  crypto: ccp - Do not free psp_master when PLATFORM_INIT fails
  crypto: chacha20poly1305 - set cra_name correctly
  crypto: salsa20 - don't access already-freed walk.iv
  crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
  crypto: crypto4xx - fix ctr-aes missing output IV
  sched/x86: Save [ER]FLAGS on context switch
  arm64: Save and restore OSDLR_EL1 across suspend/resume
  arm64: Clear OSDLR_EL1 on CPU boot
  arm64: compat: Reduce address limit
  arm64: arch_timer: Ensure counter register reads occur with seqlock held
  arm64: mmap: Ensure file offset is treated as unsigned
  power: supply: axp288_fuel_gauge: Add ACEPC T8 and T11 mini PCs to the blacklist
  power: supply: axp288_charger: Fix unchecked return value
  ARM: exynos: Fix a leaked reference by adding missing of_node_put
  mmc: sdhci-of-arasan: Add DTS property to disable DCMDs.
  ARM: dts: exynos: Fix audio (microphone) routing on Odroid XU3
  ARM: dts: exynos: Fix interrupt for shared EINTs on Exynos5260
  arm64: dts: rockchip: Disable DCMDs on RK3399's eMMC controller.
  objtool: Fix function fallthrough detection
  x86/speculation/mds: Improve CPU buffer clear documentation
  x86/speculation/mds: Revert CPU buffer clear on double fault exit
  locking/rwsem: Prevent decrement of reader count before increment
  fs: sdcardfs: Add missing option to show_options
  BACKPORT: drm/amd/display: add -msse2 to prevent Clang from emitting libcalls to undefined SW FP routines
  ANDROID: x86: use the correct function type for sys_ni_syscall
  ANDROID: x86: use the correct function type for sys32_(rt_)sigreturn
  ANDROID: x86: use the correct function type for native_set_fixmap
  ANDROID: x86: use the correct function type in SYSCALL_DEFINE0
  ANDROID: x86: add support for CONFIG_LTO_CLANG
  ANDROID: x86: disable STACK_VALIDATION with LTO_CLANG
  ANDROID: x86: disable HAVE_ARCH_PREL32_RELOCATIONS with LTO_CLANG
  ANDROID: x86/vdso: disable LTO only for VDSO
  ANDROID: x86/cpu/vmware: use the full form of inl in VMWARE_PORT
  UPSTREAM: x86/build: Keep local relocations with ld.lld
  ANDROID: crypto: arm64/ghash: fix CFI for GHASH CE
  ANDROID: crypto: arm64/sha: fix CFI in SHA CE
  ANDROID: arm64: kvm: disable CFI
  ANDROID: arm64: mark kpti_install_ng_mappings as __nocfi
  ANDROID: arm64: disable CFI for cpu_replace_ttbr1
  FROMLIST: arm64: use the correct function type for __arm64_sys_ni_syscall
  FROMLIST: arm64: use the correct function type in SYSCALL_DEFINE0
  FROMLIST: arm64: fix syscall_fn_t type
  ANDROID: modpost: add an exception for CFI stubs
  ANDROID: ftrace: fix function type mismatches
  FROMLIST: 9p: pass the correct prototype to read_cache_page
  FROMLIST: jffs2: pass the correct prototype to read_cache_page
  UPSTREAM: nfs: pass the correct prototype to read_cache_page
  FROMLIST: mm: don't cast ->readpage to filler_t for do_read_cache_page
  UPSTREAM: netfilter: xt_IDLETIMER: fix sysfs callback function type
  ANDROID: kallsyms: strip the .cfi postfix from symbols with CONFIG_CFI_CLANG
  ANDROID: add support for clang Control Flow Integrity (CFI)
  FROMLIST: arm64: select ARCH_SUPPORTS_LTO_CLANG
  ANDROID: arm64: disable HAVE_ARCH_PREL32_RELOCATIONS with LTO_CLANG
  ANDROID: arm64: add atomic_ll_sc.o to obj-y if using lld
  ANDROID: arm64: lse: fix LSE atomics with LTO
  ANDROID: arm64: vdso: disable LTO
  FROMLIST: arm64: kvm: use -fno-jump-tables with clang
  BACKPORT: arm64: sysreg: Make mrs_s and msr_s macros work with Clang and LTO
  ANDROID: init: ensure initcall ordering with LTO
  ANDROID: drivers/misc: disable LTO for lkdtm_rodata.o
  FROMLIST: efi/libstub: disable LTO
  FROMLIST: scripts/mod: disable LTO for empty.c
  ANDROID: kbuild: disable LTO_CLANG with KASAN
  FROMLIST: kbuild: fix dynamic ftrace with clang LTO
  ANDROID: kbuild: add support for clang LTO
  ANDROID: kbuild: add CONFIG_LD_IS_LLD
  UPSTREAM: gcov: clang support
  UPSTREAM: gcov: docs: add a note on GCC vs Clang differences
  UPSTREAM: gcov: clang: move common GCC code into gcc_base.c
  UPSTREAM: module: add stubs for within_module functions
  UPSTREAM: bpf: relax inode permission check for retrieving bpf program

Conflicts:
	Makefile
	arch/Kconfig
	arch/arm64/kvm/hyp/Makefile
	arch/x86/include/asm/syscall_wrapper.h
	drivers/mmc/core/queue.c
	fs/nfs/dir.c
	fs/nfs/symlink.c
	include/asm-generic/vmlinux.lds.h
	include/linux/compiler-clang.h
	include/linux/pagemap.h
	kernel/cfi.c
	mm/filemap.c
	scripts/link-vmlinux.sh

Change-Id: I1e34675a86ecb60d7b8a87e16574ea8920f9cb12
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>

@ -34,10 +34,6 @@ Configure the kernel with::
CONFIG_DEBUG_FS=y
CONFIG_GCOV_KERNEL=y
select the gcc's gcov format, default is autodetect based on gcc version::
CONFIG_GCOV_FORMAT_AUTODETECT=y
and to get coverage data for the entire kernel::
CONFIG_GCOV_PROFILE_ALL=y
@ -169,6 +165,20 @@ b) gcov is run on the BUILD machine
[user@build] gcov -o /tmp/coverage/tmp/out/init main.c
Note on compilers
-----------------
GCC and LLVM gcov tools are not necessarily compatible. Use gcov_ to work with
GCC-generated .gcno and .gcda files, and use llvm-cov_ for Clang.
.. _gcov: http://gcc.gnu.org/onlinedocs/gcc/Gcov.html
.. _llvm-cov: https://llvm.org/docs/CommandGuide/llvm-cov.html
Build differences between GCC and Clang gcov are handled by Kconfig. It
automatically selects the appropriate gcov format depending on the detected
toolchain.
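For a Clang-built kernel, the BUILD-machine step shown earlier therefore goes
through llvm-cov's gcov-compatible mode rather than GCC's gcov. A minimal
sketch, reusing the paths from the example above::
  [user@build] llvm-cov gcov -o /tmp/coverage/tmp/out/init main.c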
Troubleshooting
---------------


@ -142,45 +142,13 @@ Mitigation points
mds_user_clear.
The mitigation is invoked in prepare_exit_to_usermode() which covers
most of the kernel to user space transitions. There are a few exceptions
which are not invoking prepare_exit_to_usermode() on return to user
space. These exceptions use the paranoid exit code.
all but one of the kernel to user space transitions. The exception
is when we return from a Non Maskable Interrupt (NMI), which is
handled directly in do_nmi().
- Non Maskable Interrupt (NMI):
Access to sensible data like keys, credentials in the NMI context is
mostly theoretical: The CPU can do prefetching or execute a
misspeculated code path and thereby fetching data which might end up
leaking through a buffer.
But for mounting other attacks the kernel stack address of the task is
already valuable information. So in full mitigation mode, the NMI is
mitigated on the return from do_nmi() to provide almost complete
coverage.
- Double fault (#DF):
A double fault is usually fatal, but the ESPFIX workaround, which can
be triggered from user space through modify_ldt(2) is a recoverable
double fault. #DF uses the paranoid exit path, so explicit mitigation
in the double fault handler is required.
- Machine Check Exception (#MC):
Another corner case is a #MC which hits between the CPU buffer clear
invocation and the actual return to user. As this still is in kernel
space it takes the paranoid exit path which does not clear the CPU
buffers. So the #MC handler repopulates the buffers to some
extent. Machine checks are not reliably controllable and the window is
extremly small so mitigation would just tick a checkbox that this
theoretical corner case is covered. To keep the amount of special
cases small, ignore #MC.
- Debug Exception (#DB):
This takes the paranoid exit path only when the INT1 breakpoint is in
kernel space. #DB on a user space address takes the regular exit path,
so no extra mitigation required.
(The reason that NMI is special is that prepare_exit_to_usermode() can
enable IRQs. In NMI context, NMIs are blocked, and we don't want to
enable IRQs with NMIs blocked.)
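For reference, the clearing operation itself is cheap. A minimal sketch of the
helpers involved, assuming they match the 4.19 definitions in
arch/x86/include/asm/nospec-branch.h::
  static inline void mds_clear_cpu_buffers(void)
  {
          static const u16 ds = __KERNEL_DS;

          /*
           * On CPUs with updated microcode, VERW with a memory operand
           * also flushes the affected CPU buffers as a side effect.
           */
          asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
  }

  static inline void mds_user_clear_cpu_buffers(void)
  {
          /* mds_user_clear is the static key toggled by the mitigation. */
          if (static_branch_likely(&mds_user_clear))
                  mds_clear_cpu_buffers();
  }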
2. C-State transition


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 44
SUBLEVEL = 45
EXTRAVERSION =
NAME = "People's Front"
@ -612,6 +612,16 @@ ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
# Make toolchain changes before including arch/$(SRCARCH)/Makefile to ensure
# ar/cc/ld-* macros return correct values.
ifdef CONFIG_LTO_CLANG
# use llvm-ar for building symbol tables from IR files, and llvm-nm instead
# of objdump for processing symbol versions and exports
LLVM_AR := llvm-ar
LLVM_NM := llvm-nm
export LLVM_AR LLVM_NM
endif
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
@ -624,7 +634,7 @@ ifeq ($(may-sync-config),1)
# Read in dependencies to all Kconfig* files, make sure to run syncconfig if
# changes are detected. This should be included after arch/$(SRCARCH)/Makefile
# because some architectures define CROSS_COMPILE there.
-include include/config/auto.conf.cmd
include include/config/auto.conf.cmd
# To avoid any implicit rule to kick in, define an empty command
$(KCONFIG_CONFIG): ;
@ -819,8 +829,24 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
ifdef CONFIG_LTO_CLANG
lto-clang-flags := -flto -fvisibility=hidden
# allow disabling only clang LTO where needed
DISABLE_LTO_CLANG := -fno-lto -fvisibility=default
export DISABLE_LTO_CLANG
endif
ifdef CONFIG_LTO
LTO_CFLAGS := $(lto-clang-flags)
KBUILD_CFLAGS += $(LTO_CFLAGS)
DISABLE_LTO := $(DISABLE_LTO_CLANG)
export LTO_CFLAGS DISABLE_LTO
endif
ifdef CONFIG_CFI_CLANG
cfi-clang-flags += -fsanitize=cfi
cfi-clang-flags += -fsanitize=cfi $(call cc-option, -fsplit-lto-unit)
DISABLE_CFI_CLANG := -fno-sanitize=cfi
ifdef CONFIG_MODULES
cfi-clang-flags += -fsanitize-cfi-cross-dso
@ -830,17 +856,19 @@ ifdef CONFIG_CFI_PERMISSIVE
cfi-clang-flags += -fsanitize-recover=cfi -fno-sanitize-trap=cfi
endif
# also disable CFI when LTO is disabled
DISABLE_LTO_CLANG += $(DISABLE_CFI_CLANG)
# allow disabling only clang CFI where needed
export DISABLE_CFI_CLANG
endif
ifdef CONFIG_CFI
# cfi-flags are re-tested in prepare-compiler-check
cfi-flags := $(cfi-clang-flags)
KBUILD_CFLAGS += $(cfi-flags)
CFI_CFLAGS := $(cfi-clang-flags)
KBUILD_CFLAGS += $(CFI_CFLAGS)
DISABLE_CFI := $(DISABLE_CFI_CLANG)
export DISABLE_CFI
DISABLE_LTO += $(DISABLE_CFI)
export CFI_CFLAGS DISABLE_CFI
endif
# arch Makefile may override CC so keep this after arch Makefile is included
@ -1639,7 +1667,8 @@ clean: $(clean-dirs)
-o -name modules.builtin -o -name '.tmp_*.o.*' \
-o -name '*.c.[012]*.*' \
-o -name '*.ll' \
-o -name '*.gcno' \) -type f -print | xargs rm -f
-o -name '*.gcno' \
-o -name '*.*.symversions' \) -type f -print | xargs rm -f
# Generate tags for editors
# ---------------------------------------------------------------------------


@ -474,6 +474,45 @@ config STACKPROTECTOR_STRONG
about 20% of all kernel functions, which increases the kernel code
size by about 2%.
config LTO
def_bool n
config ARCH_SUPPORTS_LTO_CLANG
bool
help
An architecture should select this option if it supports:
- compiling with clang,
- compiling inline assembly with clang's integrated assembler,
- and linking with LLD.
choice
prompt "Link-Time Optimization (LTO) (EXPERIMENTAL)"
default LTO_NONE
help
This option turns on Link-Time Optimization (LTO).
config LTO_NONE
bool "None"
config LTO_CLANG
bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
depends on !KASAN
depends on CC_IS_CLANG && LD_IS_LLD
select LTO
help
This option enables clang's Link Time Optimization (LTO), which allows
the compiler to optimize the kernel globally at link time. If you
enable this option, the compiler generates LLVM IR instead of object
files, and the actual compilation from IR occurs at the LTO link step,
which may take several minutes.
If you select this option, you must compile the kernel with clang and
LLD.
endchoice
config CFI
bool


@ -223,7 +223,7 @@
wakeup-interrupt-controller {
compatible = "samsung,exynos4210-wakeup-eint";
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
};
};


@ -22,7 +22,7 @@
"Headphone Jack", "HPL",
"Headphone Jack", "HPR",
"Headphone Jack", "MICBIAS",
"IN1", "Headphone Jack",
"IN12", "Headphone Jack",
"Speakers", "SPKL",
"Speakers", "SPKR";


@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, true);
if (err)
return err;
crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);


@ -196,6 +196,7 @@ void __init exynos_firmware_init(void)
return;
addr = of_get_address(nd, 0, NULL, NULL);
of_node_put(nd);
if (!addr) {
pr_err("%s: No address specified.\n", __func__);
return;


@ -639,8 +639,10 @@ void __init exynos_pm_init(void)
if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
of_node_put(np);
return;
}
of_node_put(np);
pm_data = (const struct exynos_pm_data *) match->data;


@ -57,6 +57,7 @@ config ARM64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
select ARCH_SUPPORTS_NUMA_BALANCING
@ -108,7 +109,7 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_PREL32_RELOCATIONS if !LTO_CLANG
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST


@ -305,6 +305,7 @@
phys = <&emmc_phy>;
phy-names = "phy_arasan";
power-domains = <&power RK3399_PD_EMMC>;
disable-cqe-dcmd;
status = "disabled";
};


@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
int err;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
kernel_neon_begin();
neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);


@ -60,6 +60,22 @@ asmlinkage void pmull_ghash_update_p8(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head);
#ifdef CONFIG_CFI_CLANG
static inline void __cfi_pmull_ghash_update_p64(int blocks, u64 dg[],
const char *src, struct ghash_key const *k, const char *head)
{
return pmull_ghash_update_p64(blocks, dg, src, k, head);
}
#define pmull_ghash_update_p64 __cfi_pmull_ghash_update_p64
static inline void __cfi_pmull_ghash_update_p8(int blocks, u64 dg[],
const char *src, struct ghash_key const *k, const char *head)
{
return pmull_ghash_update_p8(blocks, dg, src, k, head);
}
#define pmull_ghash_update_p8 __cfi_pmull_ghash_update_p8
#endif
static void (*pmull_ghash_update)(int blocks, u64 dg[], const char *src,
struct ghash_key const *k,
const char *head);
@ -418,9 +434,11 @@ static int gcm_encrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
const int blocks =
walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
int remaining = blocks;
do {
__aes_arm64_encrypt(ctx->aes_key.key_enc,
@ -430,9 +448,9 @@ static int gcm_encrypt(struct aead_request *req)
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--blocks > 0);
} while (--remaining > 0);
ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
ghash_do_update(blocks, dg,
walk.dst.virt.addr, &ctx->ghash_key,
NULL);
@ -553,7 +571,7 @@ static int gcm_decrypt(struct aead_request *req)
put_unaligned_be32(2, iv + GCM_IV_SIZE);
while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
int blocks = walk.nbytes / AES_BLOCK_SIZE;
int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;


@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
/*
* Ensure that reads of the counter are treated the same as memory reads
* for the purposes of ordering by subsequent memory barriers.
*
* This insanity brought to you by speculative system register reads,
* out-of-order memory accesses, sequence locks and Thomas Gleixner.
*
* http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
*/
#define arch_counter_enforce_ordering(val) do { \
u64 tmp, _val = (val); \
\
asm volatile( \
" eor %0, %1, %1\n" \
" add %0, sp, %0\n" \
" ldr xzr, [%0]" \
: "=r" (tmp) : "r" (_val)); \
} while (0)
static inline u64 arch_counter_get_cntpct(void)
{
u64 cnt;
isb();
return arch_timer_reg_read_stable(cntpct_el0);
cnt = arch_timer_reg_read_stable(cntpct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
}
static inline u64 arch_counter_get_cntvct(void)
{
u64 cnt;
isb();
return arch_timer_reg_read_stable(cntvct_el0);
cnt = arch_timer_reg_read_stable(cntvct_el0);
arch_counter_enforce_ordering(cnt);
return cnt;
}
#undef arch_counter_enforce_ordering
static inline int arch_timer_arch_init(void)
{
return 0;


@ -28,7 +28,7 @@
({ \
u64 reg; \
asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
"mrs_s %0, " __stringify(r##vh),\
__mrs_s("%0", r##vh), \
ARM64_HAS_VIRT_HOST_EXTN) \
: "=r" (reg)); \
reg; \
@ -38,7 +38,7 @@
do { \
u64 __val = (u64)(v); \
asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
"msr_s " __stringify(r##vh) ", %x0",\
__msr_s(r##vh, "%x0"), \
ARM64_HAS_VIRT_HOST_EXTN) \
: : "rZ" (__val)); \
} while (0)


@ -20,7 +20,12 @@
#else /* __ASSEMBLER__ */
#ifdef CONFIG_LTO_CLANG
#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
#else
__asm__(".arch_extension lse");
#define __LSE_PREAMBLE
#endif
/* Move the ll/sc atomics out-of-line */
#define __LL_SC_INLINE notrace
@ -33,7 +38,7 @@ __asm__(".arch_extension lse");
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
#endif /* __ASSEMBLER__ */
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */


@ -53,7 +53,15 @@
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
#ifdef CONFIG_COMPAT
#ifdef CONFIG_ARM64_64K_PAGES
/*
* With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
* by the compat vectors page.
*/
#define TASK_SIZE_32 UL(0x100000000)
#else
#define TASK_SIZE_32 (UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES */
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \


@ -20,7 +20,7 @@
#include <linux/compat.h>
#include <linux/err.h>
typedef long (*syscall_fn_t)(struct pt_regs *regs);
typedef long (*syscall_fn_t)(const struct pt_regs *regs);
extern const syscall_fn_t sys_call_table[];


@ -30,10 +30,10 @@
} \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#define COMPAT_SYSCALL_DEFINE0(sname) \
asmlinkage long __arm64_compat_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
asmlinkage long __arm64_compat_sys_##sname(void)
#define COMPAT_SYSCALL_DEFINE0(sname) \
asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \
ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__arm64_compat_sys_##name);
@ -62,11 +62,11 @@
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __arm64_sys_##sname(void); \
ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
asmlinkage long __arm64_sys_##sname(void)
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \
ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
#endif
#ifndef COND_SYSCALL


@ -705,20 +705,39 @@
#include <linux/build_bug.h>
#include <linux/types.h>
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
" .equ .L__reg_num_x\\num, \\num\n"
" .endr\n"
#define __DEFINE_MRS_MSR_S_REGNUM \
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
" .equ .L__reg_num_x\\num, \\num\n" \
" .endr\n" \
" .equ .L__reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
#define DEFINE_MRS_S \
__DEFINE_MRS_MSR_S_REGNUM \
" .macro mrs_s, rt, sreg\n" \
__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
#define DEFINE_MSR_S \
__DEFINE_MRS_MSR_S_REGNUM \
" .macro msr_s, sreg, rt\n" \
__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
" .endm\n"
);
#define UNDEFINE_MRS_S \
" .purgem mrs_s\n"
#define UNDEFINE_MSR_S \
" .purgem msr_s\n"
#define __mrs_s(v, r) \
DEFINE_MRS_S \
" mrs_s " v ", " __stringify(r) "\n" \
UNDEFINE_MRS_S
#define __msr_s(r, v) \
DEFINE_MSR_S \
" msr_s " __stringify(r) ", " v "\n" \
UNDEFINE_MSR_S
/*
* Unlike read_cpuid, calls to read_sysreg are never expected to be
@ -746,13 +765,13 @@ asm(
*/
#define read_sysreg_s(r) ({ \
u64 __val; \
asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
__val; \
})
#define write_sysreg_s(v, r) do { \
u64 __val = (u64)(v); \
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
} while (0)
/*


@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
*/
static int clear_os_lock(unsigned int cpu)
{
write_sysreg(0, osdlr_el1);
write_sysreg(0, oslar_el1);
isb();
return 0;


@ -31,7 +31,7 @@
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, off)
unsigned long, fd, unsigned long, off)
{
if (offset_in_page(off) != 0)
return -EINVAL;
@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
return ksys_personality(personality);
}
asmlinkage long sys_ni_syscall(void);
SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
/*
* Wrappers to pass the pt_regs argument.
*/
#define sys_personality sys_arm64_personality
asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __arm64_sys_ni_syscall sys_ni_syscall
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
[0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd.h>
};


@ -133,17 +133,21 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __arm64_sys_ni_syscall sys_ni_syscall
asmlinkage long sys_ni_syscall(void);
COMPAT_SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>
#undef __SYSCALL
#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
[0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
[0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};


@ -15,6 +15,7 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
ccflags-y := -shared -fno-common -fno-builtin
ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
ccflags-y += $(DISABLE_LTO)
# Disable gcov profiling for VDSO code
GCOV_PROFILE := n


@ -73,6 +73,13 @@ x_tmp .req x8
movn x_tmp, #0xff00, lsl #48
and \res, x_tmp, \res
mul \res, \res, \mult
/*
* Fake address dependency from the value computed from the counter
* register to subsequent data page accesses so that the sequence
* locking also orders the read of the counter.
*/
and x_tmp, \res, xzr
add vdso_data, vdso_data, x_tmp
.endm
/*
@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
seqcnt_check fail=1b
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
seqcnt_check fail=1b
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@ -211,13 +218,13 @@ realtime:
/* w11 = cs_mono_mult, w12 = cs_shift */
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
seqcnt_check fail=realtime
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
seqcnt_check fail=realtime
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
clock_gettime_return, shift=1
@ -231,7 +238,6 @@ monotonic:
ldp w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
ldp x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
ldp x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
seqcnt_check fail=monotonic
/* All computations are done with left-shifted nsecs. */
lsl x4, x4, x12
@ -239,6 +245,7 @@ monotonic:
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
seqcnt_check fail=monotonic
get_ts_realtime res_sec=x10, res_nsec=x11, \
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
@ -253,13 +260,13 @@ monotonic_raw:
/* w11 = cs_raw_mult, w12 = cs_shift */
ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
seqcnt_check fail=monotonic_raw
/* All computations are done with left-shifted nsecs. */
get_nsec_per_sec res=x9
lsl x9, x9, x12
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
seqcnt_check fail=monotonic_raw
get_ts_clock_raw res_sec=x10, res_nsec=x11, \
clock_nsec=x15, nsec_to_sec=x9


@ -4,7 +4,8 @@
#
ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN) $(DISABLE_CFI)
$(DISABLE_STACKLEAK_PLUGIN) \
$(DISABLE_CFI)
ifeq ($(cc-name),clang)
ccflags-y += -fno-jump-tables


@ -11,7 +11,12 @@ lib-y := clear_user.o delay.o copy_from_user.o \
# patching of the bl instruction in the caller with an atomic instruction
# when supported by the CPU. Result and argument registers are handled
# correctly, based on the function prototype.
ifeq ($(CONFIG_LD_IS_LLD), y)
# https://bugs.llvm.org/show_bug.cgi?id=35841
obj-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
else
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
endif
CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \


@ -116,24 +116,25 @@ ENTRY(cpu_do_suspend)
mrs x2, tpidr_el0
mrs x3, tpidrro_el0
mrs x4, contextidr_el1
mrs x5, cpacr_el1
mrs x6, tcr_el1
mrs x7, vbar_el1
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
mrs x5, osdlr_el1
mrs x6, cpacr_el1
mrs x7, tcr_el1
mrs x8, vbar_el1
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x11, tpidr_el1
mrs x12, tpidr_el1
alternative_else
mrs x11, tpidr_el2
mrs x12, tpidr_el2
alternative_endif
mrs x12, sp_el0
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
stp x11, x12, [x0, #80]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
stp x8, x9, [x0, #48]
stp x10, x11, [x0, #64]
stp x12, x13, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@ -156,8 +157,8 @@ ENTRY(cpu_do_resume)
msr cpacr_el1, x6
/* Don't change t0sz here, mask those bits when restoring */
mrs x5, tcr_el1
bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
mrs x7, tcr_el1
bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
msr tcr_el1, x8
msr vbar_el1, x9
@ -181,6 +182,7 @@ alternative_endif
/*
* Restore oslsr_el1 by writing oslar_el1
*/
msr osdlr_el1, x5
ubfx x11, x11, #1, #1
msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0


@ -100,12 +100,6 @@
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* Prefetch */
#define A64_PRFM(Rn, type, target, policy) \
aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
AARCH64_INSN_PRFM_TARGET_##target, \
AARCH64_INSN_PRFM_POLICY_##policy)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \


@ -736,7 +736,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
case BPF_STX | BPF_XADD | BPF_DW:
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
emit(A64_LDXR(isdw, tmp2, tmp), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);


@ -78,6 +78,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_LTO_CLANG if X86_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
@ -124,7 +125,7 @@ config X86
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_PREL32_RELOCATIONS if !LTO_CLANG
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
@ -185,7 +186,7 @@ config X86
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
select HAVE_STACK_VALIDATION if X86_64 && !LTO_CLANG
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UNSTABLE_SCHED_CLOCK


@ -47,7 +47,7 @@ export REALMODE_CFLAGS
export BITS
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs
LDFLAGS_vmlinux := --emit-relocs --discard-none
endif
#
@ -214,6 +214,11 @@ ifdef CONFIG_X86_64
KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
# Speed up the build
KBUILD_CFLAGS += -pipe
# Workaround for a gcc prelease that unfortunately was shipped in a suse release


@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
u8 *out)
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
if (irq_fpu_usable()) {
kernel_fpu_begin();
*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
kernel_fpu_end();
} else
*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(&ctx->crc, data, len, out);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(&ctx->crc, data, length, out);
return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {


@ -648,6 +648,7 @@ ENTRY(__switch_to_asm)
pushl %ebx
pushl %edi
pushl %esi
pushfl
/* switch stack */
movl %esp, TASK_threadsp(%eax)
@ -670,6 +671,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
popfl
popl %esi
popl %edi
popl %ebx


@ -352,6 +352,7 @@ ENTRY(__switch_to_asm)
pushq %r13
pushq %r14
pushq %r15
pushfq
/* switch stack */
movq %rsp, TASK_threadsp(%rdi)
@ -374,6 +375,7 @@ ENTRY(__switch_to_asm)
#endif
/* restore callee-saved registers */
popfq
popq %r15
popq %r14
popq %r13


@ -10,13 +10,11 @@
#ifdef CONFIG_IA32_EMULATION
/* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
#define __sys_ni_syscall __ia32_sys_ni_syscall
#else /* CONFIG_IA32_EMULATION */
#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#define __sys_ni_syscall sys_ni_syscall
#endif /* CONFIG_IA32_EMULATION */
#include <asm/syscalls_32.h>
@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] =
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
[0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
#include <asm/syscalls_32.h>
};


@ -4,11 +4,17 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
extern asmlinkage long sys_ni_syscall(void);
SYSCALL_DEFINE0(ni_syscall)
{
return sys_ni_syscall();
}
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
#undef __SYSCALL_64
@ -20,6 +26,6 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
*/
[0 ... __NR_syscall_max] = &sys_ni_syscall,
[0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
#include <asm/syscalls_64.h>
};


@ -124,7 +124,7 @@
110 i386 iopl sys_iopl __ia32_sys_iopl
111 i386 vhangup sys_vhangup
112 i386 idle
113 i386 vm86old sys_vm86old sys_ni_syscall
113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall
114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4
115 i386 swapoff sys_swapoff __ia32_sys_swapoff
116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo
@ -177,7 +177,7 @@
163 i386 mremap sys_mremap __ia32_sys_mremap
164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
166 i386 vm86 sys_vm86 sys_ni_syscall
166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall
167 i386 query_module
168 i386 poll sys_poll __ia32_sys_poll
169 i386 nfsservctl


@ -3,7 +3,6 @@
# Building vDSO images for x86.
#
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
@ -68,7 +67,7 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(DISABLE_LTO)
ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
@ -139,6 +138,8 @@ KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(LTO_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(CFI_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)


@ -113,7 +113,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
return err;
}
asmlinkage long sys32_sigreturn(void)
asmlinkage long sys32_sigreturn(const struct pt_regs *__unused)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
@ -139,7 +139,7 @@ asmlinkage long sys32_sigreturn(void)
return 0;
}
asmlinkage long sys32_rt_sigreturn(void)
asmlinkage long sys32_rt_sigreturn(const struct pt_regs *__unused)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe_ia32 __user *frame;


@ -159,7 +159,7 @@ extern pte_t *kmap_pte;
extern pte_t *pkmap_page_table;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT


@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
* order of the fields must match the code in __switch_to_asm().
*/
struct inactive_task_frame {
unsigned long flags;
#ifdef CONFIG_X86_64
unsigned long r15;
unsigned long r14;


@ -44,6 +44,18 @@
return __se_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
}
/*
* To keep the naming coherent, re-define SYSCALL_DEFINE0 to create an alias
* named __ia32_sys_*()
*/
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL(name) \
cond_syscall(sys_##name); \
cond_syscall(__ia32_sys_##name)
@ -168,6 +180,30 @@
} \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
/*
* As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
* obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
* hurt, we only need to re-define it here to keep the naming congruent to
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
* macros to work correctly.
*/
#ifndef SYSCALL_DEFINE0
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused);\
ALLOW_ERROR_INJECTION(__x64_sys_##sname, ERRNO); \
asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
#endif
#ifndef COND_SYSCALL
#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
#endif
#ifndef SYS_NI
#define SYS_NI(name) SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers);
#endif
/*
* For VSYSCALLS, we need to declare these three syscalls with the new
* pt_regs-based calling convention for in-kernel use.


@ -45,7 +45,7 @@
#define VMWARE_PORT_CMD_VCPU_RESERVED 31
#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
__asm__("inl (%%dx)" : \
__asm__("inl (%%dx), %%eax" : \
"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) : \
"0"(VMWARE_HYPERVISOR_MAGIC), \
"1"(VMWARE_PORT_CMD_##cmd), \


@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
struct task_struct *tsk;
int err;
/*
* For a new task use the RESET flags value since there is no before.
* All the status flags are zero; DF and all the system flags must also
* be 0, specifically IF must be 0 because we context switch to the new
* task with interrupts disabled.
*/
frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;


@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
childregs = task_pt_regs(p);
fork_frame = container_of(childregs, struct fork_frame, regs);
frame = &fork_frame->frame;
/*
* For a new task use the RESET flags value since there is no before.
* All the status flags are zero; DF and all the system flags must also
* be 0, specifically IF must be 0 because we context switch to the new
* task with interrupts disabled.
*/
frame->flags = X86_EFLAGS_FIXED;
frame->bp = 0;
frame->ret_addr = (unsigned long) ret_from_fork;
p->thread.sp = (unsigned long) fork_frame;


@ -58,7 +58,6 @@
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/nospec-branch.h>
#include <asm/mpx.h>
#include <asm/vm86.h>
#include <asm/umip.h>
@ -388,13 +387,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
regs->ip = (unsigned long)general_protection;
regs->sp = (unsigned long)&gpregs->orig_ax;
/*
* This situation can be triggered by userspace via
* modify_ldt(2) and the return does not take the regular
* user space exit, so a CPU buffer clear is required when
* MDS mitigation is enabled.
*/
mds_user_clear_cpu_buffers();
return;
}
#endif


@ -1449,7 +1449,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
if (swait_active(q))
swake_up_one(q);
if (apic_lvtt_tscdeadline(apic))
if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
ktimer->expired_tscdeadline = ktimer->tscdeadline;
}


@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
return 0;
}
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
return false;
if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
return false;
return true;
}
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
{
if (efer & efer_reserved_bits)
return false;
if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
return false;
if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
return false;
return true;
return __kvm_valid_efer(vcpu, efer);
}
EXPORT_SYMBOL_GPL(kvm_valid_efer);
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u64 old_efer = vcpu->arch.efer;
u64 efer = msr_info->data;
if (!kvm_valid_efer(vcpu, efer))
return 1;
if (efer & efer_reserved_bits)
return false;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (!msr_info->host_initiated) {
if (!__kvm_valid_efer(vcpu, efer))
return 1;
if (is_paging(vcpu) &&
(vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.arch_capabilities = data;
break;
case MSR_EFER:
return set_efer(vcpu, data);
return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */


@ -660,7 +660,7 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
fixmaps_set++;
}
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
void native_set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys,
pgprot_t flags)
{
/* Sanitize 'prot' against any unsupported bits: */


@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
}
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);


@ -458,7 +458,6 @@ static void crypto_ccm_free(struct aead_instance *inst)
static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *full_name,
const char *ctr_name,
const char *mac_name)
{
@ -486,7 +485,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
mac = __crypto_hash_alg_common(mac_alg);
err = -EINVAL;
if (mac->digestsize != 16)
if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
mac->digestsize != 16)
goto out_put_mac;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@ -509,23 +509,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
/* Not a stream cipher? */
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (ctr->base.cra_blocksize != 1)
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
goto err_drop_ctr;
/* We want the real thing! */
if (crypto_skcipher_alg_ivsize(ctr) != 16)
/* ctr and cbcmac must use the same underlying block cipher. */
if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
goto err_drop_ctr;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_ctr;
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
@ -567,7 +571,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@ -581,12 +584,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
mac_name);
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static struct crypto_template crypto_ccm_tmpl = {
@ -599,23 +597,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
const char *ctr_name;
const char *cipher_name;
char full_name[CRYPTO_MAX_ALG_NAME];
const char *mac_name;
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return PTR_ERR(ctr_name);
cipher_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
mac_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(mac_name))
return PTR_ERR(mac_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
cipher_name);
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static struct crypto_template crypto_ccm_base_tmpl = {


@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha_name,
poly_name) >= CRYPTO_MAX_ALG_NAME)
"%s(%s,%s)", name, chacha->base.cra_name,
poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_drop_chacha;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s,%s)", name, chacha->base.cra_driver_name,

View file

@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
return 0;
}
static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
u8 *out)
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(&ctx->crc, data, len, out);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(&ctx->crc, data, length, out);
return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {


@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *full_name,
const char *ctr_name,
const char *ghash_name)
{
@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
goto err_free_inst;
err = -EINVAL;
if (ghash->digestsize != 16)
if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
ghash->digestsize != 16)
goto err_drop_ghash;
crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* We only support 16-byte blocks. */
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (crypto_skcipher_alg_ivsize(ctr) != 16)
goto out_put_ctr;
/* Not a stream cipher? */
if (ctr->base.cra_blocksize != 1)
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
goto out_put_ctr;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash_alg->cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto out_put_ctr;
memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
inst->alg.base.cra_flags = (ghash->base.cra_flags |
ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char full_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_gcm_create_common(tmpl, tb, full_name,
ctr_name, "ghash");
return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
}
static struct crypto_template crypto_gcm_tmpl = {
@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
{
const char *ctr_name;
const char *ghash_name;
char full_name[CRYPTO_MAX_ALG_NAME];
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_gcm_create_common(tmpl, tb, full_name,
ctr_name, ghash_name);
return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
}
static struct crypto_template crypto_gcm_base_tmpl = {


@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req)
err = skcipher_walk_virt(&walk, req, true);
salsa20_init(state, ctx, walk.iv);
salsa20_init(state, ctx, req->iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;


@ -131,8 +131,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
memcpy(walk->dst.virt.addr, walk->page, n);
skcipher_unmap_dst(walk);
} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
if (WARN_ON(err)) {
/* unexpected case; didn't process all bytes */
if (err) {
/*
* Didn't process all bytes. Either the algorithm is
* broken, or this was the last step and it turned out
* the message wasn't evenly divisible into blocks but
* the algorithm requires it.
*/
err = -EINVAL;
goto finish;
}


@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
acpi_enable_wakeup_devices(ACPI_STATE_S0);
/* Change the configuration of GPEs to avoid spurious wakeup. */
acpi_enable_all_wakeup_gpes();
acpi_os_wait_events_complete();
@@ -1026,6 +1028,8 @@ static void acpi_s2idle_restore(void)
{
acpi_enable_all_runtime_gpes();
acpi_disable_wakeup_devices(ACPI_STATE_S0);
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);


@@ -688,12 +688,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
} else if (blocknum != ssif_info->multi_pos) {
} else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
* but multi_pos starts at one, so the +1.
*/
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
dev_dbg(&ssif_info->client->dev,
"Received message out of sequence, expected %u, got %u\n",
ssif_info->multi_pos - 1, blocknum);
result = -EIO;
} else {
ssif_inc_stat(ssif_info, received_message_parts);
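The offset between the two counters is easy to misread, so here is the fixed comparison in isolation (a minimal sketch; names mirror the driver, the surrounding state machine is omitted). The first middle block carries blocknum 0 and arrives when multi_pos is already 1, so a part is in sequence exactly when blocknum + 1 == multi_pos:

#include <stdbool.h>

static bool ssif_block_in_sequence(unsigned char blocknum,
                                   unsigned int multi_pos)
{
    return blocknum + 1 == multi_pos;
}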


@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
/* Setup SA */
sa = ctx->sa_in;
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
SA_SAVE_IV : SA_NOT_SAVE_IV),
SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
SA_NOT_SAVE_IV : SA_SAVE_IV),
SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
/*
* SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT;
* it's the DIR_(IN|OUT)BOUND that matters.
*/
sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
return 0;
}


@@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
size_t offset_to_sr_ptr;
u32 gd_idx = 0;
int tmp;
bool is_busy;
bool is_busy, force_sd;
/*
* There's a very subtle/disguised "bug" in the hardware that
* gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
* of the hardware spec:
* *drum roll* the AES/(T)DES OFB and CFB modes are listed as
* operation modes for >>> "Block ciphers" <<<.
*
* To work around this issue and stop the hardware from causing
* "overran dst buffer" on ciphertexts that are not a multiple
* of 16 (AES_BLOCK_SIZE), we force the driver to use the
* scatter buffers.
*/
force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
&& (datalen % AES_BLOCK_SIZE);
/* figure how many gd are needed */
tmp = sg_nents_for_len(src, assoclen + datalen);
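Restated on its own, the new predicate says: force the scatter ring whenever a CFB/OFB request is not block-aligned, so the engine cannot overrun dst. A small sketch, with the CRYPTO_MODE_* values as illustrative stand-ins for the driver's definitions:

#include <stdbool.h>

#define AES_BLOCK_SIZE 16
enum { CRYPTO_MODE_CFB = 2, CRYPTO_MODE_OFB = 3 }; /* illustrative values */

static bool must_force_scatter(unsigned int mode, unsigned int datalen)
{
    return (mode == CRYPTO_MODE_CFB || mode == CRYPTO_MODE_OFB) &&
           (datalen % AES_BLOCK_SIZE);
}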
@@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
}
/* figure how many sd are needed */
if (sg_is_last(dst)) {
if (sg_is_last(dst) && force_sd == false) {
num_sd = 0;
} else {
if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
pd->sa_len = sa_len;
pd_uinfo = &dev->pdr_uinfo[pd_entry];
pd_uinfo->async_req = req;
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
pd_uinfo->dest_va = dst;
pd_uinfo->async_req = req;
if (iv_len)
memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
/* get first gd we are going to use */
gd_idx = fst_gd;
pd_uinfo->first_gd = fst_gd;
pd_uinfo->num_gd = num_gd;
gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
pd->src = gd_dma;
/* enable gather */
@@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
* Indicate gather array is not used
*/
pd_uinfo->first_gd = 0xffffffff;
pd_uinfo->num_gd = 0;
}
if (sg_is_last(dst)) {
if (!num_sd) {
/*
* we know the application gives us dst as one whole piece of memory,
* no need to use scatter ring.
*/
pd_uinfo->using_sd = 0;
pd_uinfo->first_sd = 0xffffffff;
pd_uinfo->num_sd = 0;
pd_uinfo->dest_va = dst;
sa->sa_command_0.bf.scatter = 0;
pd->dest = (u32)dma_map_page(dev->core_dev->device,
sg_page(dst), dst->offset,
@@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
nbytes = datalen;
sa->sa_command_0.bf.scatter = 1;
pd_uinfo->using_sd = 1;
pd_uinfo->dest_va = dst;
pd_uinfo->first_sd = fst_sd;
pd_uinfo->num_sd = num_sd;
sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
pd->dest = sd_dma;
/* setup scatter descriptor */


@@ -935,7 +935,7 @@ void psp_pci_init(void)
rc = sev_platform_init(&error);
if (rc) {
dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
goto err;
return;
}
dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,


@@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
/* This function prepares the user key so it can be passed to the hmac
* processing (copied to an internal buffer, or hashed if the key is longer
* than the block size).
*/
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
unsigned int keylen)
{
dma_addr_t key_dma_addr = 0;
@@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
unsigned int hashmode;
unsigned int idx = 0;
int rc = 0;
u8 *key = NULL;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
dma_addr_t padded_authkey_dma_addr =
ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
}
if (keylen != 0) {
key = kmemdup(authkey, keylen, GFP_KERNEL);
if (!key)
return -ENOMEM;
key_dma_addr = dma_map_single(dev, (void *)key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
kzfree(key);
return -ENOMEM;
}
if (keylen > blocksize) {
@@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
if (key_dma_addr)
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
kzfree(key);
return rc;
}
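The same shape recurs in the ccree hash and cipher fixes below: a caller-supplied key may live on the stack, so it is duplicated into kmalloc'd memory before DMA mapping and wiped on every exit path. A condensed sketch of the pattern (the helper name is illustrative, not part of the patch):

static int dma_safe_key_copy(struct device *dev, const u8 *authkey,
                             unsigned int keylen, u8 **out_key,
                             dma_addr_t *out_dma)
{
    u8 *key = kmemdup(authkey, keylen, GFP_KERNEL); /* DMA-able copy */

    if (!key)
        return -ENOMEM;

    *out_dma = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *out_dma)) {
        kzfree(key); /* zeroizing free: the buffer holds key material */
        return -ENOMEM;
    }
    *out_key = key;
    return 0;
}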


@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
*/
static unsigned int cc_get_sgl_nents(struct device *dev,
struct scatterlist *sg_list,
unsigned int nbytes, u32 *lbytes,
bool *is_chained)
unsigned int nbytes, u32 *lbytes)
{
unsigned int nents = 0;
while (nbytes && sg_list) {
if (sg_list->length) {
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
nbytes -= (sg_list->length > nbytes) ?
nbytes : sg_list->length;
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
if (is_chained)
*is_chained = true;
}
nents++;
/* get the number of bytes in the last entry */
*lbytes = nbytes;
nbytes -= (sg_list->length > nbytes) ?
nbytes : sg_list->length;
sg_list = sg_next(sg_list);
}
dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
{
u32 nents, lbytes;
nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
(direct == CC_SG_TO_BUF));
}
@@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
sgl_data->num_of_buffers++;
}
static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
enum dma_data_direction direction)
{
u32 i, j;
struct scatterlist *l_sg = sg;
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
dev_err(dev, "dma_map_page() sg buffer failed\n");
goto err;
}
l_sg = sg_next(l_sg);
}
return nents;
err:
/* Restore mapped parts */
for (j = 0; j < i; j++) {
if (!sg)
break;
dma_unmap_sg(dev, sg, 1, direction);
sg = sg_next(sg);
}
return 0;
}
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
unsigned int nbytes, int direction, u32 *nents,
u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
bool is_chained = false;
if (sg_is_last(sg)) {
/* One entry only case -set to DLLI */
if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
*nents = 1;
*mapped_nents = 1;
} else { /*sg_is_last*/
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
&is_chained);
*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
if (*nents > max_sg_nents) {
*nents = 0;
dev_err(dev, "Too many fragments. current %d max %d\n",
*nents, max_sg_nents);
return -ENOMEM;
}
if (!is_chained) {
/* In case of mmu the number of mapped nents might
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
} else {
/*In this case the driver maps entry by entry so it
* must have the same nents before and after map
*/
*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
direction);
if (*mapped_nents != *nents) {
*nents = *mapped_nents;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
/* In case of mmu the number of mapped nents might
* be changed from the original sgl nents
*/
*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
if (*mapped_nents == 0) {
*nents = 0;
dev_err(dev, "dma_map_sg() sg buffer failed\n");
return -ENOMEM;
}
}
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
bool chained;
u32 size_to_unmap = 0;
if (areq_ctx->mac_buf_dma_addr) {
@@ -612,6 +560,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
if (areq_ctx->gen_ctx.iv_dma_addr) {
dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, DMA_BIDIRECTIONAL);
kzfree(areq_ctx->gen_ctx.iv);
}
/* Release pool */
@@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
size_to_unmap += crypto_aead_ivsize(tfm);
dma_unmap_sg(dev, req->src,
cc_get_sgl_nents(dev, req->src, size_to_unmap,
&dummy, &chained),
cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
dma_unmap_sg(dev, req->dst,
cc_get_sgl_nents(dev, req->dst, size_to_unmap,
&dummy, &chained),
&dummy),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
@@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct device *dev = drvdata_to_dev(drvdata);
gfp_t flags = cc_gfp_flags(&req->base);
int rc = 0;
if (!req->iv) {
areq_ctx->gen_ctx.iv_dma_addr = 0;
areq_ctx->gen_ctx.iv = NULL;
goto chain_iv_exit;
}
areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
hw_iv_size,
DMA_BIDIRECTIONAL);
areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
if (!areq_ctx->gen_ctx.iv)
return -ENOMEM;
areq_ctx->gen_ctx.iv_dma_addr =
dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
hw_iv_size, req->iv);
kzfree(areq_ctx->gen_ctx.iv);
areq_ctx->gen_ctx.iv = NULL;
rc = -ENOMEM;
goto chain_iv_exit;
}
@@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
unsigned int size_for_map = req->assoclen + req->cryptlen;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
u32 sg_index = 0;
bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
u32 size_to_skip = req->assoclen;
@@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
authsize : 0;
src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
&src_last_bytes, &chained);
&src_last_bytes);
sg_index = areq_ctx->src_sgl->length;
//check where the data starts
while (sg_index <= size_to_skip) {
@@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
}
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
&dst_last_bytes, &chained);
&dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip;
@@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents =
cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
cc_get_sgl_nents(dev, src, nbytes, &dummy);
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;


@@ -162,6 +162,7 @@ struct cc_alg_template {
struct async_gen_req_ctx {
dma_addr_t iv_dma_addr;
u8 *iv;
enum drv_crypto_direction op_type;
};


@@ -72,20 +72,28 @@ static inline void tee_fips_error(struct device *dev)
dev_err(dev, "TEE reported error!\n");
}
/*
* This function checks if a cryptocell tee fips error occurred
* and, in such a case, triggers a system error.
*/
void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
{
struct device *dev = drvdata_to_dev(p_drvdata);
if (!cc_get_tee_fips_status(p_drvdata))
tee_fips_error(dev);
}
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
u32 irq, val;
irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
if (irq) {
state = cc_ioread(drvdata, CC_REG(GPR_HOST));
if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
tee_fips_error(dev);
cc_tee_handle_fips_error(drvdata);
}
/* after verifying that there is nothing to do,
@@ -113,8 +121,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
dev_dbg(dev, "Initializing fips tasklet\n");
tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
if (!cc_get_tee_fips_status(p_drvdata))
tee_fips_error(dev);
cc_tee_handle_fips_error(p_drvdata);
return 0;
}


@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
void cc_fips_fini(struct cc_drvdata *drvdata);
void fips_handler(struct cc_drvdata *drvdata);
void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
#else /* CONFIG_CRYPTO_FIPS */
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
static inline void fips_handler(struct cc_drvdata *drvdata) {}
static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */


@@ -64,6 +64,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
u8 *key;
};
/* hash per-session context */
@@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true;
ctx->key_params.key = NULL;
if (keylen) {
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
dma_map_single(dev, (void *)ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
ctx->key_params.key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -881,6 +889,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
}
kzfree(ctx->key_params.key);
return rc;
}
@@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen;
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM;
}
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
kzfree(ctx->key_params.key);
return rc;
}
@@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA224,
@@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey,
.halg = {
.digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
},
},
.hash_mode = DRV_HASH_SHA384,
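Both statesize corrections follow from how truncated hashes work: SHA-224 runs the full SHA-256 compression function and truncates only the final digest, and SHA-384 likewise carries SHA-512-sized internal state, so export/import must round-trip the larger parent state. The sizes involved, as a trivial standalone illustration:

#include <stdio.h>

#define SHA224_DIGEST_SIZE 28
#define SHA256_DIGEST_SIZE 32
#define SHA384_DIGEST_SIZE 48
#define SHA512_DIGEST_SIZE 64

int main(void)
{
    /* the digest is truncated, the internal state is not */
    printf("sha224: digest %d, state sized for %d\n",
           SHA224_DIGEST_SIZE, SHA256_DIGEST_SIZE);
    printf("sha384: digest %d, state sized for %d\n",
           SHA384_DIGEST_SIZE, SHA512_DIGEST_SIZE);
    return 0;
}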


@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
}
ivgen_ctx->pool = NULL_SRAM_ADDR;
/* release "this" context */
kfree(ivgen_ctx);
}
/*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
int rc;
/* Allocate "this" context */
ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
if (!ivgen_ctx)
return -ENOMEM;
drvdata->ivgen_handle = ivgen_ctx;
/* Allocate pool's header for initial enc. key/IV */
ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
&ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
goto out;
}
drvdata->ivgen_handle = ivgen_ctx;
return cc_init_iv_sram(drvdata);
out:


@@ -11,6 +11,7 @@
#include "cc_ivgen.h"
#include "cc_hash.h"
#include "cc_pm.h"
#include "cc_fips.h"
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
rc = cc_suspend_req_queue(drvdata);
if (rc) {
dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
return rc;
}
fini_cc_regs(drvdata);
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
cc_clk_off(drvdata);
return 0;
}
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
/* Enables the device source clk */
rc = cc_clk_on(drvdata);
if (rc) {
dev_err(dev, "failed getting clock back on. We're toast.\n");
return rc;
}
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
rc = init_cc_regs(drvdata, false);
if (rc) {
dev_err(dev, "init_cc_regs (%x)\n", rc);
return rc;
}
/* check if tee fips error occurred during power down */
cc_tee_handle_fips_error(drvdata);
rc = cc_resume_req_queue(drvdata);
if (rc) {


@@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize;
/* store the iv that need to be updated in chain mode */
if (ctx->mode & RK_CRYPTO_DEC)
/* Store the IV that needs to be updated in chain mode,
* and update the IV buffer to contain the next IV for decryption mode.
*/
if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize);
sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
ivsize, dev->total - ivsize);
}
err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err)
@@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE)
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0,
ivsize);
else if (ivsize == AES_BLOCK_SIZE)
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize);
/* Update the IV buffer to contain the next IV for encryption mode. */
if (!(ctx->mode & RK_CRYPTO_DEC)) {
if (dev->aligned) {
memcpy(req->info, sg_virt(dev->sg_dst) +
dev->sg_dst->length - ivsize, ivsize);
} else {
memcpy(req->info, dev->addr_vir +
dev->count - ivsize, ivsize);
}
}
}
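Both hunks implement standard CBC IV chaining: the next request's IV is always the last ciphertext block, which is the tail of the source when decrypting (saved before an in-place operation overwrites it) and the tail of the destination when encrypting. A self-contained sketch of that rule:

#include <stdio.h>
#include <string.h>

#define BLK 16 /* AES block size */

/* encryption: next IV is the tail of the ciphertext just produced */
static void next_iv_enc(const unsigned char *dst, size_t len,
                        unsigned char *iv)
{
    memcpy(iv, dst + len - BLK, BLK);
}

/* decryption: next IV is the tail of the ciphertext consumed */
static void next_iv_dec(const unsigned char *src, size_t len,
                        unsigned char *iv)
{
    memcpy(iv, src + len - BLK, BLK);
}

int main(void)
{
    unsigned char ct[32] = { 0 }, iv[BLK];

    ct[31] = 0xaa;
    next_iv_enc(ct, sizeof(ct), iv);
    printf("%02x\n", iv[BLK - 1]); /* aa */
    next_iv_dec(ct, sizeof(ct), iv);
    return 0;
}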
static void rk_update_iv(struct rk_crypto_info *dev)


@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
stvx_u $out1,$x10,$out
stvx_u $out2,$x20,$out
addi $out,$out,0x30
b Lcbc_dec8x_done
b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
stvx_u $out0,$x00,$out
stvx_u $out1,$x10,$out
addi $out,$out,0x20
b Lcbc_dec8x_done
b Lctr32_enc8x_done
.align 5
Lctr32_enc8x_one:


@@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
vmf->flags & FAULT_FLAG_WRITE);
return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -376,8 +375,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
vmf->flags & FAULT_FLAG_WRITE);
return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,


@@ -28,7 +28,8 @@ KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
-D__NO_FORTIFY \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector) \
-D__DISABLE_EXPORTS
-D__DISABLE_EXPORTS \
$(DISABLE_LTO)
GCOV_PROFILE := n
KASAN_SANITIZE := n


@@ -32,6 +32,12 @@ endif
calcs_ccflags := -mhard-float -msse $(cc_stack_align)
# Use -msse2 only with clang:
# https://bugs.freedesktop.org/show_bug.cgi?id=109487
ifdef CONFIG_CC_IS_CLANG
calcs_ccflags += -msse2
endif
CFLAGS_dcn_calcs.o := $(calcs_ccflags)
CFLAGS_dcn_calc_auto.o := $(calcs_ccflags)
CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare


@@ -32,6 +32,12 @@ endif
dml_ccflags := -mhard-float -msse $(cc_stack_align)
# Use -msse2 only with clang:
# https://bugs.freedesktop.org/show_bug.cgi?id=109487
ifdef CONFIG_CC_IS_CLANG
dml_ccflags += -msse2
endif
CFLAGS_display_mode_lib.o := $(dml_ccflags)
CFLAGS_display_pipe_clocks.o := $(dml_ccflags)
CFLAGS_dml1_display_rq_dlg_calc.o := $(dml_ccflags)


@@ -540,11 +540,11 @@ static void journal_reclaim(struct cache_set *c)
ca->sb.nr_this_dev);
}
bkey_init(k);
SET_KEY_PTRS(k, n);
if (n)
if (n) {
bkey_init(k);
SET_KEY_PTRS(k, n);
c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
}
out:
if (!journal_full(&c->journal))
__closure_wake_up(&c->journal.wait);
@@ -671,6 +671,9 @@ static void journal_write_unlocked(struct closure *cl)
ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
}
/* If KEY_PTRS(k) == 0, this jset gets lost in air */
BUG_ON(i == 0);
atomic_dec_bug(&fifo_back(&c->journal.pin));
bch_journal_next(&c->journal);
journal_reclaim(c);


@@ -1511,6 +1511,7 @@ static void cache_set_free(struct closure *cl)
bch_btree_cache_free(c);
bch_journal_free(c);
mutex_lock(&bch_register_lock);
for_each_cache(ca, c, i)
if (ca) {
ca->set = NULL;
@@ -1529,7 +1530,6 @@ static void cache_set_free(struct closure *cl)
mempool_exit(&c->search);
kfree(c->devices);
mutex_lock(&bch_register_lock);
list_del(&c->list);
mutex_unlock(&bch_register_lock);


@@ -10,6 +10,7 @@ lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
KCOV_INSTRUMENT_rodata.o := n
CFLAGS_lkdtm_rodata.o += $(DISABLE_LTO)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \


@@ -814,7 +814,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
host->mmc_host_ops.start_signal_voltage_switch =
sdhci_arasan_voltage_switch;
sdhci_arasan->has_cqe = true;
host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
host->mmc->caps2 |= MMC_CAP2_CQE;
if (!of_property_read_bool(np, "disable-cqe-dcmd"))
host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}
ret = sdhci_arasan_add_host(sdhci_arasan);


@@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
/* Read cannot cross 4K boundary */
block_size = min_t(loff_t, from + block_size,
round_up(from + 1, SZ_4K)) - from;
writel(from, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
@@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
while (len > 0) {
block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
/* Write cannot cross 4K boundary */
block_size = min_t(loff_t, to + block_size,
round_up(to + 1, SZ_4K)) - to;
writel(to, ispi->base + FADDR);
val = readl(ispi->base + HSFSTS_CTL);
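A worked example makes the clamp clearer: a transfer of up to 64 bytes starting at offset 0xff8 would cross the 4K mark at 0x1000, so it is cut down to 8 bytes and the next loop iteration continues from the boundary. A userspace sketch of the same arithmetic (round_up and the 64-byte FIFO size are assumptions mirroring the driver):

#include <stdio.h>

#define SZ_4K 0x1000UL
#define INTEL_SPI_FIFO_SZ 64
#define round_up(x, a) ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
    unsigned long from = 0xff8, len = 256;
    unsigned long block_size = len < INTEL_SPI_FIFO_SZ ? len : INTEL_SPI_FIFO_SZ;
    unsigned long limit = round_up(from + 1, SZ_4K); /* 0x1000 */

    /* the transfer may not cross a 4K boundary */
    if (from + block_size > limit)
        block_size = limit - from;

    printf("block_size = %lu\n", block_size); /* 8 */
    return 0;
}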


@@ -623,6 +623,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
return &guid_null;
}
static void reap_victim(struct nd_mapping *nd_mapping,
struct nd_label_ent *victim)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
u32 slot = to_slot(ndd, victim->label);
dev_dbg(ndd->dev, "free: %d\n", slot);
nd_label_free_slot(ndd, slot);
victim->label = NULL;
}
static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
@@ -630,9 +641,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex;
struct nd_label_ent *label_ent;
struct nd_label_id label_id;
struct resource *res;
unsigned long *free;
@@ -701,18 +712,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
if (memcmp(nspm->uuid, label_ent->label->uuid,
NSLABEL_UUID_LEN) != 0)
continue;
victim = label_ent;
list_move_tail(&victim->list, &nd_mapping->labels);
break;
}
if (victim) {
dev_dbg(ndd->dev, "free: %d\n", slot);
slot = to_slot(ndd, victim->label);
nd_label_free_slot(ndd, slot);
victim->label = NULL;
if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
|| memcmp(nspm->uuid, label_ent->label->uuid,
NSLABEL_UUID_LEN) == 0)
reap_victim(nd_mapping, label_ent);
}
/* update index */


@@ -1248,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent;
struct resource *res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
new_label_id.id);
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
struct nd_label_id label_id;
if (!nd_label)
continue;
nd_label_gen_id(&label_id, nd_label->uuid,
__le32_to_cpu(nd_label->flags));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
out:


@@ -113,8 +113,12 @@ struct nd_percpu_lane {
spinlock_t lock;
};
enum nd_label_flags {
ND_LABEL_REAP,
};
struct nd_label_ent {
struct list_head list;
unsigned long flags;
struct nd_namespace_label *label;
};


@@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
/* Register charger interrupts */
for (i = 0; i < CHRG_INTR_END; i++) {
pirq = platform_get_irq(info->pdev, i);
if (pirq < 0) {
dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
return pirq;
}
info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
if (info->irq[i] < 0) {
dev_warn(&info->pdev->dev,


@@ -695,6 +695,26 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
* detection reports one despite it not being there.
*/
static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
{
/* ACEPC T8 Cherry Trail Z8350 mini PC */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
/* also match on somewhat unique bios-version */
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
},
},
{
/* ACEPC T11 Cherry Trail Z8350 mini PC */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
/* also match on somewhat unique bios-version */
DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
},
},
{
/* Intel Cherry Trail Compute Stick, Windows version */
.matches = {


@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
static int __init hvc_sbi_console_init(void)
{
hvc_instantiate(0, 0, &hvc_sbi_ops);
add_preferred_console("hvc", 0, NULL);
return 0;
}


@@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock);
static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
@@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
char *p;
u_char *q;
u_char __user *up;
int sz;
int sz, fnw_sz;
int delta;
char *first_free, *fj, *fnw;
int i, j, k;
int ret;
unsigned long flags;
if (!capable(CAP_SYS_TTY_CONFIG))
perm = 0;
@@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
goto reterr;
}
fnw = NULL;
fnw_sz = 0;
/* race against other writers */
again:
spin_lock_irqsave(&func_buf_lock, flags);
q = func_table[i];
/* fj pointer to next entry after 'q' */
first_free = funcbufptr + (funcbufsize - funcbufleft);
for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
;
@@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
fj = func_table[j];
else
fj = first_free;
/* buffer usage increase caused by the new entry */
delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
if (delta <= funcbufleft) { /* it fits in current buf */
if (j < MAX_NR_FUNC) {
/* make enough space for new entry at 'fj' */
memmove(fj + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k])
@@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
sz = 256;
while (sz < funcbufsize - funcbufleft + delta)
sz <<= 1;
fnw = kmalloc(sz, GFP_KERNEL);
if(!fnw) {
ret = -ENOMEM;
goto reterr;
if (fnw_sz != sz) {
spin_unlock_irqrestore(&func_buf_lock, flags);
kfree(fnw);
fnw = kmalloc(sz, GFP_KERNEL);
fnw_sz = sz;
if (!fnw) {
ret = -ENOMEM;
goto reterr;
}
goto again;
}
if (!q)
func_table[i] = fj;
/* copy data before insertion point to new location */
if (fj > funcbufptr)
memmove(fnw, funcbufptr, fj - funcbufptr);
for (k = 0; k < j; k++)
if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr);
/* copy data after insertion point to new location */
if (first_free > fj) {
memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
@@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
funcbufleft = funcbufleft - delta + sz - funcbufsize;
funcbufsize = sz;
}
/* finally insert item itself */
strcpy(func_table[i], kbs->kb_string);
spin_unlock_irqrestore(&func_buf_lock, flags);
break;
}
ret = 0;
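The loop above is the standard way to combine a spinlock with a sleeping allocator: size the buffer under the lock, drop the lock to kmalloc(GFP_KERNEL), then retake the lock and re-check the size, because a racing KDSKBSENT may have changed the tables in between. A compilable userspace analogue of the same shape:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static char *buf;
static size_t buf_sz;

/* Grow the shared buffer to at least "need" bytes without ever calling
 * the allocator while the lock is held. */
static int grow_buf(size_t need)
{
    char *fnw = NULL;
    size_t fnw_sz = 0;

again:
    pthread_mutex_lock(&buf_lock);
    if (need > buf_sz) {
        if (fnw_sz != need) {
            pthread_mutex_unlock(&buf_lock);
            free(fnw);
            fnw = malloc(need); /* may block: lock is dropped */
            if (!fnw)
                return -1;
            fnw_sz = need;
            goto again; /* race against other writers */
        }
        if (buf)
            memcpy(fnw, buf, buf_sz); /* safe: lock held again */
        free(buf);
        buf = fnw;
        buf_sz = need;
        fnw = NULL;
    }
    pthread_mutex_unlock(&buf_lock);
    free(fnw); /* lost the race to a bigger buffer: discard */
    return 0;
}

int main(void)
{
    return grow_buf(256);
}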


@@ -4155,8 +4155,6 @@ void do_blank_screen(int entering_gfx)
return;
}
if (blank_state != blank_normal_wait)
return;
blank_state = blank_off;
/* don't blank graphics */


@@ -50,8 +50,9 @@
* @page: structure to page
*
*/
static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
static int v9fs_fid_readpage(void *data, struct page *page)
{
struct p9_fid *fid = data;
struct inode *inode = page->mapping->host;
struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
struct iov_iter to;
@@ -122,7 +123,8 @@ static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
return ret;
ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp);
ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
filp->private_data);
p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
return ret;
}


@@ -1452,8 +1452,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
* callers (such as fiemap) which want to know whether the extent is
* shared but do not need a ref count.
*
* This attempts to allocate a transaction in order to account for
* delayed refs, but continues on even when the alloc fails.
* This attempts to attach to the running transaction in order to account for
* delayed refs, but continues on even when no running transaction exists.
*
* Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
*/
@@ -1476,13 +1476,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
tmp = ulist_alloc(GFP_NOFS);
roots = ulist_alloc(GFP_NOFS);
if (!tmp || !roots) {
ulist_free(tmp);
ulist_free(roots);
return -ENOMEM;
ret = -ENOMEM;
goto out;
}
trans = btrfs_join_transaction(root);
trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
goto out;
}
trans = NULL;
down_read(&fs_info->commit_root_sem);
} else {
@@ -1515,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
} else {
up_read(&fs_info->commit_root_sem);
}
out:
ulist_free(tmp);
ulist_free(roots);
return ret;
@@ -1904,14 +1908,20 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
extent_item_objectid);
if (!search_commit_root) {
trans = btrfs_join_transaction(fs_info->extent_root);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
} else {
down_read(&fs_info->commit_root_sem);
trans = btrfs_attach_transaction(fs_info->extent_root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT &&
PTR_ERR(trans) != -EROFS)
return PTR_ERR(trans);
trans = NULL;
}
}
if (trans)
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
else
down_read(&fs_info->commit_root_sem);
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
tree_mod_seq_elem.seq, &refs,
&extent_item_pos, ignore_offset);
@@ -1943,7 +1953,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
free_leaf_list(refs);
out:
if (!search_commit_root) {
if (trans) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
btrfs_end_transaction(trans);
} else {


@@ -2436,6 +2436,16 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
if (tmp) {
/* first we do an atomic uptodate check */
if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
/*
* Do extra check for first_key, eb can be stale due to
* being cached, read from scrub, or have multiple
* parents (shared tree blocks).
*/
if (btrfs_verify_level_key(fs_info, tmp,
parent_level - 1, &first_key, gen)) {
free_extent_buffer(tmp);
return -EUCLEAN;
}
*eb_ret = tmp;
return 0;
}


@@ -408,9 +408,9 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
return ret;
}
static int verify_level_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int level,
struct btrfs_key *first_key, u64 parent_transid)
int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int level,
struct btrfs_key *first_key, u64 parent_transid)
{
int found_level;
struct btrfs_key found_key;
@@ -487,8 +487,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
if (verify_parent_transid(io_tree, eb,
parent_transid, 0))
ret = -EIO;
else if (verify_level_key(fs_info, eb, level,
first_key, parent_transid))
else if (btrfs_verify_level_key(fs_info, eb, level,
first_key, parent_transid))
ret = -EUCLEAN;
else
break;
@@ -995,13 +995,18 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct extent_buffer *buf = NULL;
struct inode *btree_inode = fs_info->btree_inode;
int ret;
buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, WAIT_NONE, 0);
free_extent_buffer(buf);
ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf,
WAIT_NONE, 0);
if (ret < 0)
free_extent_buffer_stale(buf);
else
free_extent_buffer(buf);
}
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
@@ -1021,12 +1026,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
mirror_num);
if (ret) {
free_extent_buffer(buf);
free_extent_buffer_stale(buf);
return ret;
}
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf);
free_extent_buffer_stale(buf);
return -EIO;
} else if (extent_buffer_uptodate(buf)) {
*eb = buf;
@@ -1080,7 +1085,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
level, first_key);
if (ret) {
free_extent_buffer(buf);
free_extent_buffer_stale(buf);
return ERR_PTR(ret);
}
return buf;


@@ -39,6 +39,9 @@ static inline u64 btrfs_sb_offset(int mirror)
struct btrfs_device;
struct btrfs_fs_devices;
int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int level,
struct btrfs_key *first_key, u64 parent_transid);
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid, int level,
struct btrfs_key *first_key);


@@ -10789,9 +10789,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
* held back allocations.
*/
static int btrfs_trim_free_extents(struct btrfs_device *device,
u64 minlen, u64 *trimmed)
struct fstrim_range *range, u64 *trimmed)
{
u64 start = 0, len = 0;
u64 start = range->start, len = 0;
int ret;
*trimmed = 0;
@@ -10834,8 +10834,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
if (!trans)
up_read(&fs_info->commit_root_sem);
ret = find_free_dev_extent_start(trans, device, minlen, start,
&start, &len);
ret = find_free_dev_extent_start(trans, device, range->minlen,
start, &start, &len);
if (trans) {
up_read(&fs_info->commit_root_sem);
btrfs_put_transaction(trans);
@@ -10848,6 +10848,16 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
break;
}
/* If we are out of the passed range break */
if (start > range->start + range->len - 1) {
mutex_unlock(&fs_info->chunk_mutex);
ret = 0;
break;
}
start = max(range->start, start);
len = min(range->len, len);
ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
mutex_unlock(&fs_info->chunk_mutex);
@@ -10857,6 +10867,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
start += len;
*trimmed += bytes;
/* We've trimmed enough */
if (*trimmed >= range->len)
break;
if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS;
break;
@@ -10940,8 +10954,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) {
ret = btrfs_trim_free_extents(device, range->minlen,
&group_trimmed);
ret = btrfs_trim_free_extents(device, range, &group_trimmed);
if (ret) {
dev_failed++;
dev_ret = ret;
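The net effect of passing the whole fstrim_range down is a per-extent intersection with the user's window, plus the early exits once the window is exhausted. A worked userspace example of the clamping (the struct mirrors the FITRIM uapi; the values are arbitrary):

#include <stdio.h>

struct fstrim_range { unsigned long long start, len, minlen; };

int main(void)
{
    struct fstrim_range range = { .start = 4096, .len = 8192 };
    unsigned long long start = 0, len = 16384; /* free extent found */

    /* extent begins past the window: nothing left to trim */
    if (start > range.start + range.len - 1)
        return 0;

    start = start > range.start ? start : range.start; /* max() */
    len = len < range.len ? len : range.len;           /* min() */

    printf("discard [%llu, %llu)\n", start, start + len); /* [4096, 12288) */
    return 0;
}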


@@ -6583,6 +6583,38 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
return btrfs_commit_transaction(trans);
}
/*
* Make sure any existing delalloc is flushed for any root used by a send
* operation so that we do not miss any data and we do not race with writeback
* finishing and changing a tree while send is using the tree. This could
* happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
* a send operation then uses the subvolume.
* After flushing delalloc, ensure_commit_roots_uptodate() must be called.
*/
static int flush_delalloc_roots(struct send_ctx *sctx)
{
struct btrfs_root *root = sctx->parent_root;
int ret;
int i;
if (root) {
ret = btrfs_start_delalloc_snapshot(root);
if (ret)
return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
for (i = 0; i < sctx->clone_roots_cnt; i++) {
root = sctx->clone_roots[i].root;
ret = btrfs_start_delalloc_snapshot(root);
if (ret)
return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
return 0;
}
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
{
spin_lock(&root->root_item_lock);
@@ -6807,6 +6839,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
NULL);
sort_clone_roots = 1;
ret = flush_delalloc_roots(sctx);
if (ret)
goto out;
ret = ensure_commit_roots_uptodate(sctx);
if (ret)
goto out;


@@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
}
trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
write);
result = vmf_insert_pfn_pmd(vmf, pfn, write);
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
@@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
break;
#ifdef CONFIG_FS_DAX_PMD
case PE_SIZE_PMD:
ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
pfn, true);
ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
break;
#endif
default:


@@ -1672,6 +1672,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
extern void ext4_update_dynamic_rev(struct super_block *sb);
#define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool ext4_has_feature_##name(struct super_block *sb) \
{ \
@@ -1680,6 +1682,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_compat |= \
cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
} \
@@ -1697,6 +1700,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
} \
@@ -1714,6 +1718,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \
static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_incompat |= \
cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
} \
@@ -2676,7 +2681,6 @@ do { \
#endif
extern void ext4_update_dynamic_rev(struct super_block *sb);
extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
__u32 compat);
extern int ext4_update_rocompat_feature(handle_t *handle,


@@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0;
size_t ext_size = 0;
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
@@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
le16_add_cpu(&neh->eh_entries, m);
}
/* zero out unused area in the extent block */
ext_size = sizeof(struct ext4_extent_header) +
sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m);
}
/* zero out unused area in the extent block */
ext_size = sizeof(struct ext4_extent_header) +
(sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
memset(bh->b_data + ext_size, 0,
inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh);
unlock_buffer(bh);
@@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0;
size_t ext_size = 0;
/* Try to prepend new index to old one */
if (ext_depth(inode))
@@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
goto out;
}
ext_size = sizeof(EXT4_I(inode)->i_data);
/* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data,
sizeof(EXT4_I(inode)->i_data));
memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
/* zero out unused area in the extent block */
memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
/* set size of new block */
neh = ext_block_hdr(bh);


@@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
}
ret = __generic_file_write_iter(iocb, from);
/*
* Unaligned direct AIO must be the only IO in flight. Otherwise
* overlapping aligned IO after unaligned might result in data
* corruption.
*/
if (ret == -EIOCBQUEUED && unaligned_aio)
ext4_unwritten_wait(inode);
inode_unlock(inode);
if (ret > 0)
