This is the 4.19.45 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlzk4CsACgkQONu9yGCS
 aT5Xaw//UWopx4Yqbiv+4HBgW+2ijP4utxI4lBNYITD44jvkyVJnztUtVkWepu5r
 Tkl/7zytXOpxbpuhS0xqpWwG7lL5eT4NCG08KSX4lYQVjIWX4YzVkw9gLe9V2AaK
 IqTzaWtbuagARbnR3UC65TI4kjRGsr9ldY0AbbGGVTM6IwPquHN9Qd9TAzRwRohn
 CxY94Bwp1RcN2sSPkD3nUCUGOSNh97BXyypeM7FyceOzOpyAdQCXoUPc84cPqdNC
 4GBkd5Z1IL/7zX3HDjQeGS0KK6e1enslSmsbSSUVuHI90LCr3CZPJkFF8RFnPnff
 2RA7bdhp8C1JPeLDimr+SNSLEl9yywoH6d4UQAnBwoLDjiFCEITVgjDtYzzd81+1
 ES6lbUAs8v/LXkaCaExq6pNNd1prg6Mj9Fe6cz+G9V/YV1tLUsoAJHdFucu8Sp7w
 rwz/PZ6waCf8VRO4aYFF9b+u7PQ/RFZWQYsz22P7PhAYg0CTajV1FWGk1AYi0+wQ
 5YCmthbWhDo9U5lAFyQ0pVTXv/UNgEu6MfV1/jKtCk5AzsbE77orj1xusKckHq2e
 QojgmELmHMlFFajI0h/ddDo7iwz/5OrPVs9D03RysiOciMzdTKPucPyC0Ah4yEBA
 sJ0cQkaVtqO2Nu3E42lfQTpVIqBgi8NGav+kRwryB1YyKeaXLsM=
 =HJ7O
 -----END PGP SIGNATURE-----

Merge 4.19.45 into android-4.19

Changes in 4.19.45
	locking/rwsem: Prevent decrement of reader count before increment
	x86/speculation/mds: Revert CPU buffer clear on double fault exit
	x86/speculation/mds: Improve CPU buffer clear documentation
	objtool: Fix function fallthrough detection
	arm64: dts: rockchip: Disable DCMDs on RK3399's eMMC controller.
	ARM: dts: exynos: Fix interrupt for shared EINTs on Exynos5260
	ARM: dts: exynos: Fix audio (microphone) routing on Odroid XU3
	mmc: sdhci-of-arasan: Add DTS property to disable DCMDs.
	ARM: exynos: Fix a leaked reference by adding missing of_node_put
	power: supply: axp288_charger: Fix unchecked return value
	power: supply: axp288_fuel_gauge: Add ACEPC T8 and T11 mini PCs to the blacklist
	arm64: mmap: Ensure file offset is treated as unsigned
	arm64: arch_timer: Ensure counter register reads occur with seqlock held
	arm64: compat: Reduce address limit
	arm64: Clear OSDLR_EL1 on CPU boot
	arm64: Save and restore OSDLR_EL1 across suspend/resume
	sched/x86: Save [ER]FLAGS on context switch
	crypto: crypto4xx - fix ctr-aes missing output IV
	crypto: crypto4xx - fix cfb and ofb "overran dst buffer" issues
	crypto: salsa20 - don't access already-freed walk.iv
	crypto: chacha20poly1305 - set cra_name correctly
	crypto: ccp - Do not free psp_master when PLATFORM_INIT fails
	crypto: vmx - fix copy-paste error in CTR mode
	crypto: skcipher - don't WARN on unprocessed data after slow walk step
	crypto: crct10dif-generic - fix use via crypto_shash_digest()
	crypto: x86/crct10dif-pcl - fix use via crypto_shash_digest()
	crypto: arm64/gcm-aes-ce - fix no-NEON fallback code
	crypto: gcm - fix incompatibility between "gcm" and "gcm_base"
	crypto: rockchip - update IV buffer to contain the next IV
	crypto: arm/aes-neonbs - don't access already-freed walk.iv
	crypto: arm64/aes-neonbs - don't access already-freed walk.iv
	mmc: core: Fix tag set memory leak
	ALSA: line6: toneport: Fix broken usage of timer for delayed execution
	ALSA: usb-audio: Fix a memory leak bug
	ALSA: hda/hdmi - Read the pin sense from register when repolling
	ALSA: hda/hdmi - Consider eld_valid when reporting jack event
	ALSA: hda/realtek - EAPD turn on later
	ALSA: hdea/realtek - Headset fixup for System76 Gazelle (gaze14)
	ASoC: max98090: Fix restore of DAPM Muxes
	ASoC: RT5677-SPI: Disable 16Bit SPI Transfers
	ASoC: fsl_esai: Fix missing break in switch statement
	ASoC: codec: hdac_hdmi add device_link to card device
	bpf, arm64: remove prefetch insn in xadd mapping
	crypto: ccree - remove special handling of chained sg
	crypto: ccree - fix mem leak on error path
	crypto: ccree - don't map MAC key on stack
	crypto: ccree - use correct internal state sizes for export
	crypto: ccree - don't map AEAD key and IV on stack
	crypto: ccree - pm resume first enable the source clk
	crypto: ccree - HOST_POWER_DOWN_EN should be the last CC access during suspend
	crypto: ccree - add function to handle cryptocell tee fips error
	crypto: ccree - handle tee fips error during power management resume
	mm/mincore.c: make mincore() more conservative
	mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses
	mm/hugetlb.c: don't put_page in lock of hugetlb_lock
	hugetlb: use same fault hash key for shared and private mappings
	ocfs2: fix ocfs2 read inode data panic in ocfs2_iget
	userfaultfd: use RCU to free the task struct when fork fails
	ACPI: PM: Set enable_for_wake for wakeup GPEs during suspend-to-idle
	mfd: da9063: Fix OTP control register names to match datasheets for DA9063/63L
	mfd: max77620: Fix swapped FPS_PERIOD_MAX_US values
	mtd: spi-nor: intel-spi: Avoid crossing 4K address boundary on read/write
	tty: vt.c: Fix TIOCL_BLANKSCREEN console blanking if blankinterval == 0
	tty/vt: fix write/write race in ioctl(KDSKBSENT) handler
	jbd2: check superblock mapped prior to committing
	ext4: make sanity check in mballoc more strict
	ext4: ignore e_value_offs for xattrs with value-in-ea-inode
	ext4: avoid drop reference to iloc.bh twice
	ext4: fix use-after-free race with debug_want_extra_isize
	ext4: actually request zeroing of inode table after grow
	ext4: fix ext4_show_options for file systems w/o journal
	btrfs: Check the first key and level for cached extent buffer
	btrfs: Correctly free extent buffer in case btree_read_extent_buffer_pages fails
	btrfs: Honour FITRIM range constraints during free space trim
	Btrfs: send, flush dellaloc in order to avoid data loss
	Btrfs: do not start a transaction during fiemap
	Btrfs: do not start a transaction at iterate_extent_inodes()
	bcache: fix a race between cache register and cacheset unregister
	bcache: never set KEY_PTRS of journal key to 0 in journal_reclaim()
	ipmi:ssif: compare block number correctly for multi-part return messages
	crypto: ccm - fix incompatibility between "ccm" and "ccm_base"
	fs/writeback.c: use rcu_barrier() to wait for inflight wb switches going into workqueue when umount
	tty: Don't force RISCV SBI console as preferred console
	ext4: zero out the unused memory region in the extent tree block
	ext4: fix data corruption caused by overlapping unaligned and aligned IO
	ext4: fix use-after-free in dx_release()
	ext4: avoid panic during forced reboot due to aborted journal
	ALSA: hda/realtek - Corrected fixup for System76 Gazelle (gaze14)
	ALSA: hda/realtek - Fixup headphone noise via runtime suspend
	ALSA: hda/realtek - Fix for Lenovo B50-70 inverted internal microphone bug
	jbd2: fix potential double free
	KVM: x86: Skip EFER vs. guest CPUID checks for host-initiated writes
	KVM: lapic: Busy wait for timer to expire when using hv_timer
	kbuild: turn auto.conf.cmd into a mandatory include file
	xen/pvh: set xen_domain_type to HVM in xen_pvh_init
	libnvdimm/namespace: Fix label tracking error
	iov_iter: optimize page_copy_sane()
	pstore: Centralize init/exit routines
	pstore: Allocate compression during late_initcall()
	pstore: Refactor compression initialization
	ext4: fix compile error when using BUFFER_TRACE
	ext4: don't update s_rev_level if not required
	Linux 4.19.45

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 50f91435a2
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2019-05-22 08:00:39 +02:00

111 changed files with 1030 additions and 590 deletions


@@ -142,45 +142,13 @@ Mitigation points
   mds_user_clear.

   The mitigation is invoked in prepare_exit_to_usermode() which covers
-  most of the kernel to user space transitions. There are a few exceptions
-  which are not invoking prepare_exit_to_usermode() on return to user
-  space. These exceptions use the paranoid exit code.
-
-  - Non Maskable Interrupt (NMI):
-
-    Access to sensible data like keys, credentials in the NMI context is
-    mostly theoretical: The CPU can do prefetching or execute a
-    misspeculated code path and thereby fetching data which might end up
-    leaking through a buffer.
-
-    But for mounting other attacks the kernel stack address of the task is
-    already valuable information. So in full mitigation mode, the NMI is
-    mitigated on the return from do_nmi() to provide almost complete
-    coverage.
-
-  - Double fault (#DF):
-
-    A double fault is usually fatal, but the ESPFIX workaround, which can
-    be triggered from user space through modify_ldt(2) is a recoverable
-    double fault. #DF uses the paranoid exit path, so explicit mitigation
-    in the double fault handler is required.
-
-  - Machine Check Exception (#MC):
-
-    Another corner case is a #MC which hits between the CPU buffer clear
-    invocation and the actual return to user. As this still is in kernel
-    space it takes the paranoid exit path which does not clear the CPU
-    buffers. So the #MC handler repopulates the buffers to some
-    extent. Machine checks are not reliably controllable and the window is
-    extremly small so mitigation would just tick a checkbox that this
-    theoretical corner case is covered. To keep the amount of special
-    cases small, ignore #MC.
-
-  - Debug Exception (#DB):
-
-    This takes the paranoid exit path only when the INT1 breakpoint is in
-    kernel space. #DB on a user space address takes the regular exit path,
-    so no extra mitigation required.
+  all but one of the kernel to user space transitions. The exception
+  is when we return from a Non Maskable Interrupt (NMI), which is
+  handled directly in do_nmi().
+
+  (The reason that NMI is special is that prepare_exit_to_usermode() can
+  enable IRQs.  In NMI context, NMIs are blocked, and we don't want to
+  enable IRQs with NMIs blocked.)

 2. C-State transition
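
For orientation, a hedged sketch (not a verbatim copy from the tree) of the shape of
the buffer-clearing helper the documentation above refers to: a static key decides
whether clearing is enabled, and the clear itself is a VERW instruction with a valid
selector operand, which updated microcode overloads to flush the affected CPU buffers.
The names follow the mds_user_clear key mentioned above; details may differ.

static inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/* VERW with a memory operand: microcode uses it to clear CPU buffers. */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

static inline void mds_user_clear_cpu_buffers(void)
{
	/* Only pay the cost when the MDS user-space mitigation is enabled. */
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}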


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 44
+SUBLEVEL = 45
 EXTRAVERSION =
 NAME = "People's Front"
@@ -630,7 +630,7 @@ ifeq ($(may-sync-config),1)
 # Read in dependencies to all Kconfig* files, make sure to run syncconfig if
 # changes are detected. This should be included after arch/$(SRCARCH)/Makefile
 # because some architectures define CROSS_COMPILE there.
--include include/config/auto.conf.cmd
+include include/config/auto.conf.cmd
 # To avoid any implicit rule to kick in, define an empty command
 $(KCONFIG_CONFIG): ;


@@ -223,7 +223,7 @@
 	wakeup-interrupt-controller {
 		compatible = "samsung,exynos4210-wakeup-eint";
 		interrupt-parent = <&gic>;
-		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 	};
 };


@@ -22,7 +22,7 @@
 		"Headphone Jack", "HPL",
 		"Headphone Jack", "HPR",
 		"Headphone Jack", "MICBIAS",
-		"IN1", "Headphone Jack",
+		"IN12", "Headphone Jack",
 		"Speakers", "SPKL",
 		"Speakers", "SPKR";


@@ -278,6 +278,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;

 	err = skcipher_walk_virt(&walk, req, true);
+	if (err)
+		return err;

 	crypto_cipher_encrypt_one(ctx->tweak_tfm, walk.iv, walk.iv);


@@ -196,6 +196,7 @@ void __init exynos_firmware_init(void)
 		return;

 	addr = of_get_address(nd, 0, NULL, NULL);
+	of_node_put(nd);
 	if (!addr) {
 		pr_err("%s: No address specified.\n", __func__);
 		return;


@@ -639,8 +639,10 @@ void __init exynos_pm_init(void)
 	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
 		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+		of_node_put(np);
 		return;
 	}
+	of_node_put(np);

 	pm_data = (const struct exynos_pm_data *) match->data;


@@ -305,6 +305,7 @@
 		phys = <&emmc_phy>;
 		phy-names = "phy_arasan";
 		power-domains = <&power RK3399_PD_EMMC>;
+		disable-cqe-dcmd;
 		status = "disabled";
 	};


@@ -304,6 +304,8 @@ static int __xts_crypt(struct skcipher_request *req,
 	int err;

 	err = skcipher_walk_virt(&walk, req, false);
+	if (err)
+		return err;

 	kernel_neon_begin();
 	neon_aes_ecb_encrypt(walk.iv, walk.iv, ctx->twkey, ctx->key.rounds, 1);


@@ -434,9 +434,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);

 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;

 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -446,9 +448,9 @@ static int gcm_encrypt(struct aead_request *req)
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);

-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL);
@@ -569,7 +571,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);

 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;


@@ -148,18 +148,47 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
 	isb();
 }

+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {			\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 static inline u64 arch_counter_get_cntpct(void)
 {
+	u64 cnt;
+
 	isb();
-	return arch_timer_reg_read_stable(cntpct_el0);
+	cnt = arch_timer_reg_read_stable(cntpct_el0);
+	arch_counter_enforce_ordering(cnt);
+	return cnt;
 }

 static inline u64 arch_counter_get_cntvct(void)
 {
+	u64 cnt;
+
 	isb();
-	return arch_timer_reg_read_stable(cntvct_el0);
+	cnt = arch_timer_reg_read_stable(cntvct_el0);
+	arch_counter_enforce_ordering(cnt);
+	return cnt;
 }

+#undef arch_counter_enforce_ordering
+
 static inline int arch_timer_arch_init(void)
 {
 	return 0;
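
To make the ordering requirement concrete, a hedged, self-contained illustration (the
structure and field names below are invented for the example, not taken from the tree):
a sequence-lock style reader that samples the counter. Without the fake address
dependency added above, the system register read is not ordered like a memory load, so
it could be speculated outside the seq window and a stale counter value could be paired
with updated conversion data.

struct clock_snapshot {
	unsigned int seq;	/* even outside updates, odd while updating */
	u64 cycle_last;
	u32 mult, shift;
};

static u64 sample_ns(const struct clock_snapshot *s)
{
	unsigned int seq;
	u64 cycles, cycle_last;
	u32 mult, shift;

	do {
		seq = READ_ONCE(s->seq);
		smp_rmb();
		cycle_last = s->cycle_last;
		mult = s->mult;
		shift = s->shift;
		cycles = arch_counter_get_cntvct(); /* ordered like a load, see above */
		smp_rmb();
	} while ((seq & 1) || seq != READ_ONCE(s->seq));

	return ((cycles - cycle_last) * mult) >> shift;
}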


@@ -53,7 +53,15 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  */
 #ifdef CONFIG_COMPAT
+#ifdef CONFIG_ARM64_64K_PAGES
+/*
+ * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
+ * by the compat vectors page.
+ */
 #define TASK_SIZE_32		UL(0x100000000)
+#else
+#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
+#endif /* CONFIG_ARM64_64K_PAGES */
 #define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
 				TASK_SIZE_32 : TASK_SIZE_64)
 #define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \


@@ -135,6 +135,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
  */
 static int clear_os_lock(unsigned int cpu)
 {
+	write_sysreg(0, osdlr_el1);
 	write_sysreg(0, oslar_el1);
 	isb();
 	return 0;


@@ -31,7 +31,7 @@
 SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags,
-		unsigned long, fd, off_t, off)
+		unsigned long, fd, unsigned long, off)
 {
 	if (offset_in_page(off) != 0)
 		return -EINVAL;


@@ -73,6 +73,13 @@ x_tmp		.req	x8
 	movn	x_tmp, #0xff00, lsl #48
 	and	\res, x_tmp, \res
 	mul	\res, \res, \mult
+	/*
+	 * Fake address dependency from the value computed from the counter
+	 * register to subsequent data page accesses so that the sequence
+	 * locking also orders the read of the counter.
+	 */
+	and	x_tmp, \res, xzr
+	add	vdso_data, vdso_data, x_tmp
 	.endm

 	/*
@@ -147,12 +154,12 @@ ENTRY(__kernel_gettimeofday)
 	/* w11 = cs_mono_mult, w12 = cs_shift */
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
-	seqcnt_check fail=1b

 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12

 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=1b
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

@@ -211,13 +218,13 @@ realtime:
 	/* w11 = cs_mono_mult, w12 = cs_shift */
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
-	seqcnt_check fail=realtime

 	/* All computations are done with left-shifted nsecs. */
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12

 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=realtime
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
 	clock_gettime_return, shift=1
@@ -231,7 +238,6 @@ monotonic:
 	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
 	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
 	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
-	seqcnt_check fail=monotonic

 	/* All computations are done with left-shifted nsecs. */
 	lsl	x4, x4, x12
@@ -239,6 +245,7 @@ monotonic:
 	lsl	x9, x9, x12

 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=monotonic
 	get_ts_realtime res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

@@ -253,13 +260,13 @@ monotonic_raw:
 	/* w11 = cs_raw_mult, w12 = cs_shift */
 	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
 	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
-	seqcnt_check fail=monotonic_raw

 	/* All computations are done with left-shifted nsecs. */
 	get_nsec_per_sec res=x9
 	lsl	x9, x9, x12

 	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+	seqcnt_check fail=monotonic_raw
 	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
 		clock_nsec=x15, nsec_to_sec=x9


@@ -70,24 +70,25 @@ ENTRY(cpu_do_suspend)
 	mrs	x2, tpidr_el0
 	mrs	x3, tpidrro_el0
 	mrs	x4, contextidr_el1
-	mrs	x5, cpacr_el1
-	mrs	x6, tcr_el1
-	mrs	x7, vbar_el1
-	mrs	x8, mdscr_el1
-	mrs	x9, oslsr_el1
-	mrs	x10, sctlr_el1
+	mrs	x5, osdlr_el1
+	mrs	x6, cpacr_el1
+	mrs	x7, tcr_el1
+	mrs	x8, vbar_el1
+	mrs	x9, mdscr_el1
+	mrs	x10, oslsr_el1
+	mrs	x11, sctlr_el1
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-	mrs	x11, tpidr_el1
+	mrs	x12, tpidr_el1
 alternative_else
-	mrs	x11, tpidr_el2
+	mrs	x12, tpidr_el2
 alternative_endif
-	mrs	x12, sp_el0
+	mrs	x13, sp_el0
 	stp	x2, x3, [x0]
-	stp	x4, xzr, [x0, #16]
-	stp	x5, x6, [x0, #32]
-	stp	x7, x8, [x0, #48]
-	stp	x9, x10, [x0, #64]
-	stp	x11, x12, [x0, #80]
+	stp	x4, x5, [x0, #16]
+	stp	x6, x7, [x0, #32]
+	stp	x8, x9, [x0, #48]
+	stp	x10, x11, [x0, #64]
+	stp	x12, x13, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
@@ -110,8 +111,8 @@ ENTRY(cpu_do_resume)
 	msr	cpacr_el1, x6

 	/* Don't change t0sz here, mask those bits when restoring */
-	mrs	x5, tcr_el1
-	bfi	x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+	mrs	x7, tcr_el1
+	bfi	x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH

 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
@@ -135,6 +136,7 @@ alternative_endif
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
+	msr	osdlr_el1, x5
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0


@@ -100,12 +100,6 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
 	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
-	aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
-				  AARCH64_INSN_PRFM_TARGET_##target, \
-				  AARCH64_INSN_PRFM_POLICY_##policy)
-
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
 	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \


@@ -736,7 +736,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	case BPF_STX | BPF_XADD | BPF_DW:
 		emit_a64_mov_i(1, tmp, off, ctx);
 		emit(A64_ADD(1, tmp, tmp, dst), ctx);
-		emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 		emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 		emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
 		emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);


@@ -76,15 +76,14 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }

-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
 	if (irq_fpu_usable()) {
 		kernel_fpu_begin();
-		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_pcl(crc, data, len);
 		kernel_fpu_end();
 	} else
-		*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+		*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }

@@ -93,15 +92,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }

 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }

 static struct shash_alg alg = {


@@ -648,6 +648,7 @@ ENTRY(__switch_to_asm)
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl

 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -670,6 +671,7 @@ ENTRY(__switch_to_asm)
 #endif

 	/* restore callee-saved registers */
+	popfl
 	popl	%esi
 	popl	%edi
 	popl	%ebx


@@ -352,6 +352,7 @@ ENTRY(__switch_to_asm)
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+	pushfq

 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -374,6 +375,7 @@ ENTRY(__switch_to_asm)
 #endif

 	/* restore callee-saved registers */
+	popfq
 	popq	%r15
 	popq	%r14
 	popq	%r13


@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
+	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;


@@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct task_struct *tsk;
 	int err;

+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;


@@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;


@@ -58,7 +58,6 @@
 #include <asm/alternative.h>
 #include <asm/fpu/xstate.h>
 #include <asm/trace/mpx.h>
-#include <asm/nospec-branch.h>
 #include <asm/mpx.h>
 #include <asm/vm86.h>
 #include <asm/umip.h>
@@ -388,13 +387,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 		regs->ip = (unsigned long)general_protection;
 		regs->sp = (unsigned long)&gpregs->orig_ax;

-		/*
-		 * This situation can be triggered by userspace via
-		 * modify_ldt(2) and the return does not take the regular
-		 * user space exit, so a CPU buffer clear is required when
-		 * MDS mitigation is enabled.
-		 */
-		mds_user_clear_cpu_buffers();
-
 		return;
 	}
 #endif


@@ -1449,7 +1449,7 @@ static void apic_timer_expired(struct kvm_lapic *apic)
 	if (swait_active(q))
 		swake_up_one(q);

-	if (apic_lvtt_tscdeadline(apic))
+	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
 }


@@ -1162,31 +1162,42 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	return 0;
 }

+static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
+		return false;
+
+	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+		return false;
+
+	return true;
+}
+
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	if (efer & efer_reserved_bits)
 		return false;

-	if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
-		return false;
-
-	if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-		return false;
-
-	return true;
+	return __kvm_valid_efer(vcpu, efer);
 }
 EXPORT_SYMBOL_GPL(kvm_valid_efer);

-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
 	u64 old_efer = vcpu->arch.efer;
+	u64 efer = msr_info->data;

-	if (!kvm_valid_efer(vcpu, efer))
-		return 1;
+	if (efer & efer_reserved_bits)
+		return false;

-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+	if (!msr_info->host_initiated) {
+		if (!__kvm_valid_efer(vcpu, efer))
+			return 1;
+
+		if (is_paging(vcpu) &&
+		    (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+			return 1;
+	}

 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
@@ -2356,7 +2367,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vcpu->arch.arch_capabilities = data;
 		break;
 	case MSR_EFER:
-		return set_efer(vcpu, data);
+		return set_efer(vcpu, msr_info);
 	case MSR_K7_HWCR:
 		data &= ~(u64)0x40;	/* ignore flush filter disable */
 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */


@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
 	}

 	xen_pvh = 1;
+	xen_domain_type = XEN_HVM_DOMAIN;
 	xen_start_flags = pvh_start_info.flags;

 	msr = cpuid_ebx(xen_cpuid_base() + 2);


@@ -455,7 +455,6 @@ static void crypto_ccm_free(struct aead_instance *inst)

 static int crypto_ccm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *mac_name)
 {
@@ -483,7 +482,8 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,
 	mac = __crypto_hash_alg_common(mac_alg);

 	err = -EINVAL;
-	if (mac->digestsize != 16)
+	if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
+	    mac->digestsize != 16)
 		goto out_put_mac;

 	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
@@ -506,23 +506,27 @@ static int crypto_ccm_create_common(struct crypto_template *tmpl,

 	ctr = crypto_spawn_skcipher_alg(&ictx->ctr);

-	/* Not a stream cipher? */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto err_drop_ctr;

-	/* We want the real thing! */
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
+	/* ctr and cbcmac must use the same underlying block cipher. */
+	if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
 		goto err_drop_ctr;

 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto err_drop_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "ccm_base(%s,%s)", ctr->base.cra_driver_name,
 		     mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		goto err_drop_ctr;

-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = ctr->base.cra_flags & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (mac->base.cra_priority +
 				       ctr->base.cra_priority) / 2;
@@ -564,7 +568,6 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
 	char mac_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];

 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -578,12 +581,7 @@ static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
 		     cipher_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;

-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					mac_name);
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }

 static struct crypto_template crypto_ccm_tmpl = {
@@ -596,23 +594,17 @@ static int crypto_ccm_base_create(struct crypto_template *tmpl,
 				  struct rtattr **tb)
 {
 	const char *ctr_name;
-	const char *cipher_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];
+	const char *mac_name;

 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
 		return PTR_ERR(ctr_name);

-	cipher_name = crypto_attr_alg_name(tb[2]);
-	if (IS_ERR(cipher_name))
-		return PTR_ERR(cipher_name);
-
-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)",
-		     ctr_name, cipher_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_ccm_create_common(tmpl, tb, full_name, ctr_name,
-					cipher_name);
+	mac_name = crypto_attr_alg_name(tb[2]);
+	if (IS_ERR(mac_name))
+		return PTR_ERR(mac_name);
+
+	return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
 }

 static struct crypto_template crypto_ccm_base_tmpl = {


@@ -647,8 +647,8 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,

 	err = -ENAMETOOLONG;
 	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-		     "%s(%s,%s)", name, chacha_name,
-		     poly_name) >= CRYPTO_MAX_ALG_NAME)
+		     "%s(%s,%s)", name, chacha->base.cra_name,
+		     poly->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		goto out_drop_chacha;
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "%s(%s,%s)", name, chacha->base.cra_driver_name,


@@ -65,10 +65,9 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 	return 0;
 }

-static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
-			u8 *out)
+static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
 {
-	*(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
+	*(__u16 *)out = crc_t10dif_generic(crc, data, len);
 	return 0;
 }

@@ -77,15 +76,13 @@ static int chksum_finup(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);

-	return __chksum_finup(&ctx->crc, data, len, out);
+	return __chksum_finup(ctx->crc, data, len, out);
 }

 static int chksum_digest(struct shash_desc *desc, const u8 *data,
 			 unsigned int length, u8 *out)
 {
-	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
-
-	return __chksum_finup(&ctx->crc, data, length, out);
+	return __chksum_finup(0, data, length, out);
 }

 static struct shash_alg alg = {
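
As a hedged usage sketch (illustrative only; the wrapper function below is not from the
tree): crypto_shash_digest() is the one-shot call path this fix targets. It invokes the
algorithm's ->digest() on a descriptor whose per-request context was never run through
->init(), so chksum_digest() must not read ctx->crc and instead starts the CRC from 0,
as the new code does.

#include <crypto/hash.h>
#include <linux/err.h>

static int crct10dif_oneshot(const u8 *data, unsigned int len, __u16 *result)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("crct10dif", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* shash_desc still carries a flags field in this kernel */
		/* One-shot: no init/update/final, goes straight to ->digest(). */
		err = crypto_shash_digest(desc, data, len, (u8 *)result);
	}

	crypto_free_shash(tfm);
	return err;
}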


@@ -597,7 +597,6 @@ static void crypto_gcm_free(struct aead_instance *inst)

 static int crypto_gcm_create_common(struct crypto_template *tmpl,
 				    struct rtattr **tb,
-				    const char *full_name,
 				    const char *ctr_name,
 				    const char *ghash_name)
 {
@@ -638,7 +637,8 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,
 		goto err_free_inst;

 	err = -EINVAL;
-	if (ghash->digestsize != 16)
+	if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
+	    ghash->digestsize != 16)
 		goto err_drop_ghash;

 	crypto_set_skcipher_spawn(&ctx->ctr, aead_crypto_instance(inst));
@@ -650,24 +650,24 @@ static int crypto_gcm_create_common(struct crypto_template *tmpl,

 	ctr = crypto_spawn_skcipher_alg(&ctx->ctr);

-	/* We only support 16-byte blocks. */
+	/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
 	err = -EINVAL;
-	if (crypto_skcipher_alg_ivsize(ctr) != 16)
-		goto out_put_ctr;
-
-	/* Not a stream cipher? */
-	if (ctr->base.cra_blocksize != 1)
+	if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
+	    crypto_skcipher_alg_ivsize(ctr) != 16 ||
+	    ctr->base.cra_blocksize != 1)
 		goto out_put_ctr;

 	err = -ENAMETOOLONG;
+	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+		     "gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
+		goto out_put_ctr;
+
 	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
 		     "gcm_base(%s,%s)", ctr->base.cra_driver_name,
 		     ghash_alg->cra_driver_name) >=
 	    CRYPTO_MAX_ALG_NAME)
 		goto out_put_ctr;

-	memcpy(inst->alg.base.cra_name, full_name, CRYPTO_MAX_ALG_NAME);
-
 	inst->alg.base.cra_flags = (ghash->base.cra_flags |
 				    ctr->base.cra_flags) & CRYPTO_ALG_ASYNC;
 	inst->alg.base.cra_priority = (ghash->base.cra_priority +
@@ -709,7 +709,6 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
 	const char *cipher_name;
 	char ctr_name[CRYPTO_MAX_ALG_NAME];
-	char full_name[CRYPTO_MAX_ALG_NAME];

 	cipher_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(cipher_name))
@@ -719,12 +718,7 @@ static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
 	    CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;

-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm(%s)", cipher_name) >=
-	    CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, "ghash");
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
 }

 static struct crypto_template crypto_gcm_tmpl = {
@@ -738,7 +732,6 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 {
 	const char *ctr_name;
 	const char *ghash_name;
-	char full_name[CRYPTO_MAX_ALG_NAME];

 	ctr_name = crypto_attr_alg_name(tb[1]);
 	if (IS_ERR(ctr_name))
@@ -748,12 +741,7 @@ static int crypto_gcm_base_create(struct crypto_template *tmpl,
 	if (IS_ERR(ghash_name))
 		return PTR_ERR(ghash_name);

-	if (snprintf(full_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)",
-		     ctr_name, ghash_name) >= CRYPTO_MAX_ALG_NAME)
-		return -ENAMETOOLONG;
-
-	return crypto_gcm_create_common(tmpl, tb, full_name,
-					ctr_name, ghash_name);
+	return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
 }

 static struct crypto_template crypto_gcm_base_tmpl = {


@@ -161,7 +161,7 @@ static int salsa20_crypt(struct skcipher_request *req)

 	err = skcipher_walk_virt(&walk, req, true);

-	salsa20_init(state, ctx, walk.iv);
+	salsa20_init(state, ctx, req->iv);

 	while (walk.nbytes > 0) {
 		unsigned int nbytes = walk.nbytes;


@@ -131,8 +131,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (WARN_ON(err)) {
-			/* unexpected case; didn't process all bytes */
+		if (err) {
+			/*
+			 * Didn't process all bytes.  Either the algorithm is
+			 * broken, or this was the last step and it turned out
+			 * the message wasn't evenly divisible into blocks but
+			 * the algorithm requires it.
+			 */
 			err = -EINVAL;
 			goto finish;
 		}


@@ -977,6 +977,8 @@ static int acpi_s2idle_prepare(void)
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);

+	acpi_enable_wakeup_devices(ACPI_STATE_S0);
+
 	/* Change the configuration of GPEs to avoid spurious wakeup. */
 	acpi_enable_all_wakeup_gpes();
 	acpi_os_wait_events_complete();
@@ -1026,6 +1028,8 @@ static void acpi_s2idle_restore(void)
 {
 	acpi_enable_all_runtime_gpes();

+	acpi_disable_wakeup_devices(ACPI_STATE_S0);
+
 	if (acpi_sci_irq_valid())
 		disable_irq_wake(acpi_sci_irq);


@@ -688,12 +688,16 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
 			/* End of read */
 			len = ssif_info->multi_len;
 			data = ssif_info->data;
-		} else if (blocknum != ssif_info->multi_pos) {
+		} else if (blocknum + 1 != ssif_info->multi_pos) {
 			/*
 			 * Out of sequence block, just abort.  Block
 			 * numbers start at zero for the second block,
 			 * but multi_pos starts at one, so the +1.
 			 */
+			if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+				dev_dbg(&ssif_info->client->dev,
+					"Received message out of sequence, expected %u, got %u\n",
+					ssif_info->multi_pos - 1, blocknum);
 			result = -EIO;
 		} else {
 			ssif_inc_stat(ssif_info, received_message_parts);


@@ -141,9 +141,10 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	/* Setup SA */
 	sa = ctx->sa_in;

-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_CBC ?
-				 SA_SAVE_IV : SA_NOT_SAVE_IV),
-				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_NOT_SAVE_IV : SA_SAVE_IV),
+				 SA_NOT_LOAD_HASH, (cm == CRYPTO_MODE_ECB ?
+				 SA_LOAD_IV_FROM_SA : SA_LOAD_IV_FROM_STATE),
 				 SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
 				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
 				 SA_OP_GROUP_BASIC, SA_OPCODE_DECRYPT,
@@ -162,6 +163,11 @@ static int crypto4xx_setkey_aes(struct crypto_skcipher *cipher,
 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
 	sa = ctx->sa_out;
 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+	/*
+	 * SA_OPCODE_ENCRYPT is the same value as SA_OPCODE_DECRYPT.
+	 * it's the DIR_(IN|OUT)BOUND that matters
+	 */
+	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;

 	return 0;
 }


@@ -712,7 +712,23 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	size_t offset_to_sr_ptr;
 	u32 gd_idx = 0;
 	int tmp;
-	bool is_busy;
+	bool is_busy, force_sd;
+
+	/*
+	 * There's a very subtile/disguised "bug" in the hardware that
+	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
+	 * of the hardware spec:
+	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
+	 * operation modes for >>> "Block ciphers" <<<.
+	 *
+	 * To workaround this issue and stop the hardware from causing
+	 * "overran dst buffer" on crypttexts that are not a multiple
+	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
+	 * scatter buffers.
+	 */
+	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
+		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
+		&& (datalen % AES_BLOCK_SIZE);

 	/* figure how many gd are needed */
 	tmp = sg_nents_for_len(src, assoclen + datalen);
@@ -730,7 +746,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	}

 	/* figure how many sd are needed */
-	if (sg_is_last(dst)) {
+	if (sg_is_last(dst) && force_sd == false) {
 		num_sd = 0;
 	} else {
 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
@@ -805,9 +821,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 	pd->sa_len = sa_len;

 	pd_uinfo = &dev->pdr_uinfo[pd_entry];
-	pd_uinfo->async_req = req;
 	pd_uinfo->num_gd = num_gd;
 	pd_uinfo->num_sd = num_sd;
+	pd_uinfo->dest_va = dst;
+	pd_uinfo->async_req = req;

 	if (iv_len)
 		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
@@ -826,7 +843,6 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		/* get first gd we are going to use */
 		gd_idx = fst_gd;
 		pd_uinfo->first_gd = fst_gd;
-		pd_uinfo->num_gd = num_gd;
 		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
 		pd->src = gd_dma;
 		/* enable gather */
@@ -863,17 +879,14 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		 * Indicate gather array is not used
 		 */
 		pd_uinfo->first_gd = 0xffffffff;
-		pd_uinfo->num_gd = 0;
 	}
-	if (sg_is_last(dst)) {
+	if (!num_sd) {
 		/*
 		 * we know application give us dst a whole piece of memory
 		 * no need to use scatter ring.
 		 */
 		pd_uinfo->using_sd = 0;
 		pd_uinfo->first_sd = 0xffffffff;
-		pd_uinfo->num_sd = 0;
-		pd_uinfo->dest_va = dst;
 		sa->sa_command_0.bf.scatter = 0;
 		pd->dest = (u32)dma_map_page(dev->core_dev->device,
 					     sg_page(dst), dst->offset,
@@ -887,9 +900,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
 		nbytes = datalen;
 		sa->sa_command_0.bf.scatter = 1;
 		pd_uinfo->using_sd = 1;
-		pd_uinfo->dest_va = dst;
 		pd_uinfo->first_sd = fst_sd;
-		pd_uinfo->num_sd = num_sd;
 		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
 		pd->dest = sd_dma;
 		/* setup scatter descriptor */


@@ -935,7 +935,7 @@ void psp_pci_init(void)
 	rc = sev_platform_init(&error);
 	if (rc) {
 		dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
-		goto err;
+		return;
 	}

 	dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,


@@ -415,7 +415,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 /* This function prepers the user key so it can pass to the hmac processing
  * (copy to intenral buffer or hash in case of key longer than block
  */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 				 unsigned int keylen)
 {
 	dma_addr_t key_dma_addr = 0;
@@ -428,6 +428,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	unsigned int hashmode;
 	unsigned int idx = 0;
 	int rc = 0;
+	u8 *key = NULL;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 	dma_addr_t padded_authkey_dma_addr =
 		ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -446,11 +447,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	}

 	if (keylen != 0) {
+
+		key = kmemdup(authkey, keylen, GFP_KERNEL);
+		if (!key)
+			return -ENOMEM;
+
 		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 					      DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, key_dma_addr)) {
 			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 				key, keylen);
+			kzfree(key);
 			return -ENOMEM;
 		}
 		if (keylen > blocksize) {
@@ -533,6 +540,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 	if (key_dma_addr)
 		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

+	kzfree(key);
+
 	return rc;
 }


@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
 				     struct scatterlist *sg_list,
-				     unsigned int nbytes, u32 *lbytes,
-				     bool *is_chained)
+				     unsigned int nbytes, u32 *lbytes)
 {
 	unsigned int nents = 0;

 	while (nbytes && sg_list) {
-		if (sg_list->length) {
-			nents++;
-			/* get the number of bytes in the last entry */
-			*lbytes = nbytes;
-			nbytes -= (sg_list->length > nbytes) ?
-					nbytes : sg_list->length;
-			sg_list = sg_next(sg_list);
-		} else {
-			sg_list = (struct scatterlist *)sg_page(sg_list);
-			if (is_chained)
-				*is_chained = true;
-		}
+		nents++;
+		/* get the number of bytes in the last entry */
+		*lbytes = nbytes;
+		nbytes -= (sg_list->length > nbytes) ?
+				nbytes : sg_list->length;
+		sg_list = sg_next(sg_list);
 	}
 	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
 	return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 {
 	u32 nents, lbytes;

-	nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
 	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
 		       (direct == CC_SG_TO_BUF));
 }
@@ -311,40 +304,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
 	sgl_data->num_of_buffers++;
 }

-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-			 enum dma_data_direction direction)
-{
-	u32 i, j;
-	struct scatterlist *l_sg = sg;
-
-	for (i = 0; i < nents; i++) {
-		if (!l_sg)
-			break;
-		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-			dev_err(dev, "dma_map_page() sg buffer failed\n");
-			goto err;
-		}
-		l_sg = sg_next(l_sg);
-	}
-	return nents;
-
-err:
-	/* Restore mapped parts */
-	for (j = 0; j < i; j++) {
-		if (!sg)
-			break;
-		dma_unmap_sg(dev, sg, 1, direction);
-		sg = sg_next(sg);
-	}
-	return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		     unsigned int nbytes, int direction, u32 *nents,
 		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-	bool is_chained = false;
-
 	if (sg_is_last(sg)) {
 		/* One entry only case -set to DLLI */
 		if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -358,35 +321,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
 		*nents = 1;
 		*mapped_nents = 1;
 	} else {  /*sg_is_last*/
-		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-					  &is_chained);
+		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
 		if (*nents > max_sg_nents) {
 			*nents = 0;
 			dev_err(dev, "Too many fragments. current %d max %d\n",
 				*nents, max_sg_nents);
 			return -ENOMEM;
 		}
-		if (!is_chained) {
-			/* In case of mmu the number of mapped nents might
-			 * be changed from the original sgl nents
-			 */
-			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-			if (*mapped_nents == 0) {
-				*nents = 0;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
-		} else {
-			/*In this case the driver maps entry by entry so it
-			 * must have the same nents before and after map
-			 */
-			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-						      direction);
-			if (*mapped_nents != *nents) {
-				*nents = *mapped_nents;
-				dev_err(dev, "dma_map_sg() sg buffer failed\n");
-				return -ENOMEM;
-			}
+		/* In case of mmu the number of mapped nents might
+		 * be changed from the original sgl nents
+		 */
+		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+		if (*mapped_nents == 0) {
+			*nents = 0;
+			dev_err(dev, "dma_map_sg() sg buffer failed\n");
+			return -ENOMEM;
 		}
 	}

@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 	u32 dummy;
-	bool chained;
 	u32 size_to_unmap = 0;

 	if (areq_ctx->mac_buf_dma_addr) {
@@ -612,6 +560,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	if (areq_ctx->gen_ctx.iv_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
 				 hw_iv_size, DMA_BIDIRECTIONAL);
+		kzfree(areq_ctx->gen_ctx.iv);
 	}

 	/* Release pool */
@@ -636,15 +585,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		size_to_unmap += crypto_aead_ivsize(tfm);

 	dma_unmap_sg(dev, req->src,
-		     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-				      &dummy, &chained),
+		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
 		     DMA_BIDIRECTIONAL);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
 		dma_unmap_sg(dev, req->dst,
 			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-					      &dummy, &chained),
+					      &dummy),
 			     DMA_BIDIRECTIONAL);
 	}
 	if (drvdata->coherent &&
@@ -717,19 +665,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct device *dev = drvdata_to_dev(drvdata);
+	gfp_t flags = cc_gfp_flags(&req->base);
 	int rc = 0;

 	if (!req->iv) {
 		areq_ctx->gen_ctx.iv_dma_addr = 0;
+		areq_ctx->gen_ctx.iv = NULL;
 		goto chain_iv_exit;
 	}

-	areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-						       hw_iv_size,
-						       DMA_BIDIRECTIONAL);
+	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+	if (!areq_ctx->gen_ctx.iv)
+		return -ENOMEM;
+
+	areq_ctx->gen_ctx.iv_dma_addr =
+		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
 		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
 			hw_iv_size, req->iv);
+		kzfree(areq_ctx->gen_ctx.iv);
+		areq_ctx->gen_ctx.iv = NULL;
 		rc = -ENOMEM;
 		goto chain_iv_exit;
 	}
@@ -1022,7 +978,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	unsigned int size_for_map = req->assoclen + req->cryptlen;
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	u32 sg_index = 0;
-	bool chained = false;
 	bool is_gcm4543 = areq_ctx->is_gcm4543;
 	u32 size_to_skip = req->assoclen;

@@ -1043,7 +998,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 			authsize : 0;
 	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-					    &src_last_bytes, &chained);
+					    &src_last_bytes);
 	sg_index = areq_ctx->src_sgl->length;
//check where the data starts //check where the data starts
while (sg_index <= size_to_skip) { while (sg_index <= size_to_skip) {
@ -1085,7 +1040,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
} }
dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
&dst_last_bytes, &chained); &dst_last_bytes);
sg_index = areq_ctx->dst_sgl->length; sg_index = areq_ctx->dst_sgl->length;
offset = size_to_skip; offset = size_to_skip;
@ -1486,7 +1441,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
areq_ctx->in_nents = areq_ctx->in_nents =
cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL); cc_get_sgl_nents(dev, src, nbytes, &dummy);
sg_copy_to_buffer(src, areq_ctx->in_nents, sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes); &curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes; *curr_buff_cnt += nbytes;


@@ -162,6 +162,7 @@ struct cc_alg_template {
 struct async_gen_req_ctx {
 	dma_addr_t iv_dma_addr;
+	u8 *iv;
 	enum drv_crypto_direction op_type;
 };


@@ -72,20 +72,28 @@ static inline void tee_fips_error(struct device *dev)
 	dev_err(dev, "TEE reported error!\n");
 }
 
+/*
+ * This function check if cryptocell tee fips error occurred
+ * and in such case triggers system error
+ */
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata)
+{
+	struct device *dev = drvdata_to_dev(p_drvdata);
+
+	if (!cc_get_tee_fips_status(p_drvdata))
+		tee_fips_error(dev);
+}
+
 /* Deferred service handler, run as interrupt-fired tasklet */
 static void fips_dsr(unsigned long devarg)
 {
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
-	struct device *dev = drvdata_to_dev(drvdata);
-	u32 irq, state, val;
+	u32 irq, val;
 
 	irq = (drvdata->irq & (CC_GPR0_IRQ_MASK));
 
 	if (irq) {
-		state = cc_ioread(drvdata, CC_REG(GPR_HOST));
-
-		if (state != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
-			tee_fips_error(dev);
+		cc_tee_handle_fips_error(drvdata);
 	}
 
 	/* after verifing that there is nothing to do,
@@ -113,8 +121,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata)
 	dev_dbg(dev, "Initializing fips tasklet\n");
 	tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata);
-
-	if (!cc_get_tee_fips_status(p_drvdata))
-		tee_fips_error(dev);
+	cc_tee_handle_fips_error(p_drvdata);
 
 	return 0;
 }


@@ -18,6 +18,7 @@ int cc_fips_init(struct cc_drvdata *p_drvdata);
 void cc_fips_fini(struct cc_drvdata *drvdata);
 void fips_handler(struct cc_drvdata *drvdata);
 void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
+void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata);
 
 #else /* CONFIG_CRYPTO_FIPS */
 
@@ -30,6 +31,7 @@ static inline void cc_fips_fini(struct cc_drvdata *drvdata) {}
 static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
 					  bool ok) {}
 static inline void fips_handler(struct cc_drvdata *drvdata) {}
+static inline void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) {}
 
 #endif /* CONFIG_CRYPTO_FIPS */


@ -64,6 +64,7 @@ struct cc_hash_alg {
struct hash_key_req_ctx { struct hash_key_req_ctx {
u32 keylen; u32 keylen;
dma_addr_t key_dma_addr; dma_addr_t key_dma_addr;
u8 *key;
}; };
/* hash per-session context */ /* hash per-session context */
@ -724,13 +725,20 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
ctx->key_params.keylen = keylen; ctx->key_params.keylen = keylen;
ctx->key_params.key_dma_addr = 0; ctx->key_params.key_dma_addr = 0;
ctx->is_hmac = true; ctx->is_hmac = true;
ctx->key_params.key = NULL;
if (keylen) { if (keylen) {
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr = ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); dma_map_single(dev, (void *)ctx->key_params.key, keylen,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen); ctx->key_params.key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM; return -ENOMEM;
} }
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@ -881,6 +889,9 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen); &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
} }
kzfree(ctx->key_params.key);
return rc; return rc;
} }
@ -907,11 +918,16 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
ctx->key_params.keylen = keylen; ctx->key_params.keylen = keylen;
ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
if (!ctx->key_params.key)
return -ENOMEM;
ctx->key_params.key_dma_addr = ctx->key_params.key_dma_addr =
dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE); dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
key, keylen); key, keylen);
kzfree(ctx->key_params.key);
return -ENOMEM; return -ENOMEM;
} }
dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
@ -963,6 +979,8 @@ static int cc_xcbc_setkey(struct crypto_ahash *ahash,
dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
&ctx->key_params.key_dma_addr, ctx->key_params.keylen); &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
kzfree(ctx->key_params.key);
return rc; return rc;
} }
@ -1598,7 +1616,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey, .setkey = cc_hash_setkey,
.halg = { .halg = {
.digestsize = SHA224_DIGEST_SIZE, .digestsize = SHA224_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE), .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
}, },
}, },
.hash_mode = DRV_HASH_SHA224, .hash_mode = DRV_HASH_SHA224,
@ -1623,7 +1641,7 @@ static struct cc_hash_template driver_hash[] = {
.setkey = cc_hash_setkey, .setkey = cc_hash_setkey,
.halg = { .halg = {
.digestsize = SHA384_DIGEST_SIZE, .digestsize = SHA384_DIGEST_SIZE,
.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE), .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
}, },
}, },
.hash_mode = DRV_HASH_SHA384, .hash_mode = DRV_HASH_SHA384,


@@ -154,9 +154,6 @@ void cc_ivgen_fini(struct cc_drvdata *drvdata)
 	}
 
 	ivgen_ctx->pool = NULL_SRAM_ADDR;
-
-	/* release "this" context */
-	kfree(ivgen_ctx);
 }
 
 /*!
@@ -174,10 +171,12 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 	int rc;
 
 	/* Allocate "this" context */
-	ivgen_ctx = kzalloc(sizeof(*ivgen_ctx), GFP_KERNEL);
+	ivgen_ctx = devm_kzalloc(device, sizeof(*ivgen_ctx), GFP_KERNEL);
 	if (!ivgen_ctx)
 		return -ENOMEM;
 
+	drvdata->ivgen_handle = ivgen_ctx;
+
 	/* Allocate pool's header for initial enc. key/IV */
 	ivgen_ctx->pool_meta = dma_alloc_coherent(device, CC_IVPOOL_META_SIZE,
 						  &ivgen_ctx->pool_meta_dma,
@@ -196,8 +195,6 @@ int cc_ivgen_init(struct cc_drvdata *drvdata)
 		goto out;
 	}
 
-	drvdata->ivgen_handle = ivgen_ctx;
-
 	return cc_init_iv_sram(drvdata);
 
 out:


@@ -11,6 +11,7 @@
 #include "cc_ivgen.h"
 #include "cc_hash.h"
 #include "cc_pm.h"
+#include "cc_fips.h"
 
 #define POWER_DOWN_ENABLE 0x01
 #define POWER_DOWN_DISABLE 0x00
@@ -25,13 +26,13 @@ int cc_pm_suspend(struct device *dev)
 	int rc;
 
 	dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	rc = cc_suspend_req_queue(drvdata);
 	if (rc) {
 		dev_err(dev, "cc_suspend_req_queue (%x)\n", rc);
 		return rc;
 	}
 	fini_cc_regs(drvdata);
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE);
 	cc_clk_off(drvdata);
 	return 0;
 }
@@ -42,19 +43,21 @@ int cc_pm_resume(struct device *dev)
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
 
 	dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
-	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
-
+	/* Enables the device source clk */
 	rc = cc_clk_on(drvdata);
 	if (rc) {
 		dev_err(dev, "failed getting clock back on. We're toast.\n");
 		return rc;
 	}
 
+	cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
 	rc = init_cc_regs(drvdata, false);
 	if (rc) {
 		dev_err(dev, "init_cc_regs (%x)\n", rc);
 		return rc;
 	}
+	/* check if tee fips error occurred during power down */
+	cc_tee_handle_fips_error(drvdata);
 
 	rc = cc_resume_req_queue(drvdata);
 	if (rc) {


@ -250,9 +250,14 @@ static int rk_set_data_start(struct rk_crypto_info *dev)
u8 *src_last_blk = page_address(sg_page(dev->sg_src)) + u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
dev->sg_src->offset + dev->sg_src->length - ivsize; dev->sg_src->offset + dev->sg_src->length - ivsize;
/* store the iv that need to be updated in chain mode */ /* Store the iv that need to be updated in chain mode.
if (ctx->mode & RK_CRYPTO_DEC) * And update the IV buffer to contain the next IV for decryption mode.
*/
if (ctx->mode & RK_CRYPTO_DEC) {
memcpy(ctx->iv, src_last_blk, ivsize); memcpy(ctx->iv, src_last_blk, ivsize);
sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
ivsize, dev->total - ivsize);
}
err = dev->load_data(dev, dev->sg_src, dev->sg_dst); err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
if (!err) if (!err)
@ -288,13 +293,19 @@ static void rk_iv_copyback(struct rk_crypto_info *dev)
struct ablkcipher_request *req = struct ablkcipher_request *req =
ablkcipher_request_cast(dev->async_req); ablkcipher_request_cast(dev->async_req);
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
u32 ivsize = crypto_ablkcipher_ivsize(tfm); u32 ivsize = crypto_ablkcipher_ivsize(tfm);
if (ivsize == DES_BLOCK_SIZE) /* Update the IV buffer to contain the next IV for encryption mode. */
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_TDES_IV_0, if (!(ctx->mode & RK_CRYPTO_DEC)) {
ivsize); if (dev->aligned) {
else if (ivsize == AES_BLOCK_SIZE) memcpy(req->info, sg_virt(dev->sg_dst) +
memcpy_fromio(req->info, dev->reg + RK_CRYPTO_AES_IV_0, ivsize); dev->sg_dst->length - ivsize, ivsize);
} else {
memcpy(req->info, dev->addr_vir +
dev->count - ivsize, ivsize);
}
}
} }
static void rk_update_iv(struct rk_crypto_info *dev) static void rk_update_iv(struct rk_crypto_info *dev)


@@ -1854,7 +1854,7 @@ Lctr32_enc8x_three:
	stvx_u		$out1,$x10,$out
	stvx_u		$out2,$x20,$out
	addi		$out,$out,0x30
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done

.align	5
Lctr32_enc8x_two:
@@ -1866,7 +1866,7 @@ Lctr32_enc8x_two:
	stvx_u		$out0,$x00,$out
	stvx_u		$out1,$x10,$out
	addi		$out,$out,0x20
-	b		Lcbc_dec8x_done
+	b		Lctr32_enc8x_done

.align	5
Lctr32_enc8x_one:


@@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -376,8 +375,7 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-	return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
-			vmf->flags & FAULT_FLAG_WRITE);
+	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,


@@ -540,11 +540,11 @@ static void journal_reclaim(struct cache_set *c)
 				ca->sb.nr_this_dev);
 	}
 
-	bkey_init(k);
-	SET_KEY_PTRS(k, n);
-
-	if (n)
+	if (n) {
+		bkey_init(k);
+		SET_KEY_PTRS(k, n);
 		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
+	}
 out:
 	if (!journal_full(&c->journal))
 		__closure_wake_up(&c->journal.wait);
@@ -671,6 +671,9 @@ static void journal_write_unlocked(struct closure *cl)
 		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
 	}
 
+	/* If KEY_PTRS(k) == 0, this jset gets lost in air */
+	BUG_ON(i == 0);
+
 	atomic_dec_bug(&fifo_back(&c->journal.pin));
 	bch_journal_next(&c->journal);
 	journal_reclaim(c);


@@ -1511,6 +1511,7 @@ static void cache_set_free(struct closure *cl)
 	bch_btree_cache_free(c);
 	bch_journal_free(c);
 
+	mutex_lock(&bch_register_lock);
 	for_each_cache(ca, c, i)
 		if (ca) {
 			ca->set = NULL;
@@ -1529,7 +1530,6 @@ static void cache_set_free(struct closure *cl)
 	mempool_exit(&c->search);
 	kfree(c->devices);
 
-	mutex_lock(&bch_register_lock);
 	list_del(&c->list);
 	mutex_unlock(&bch_register_lock);


@@ -494,6 +494,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_mq_unquiesce_queue(q);
 	blk_cleanup_queue(q);
+	blk_mq_free_tag_set(&mq->tag_set);
 
 	/*
 	 * A request can be completed before the next request, potentially


@@ -814,7 +814,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
 		host->mmc_host_ops.start_signal_voltage_switch =
 					sdhci_arasan_voltage_switch;
 		sdhci_arasan->has_cqe = true;
-		host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
+		host->mmc->caps2 |= MMC_CAP2_CQE;
+		if (!of_property_read_bool(np, "disable-cqe-dcmd"))
+			host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
 	}
 
 	ret = sdhci_arasan_add_host(sdhci_arasan);


@@ -632,6 +632,10 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
 	while (len > 0) {
 		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
 
+		/* Read cannot cross 4K boundary */
+		block_size = min_t(loff_t, from + block_size,
+				   round_up(from + 1, SZ_4K)) - from;
+
 		writel(from, ispi->base + FADDR);
 
 		val = readl(ispi->base + HSFSTS_CTL);
@@ -685,6 +689,10 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
 	while (len > 0) {
 		block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
 
+		/* Write cannot cross 4K boundary */
+		block_size = min_t(loff_t, to + block_size,
+				   round_up(to + 1, SZ_4K)) - to;
+
 		writel(to, ispi->base + FADDR);
 
 		val = readl(ispi->base + HSFSTS_CTL);


@ -623,6 +623,17 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
return &guid_null; return &guid_null;
} }
static void reap_victim(struct nd_mapping *nd_mapping,
struct nd_label_ent *victim)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
u32 slot = to_slot(ndd, victim->label);
dev_dbg(ndd->dev, "free: %d\n", slot);
nd_label_free_slot(ndd, slot);
victim->label = NULL;
}
static int __pmem_label_update(struct nd_region *nd_region, static int __pmem_label_update(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm, struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
int pos, unsigned long flags) int pos, unsigned long flags)
@ -630,9 +641,9 @@ static int __pmem_label_update(struct nd_region *nd_region,
struct nd_namespace_common *ndns = &nspm->nsio.common; struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set; struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent, *victim = NULL;
struct nd_namespace_label *nd_label; struct nd_namespace_label *nd_label;
struct nd_namespace_index *nsindex; struct nd_namespace_index *nsindex;
struct nd_label_ent *label_ent;
struct nd_label_id label_id; struct nd_label_id label_id;
struct resource *res; struct resource *res;
unsigned long *free; unsigned long *free;
@ -701,18 +712,10 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) { list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label) if (!label_ent->label)
continue; continue;
if (memcmp(nspm->uuid, label_ent->label->uuid, if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
NSLABEL_UUID_LEN) != 0) || memcmp(nspm->uuid, label_ent->label->uuid,
continue; NSLABEL_UUID_LEN) == 0)
victim = label_ent; reap_victim(nd_mapping, label_ent);
list_move_tail(&victim->list, &nd_mapping->labels);
break;
}
if (victim) {
dev_dbg(ndd->dev, "free: %d\n", slot);
slot = to_slot(ndd, victim->label);
nd_label_free_slot(ndd, slot);
victim->label = NULL;
} }
/* update index */ /* update index */


@ -1248,12 +1248,27 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for (i = 0; i < nd_region->ndr_mappings; i++) { for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_label_ent *label_ent;
struct resource *res; struct resource *res;
for_each_dpa_resource(ndd, res) for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0) if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s", sprintf((void *) res->name, "%s",
new_label_id.id); new_label_id.id);
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
struct nd_label_id label_id;
if (!nd_label)
continue;
nd_label_gen_id(&label_id, nd_label->uuid,
__le32_to_cpu(nd_label->flags));
if (strcmp(old_label_id.id, label_id.id) == 0)
set_bit(ND_LABEL_REAP, &label_ent->flags);
}
mutex_unlock(&nd_mapping->lock);
} }
kfree(*old_uuid); kfree(*old_uuid);
out: out:


@@ -113,8 +113,12 @@ struct nd_percpu_lane {
 	spinlock_t lock;
 };
 
+enum nd_label_flags {
+	ND_LABEL_REAP,
+};
+
 struct nd_label_ent {
 	struct list_head list;
+	unsigned long flags;
 	struct nd_namespace_label *label;
 };


@@ -832,6 +832,10 @@ static int axp288_charger_probe(struct platform_device *pdev)
 	/* Register charger interrupts */
 	for (i = 0; i < CHRG_INTR_END; i++) {
 		pirq = platform_get_irq(info->pdev, i);
+		if (pirq < 0) {
+			dev_err(&pdev->dev, "Failed to get IRQ: %d\n", pirq);
+			return pirq;
+		}
 		info->irq[i] = regmap_irq_get_virq(info->regmap_irqc, pirq);
 		if (info->irq[i] < 0) {
 			dev_warn(&info->pdev->dev,


@@ -695,6 +695,26 @@ static void fuel_gauge_init_irq(struct axp288_fg_info *info)
  * detection reports one despite it not being there.
  */
 static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = {
+	{
+		/* ACEPC T8 Cherry Trail Z8350 mini PC */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+			/* also match on somewhat unique bios-version */
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+		},
+	},
+	{
+		/* ACEPC T11 Cherry Trail Z8350 mini PC */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+			/* also match on somewhat unique bios-version */
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+		},
+	},
 	{
 		/* Intel Cherry Trail Compute Stick, Windows version */
 		.matches = {


@@ -53,7 +53,6 @@ device_initcall(hvc_sbi_init);
 static int __init hvc_sbi_console_init(void)
 {
 	hvc_instantiate(0, 0, &hvc_sbi_ops);
-	add_preferred_console("hvc", 0, NULL);
 
 	return 0;
 }


@ -123,6 +123,7 @@ static const int NR_TYPES = ARRAY_SIZE(max_vals);
static struct input_handler kbd_handler; static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock); static DEFINE_SPINLOCK(kbd_event_lock);
static DEFINE_SPINLOCK(led_lock); static DEFINE_SPINLOCK(led_lock);
static DEFINE_SPINLOCK(func_buf_lock); /* guard 'func_buf' and friends */
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */ static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */ static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next; static bool dead_key_next;
@ -1990,11 +1991,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
char *p; char *p;
u_char *q; u_char *q;
u_char __user *up; u_char __user *up;
int sz; int sz, fnw_sz;
int delta; int delta;
char *first_free, *fj, *fnw; char *first_free, *fj, *fnw;
int i, j, k; int i, j, k;
int ret; int ret;
unsigned long flags;
if (!capable(CAP_SYS_TTY_CONFIG)) if (!capable(CAP_SYS_TTY_CONFIG))
perm = 0; perm = 0;
@ -2037,7 +2039,14 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
goto reterr; goto reterr;
} }
fnw = NULL;
fnw_sz = 0;
/* race aginst other writers */
again:
spin_lock_irqsave(&func_buf_lock, flags);
q = func_table[i]; q = func_table[i];
/* fj pointer to next entry after 'q' */
first_free = funcbufptr + (funcbufsize - funcbufleft); first_free = funcbufptr + (funcbufsize - funcbufleft);
for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++) for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
; ;
@ -2045,10 +2054,12 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
fj = func_table[j]; fj = func_table[j];
else else
fj = first_free; fj = first_free;
/* buffer usage increase by new entry */
delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string); delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
if (delta <= funcbufleft) { /* it fits in current buf */ if (delta <= funcbufleft) { /* it fits in current buf */
if (j < MAX_NR_FUNC) { if (j < MAX_NR_FUNC) {
/* make enough space for new entry at 'fj' */
memmove(fj + delta, fj, first_free - fj); memmove(fj + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++) for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k]) if (func_table[k])
@ -2061,20 +2072,28 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
sz = 256; sz = 256;
while (sz < funcbufsize - funcbufleft + delta) while (sz < funcbufsize - funcbufleft + delta)
sz <<= 1; sz <<= 1;
fnw = kmalloc(sz, GFP_KERNEL); if (fnw_sz != sz) {
if(!fnw) { spin_unlock_irqrestore(&func_buf_lock, flags);
ret = -ENOMEM; kfree(fnw);
goto reterr; fnw = kmalloc(sz, GFP_KERNEL);
fnw_sz = sz;
if (!fnw) {
ret = -ENOMEM;
goto reterr;
}
goto again;
} }
if (!q) if (!q)
func_table[i] = fj; func_table[i] = fj;
/* copy data before insertion point to new location */
if (fj > funcbufptr) if (fj > funcbufptr)
memmove(fnw, funcbufptr, fj - funcbufptr); memmove(fnw, funcbufptr, fj - funcbufptr);
for (k = 0; k < j; k++) for (k = 0; k < j; k++)
if (func_table[k]) if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr); func_table[k] = fnw + (func_table[k] - funcbufptr);
/* copy data after insertion point to new location */
if (first_free > fj) { if (first_free > fj) {
memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj); memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++) for (k = j; k < MAX_NR_FUNC; k++)
@ -2087,7 +2106,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
funcbufleft = funcbufleft - delta + sz - funcbufsize; funcbufleft = funcbufleft - delta + sz - funcbufsize;
funcbufsize = sz; funcbufsize = sz;
} }
/* finally insert item itself */
strcpy(func_table[i], kbs->kb_string); strcpy(func_table[i], kbs->kb_string);
spin_unlock_irqrestore(&func_buf_lock, flags);
break; break;
} }
ret = 0; ret = 0;


@@ -4155,8 +4155,6 @@ void do_blank_screen(int entering_gfx)
 		return;
 	}
 
-	if (blank_state != blank_normal_wait)
-		return;
 	blank_state = blank_off;
 
 	/* don't blank graphics */


@ -1452,8 +1452,8 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
* callers (such as fiemap) which want to know whether the extent is * callers (such as fiemap) which want to know whether the extent is
* shared but do not need a ref count. * shared but do not need a ref count.
* *
* This attempts to allocate a transaction in order to account for * This attempts to attach to the running transaction in order to account for
* delayed refs, but continues on even when the alloc fails. * delayed refs, but continues on even when no running transaction exists.
* *
* Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
*/ */
@ -1476,13 +1476,16 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
tmp = ulist_alloc(GFP_NOFS); tmp = ulist_alloc(GFP_NOFS);
roots = ulist_alloc(GFP_NOFS); roots = ulist_alloc(GFP_NOFS);
if (!tmp || !roots) { if (!tmp || !roots) {
ulist_free(tmp); ret = -ENOMEM;
ulist_free(roots); goto out;
return -ENOMEM;
} }
trans = btrfs_join_transaction(root); trans = btrfs_attach_transaction(root);
if (IS_ERR(trans)) { if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
goto out;
}
trans = NULL; trans = NULL;
down_read(&fs_info->commit_root_sem); down_read(&fs_info->commit_root_sem);
} else { } else {
@ -1515,6 +1518,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
} else { } else {
up_read(&fs_info->commit_root_sem); up_read(&fs_info->commit_root_sem);
} }
out:
ulist_free(tmp); ulist_free(tmp);
ulist_free(roots); ulist_free(roots);
return ret; return ret;
@ -1904,14 +1908,20 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
extent_item_objectid); extent_item_objectid);
if (!search_commit_root) { if (!search_commit_root) {
trans = btrfs_join_transaction(fs_info->extent_root); trans = btrfs_attach_transaction(fs_info->extent_root);
if (IS_ERR(trans)) if (IS_ERR(trans)) {
return PTR_ERR(trans); if (PTR_ERR(trans) != -ENOENT &&
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem); PTR_ERR(trans) != -EROFS)
} else { return PTR_ERR(trans);
down_read(&fs_info->commit_root_sem); trans = NULL;
}
} }
if (trans)
btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
else
down_read(&fs_info->commit_root_sem);
ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
tree_mod_seq_elem.seq, &refs, tree_mod_seq_elem.seq, &refs,
&extent_item_pos, ignore_offset); &extent_item_pos, ignore_offset);
@ -1943,7 +1953,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
free_leaf_list(refs); free_leaf_list(refs);
out: out:
if (!search_commit_root) { if (trans) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
btrfs_end_transaction(trans); btrfs_end_transaction(trans);
} else { } else {


@@ -2436,6 +2436,16 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 	if (tmp) {
 		/* first we do an atomic uptodate check */
 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
+			/*
+			 * Do extra check for first_key, eb can be stale due to
+			 * being cached, read from scrub, or have multiple
+			 * parents (shared tree blocks).
+			 */
+			if (btrfs_verify_level_key(fs_info, tmp,
+					parent_level - 1, &first_key, gen)) {
+				free_extent_buffer(tmp);
+				return -EUCLEAN;
+			}
 			*eb_ret = tmp;
 			return 0;
 		}


@ -408,9 +408,9 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
return ret; return ret;
} }
static int verify_level_key(struct btrfs_fs_info *fs_info, int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int level, struct extent_buffer *eb, int level,
struct btrfs_key *first_key, u64 parent_transid) struct btrfs_key *first_key, u64 parent_transid)
{ {
int found_level; int found_level;
struct btrfs_key found_key; struct btrfs_key found_key;
@ -487,8 +487,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
if (verify_parent_transid(io_tree, eb, if (verify_parent_transid(io_tree, eb,
parent_transid, 0)) parent_transid, 0))
ret = -EIO; ret = -EIO;
else if (verify_level_key(fs_info, eb, level, else if (btrfs_verify_level_key(fs_info, eb, level,
first_key, parent_transid)) first_key, parent_transid))
ret = -EUCLEAN; ret = -EUCLEAN;
else else
break; break;
@ -995,13 +995,18 @@ void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{ {
struct extent_buffer *buf = NULL; struct extent_buffer *buf = NULL;
struct inode *btree_inode = fs_info->btree_inode; struct inode *btree_inode = fs_info->btree_inode;
int ret;
buf = btrfs_find_create_tree_block(fs_info, bytenr); buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf)) if (IS_ERR(buf))
return; return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
buf, WAIT_NONE, 0); ret = read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf,
free_extent_buffer(buf); WAIT_NONE, 0);
if (ret < 0)
free_extent_buffer_stale(buf);
else
free_extent_buffer(buf);
} }
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr, int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
@ -1021,12 +1026,12 @@ int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK, ret = read_extent_buffer_pages(io_tree, buf, WAIT_PAGE_LOCK,
mirror_num); mirror_num);
if (ret) { if (ret) {
free_extent_buffer(buf); free_extent_buffer_stale(buf);
return ret; return ret;
} }
if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) { if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
free_extent_buffer(buf); free_extent_buffer_stale(buf);
return -EIO; return -EIO;
} else if (extent_buffer_uptodate(buf)) { } else if (extent_buffer_uptodate(buf)) {
*eb = buf; *eb = buf;
@ -1080,7 +1085,7 @@ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid, ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid,
level, first_key); level, first_key);
if (ret) { if (ret) {
free_extent_buffer(buf); free_extent_buffer_stale(buf);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
return buf; return buf;


@@ -39,6 +39,9 @@ static inline u64 btrfs_sb_offset(int mirror)
 struct btrfs_device;
 struct btrfs_fs_devices;
 
+int btrfs_verify_level_key(struct btrfs_fs_info *fs_info,
+			   struct extent_buffer *eb, int level,
+			   struct btrfs_key *first_key, u64 parent_transid);
 struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
 				      u64 parent_transid, int level,
 				      struct btrfs_key *first_key);


@ -10789,9 +10789,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
* held back allocations. * held back allocations.
*/ */
static int btrfs_trim_free_extents(struct btrfs_device *device, static int btrfs_trim_free_extents(struct btrfs_device *device,
u64 minlen, u64 *trimmed) struct fstrim_range *range, u64 *trimmed)
{ {
u64 start = 0, len = 0; u64 start = range->start, len = 0;
int ret; int ret;
*trimmed = 0; *trimmed = 0;
@ -10834,8 +10834,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
if (!trans) if (!trans)
up_read(&fs_info->commit_root_sem); up_read(&fs_info->commit_root_sem);
ret = find_free_dev_extent_start(trans, device, minlen, start, ret = find_free_dev_extent_start(trans, device, range->minlen,
&start, &len); start, &start, &len);
if (trans) { if (trans) {
up_read(&fs_info->commit_root_sem); up_read(&fs_info->commit_root_sem);
btrfs_put_transaction(trans); btrfs_put_transaction(trans);
@ -10848,6 +10848,16 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
break; break;
} }
/* If we are out of the passed range break */
if (start > range->start + range->len - 1) {
mutex_unlock(&fs_info->chunk_mutex);
ret = 0;
break;
}
start = max(range->start, start);
len = min(range->len, len);
ret = btrfs_issue_discard(device->bdev, start, len, &bytes); ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->chunk_mutex);
@ -10857,6 +10867,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
start += len; start += len;
*trimmed += bytes; *trimmed += bytes;
/* We've trimmed enough */
if (*trimmed >= range->len)
break;
if (fatal_signal_pending(current)) { if (fatal_signal_pending(current)) {
ret = -ERESTARTSYS; ret = -ERESTARTSYS;
break; break;
@ -10940,8 +10954,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
mutex_lock(&fs_info->fs_devices->device_list_mutex); mutex_lock(&fs_info->fs_devices->device_list_mutex);
devices = &fs_info->fs_devices->devices; devices = &fs_info->fs_devices->devices;
list_for_each_entry(device, devices, dev_list) { list_for_each_entry(device, devices, dev_list) {
ret = btrfs_trim_free_extents(device, range->minlen, ret = btrfs_trim_free_extents(device, range, &group_trimmed);
&group_trimmed);
if (ret) { if (ret) {
dev_failed++; dev_failed++;
dev_ret = ret; dev_ret = ret;


@ -6583,6 +6583,38 @@ static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
return btrfs_commit_transaction(trans); return btrfs_commit_transaction(trans);
} }
/*
* Make sure any existing dellaloc is flushed for any root used by a send
* operation so that we do not miss any data and we do not race with writeback
* finishing and changing a tree while send is using the tree. This could
* happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
* a send operation then uses the subvolume.
* After flushing delalloc ensure_commit_roots_uptodate() must be called.
*/
static int flush_delalloc_roots(struct send_ctx *sctx)
{
struct btrfs_root *root = sctx->parent_root;
int ret;
int i;
if (root) {
ret = btrfs_start_delalloc_snapshot(root);
if (ret)
return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
for (i = 0; i < sctx->clone_roots_cnt; i++) {
root = sctx->clone_roots[i].root;
ret = btrfs_start_delalloc_snapshot(root);
if (ret)
return ret;
btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
}
return 0;
}
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
{ {
spin_lock(&root->root_item_lock); spin_lock(&root->root_item_lock);
@ -6807,6 +6839,10 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
NULL); NULL);
sort_clone_roots = 1; sort_clone_roots = 1;
ret = flush_delalloc_roots(sctx);
if (ret)
goto out;
ret = ensure_commit_roots_uptodate(sctx); ret = ensure_commit_roots_uptodate(sctx);
if (ret) if (ret)
goto out; goto out;


@@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		}
 
 		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
-		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
-					    write);
+		result = vmf_insert_pfn_pmd(vmf, pfn, write);
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
@@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
 		break;
 #ifdef CONFIG_FS_DAX_PMD
 	case PE_SIZE_PMD:
-		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
-			pfn, true);
+		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
 		break;
 #endif
 	default:


@ -1669,6 +1669,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
#define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */
#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000 #define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000
extern void ext4_update_dynamic_rev(struct super_block *sb);
#define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \ #define EXT4_FEATURE_COMPAT_FUNCS(name, flagname) \
static inline bool ext4_has_feature_##name(struct super_block *sb) \ static inline bool ext4_has_feature_##name(struct super_block *sb) \
{ \ { \
@ -1677,6 +1679,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \ } \
static inline void ext4_set_feature_##name(struct super_block *sb) \ static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \ { \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_compat |= \ EXT4_SB(sb)->s_es->s_feature_compat |= \
cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \ cpu_to_le32(EXT4_FEATURE_COMPAT_##flagname); \
} \ } \
@ -1694,6 +1697,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \ } \
static inline void ext4_set_feature_##name(struct super_block *sb) \ static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \ { \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_ro_compat |= \ EXT4_SB(sb)->s_es->s_feature_ro_compat |= \
cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_##flagname); \
} \ } \
@ -1711,6 +1715,7 @@ static inline bool ext4_has_feature_##name(struct super_block *sb) \
} \ } \
static inline void ext4_set_feature_##name(struct super_block *sb) \ static inline void ext4_set_feature_##name(struct super_block *sb) \
{ \ { \
ext4_update_dynamic_rev(sb); \
EXT4_SB(sb)->s_es->s_feature_incompat |= \ EXT4_SB(sb)->s_es->s_feature_incompat |= \
cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \ cpu_to_le32(EXT4_FEATURE_INCOMPAT_##flagname); \
} \ } \
@ -2673,7 +2678,6 @@ do { \
#endif #endif
extern void ext4_update_dynamic_rev(struct super_block *sb);
extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb,
__u32 compat); __u32 compat);
extern int ext4_update_rocompat_feature(handle_t *handle, extern int ext4_update_rocompat_feature(handle_t *handle,


@ -1035,6 +1035,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
__le32 border; __le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */ ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
int err = 0; int err = 0;
size_t ext_size = 0;
/* make decision: where to split? */ /* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */ /* FIXME: now decision is simplest: at current extent */
@ -1126,6 +1127,10 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
le16_add_cpu(&neh->eh_entries, m); le16_add_cpu(&neh->eh_entries, m);
} }
/* zero out unused area in the extent block */
ext_size = sizeof(struct ext4_extent_header) +
sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries);
memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh); ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
@ -1205,6 +1210,11 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
sizeof(struct ext4_extent_idx) * m); sizeof(struct ext4_extent_idx) * m);
le16_add_cpu(&neh->eh_entries, m); le16_add_cpu(&neh->eh_entries, m);
} }
/* zero out unused area in the extent block */
ext_size = sizeof(struct ext4_extent_header) +
(sizeof(struct ext4_extent) * le16_to_cpu(neh->eh_entries));
memset(bh->b_data + ext_size, 0,
inode->i_sb->s_blocksize - ext_size);
ext4_extent_block_csum_set(inode, neh); ext4_extent_block_csum_set(inode, neh);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
@ -1270,6 +1280,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, goal = 0; ext4_fsblk_t newblock, goal = 0;
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
int err = 0; int err = 0;
size_t ext_size = 0;
/* Try to prepend new index to old one */ /* Try to prepend new index to old one */
if (ext_depth(inode)) if (ext_depth(inode))
@ -1295,9 +1306,11 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
goto out; goto out;
} }
ext_size = sizeof(EXT4_I(inode)->i_data);
/* move top-level index/leaf into new block */ /* move top-level index/leaf into new block */
memmove(bh->b_data, EXT4_I(inode)->i_data, memmove(bh->b_data, EXT4_I(inode)->i_data, ext_size);
sizeof(EXT4_I(inode)->i_data)); /* zero out unused area in the extent block */
memset(bh->b_data + ext_size, 0, inode->i_sb->s_blocksize - ext_size);
/* set size of new block */ /* set size of new block */
neh = ext_block_hdr(bh); neh = ext_block_hdr(bh);


@@ -264,6 +264,13 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	}
 
 	ret = __generic_file_write_iter(iocb, from);
+	/*
+	 * Unaligned direct AIO must be the only IO in flight. Otherwise
+	 * overlapping aligned IO after unaligned might result in data
+	 * corruption.
+	 */
+	if (ret == -EIOCBQUEUED && unaligned_aio)
+		ext4_unwritten_wait(inode);
 	inode_unlock(inode);
 
 	if (ret > 0)


@@ -5373,7 +5373,6 @@ static int ext4_do_update_inode(handle_t *handle,
 		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
 		if (err)
 			goto out_brelse;
-		ext4_update_dynamic_rev(sb);
 		ext4_set_feature_large_file(sb);
 		ext4_handle_sync(handle);
 		err = ext4_handle_dirty_super(handle, sb);
@@ -6024,7 +6023,7 @@ int ext4_expand_extra_isize(struct inode *inode,
 	ext4_write_lock_xattr(inode, &no_expand);
 
-	BUFFER_TRACE(iloc.bh, "get_write_access");
+	BUFFER_TRACE(iloc->bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, iloc->bh);
 	if (error) {
 		brelse(iloc->bh);


@@ -977,7 +977,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (err == 0)
 			err = err2;
 		mnt_drop_write_file(filp);
-		if (!err && (o_group > EXT4_SB(sb)->s_groups_count) &&
+		if (!err && (o_group < EXT4_SB(sb)->s_groups_count) &&
 		    ext4_has_group_desc_csum(sb) &&
 		    test_opt(sb, INIT_INODE_TABLE))
 			err = ext4_register_li_request(sb, o_group);


@@ -1539,7 +1539,7 @@ static int mb_find_extent(struct ext4_buddy *e4b, int block,
 		ex->fe_len += 1 << order;
 	}
 
-	if (ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3))) {
+	if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) {
 		/* Should never happen! (but apparently sometimes does?!?) */
 		WARN_ON(1);
 		ext4_error(e4b->bd_sb, "corruption or bug in mb_find_extent "


@@ -871,12 +871,15 @@ static void dx_release(struct dx_frame *frames)
 {
 	struct dx_root_info *info;
 	int i;
+	unsigned int indirect_levels;
 
 	if (frames[0].bh == NULL)
 		return;
 
 	info = &((struct dx_root *)frames[0].bh->b_data)->info;
-	for (i = 0; i <= info->indirect_levels; i++) {
+	/* save local copy, "info" may be freed after brelse() */
+	indirect_levels = info->indirect_levels;
+	for (i = 0; i <= indirect_levels; i++) {
 		if (frames[i].bh == NULL)
 			break;
 		brelse(frames[i].bh);


@@ -874,6 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
 	if (unlikely(err)) {
 		ext4_std_error(sb, err);
+		iloc.bh = NULL;
 		goto errout;
 	}
 	brelse(dind);


@ -698,7 +698,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO); jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line); save_error_info(sb, function, line);
} }
if (test_opt(sb, ERRORS_PANIC)) { if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
if (EXT4_SB(sb)->s_journal && if (EXT4_SB(sb)->s_journal &&
!(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR)) !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
return; return;
@ -2259,7 +2259,6 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT); es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
le16_add_cpu(&es->s_mnt_count, 1); le16_add_cpu(&es->s_mnt_count, 1);
ext4_update_tstamp(es, s_mtime); ext4_update_tstamp(es, s_mtime);
ext4_update_dynamic_rev(sb);
if (sbi->s_journal) if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb); ext4_set_feature_journal_needs_recovery(sb);
@ -3514,6 +3513,37 @@ int ext4_calculate_overhead(struct super_block *sb)
return 0; return 0;
} }
static void ext4_clamp_want_extra_isize(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
sbi->s_want_extra_isize == 0) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
ext4_msg(sb, KERN_INFO,
"required extra inode space not available");
}
}
static void ext4_set_resv_clusters(struct super_block *sb) static void ext4_set_resv_clusters(struct super_block *sb)
{ {
ext4_fsblk_t resv_clusters; ext4_fsblk_t resv_clusters;
@ -4239,7 +4269,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"data=, fs mounted w/o journal"); "data=, fs mounted w/o journal");
goto failed_mount_wq; goto failed_mount_wq;
} }
sbi->s_def_mount_opt &= EXT4_MOUNT_JOURNAL_CHECKSUM; sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM;
clear_opt(sb, JOURNAL_CHECKSUM); clear_opt(sb, JOURNAL_CHECKSUM);
clear_opt(sb, DATA_FLAGS); clear_opt(sb, DATA_FLAGS);
sbi->s_journal = NULL; sbi->s_journal = NULL;
@ -4388,30 +4418,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
} else if (ret) } else if (ret)
goto failed_mount4a; goto failed_mount4a;
/* determine the minimum size of new large inodes, if present */ ext4_clamp_want_extra_isize(sb);
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
sbi->s_want_extra_isize == 0) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
if (ext4_has_feature_extra_isize(sb)) {
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_want_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_want_extra_isize);
if (sbi->s_want_extra_isize <
le16_to_cpu(es->s_min_extra_isize))
sbi->s_want_extra_isize =
le16_to_cpu(es->s_min_extra_isize);
}
}
/* Check if enough inode space is available */
if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
sbi->s_inode_size) {
sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
EXT4_GOOD_OLD_INODE_SIZE;
ext4_msg(sb, KERN_INFO, "required extra inode space not"
"available");
}
ext4_set_resv_clusters(sb); ext4_set_resv_clusters(sb);
@ -5197,6 +5204,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts; goto restore_opts;
} }
ext4_clamp_want_extra_isize(sb);
if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
test_opt(sb, JOURNAL_CHECKSUM)) { test_opt(sb, JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "changing journal_checksum " ext4_msg(sb, KERN_ERR, "changing journal_checksum "


@@ -1700,7 +1700,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 
 	/* No failures allowed past this point. */
-	if (!s->not_found && here->e_value_size && here->e_value_offs) {
+	if (!s->not_found && here->e_value_size && !here->e_value_inum) {
 		/* Remove the old value. */
 		void *first_val = s->base + min_offs;
 		size_t offs = le16_to_cpu(here->e_value_offs);

@@ -530,8 +530,6 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 	isw->inode = inode;
 
-	atomic_inc(&isw_nr_in_flight);
-
 	/*
 	 * In addition to synchronizing among switchers, I_WB_SWITCH tells
 	 * the RCU protected stat update paths to grab the i_page
@@ -539,6 +537,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
 	 * Let's continue after I_WB_SWITCH is guaranteed to be visible.
 	 */
 	call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
+
+	atomic_inc(&isw_nr_in_flight);
+
 	goto out_unlock;
 
 out_free:
@@ -908,7 +909,11 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
 void cgroup_writeback_umount(void)
 {
 	if (atomic_read(&isw_nr_in_flight)) {
-		synchronize_rcu();
+		/*
+		 * Use rcu_barrier() to wait for all pending callbacks to
+		 * ensure that all in-flight wb switches are in the workqueue.
+		 */
+		rcu_barrier();
 		flush_workqueue(isw_wq);
 	}
 }

@@ -426,9 +426,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			u32 hash;
 
 			index = page->index;
-			hash = hugetlb_fault_mutex_hash(h, current->mm,
-							&pseudo_vma,
-							mapping, index, 0);
+			hash = hugetlb_fault_mutex_hash(h, mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			/*
@@ -625,8 +623,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		addr = index * hpage_size;
 
 		/* mutex taken here, fault path and hole punch */
-		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
-						index, addr);
+		hash = hugetlb_fault_mutex_hash(h, mapping, index, addr);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 		/* See if already present in mapping to avoid alloc/free */

@@ -1366,6 +1366,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
 	journal_superblock_t *sb = journal->j_superblock;
 	int ret;
 
+	/* Buffer got discarded which means block device got invalidated */
+	if (!buffer_mapped(bh))
+		return -EIO;
+
 	trace_jbd2_write_superblock(journal, write_flags);
 	if (!(journal->j_flags & JBD2_BARRIER))
 		write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
@@ -2385,22 +2389,19 @@ static struct kmem_cache *jbd2_journal_head_cache;
 static atomic_t nr_journal_heads = ATOMIC_INIT(0);
 #endif
 
-static int jbd2_journal_init_journal_head_cache(void)
+static int __init jbd2_journal_init_journal_head_cache(void)
 {
-	int retval;
+	J_ASSERT(!jbd2_journal_head_cache);
 
-	J_ASSERT(jbd2_journal_head_cache == NULL);
 	jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
 				sizeof(struct journal_head),
 				0,		/* offset */
 				SLAB_TEMPORARY | SLAB_TYPESAFE_BY_RCU,
 				NULL);		/* ctor */
-	retval = 0;
 	if (!jbd2_journal_head_cache) {
-		retval = -ENOMEM;
 		printk(KERN_EMERG "JBD2: no memory for journal_head cache\n");
+		return -ENOMEM;
 	}
-	return retval;
+	return 0;
 }
 
 static void jbd2_journal_destroy_journal_head_cache(void)
@@ -2646,28 +2647,38 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
 
 struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
-static int __init jbd2_journal_init_handle_cache(void)
+static int __init jbd2_journal_init_inode_cache(void)
 {
-	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
-	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
-		return -ENOMEM;
-	}
+	J_ASSERT(!jbd2_inode_cache);
 	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
-	if (jbd2_inode_cache == NULL) {
-		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
-		kmem_cache_destroy(jbd2_handle_cache);
+	if (!jbd2_inode_cache) {
+		pr_emerg("JBD2: failed to create inode cache\n");
 		return -ENOMEM;
 	}
 	return 0;
 }
 
+static int __init jbd2_journal_init_handle_cache(void)
+{
+	J_ASSERT(!jbd2_handle_cache);
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
+	if (!jbd2_handle_cache) {
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void jbd2_journal_destroy_inode_cache(void)
+{
+	kmem_cache_destroy(jbd2_inode_cache);
+	jbd2_inode_cache = NULL;
+}
+
 static void jbd2_journal_destroy_handle_cache(void)
 {
 	kmem_cache_destroy(jbd2_handle_cache);
 	jbd2_handle_cache = NULL;
-	kmem_cache_destroy(jbd2_inode_cache);
-	jbd2_inode_cache = NULL;
 }
 
 /*
@@ -2678,11 +2689,15 @@ static int __init journal_init_caches(void)
 {
 	int ret;
 
-	ret = jbd2_journal_init_revoke_caches();
+	ret = jbd2_journal_init_revoke_record_cache();
+	if (ret == 0)
+		ret = jbd2_journal_init_revoke_table_cache();
 	if (ret == 0)
 		ret = jbd2_journal_init_journal_head_cache();
 	if (ret == 0)
 		ret = jbd2_journal_init_handle_cache();
+	if (ret == 0)
+		ret = jbd2_journal_init_inode_cache();
 	if (ret == 0)
 		ret = jbd2_journal_init_transaction_cache();
 	return ret;
@@ -2690,9 +2705,11 @@ static int __init journal_init_caches(void)
 
 static void jbd2_journal_destroy_caches(void)
 {
-	jbd2_journal_destroy_revoke_caches();
+	jbd2_journal_destroy_revoke_record_cache();
+	jbd2_journal_destroy_revoke_table_cache();
 	jbd2_journal_destroy_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
+	jbd2_journal_destroy_inode_cache();
 	jbd2_journal_destroy_transaction_cache();
 	jbd2_journal_destroy_slabs();
 }
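The jbd2 hunks above split the combined cache constructors into one init/destroy pair per cache, so journal_init_caches() can chain them and the teardown path can free each cache independently of how far init got. A hedged userspace sketch of that shape, with malloc-backed stand-ins and hypothetical names (kmem_cache_destroy(NULL) is a no-op in the kernel, just as free(NULL) is here):

/* One init and one destroy per cache; the caller chains and unwinds them. */
#include <stdlib.h>

static void *record_cache;
static void *table_cache;

static int init_record_cache(void)
{
	record_cache = malloc(64);
	return record_cache ? 0 : -1;	/* -ENOMEM in the kernel */
}

static int init_table_cache(void)
{
	table_cache = malloc(64);
	return table_cache ? 0 : -1;
}

static void destroy_record_cache(void)
{
	free(record_cache);
	record_cache = NULL;
}

static void destroy_table_cache(void)
{
	free(table_cache);
	table_cache = NULL;
}

/* Mirrors journal_init_caches(): stop at the first failure... */
static int init_caches(void)
{
	int ret = init_record_cache();

	if (ret == 0)
		ret = init_table_cache();
	return ret;
}

/* ...and destroy every cache unconditionally, NULL entries included. */
static void destroy_caches(void)
{
	destroy_record_cache();
	destroy_table_cache();
}

int main(void)
{
	if (init_caches() != 0)
		destroy_caches();	/* partial failure unwinds cleanly */
	else
		destroy_caches();
	return 0;
}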

@@ -178,33 +178,41 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
 	return NULL;
 }
 
-void jbd2_journal_destroy_revoke_caches(void)
+void jbd2_journal_destroy_revoke_record_cache(void)
 {
 	kmem_cache_destroy(jbd2_revoke_record_cache);
 	jbd2_revoke_record_cache = NULL;
+}
+
+void jbd2_journal_destroy_revoke_table_cache(void)
+{
 	kmem_cache_destroy(jbd2_revoke_table_cache);
 	jbd2_revoke_table_cache = NULL;
 }
 
-int __init jbd2_journal_init_revoke_caches(void)
+int __init jbd2_journal_init_revoke_record_cache(void)
 {
 	J_ASSERT(!jbd2_revoke_record_cache);
-	J_ASSERT(!jbd2_revoke_table_cache);
-
 	jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
 					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
-	if (!jbd2_revoke_record_cache)
-		goto record_cache_failure;
 
+	if (!jbd2_revoke_record_cache) {
+		pr_emerg("JBD2: failed to create revoke_record cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int __init jbd2_journal_init_revoke_table_cache(void)
+{
+	J_ASSERT(!jbd2_revoke_table_cache);
 	jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
 					     SLAB_TEMPORARY);
-	if (!jbd2_revoke_table_cache)
-		goto table_cache_failure;
-
-	return 0;
-
-table_cache_failure:
-	jbd2_journal_destroy_revoke_caches();
-record_cache_failure:
+	if (!jbd2_revoke_table_cache) {
+		pr_emerg("JBD2: failed to create revoke_table cache\n");
 		return -ENOMEM;
+	}
+	return 0;
 }
 
 static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)

@@ -42,9 +42,11 @@ int __init jbd2_journal_init_transaction_cache(void)
 					0,
 					SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
 					NULL);
-	if (transaction_cache)
-		return 0;
+	if (!transaction_cache) {
+		pr_emerg("JBD2: failed to create transaction cache\n");
 		return -ENOMEM;
+	}
+	return 0;
 }
 
 void jbd2_journal_destroy_transaction_cache(void)

@@ -148,16 +148,24 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
 	u64 blkno;
 	struct dentry *parent;
 	struct inode *dir = d_inode(child);
+	int set;
 
 	trace_ocfs2_get_parent(child, child->d_name.len, child->d_name.name,
 			       (unsigned long long)OCFS2_I(dir)->ip_blkno);
 
+	status = ocfs2_nfs_sync_lock(OCFS2_SB(dir->i_sb), 1);
+	if (status < 0) {
+		mlog(ML_ERROR, "getting nfs sync lock(EX) failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail;
+	}
+
 	status = ocfs2_inode_lock(dir, NULL, 0);
 	if (status < 0) {
 		if (status != -ENOENT)
 			mlog_errno(status);
 		parent = ERR_PTR(status);
-		goto bail;
+		goto unlock_nfs_sync;
 	}
 
 	status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno);
@@ -166,11 +174,31 @@ static struct dentry *ocfs2_get_parent(struct dentry *child)
 		goto bail_unlock;
 	}
 
+	status = ocfs2_test_inode_bit(OCFS2_SB(dir->i_sb), blkno, &set);
+	if (status < 0) {
+		if (status == -EINVAL) {
+			status = -ESTALE;
+		} else
+			mlog(ML_ERROR, "test inode bit failed %d\n", status);
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
+	trace_ocfs2_get_dentry_test_bit(status, set);
+	if (!set) {
+		status = -ESTALE;
+		parent = ERR_PTR(status);
+		goto bail_unlock;
+	}
+
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
 
+unlock_nfs_sync:
+	ocfs2_nfs_sync_unlock(OCFS2_SB(dir->i_sb), 1);
+
 bail:
 	trace_ocfs2_get_parent_end(parent);
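The ocfs2 hunks above take the NFS sync lock before the inode lock and add an unlock_nfs_sync label so every exit path drops the locks in the reverse order they were taken. A minimal standalone sketch of that goto-based unwind pattern, with dummy lock helpers (names are illustrative, not ocfs2 APIs):

/* Acquire in order, release in reverse, one label per level of unwind. */
#include <stdio.h>

static int take_lock(const char *name)  { printf("lock   %s\n", name); return 0; }
static void drop_lock(const char *name) { printf("unlock %s\n", name); }

static int do_lookup(void)
{
	int status;

	status = take_lock("nfs_sync");
	if (status < 0)
		goto bail;

	status = take_lock("inode");
	if (status < 0)
		goto unlock_nfs_sync;

	printf("lookup parent\n");	/* work done while both locks are held */

	drop_lock("inode");
unlock_nfs_sync:
	drop_lock("nfs_sync");
bail:
	return status;
}

int main(void)
{
	return do_lookup();
}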

@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
 	.kill_sb	= pstore_kill_sb,
 };
 
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
 {
 	int err;
 
-	pstore_choose_compression();
-
 	/* Create a convenient mount point for people to access pstore */
 	err = sysfs_create_mount_point(fs_kobj, "pstore");
 	if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
 out:
 	return err;
 }
-module_init(init_pstore_fs)
 
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
 {
 	unregister_filesystem(&pstore_fs_type);
 	sysfs_remove_mount_point(fs_kobj, "pstore");
 }
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");

@@ -37,7 +37,8 @@ extern bool	pstore_is_mounted(void);
 extern void	pstore_record_init(struct pstore_record *record,
 				   struct pstore_info *psi);
 
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init	pstore_init_fs(void);
+void __exit	pstore_exit_fs(void);
 
 #endif

@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
 
 static void allocate_buf_for_compression(void)
 {
+	struct crypto_comp *ctx;
+	int size;
+	char *buf;
+
+	/* Skip if not built-in or compression backend not selected yet. */
 	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
 		return;
 
+	/* Skip if no pstore backend yet or compression init already done. */
+	if (!psinfo || tfm)
+		return;
+
 	if (!crypto_has_comp(zbackend->name, 0, 0)) {
-		pr_err("No %s compression\n", zbackend->name);
+		pr_err("Unknown compression: %s\n", zbackend->name);
 		return;
 	}
 
-	big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
-	if (big_oops_buf_sz <= 0)
-		return;
-
-	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
-	if (!big_oops_buf) {
-		pr_err("allocate compression buffer error!\n");
+	size = zbackend->zbufsize(psinfo->bufsize);
+	if (size <= 0) {
+		pr_err("Invalid compression size for %s: %d\n",
+		       zbackend->name, size);
 		return;
 	}
 
-	tfm = crypto_alloc_comp(zbackend->name, 0, 0);
-	if (IS_ERR_OR_NULL(tfm)) {
-		kfree(big_oops_buf);
-		big_oops_buf = NULL;
-		pr_err("crypto_alloc_comp() failed!\n");
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Failed %d byte compression buffer allocation for: %s\n",
+		       size, zbackend->name);
 		return;
 	}
+
+	ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+	if (IS_ERR_OR_NULL(ctx)) {
+		kfree(buf);
+		pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+		       PTR_ERR(ctx));
+		return;
+	}
+
+	/* A non-NULL big_oops_buf indicates compression is available. */
+	tfm = ctx;
+	big_oops_buf_sz = size;
+	big_oops_buf = buf;
+
+	pr_info("Using compression: %s\n", zbackend->name);
 }
 
 static void free_buf_for_compression(void)
 {
-	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
 		crypto_free_comp(tfm);
 	kfree(big_oops_buf);
 	big_oops_buf = NULL;
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
 	for (step = zbackends; step->name; step++) {
 		if (!strcmp(compress, step->name)) {
 			zbackend = step;
-			pr_info("using %s compression\n", zbackend->name);
 			return;
 		}
 	}
 }
 
+static int __init pstore_init(void)
+{
+	int ret;
+
+	pstore_choose_compression();
+
+	/*
+	 * Check if any pstore backends registered earlier but did not
+	 * initialize compression because crypto was not ready. If so,
+	 * initialize compression now.
+	 */
+	allocate_buf_for_compression();
+
+	ret = pstore_init_fs();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+	pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
 module_param(compress, charp, 0444);
 MODULE_PARM_DESC(compress, "Pstore compression to use");
 
 module_param(backend, charp, 0444);
 MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
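The reworked allocate_buf_for_compression() above prepares everything in locals (size, buf, ctx) and assigns the globals (tfm, big_oops_buf_sz, big_oops_buf) only once every step has succeeded, so a failure never leaves a half-initialized state behind. A hedged standalone sketch of that commit-at-the-end style, with invented names and malloc standing in for the kernel allocators:

/* Prepare into locals; publish to globals only on full success. */
#include <stdlib.h>
#include <string.h>

static char *big_buf;		/* non-NULL means the feature is usable */
static size_t big_buf_sz;

static int setup_buffer(size_t size)
{
	char *buf;

	if (size == 0)
		return -1;		/* nothing was published on failure */

	buf = malloc(size);
	if (!buf)
		return -1;

	memset(buf, 0, size);		/* any further setup steps go here */

	/* Commit: globals change only after every step above succeeded. */
	big_buf = buf;
	big_buf_sz = size;
	return 0;
}

int main(void)
{
	if (setup_buffer(4096) == 0)
		free(big_buf);
	return 0;
}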

@@ -956,7 +956,7 @@ static int __init ramoops_init(void)
 
 	return ret;
 }
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {

@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, pgprot_t newprot,
 			int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-			pmd_t *pmd, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-			pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,

@@ -123,9 +123,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-				struct vm_area_struct *vma,
-				struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 				pgoff_t idx, unsigned long address);
 
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

@@ -1317,7 +1317,7 @@ extern void	   __wait_on_journal (journal_t *);
 
 /* Transaction cache support */
 extern void jbd2_journal_destroy_transaction_cache(void);
-extern int  jbd2_journal_init_transaction_cache(void);
+extern int __init jbd2_journal_init_transaction_cache(void);
 extern void jbd2_journal_free_transaction(transaction_t *);
 
 /*
@@ -1445,8 +1445,10 @@ static inline void jbd2_free_inode(struct jbd2_inode *jinode)
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int	   jbd2_journal_init_revoke(journal_t *, int);
-extern void	   jbd2_journal_destroy_revoke_caches(void);
-extern int	   jbd2_journal_init_revoke_caches(void);
+extern void	   jbd2_journal_destroy_revoke_record_cache(void);
+extern void	   jbd2_journal_destroy_revoke_table_cache(void);
+extern int __init jbd2_journal_init_revoke_record_cache(void);
+extern int __init jbd2_journal_init_revoke_table_cache(void);
 extern void	   jbd2_journal_destroy_revoke(journal_t *);
 extern int	   jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *);

@@ -215,9 +215,9 @@
 
 /* DA9063 Configuration registers */
 /* OTP */
-#define	DA9063_REG_OPT_COUNT		0x101
-#define	DA9063_REG_OPT_ADDR		0x102
-#define	DA9063_REG_OPT_DATA		0x103
+#define	DA9063_REG_OTP_CONT		0x101
+#define	DA9063_REG_OTP_ADDR		0x102
+#define	DA9063_REG_OTP_DATA		0x103
 
 /* Customer Trim and Configuration */
 #define	DA9063_REG_T_OFFSET		0x104

@@ -136,8 +136,8 @@
 #define MAX77620_FPS_PERIOD_MIN_US		40
 #define MAX20024_FPS_PERIOD_MIN_US		20
 
-#define MAX77620_FPS_PERIOD_MAX_US		2560
-#define MAX20024_FPS_PERIOD_MAX_US		5120
+#define MAX20024_FPS_PERIOD_MAX_US		2560
+#define MAX77620_FPS_PERIOD_MAX_US		5120
 
 #define MAX77620_REG_FPS_GPIO1			0x54
 #define MAX77620_REG_FPS_GPIO2			0x55

@@ -910,6 +910,15 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+static __always_inline void mm_clear_owner(struct mm_struct *mm,
+					   struct task_struct *p)
+{
+#ifdef CONFIG_MEMCG
+	if (mm->owner == p)
+		WRITE_ONCE(mm->owner, NULL);
+#endif
+}
+
 static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 {
 #ifdef CONFIG_MEMCG
@@ -1289,6 +1298,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 free_pt:
 	/* don't put binfmt in mmput, we haven't got module yet */
 	mm->binfmt = NULL;
+	mm_init_owner(mm, NULL);
 	mmput(mm);
 
 fail_nomem:
@@ -1620,6 +1630,21 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif /* #ifdef CONFIG_TASKS_RCU */
 }
 
+static void __delayed_free_task(struct rcu_head *rhp)
+{
+	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
+
+	free_task(tsk);
+}
+
+static __always_inline void delayed_free_task(struct task_struct *tsk)
+{
+	if (IS_ENABLED(CONFIG_MEMCG))
+		call_rcu(&tsk->rcu, __delayed_free_task);
+	else
+		free_task(tsk);
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -2081,8 +2106,10 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_mm:
-	if (p->mm)
+	if (p->mm) {
+		mm_clear_owner(p->mm, p);
 		mmput(p->mm);
+	}
 bad_fork_cleanup_signal:
 	if (!(clone_flags & CLONE_THREAD))
 		free_signal_struct(p->signal);
@@ -2113,7 +2140,7 @@ static __latent_entropy struct task_struct *copy_process(
 bad_fork_free:
 	p->state = TASK_DEAD;
 	put_task_stack(p);
-	free_task(p);
+	delayed_free_task(p);
 fork_out:
 	spin_lock_irq(&current->sighand->siglock);
 	hlist_del_init(&delayed.node);

@@ -130,6 +130,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 {
 	struct rwsem_waiter *waiter, *tmp;
 	long oldcount, woken = 0, adjustment = 0;
+	struct list_head wlist;
 
 	/*
 	 * Take a peek at the queue head waiter such that we can determine
@@ -188,18 +189,42 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 	 * of the queue. We know that woken will be at least 1 as we accounted
 	 * for above. Note we increment the 'active part' of the count by the
 	 * number of readers before waking any processes up.
+	 *
+	 * We have to do wakeup in 2 passes to prevent the possibility that
+	 * the reader count may be decremented before it is incremented. It
+	 * is because the to-be-woken waiter may not have slept yet. So it
+	 * may see waiter->task got cleared, finish its critical section and
+	 * do an unlock before the reader count increment.
+	 *
+	 * 1) Collect the read-waiters in a separate list, count them and
+	 *    fully increment the reader count in rwsem.
+	 * 2) For each waiters in the new list, clear waiter->task and
+	 *    put them into wake_q to be woken up later.
 	 */
-	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
-		struct task_struct *tsk;
-
+	list_for_each_entry(waiter, &sem->wait_list, list) {
 		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
 			break;
 
 		woken++;
-		tsk = waiter->task;
+	}
+	list_cut_before(&wlist, &sem->wait_list, &waiter->list);
+
+	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
+	if (list_empty(&sem->wait_list)) {
+		/* hit end of list above */
+		adjustment -= RWSEM_WAITING_BIAS;
+	}
+
+	if (adjustment)
+		atomic_long_add(adjustment, &sem->count);
 
+	/* 2nd pass */
+	list_for_each_entry_safe(waiter, tmp, &wlist, list) {
+		struct task_struct *tsk;
+
+		tsk = waiter->task;
 		get_task_struct(tsk);
-		list_del(&waiter->list);
+
 		/*
 		 * Ensure calling get_task_struct() before setting the reader
 		 * waiter to nil such that rwsem_down_read_failed() cannot
@@ -215,15 +240,6 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
 		/* wake_q_add() already take the task ref */
 		put_task_struct(tsk);
 	}
-
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
-	if (list_empty(&sem->wait_list)) {
-		/* hit end of list above */
-		adjustment -= RWSEM_WAITING_BIAS;
-	}
-
-	if (adjustment)
-		atomic_long_add(adjustment, &sem->count);
 }
 
 /*
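The comment added in the hunk above describes the two-pass wakeup: first detach the leading read-waiters onto a private list and publish the full reader count, and only afterwards clear each waiter and queue its wakeup. A hedged standalone sketch of that collect-then-notify ordering, using a plain singly linked list with no real locking or wake_q, so it only illustrates the shape of the algorithm:

/* Two-pass wakeup sketch: count and detach first, then notify. */
#include <stdio.h>

struct waiter {
	int is_writer;
	struct waiter *next;
};

static long reader_count;

static void mark_wake(struct waiter **head)
{
	struct waiter *w, *wake_list = NULL, **tail = &wake_list;
	long woken = 0;

	/* Pass 1: move the leading readers onto a private list and count them. */
	while ((w = *head) && !w->is_writer) {
		*head = w->next;
		w->next = NULL;
		*tail = w;
		tail = &w->next;
		woken++;
	}

	/* Publish the whole count before any waiter is told to run. */
	reader_count += woken;

	/* Pass 2: only now "wake" the detached waiters. */
	for (w = wake_list; w; w = w->next)
		printf("wake reader %p\n", (void *)w);
}

int main(void)
{
	struct waiter r1 = { 0, NULL }, r2 = { 0, NULL }, wr = { 1, NULL };
	struct waiter *head = &r1;

	r1.next = &r2;
	r2.next = &wr;
	mark_wake(&head);
	printf("readers accounted: %ld, next waiter is_writer=%d\n",
	       reader_count, head->is_writer);
	return 0;
}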

@@ -817,8 +817,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-	struct page *head = compound_head(page);
-	size_t v = n + offset + page_address(page) - page_address(head);
+	struct page *head;
+	size_t v = n + offset;
+
+	/*
+	 * The general case needs to access the page order in order
+	 * to compute the page size.
+	 * However, we mostly deal with order-0 pages and thus can
+	 * avoid a possible cache line miss for requests that fit all
+	 * page orders.
+	 */
+	if (n <= v && v <= PAGE_SIZE)
+		return true;
+
+	head = compound_head(page);
+	v += (page - head) << PAGE_SHIFT;
 
 	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
 		return true;
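The new fast path above accepts the copy immediately when offset + n fits within a single order-0 page, and only falls back to the compound-head arithmetic otherwise. A hedged sketch of the same bounds check with plain integers (PAGE_SIZE and the shift are stand-ins, not the kernel macros):

/* Fast path for a single 4 KiB page, slow path for larger compound pages. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool copy_sane(unsigned long page_idx_in_compound,
		      unsigned long offset, unsigned long n,
		      unsigned int compound_order)
{
	unsigned long v = n + offset;

	/* n <= v also catches overflow of n + offset. */
	if (n <= v && v <= PAGE_SIZE)
		return true;			/* fits one order-0 page */

	/* Slow path: add this page's byte offset within its compound page. */
	v += page_idx_in_compound << 12;	/* PAGE_SHIFT stand-in */
	return n <= v && v <= (PAGE_SIZE << compound_order);
}

int main(void)
{
	printf("%d\n", copy_sane(0, 100, 200, 0));	/* 1: fits one page */
	printf("%d\n", copy_sane(1, 100, 5000, 2));	/* 1: fits a 16 KiB compound */
	printf("%d\n", copy_sane(0, 100, 5000, 0));	/* 0: overruns an order-0 page */
	return 0;
}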

@@ -772,11 +772,13 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 		pte_free(mm, pgtable);
 }
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-			pmd_t *pmd, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+	unsigned long addr = vmf->address & PMD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t pgprot = vma->vm_page_prot;
 	pgtable_t pgtable = NULL;
+
 	/*
 	 * If we had pmd_special, we could avoid all these restrictions,
 	 * but we need to be consistent with PTEs and architectures that
@@ -799,7 +801,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, pfn);
 
-	insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
+	insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -848,10 +850,12 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 	spin_unlock(ptl);
 }
 
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-			pud_t *pud, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+	unsigned long addr = vmf->address & PUD_MASK;
+	struct vm_area_struct *vma = vmf->vma;
 	pgprot_t pgprot = vma->vm_page_prot;
+
 	/*
 	 * If we had pud_special, we could avoid all these restrictions,
 	 * but we need to be consistent with PTEs and architectures that
@@ -868,7 +872,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
 
 	track_pfn_insert(vma, &pgprot, pfn);
 
-	insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
+	insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
 	return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);

@@ -1572,8 +1572,9 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 	 */
 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
 		SetPageHugeTemporary(page);
+		spin_unlock(&hugetlb_lock);
 		put_page(page);
-		page = NULL;
+		return NULL;
 	} else {
 		h->surplus_huge_pages++;
 		h->surplus_huge_pages_node[page_to_nid(page)]++;
@@ -3777,8 +3778,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			 * handling userfault. Reacquire after handling
 			 * fault to make calling code simpler.
 			 */
-			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-							idx, haddr);
+			hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -3886,21 +3886,14 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 }
 
 #ifdef CONFIG_SMP
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	unsigned long key[2];
 	u32 hash;
 
-	if (vma->vm_flags & VM_SHARED) {
-		key[0] = (unsigned long) mapping;
-		key[1] = idx;
-	} else {
-		key[0] = (unsigned long) mm;
-		key[1] = address >> huge_page_shift(h);
-	}
+	key[0] = (unsigned long) mapping;
+	key[1] = idx;
 
 	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
 
@@ -3911,9 +3904,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  * For uniprocesor systems we always use a single mutex, so just
  * return 0 and avoid the hashing overhead.
  */
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
-			    struct vm_area_struct *vma,
-			    struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
 			    pgoff_t idx, unsigned long address)
 {
 	return 0;
@@ -3958,7 +3949,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
+	hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
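After the change above, the fault mutex is selected purely from the (mapping, index) pair, so the fault, fallocate and truncate paths all hash to the same mutex for a given file page regardless of which mm or vma they run under. A hedged standalone sketch of keying a mutex table that way; the mixing function is a toy stand-in, not the kernel's jhash2(), and the names are invented:

/* Sketch: pick a fault-mutex slot from (mapping, index) only. */
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 8	/* the kernel sizes this from the CPU count */

/* Toy 64-bit mixer standing in for jhash2(). */
static uint32_t mix(uint64_t mapping, uint64_t index)
{
	uint64_t x = mapping ^ (index * 0x9e3779b97f4a7c15ULL);

	x ^= x >> 33;
	x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33;
	return (uint32_t)x;
}

static unsigned int fault_mutex_hash(const void *mapping, uint64_t index)
{
	return mix((uint64_t)(uintptr_t)mapping, index) % NUM_FAULT_MUTEXES;
}

int main(void)
{
	int file_a, file_b;	/* stand-ins for two address_space objects */

	/* The same (mapping, index) always selects the same slot... */
	printf("%u %u\n", fault_mutex_hash(&file_a, 3), fault_mutex_hash(&file_a, 3));
	/* ...while other pages and other files may map elsewhere. */
	printf("%u %u\n", fault_mutex_hash(&file_a, 4), fault_mutex_hash(&file_b, 3));
	return 0;
}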

Some files were not shown because too many files have changed in this diff.