This is the 4.19.65 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl1Js7MACgkQONu9yGCS
aT4PQxAAo7xa4kYvDxc1RjUY/yIlp6lQ3rpYAAfZB0t8vN+dqivnJZ7m6JHeWX1Y
CMcxg85zxLVFeuiXdP821Zj68AB5zqlWMhX0bXm2lhw/Eo9+XHzXtnrLZHhz0/Xd
M5cmfIPmoyPCUQQfzSfUMvch+ZpwzEt5op5pUfSjckSpjHQZ0HFj1WJ4D8Hn9jAJ
y4+DAKDZgtqhb3GvpS6MoVnBJgcPk9+mBiDkSb12L392+FvHqfeBi3tDRhvyiZAO
iJrk747SPds7NlNmuRnj7YyUSDhBzaceRCz0Jsv9FT5EKXoPErXdsL3Bkfa9TREM
pH0OaMgNr6WSXLO9qIMcfxMeaKVIvIbotqBTkBTzhEAGPkHA75dhi0lpixXXFExg
MaqhLfmHO0dOEr9FrvYGe7f2wUA1Rdw/qRTM3KPEKmHxMqBS7eufIWMHwie1n9Oe
cYoP6UkxUIvhUyFV2BlMRFdMfaDbtR0iqy8Dqh36NISD6PAYaUGSoVeSO1fEg4Jy
5GgrKPg6rcz2XNY2cVbsm2zLpqY4dY58SFK9ORfuULdKUQvScvFGrdSSW0CgX+uc
F/5NmPutUoboHVxFraDPx7yo46pHf1RW0Me4xZ0aJ3e9ituLAN4fmJ9u46nofb5M
thPelQlMVt30O41uViJ0ADkOjCsiBr3AxOFvc76Ct9Q/BJVxhLk=
=JVBv
-----END PGP SIGNATURE-----

Merge 4.19.65 into android-4.19-q

Changes in 4.19.65
    ARM: riscpc: fix DMA
    ARM: dts: rockchip: Make rk3288-veyron-minnie run at hs200
    ARM: dts: rockchip: Make rk3288-veyron-mickey's emmc work again
    ARM: dts: rockchip: Mark that the rk3288 timer might stop in suspend
    ftrace: Enable trampoline when rec count returns back to one
    dmaengine: tegra-apb: Error out if DMA_PREP_INTERRUPT flag is unset
    arm64: dts: rockchip: fix isp iommu clocks and power domain
    kernel/module.c: Only return -EEXIST for modules that have finished loading
    firmware/psci: psci_checker: Park kthreads before stopping them
    MIPS: lantiq: Fix bitfield masking
    dmaengine: rcar-dmac: Reject zero-length slave DMA requests
    clk: tegra210: fix PLLU and PLLU_OUT1
    fs/adfs: super: fix use-after-free bug
    clk: sprd: Add check for return value of sprd_clk_regmap_init()
    btrfs: fix minimum number of chunk errors for DUP
    btrfs: qgroup: Don't hold qgroup_ioctl_lock in btrfs_qgroup_inherit()
    cifs: Fix a race condition with cifs_echo_request
    ceph: fix improper use of smp_mb__before_atomic()
    ceph: return -ERANGE if virtual xattr value didn't fit in buffer
    ACPI: blacklist: fix clang warning for unused DMI table
    scsi: zfcp: fix GCC compiler warning emitted with -Wmaybe-uninitialized
    perf version: Fix segfault due to missing OPT_END()
    x86: kvm: avoid constant-conversion warning
    ACPI: fix false-positive -Wuninitialized warning
    be2net: Signal that the device cannot transmit during reconfiguration
    x86/apic: Silence -Wtype-limits compiler warnings
    x86: math-emu: Hide clang warnings for 16-bit overflow
    mm/cma.c: fail if fixed declaration can't be honored
    lib/test_overflow.c: avoid tainting the kernel and fix wrap size
    lib/test_string.c: avoid masking memset16/32/64 failures
    coda: add error handling for fget
    coda: fix build using bare-metal toolchain
    uapi linux/coda_psdev.h: move upc_req definition from uapi to kernel side headers
    drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
    ipc/mqueue.c: only perform resource calculation if user valid
    mlxsw: spectrum_dcb: Configure DSCP map as the last rule is removed
    xen/pv: Fix a boot up hang revealed by int3 self test
    x86/kvm: Don't call kvm_spurious_fault() from .fixup
    x86/paravirt: Fix callee-saved function ELF sizes
    x86, boot: Remove multiple copy of static function sanitize_boot_params()
    drm/nouveau: fix memory leak in nouveau_conn_reset()
    kconfig: Clear "written" flag to avoid data loss
    kbuild: initialize CLANG_FLAGS correctly in the top Makefile
    Btrfs: fix incremental send failure after deduplication
    Btrfs: fix race leading to fs corruption after transaction abort
    mmc: dw_mmc: Fix occasional hang after tuning on eMMC
    mmc: meson-mx-sdio: Fix misuse of GENMASK macro
    gpiolib: fix incorrect IRQ requesting of an active-low lineevent
    IB/hfi1: Fix Spectre v1 vulnerability
    mtd: rawnand: micron: handle on-die "ECC-off" devices correctly
    selinux: fix memory leak in policydb_init()
    ALSA: hda: Fix 1-minute detection delay when i915 module is not available
    mm: vmscan: check if mem cgroup is disabled or not before calling memcg slab shrinker
    s390/dasd: fix endless loop after read unit address configuration
    cgroup: kselftest: relax fs_spec checks
    parisc: Fix build of compressed kernel even with debug enabled
    drivers/perf: arm_pmu: Fix failure path in PM notifier
    arm64: compat: Allow single-byte watchpoints on all addresses
    arm64: cpufeature: Fix feature comparison for CTR_EL0.{CWG,ERG}
    nbd: replace kill_bdev() with __invalidate_device() again
    xen/swiotlb: fix condition for calling xen_destroy_contiguous_region()
    IB/mlx5: Fix unreg_umr to ignore the mkey state
    IB/mlx5: Use direct mkey destroy command upon UMR unreg failure
    IB/mlx5: Move MRs to a kernel PD when freeing them to the MR cache
    IB/mlx5: Fix clean_mr() to work in the expected order
    IB/mlx5: Fix RSS Toeplitz setup to be aligned with the HW specification
    IB/hfi1: Check for error on call to alloc_rsm_map_table
    drm/i915/gvt: fix incorrect cache entry for guest page mapping
    eeprom: at24: make spd world-readable again
    ARC: enable uboot support unconditionally
    objtool: Support GCC 9 cold subfunction naming scheme
    gcc-9: properly declare the {pv,hv}clock_page storage
    x86/vdso: Prevent segfaults due to hoisted vclock reads
    scsi: mpt3sas: Use 63-bit DMA addressing on SAS35 HBA
    x86/cpufeatures: Carve out CQM features retrieval
    x86/cpufeatures: Combine word 11 and 12 into a new scattered features word
    x86/speculation: Prepare entry code for Spectre v1 swapgs mitigations
    x86/speculation: Enable Spectre v1 swapgs mitigations
    x86/entry/64: Use JMP instead of JMPQ
    x86/speculation/swapgs: Exclude ATOMs from speculation through SWAPGS
    Documentation: Add swapgs description to the Spectre v1 documentation
    Linux 4.19.65

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0a9a308d7f58de904f229d059a2818fa0cb01dd3
commit 03d5ba4085
95 changed files with 673 additions and 356 deletions
@@ -41,10 +41,11 @@ Related CVEs

The following CVE entries describe Spectre variants:

   =============   =======================  =================
   =============   =======================  ==========================
   CVE-2017-5753   Bounds check bypass      Spectre variant 1
   CVE-2017-5715   Branch target injection  Spectre variant 2
   =============   =======================  =================
   CVE-2019-1125   Spectre v1 swapgs        Spectre variant 1 (swapgs)
   =============   =======================  ==========================

Problem
-------

@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data

over the network, see :ref:`[12] <spec_ref12>`. However such attacks
are difficult, low bandwidth, fragile, and are considered low risk.

Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
only about user-controlled array bounds checks. It can affect any
conditional checks. The kernel entry code interrupt, exception, and NMI
handlers all have conditional swapgs checks. Those may be problematic
in the context of Spectre v1, as kernel code can speculatively run with
a user GS.

Spectre variant 2 (Branch Target Injection)
-------------------------------------------

@@ -132,6 +140,9 @@ not cover all possible attack vectors.

1. A user process attacking the kernel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Spectre variant 1
~~~~~~~~~~~~~~~~~

The attacker passes a parameter to the kernel via a register or
via a known address in memory during a syscall. Such parameter may
be used later by the kernel as an index to an array or to derive

@@ -144,7 +155,40 @@ not cover all possible attack vectors.

potentially be influenced for Spectre attacks, new "nospec" accessor
macros are used to prevent speculative loading of data.

Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
Spectre variant 1 (swapgs)
~~~~~~~~~~~~~~~~~~~~~~~~~~

An attacker can train the branch predictor to speculatively skip the
swapgs path for an interrupt or exception. If they initialize
the GS register to a user-space value, if the swapgs is speculatively
skipped, subsequent GS-related percpu accesses in the speculation
window will be done with the attacker-controlled GS value. This
could cause privileged memory to be accessed and leaked.

For example:

::

	if (coming from user space)
		swapgs
	mov %gs:<percpu_offset>, %reg
	mov (%reg), %reg1

When coming from user space, the CPU can speculatively skip the
swapgs, and then do a speculative percpu load using the user GS
value. So the user can speculatively force a read of any kernel
value. If a gadget exists which uses the percpu value as an address
in another load/store, then the contents of the kernel value may
become visible via an L1 side channel attack.

A similar attack exists when coming from kernel space. The CPU can
speculatively do the swapgs, causing the user GS to get used for the
rest of the speculative window.

Spectre variant 2
~~~~~~~~~~~~~~~~~

A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
target buffer (BTB) before issuing syscall to launch an attack.
After entering the kernel, the kernel could use the poisoned branch
target buffer on indirect jump and jump to gadget code in speculative
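Aside (illustrative, not part of the patch): the "nospec" bounds-clipping
idiom the documentation above refers to looks roughly like the sketch
below. array_index_nospec() is the real kernel helper from
<linux/nospec.h>; the attr_table lookup wrapped around it is hypothetical.

	#include <linux/nospec.h>

	static long example_get_attr(unsigned long index)
	{
		/* Architectural bounds check, as before. */
		if (index >= ARRAY_SIZE(attr_table))
			return -EINVAL;

		/*
		 * Clamp the index on speculative paths too: if the branch
		 * above is mispredicted, array_index_nospec() evaluates to
		 * 0 instead of the out-of-bounds value, so no
		 * secret-dependent load can feed a side channel.
		 */
		index = array_index_nospec(index, ARRAY_SIZE(attr_table));

		return attr_table[index];
	}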
@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:

The possible values in this file are:

  =======================================  =================================
  'Mitigation: __user pointer sanitation'  Protection in kernel on a case by
                                           case base with explicit pointer
                                           sanitation.
  =======================================  =================================
  .. list-table::

     * - 'Not affected'
       - The processor is not vulnerable.
     * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
       - The swapgs protections are disabled; otherwise it has
         protection in the kernel on a case by case base with explicit
         pointer sanitation and usercopy LFENCE barriers.
     * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
       - Protection in the kernel on a case by case base with explicit
         pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
         barriers.

However, the protections are put in place on a case by case basis,
and there is no guarantee that all possible attack vectors for Spectre

@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2

1. Kernel mitigation
^^^^^^^^^^^^^^^^^^^^

Spectre variant 1
~~~~~~~~~~~~~~~~~

For the Spectre variant 1, vulnerable kernel code (as determined
by code audit or scanning tools) is annotated on a case by case
basis to use nospec accessor macros for bounds clipping :ref:`[2]
<spec_ref2>` to avoid any usable disclosure gadgets. However, it may
not cover all attack vectors for Spectre variant 1.

Copy-from-user code has an LFENCE barrier to prevent the access_ok()
check from being mis-speculated. The barrier is done by the
barrier_nospec() macro.

For the swapgs variant of Spectre variant 1, LFENCE barriers are
added to interrupt, exception and NMI entry where needed. These
barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
FENCE_SWAPGS_USER_ENTRY macros.

Spectre variant 2
~~~~~~~~~~~~~~~~~

For Spectre variant 2 mitigation, the compiler turns indirect calls or
jumps in the kernel into equivalent return trampolines (retpolines)
:ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target

@@ -473,6 +539,12 @@ Mitigation control on the kernel command line

Spectre variant 2 mitigation can be disabled or force enabled at the
kernel command line.

	nospectre_v1

		[X86,PPC] Disable mitigations for Spectre Variant 1
		(bounds check bypass). With this option data leaks are
		possible in the system.

	nospectre_v2

		[X86] Disable all mitigations for the Spectre variant 2
@@ -2515,6 +2515,7 @@

			Equivalent to: nopti [X86,PPC]
				       nospectre_v1 [PPC]
				       nobp=0 [S390]
				       nospectre_v1 [X86]
				       nospectre_v2 [X86,PPC,S390]
				       spectre_v2_user=off [X86]
				       spec_store_bypass_disable=off [X86,PPC]

@@ -2861,9 +2862,9 @@

			nosmt=force: Force disable SMT, cannot be undone
				     via the sysfs control file.

	nospectre_v1	[PPC] Disable mitigations for Spectre Variant 1 (bounds
			check bypass). With this option data leaks are possible
			in the system.
	nospectre_v1	[X86,PPC] Disable mitigations for Spectre Variant 1
			(bounds check bypass). With this option data leaks
			are possible in the system.

	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
			(indirect branch prediction) vulnerability. System may
Makefile

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 64
SUBLEVEL = 65
EXTRAVERSION =
NAME = "People's Front"

@@ -430,6 +430,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
CLANG_FLAGS :=

export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS

@@ -483,7 +484,7 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE	?= $(CROSS_COMPILE)
CLANG_FLAGS	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
CLANG_FLAGS	+= --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif

@@ -199,7 +199,6 @@ config NR_CPUS

config ARC_SMP_HALT_ON_RESET
	bool "Enable Halt-on-reset boot mode"
	default y if ARC_UBOOT_SUPPORT
	help
	  In SMP configuration cores can be configured as Halt-on-reset
	  or they could all start at same time. For Halt-on-reset, non

@@ -539,18 +538,6 @@ config ARC_DBG_TLB_PARANOIA

endif

config ARC_UBOOT_SUPPORT
	bool "Support uboot arg Handling"
	default n
	help
	  ARC Linux by default checks for uboot provided args as pointers to
	  external cmdline or DTB. This however breaks in absence of uboot,
	  when booting from Metaware debugger directly, as the registers are
	  not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
	  registers look like uboot args to kernel which then chokes.
	  So only enable the uboot arg checking/processing if users are sure
	  of uboot being in play.

config ARC_BUILTIN_DTB_NAME
	string "Built in DTB"
	help

@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5
# CONFIG_ARC_HAS_LLSC is not set
CONFIG_ARC_KVADDR_SIZE=402
CONFIG_ARC_EMUL_UNALIGNED=y
CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_UNIX=y

@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
CONFIG_PREEMPT=y
CONFIG_NET=y

@@ -15,8 +15,6 @@ CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
# CONFIG_ARC_SMP_HALT_ON_RESET is not set
CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
CONFIG_NET=y

@@ -100,7 +100,6 @@ ENTRY(stext)
	st.ab	0, [r5, 4]
1:

#ifdef CONFIG_ARC_UBOOT_SUPPORT
	; Uboot - kernel ABI
	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
	;    r1 = magic number (always zero as of now)

@@ -109,7 +108,6 @@ ENTRY(stext)
	st	r0, [@uboot_tag]
	st	r1, [@uboot_magic]
	st	r2, [@uboot_arg]
#endif

	; setup "current" tsk and optionally cache it in dedicated r25
	mov	r9, @init_task

@@ -493,7 +493,6 @@ void __init handle_uboot_args(void)
	bool use_embedded_dtb = true;
	bool append_cmdline = false;

#ifdef CONFIG_ARC_UBOOT_SUPPORT
	/* check that we know this tag */
	if (uboot_tag != UBOOT_TAG_NONE &&
	    uboot_tag != UBOOT_TAG_CMDLINE &&

@@ -525,7 +524,6 @@ void __init handle_uboot_args(void)
		append_cmdline = true;

ignore_uboot_args:
#endif

	if (use_embedded_dtb) {
		machine_desc = setup_machine_fdt(__dtb_start);
@@ -124,10 +124,6 @@
	};
};

&emmc {
	/delete-property/mmc-hs200-1_8v;
};

&i2c2 {
	status = "disabled";
};

@@ -90,10 +90,6 @@
	pwm-off-delay-ms = <200>;
};

&emmc {
	/delete-property/mmc-hs200-1_8v;
};

&gpio_keys {
	pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;

@@ -227,6 +227,7 @@
		     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
		     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
	clock-frequency = <24000000>;
	arm,no-tick-in-suspend;
};

timer: timer@ff810000 {

@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
	} while (1);

	idma->state = ~DMA_ST_AB;
	disable_irq(irq);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
				DMA_FROM_DEVICE : DMA_TO_DEVICE);
		}

		idma->dma_addr = idma->dma.sg->dma_address;
		idma->dma_len = idma->dma.sg->length;

		iomd_writeb(DMA_CR_C, dma_base + CR);
		idma->state = DMA_ST_AB;
	}

@@ -1643,11 +1643,11 @@
		reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
		interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
		interrupt-names = "isp0_mmu";
		clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>;
		clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
		clock-names = "aclk", "iface";
		#iommu-cells = <0>;
		power-domains = <&power RK3399_PD_ISP0>;
		rockchip,disable-mmu-reset;
		status = "disabled";
	};

	isp1_mmu: iommu@ff924000 {

@@ -1655,11 +1655,11 @@
		reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
		interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
		interrupt-names = "isp1_mmu";
		clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>;
		clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
		clock-names = "aclk", "iface";
		#iommu-cells = <0>;
		power-domains = <&power RK3399_PD_ISP1>;
		rockchip,disable-mmu-reset;
		status = "disabled";
	};

	hdmi_sound: hdmi-sound {
@@ -45,9 +45,10 @@
 */

enum ftr_type {
	FTR_EXACT,	/* Use a predefined safe value */
	FTR_LOWER_SAFE,	/* Smaller value is safe */
	FTR_HIGHER_SAFE,/* Bigger value is safe */
	FTR_EXACT,			/* Use a predefined safe value */
	FTR_LOWER_SAFE,			/* Smaller value is safe */
	FTR_HIGHER_SAFE,		/* Bigger value is safe */
	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
};

#define FTR_STRICT	true	/* SANITY check strict matching required */

@@ -206,8 +206,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will

@@ -449,6 +449,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
		/* Fallthrough */
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;

@@ -547,13 +547,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
	case 3:
		/* Allow single byte watchpoint. */
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	default:
		return -EINVAL;
	}

@@ -156,8 +156,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
		if (edge)
			irq_set_handler(d->hwirq, handle_edge_irq);

		ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
			(val << (i * 4)), LTQ_EIU_EXIN_C);
		ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
			    (~(7 << (i * 4)))) | (val << (i * 4)),
			    LTQ_EIU_EXIN_C);
	}
}

@@ -42,8 +42,8 @@ SECTIONS
#endif
	_startcode_end = .;

	/* bootloader code and data starts behind area of extracted kernel */
	. = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
	/* bootloader code and data starts at least behind area of extracted kernel */
	. = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));

	/* align on next page boundary */
	. = ALIGN(4096);

@@ -17,6 +17,7 @@
#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
#include <asm/bootparam_utils.h>

/*
 * WARNING!!

@@ -22,7 +22,6 @@
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
#include <asm/bootparam_utils.h>

#define BOOT_BOOT_H
#include "../ctype.h"

@@ -329,6 +329,23 @@ For 32-bit we have the following conventions - kernel is built with

#endif

/*
 * Mitigate Spectre v1 for conditional swapgs code paths.
 *
 * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
 * prevent a speculative swapgs when coming from kernel space.
 *
 * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
 * to prevent the swapgs from getting speculatively skipped when coming from
 * user space.
 */
.macro FENCE_SWAPGS_USER_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
.endm
.macro FENCE_SWAPGS_KERNEL_ENTRY
	ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
.endm

#endif /* CONFIG_X86_64 */

/*
@@ -582,7 +582,7 @@ ENTRY(interrupt_entry)
	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are

@@ -612,8 +612,10 @@ ENTRY(interrupt_entry)
	UNWIND_HINT_FUNC

	movq	(%rdi), %rdi
	jmp	2f
1:
	FENCE_SWAPGS_KERNEL_ENTRY
2:
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

@@ -1196,7 +1198,6 @@ idtentry stack_segment do_stack_segment has_error_code=1
#ifdef CONFIG_XEN
idtentry xennmi do_nmi has_error_code=0
idtentry xendebug do_debug has_error_code=0
idtentry xenint3 do_int3 has_error_code=0
#endif

idtentry general_protection do_general_protection has_error_code=1

@@ -1241,6 +1242,13 @@ ENTRY(paranoid_entry)
	 */
	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14

	/*
	 * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
	 * unconditional CR3 write, even in the PTI case. So do an lfence
	 * to prevent GS speculation, regardless of whether PTI is enabled.
	 */
	FENCE_SWAPGS_KERNEL_ENTRY

	ret
END(paranoid_entry)

@@ -1291,6 +1299,7 @@ ENTRY(error_entry)
	 * from user mode due to an IRET fault.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/* We have user CR3. Change to kernel CR3. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

@@ -1312,6 +1321,8 @@ ENTRY(error_entry)
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done_lfence:
	FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

@@ -1330,7 +1341,7 @@ ENTRY(error_entry)
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done
	jne	.Lerror_entry_done_lfence

	/*
	 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up

@@ -1338,6 +1349,7 @@ ENTRY(error_entry)
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
	jmp .Lerror_entry_done

@@ -1352,6 +1364,7 @@ ENTRY(error_entry)
	 * gsbase and CR3. Switch to kernel gsbase and CR3:
	 */
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rax

	/*

@@ -1443,6 +1456,7 @@ ENTRY(nmi)

	swapgs
	cld
	FENCE_SWAPGS_USER_ENTRY
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
	movq	%rsp, %rdx
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
extern u8 pvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page
extern u8 hvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif

@@ -191,13 +191,24 @@ notrace static inline u64 vgetsns(int *mode)

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();

	/*
	 * For any memory-mapped vclock type, we need to make sure that gcc
	 * doesn't cleverly hoist a load before the mode check. Otherwise we
	 * might end up touching the memory-mapped page even if the vclock in
	 * question isn't enabled, which will segfault. Hence the barriers.
	 */
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK) {
		barrier();
		cycles = vread_pvclock(mode);
	}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
	else if (gtod->vclock_mode == VCLOCK_HVCLOCK) {
		barrier();
		cycles = vread_hvclock(mode);
	}
#endif
	else
		return 0;
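Aside (illustrative, not from the patch): the hoisting hazard the comment
in the hunk above describes can be sketched in plain C. MODE_PVCLOCK and
the page argument are hypothetical stand-ins for the vclock mode and the
memory-mapped clock page; the empty asm is what the kernel's barrier()
macro expands to.

	static unsigned long read_cycles(int mode, const unsigned long *page)
	{
		if (mode != MODE_PVCLOCK)
			return 0;

		/*
		 * Without this compiler barrier, gcc may hoist the load of
		 * *page above the mode check (e.g. to hide load latency).
		 * If this vclock is disabled, its page is not mapped, and
		 * the hoisted load segfaults even though the code as
		 * written never dereferences it.
		 */
		asm volatile("" ::: "memory");	/* barrier() */

		return *page;
	}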
@@ -48,7 +48,7 @@ static inline void generic_apic_probe(void)

#ifdef CONFIG_X86_LOCAL_APIC

extern unsigned int apic_verbosity;
extern int apic_verbosity;
extern int local_apic_timer_c2_ok;

extern int disable_apic;

@@ -22,8 +22,8 @@ enum cpuid_leafs
	CPUID_LNX_3,
	CPUID_7_0_EBX,
	CPUID_D_1_EAX,
	CPUID_F_0_EDX,
	CPUID_F_1_EDX,
	CPUID_LNX_4,
	CPUID_DUMMY,
	CPUID_8000_0008_EBX,
	CPUID_6_EAX,
	CPUID_8000_000A_EDX,

@@ -271,13 +271,18 @@
#define X86_FEATURE_XGETBV1		(10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES		(10*32+ 3) /* XSAVES/XRSTORS instructions */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
#define X86_FEATURE_CQM_LLC		(11*32+ 1) /* LLC QoS if 1 */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC	(12*32+ 0) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL	(12*32+ 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL	(12*32+ 2) /* LLC Local MBM monitoring */
/*
 * Extended auxiliary flags: Linux defined - for features scattered in various
 * CPUID levels like 0xf, etc.
 *
 * Reuse free bits when adding new feature flags!
 */
#define X86_FEATURE_CQM_LLC		(11*32+ 0) /* LLC QoS if 1 */
#define X86_FEATURE_CQM_OCCUP_LLC	(11*32+ 1) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL	(11*32+ 2) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL	(11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER	(11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL	(11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */

/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO		(13*32+ 0) /* CLZERO instruction */

@@ -383,5 +388,6 @@
#define X86_BUG_L1TF		X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS		X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY	X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
#define X86_BUG_SWAPGS		X86_BUG(21) /* CPU is affected by speculation through SWAPGS */

#endif /* _ASM_X86_CPUFEATURES_H */

@@ -1427,25 +1427,29 @@ enum {
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)

asmlinkage void __noreturn kvm_spurious_fault(void);

/*
 * Hardware virtualization extension instructions may fault if a
 * reboot turns off virtualization while processes are running.
 * Trap the fault and ignore the instruction if that happens.
 * Usually after catching the fault we just panic; during reboot
 * instead the instruction is ignored.
 */
asmlinkage void kvm_spurious_fault(void);

#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
	"666: " insn "\n\t" \
	"668: \n\t" \
	".pushsection .fixup, \"ax\" \n" \
	"667: \n\t" \
	cleanup_insn "\n\t" \
	"cmpb $0, kvm_rebooting \n\t" \
	"jne 668b \n\t" \
	__ASM_SIZE(push) " $666b \n\t" \
	"jmp kvm_spurious_fault \n\t" \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 667b)
#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
	"666: \n\t" \
	insn "\n\t" \
	"jmp 668f \n\t" \
	"667: \n\t" \
	"call kvm_spurious_fault \n\t" \
	"668: \n\t" \
	".pushsection .fixup, \"ax\" \n\t" \
	"700: \n\t" \
	cleanup_insn "\n\t" \
	"cmpb $0, kvm_rebooting\n\t" \
	"je 667b \n\t" \
	"jmp 668b \n\t" \
	".popsection \n\t" \
	_ASM_EXTABLE(666b, 700b)

#define __kvm_handle_fault_on_reboot(insn) \
	____kvm_handle_fault_on_reboot(insn, "")

@@ -768,6 +768,7 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
		    PV_RESTORE_ALL_CALLER_REGS \
		    FRAME_END \
		    "ret;" \
		    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
		    ".popsection")

/* Get a reference to a callee-save function */

@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
asmlinkage void xen_divide_error(void);
asmlinkage void xen_xennmi(void);
asmlinkage void xen_xendebug(void);
asmlinkage void xen_xenint3(void);
asmlinkage void xen_int3(void);
asmlinkage void xen_overflow(void);
asmlinkage void xen_bounds(void);
asmlinkage void xen_invalid_op(void);

@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
/*
 * Debug level, exported for io_apic.c
 */
unsigned int apic_verbosity;
int apic_verbosity;

int pic_mode;
@@ -32,6 +32,7 @@
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

@@ -96,17 +97,11 @@ void __init check_bugs(void)
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

@@ -271,6 +266,98 @@ static int __init mds_cmdline(char *str)
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
		 *	   FSGSBASE enablement patches have been merged. ]
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (!smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

@@ -1258,7 +1345,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
@@ -808,6 +808,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
	}
}

static void init_cqm(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
		c->x86_cache_max_rmid  = -1;
		c->x86_cache_occ_scale = -1;
		return;
	}

	/* will be overridden if occupancy monitoring exists */
	c->x86_cache_max_rmid = cpuid_ebx(0xf);

	if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
	    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
		u32 eax, ebx, ecx, edx;

		/* QoS sub-leaf, EAX=0Fh, ECX=1 */
		cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);

		c->x86_cache_max_rmid  = ecx;
		c->x86_cache_occ_scale = ebx;
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

@@ -839,33 +863,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			    ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			     (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

@@ -896,6 +893,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)

	init_scattered_cpuid_features(c);
	init_speculation_control(c);
	init_cqm(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.

@@ -954,6 +952,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_L1TF		BIT(3)
#define NO_MDS		BIT(4)
#define MSBDS_ONLY	BIT(5)
#define NO_SWAPGS	BIT(6)

#define VULNWL(_vendor, _family, _model, _whitelist)	\
	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }

@@ -977,29 +976,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF),
	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF),
	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS),
	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF | NO_SWAPGS),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM). But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
	{}
};

@@ -1036,6 +1043,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
		setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	if (cpu_matches(NO_MELTDOWN))
		return;
@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
	{ X86_FEATURE_AVX512_4VNNIW,	X86_FEATURE_AVX512F   },
	{ X86_FEATURE_AVX512_4FMAPS,	X86_FEATURE_AVX512F   },
	{ X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F   },
	{ X86_FEATURE_CQM_OCCUP_LLC,	X86_FEATURE_CQM_LLC   },
	{ X86_FEATURE_CQM_MBM_TOTAL,	X86_FEATURE_CQM_LLC   },
	{ X86_FEATURE_CQM_MBM_LOCAL,	X86_FEATURE_CQM_LLC   },
	{}
};

@@ -21,6 +21,10 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_APERFMPERF,	CPUID_ECX, 0, 0x00000006, 0 },
	{ X86_FEATURE_EPB,		CPUID_ECX, 3, 0x00000006, 0 },
	{ X86_FEATURE_CQM_LLC,		CPUID_EDX, 1, 0x0000000f, 0 },
	{ X86_FEATURE_CQM_OCCUP_LLC,	CPUID_EDX, 0, 0x0000000f, 1 },
	{ X86_FEATURE_CQM_MBM_TOTAL,	CPUID_EDX, 1, 0x0000000f, 1 },
	{ X86_FEATURE_CQM_MBM_LOCAL,	CPUID_EDX, 2, 0x0000000f, 1 },
	{ X86_FEATURE_CAT_L3,		CPUID_EBX, 1, 0x00000010, 0 },
	{ X86_FEATURE_CAT_L2,		CPUID_EBX, 2, 0x00000010, 0 },
	{ X86_FEATURE_CDP_L3,		CPUID_ECX, 2, 0x00000010, 1 },

@@ -830,6 +830,7 @@ asm(
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
	[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
	[CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
	[CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
	[CPUID_F_0_EDX]       = {       0xf, 0, CPUID_EDX},
	[CPUID_F_1_EDX]       = {       0xf, 1, CPUID_EDX},
	[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
	[CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
	[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
@@ -4532,11 +4532,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
		 */

		/* Faults from writes to non-writable pages */
		u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
		u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
		/* Faults from user mode accesses to supervisor pages */
		u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
		u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
		/* Faults from fetches of non-executable pages*/
		u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
		u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
		/* Faults from kernel mode fetches of user pages */
		u8 smepf = 0;
		/* Faults from kernel mode accesses of user pages */
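Aside (illustrative, not part of the diff): the (u8) casts above address
clang's -Wconstant-conversion. Because of integer promotion, ~w is
computed as an int with the high bits set, and implicitly narrowing that
to a u8 draws the warning when the expression constant-folds; the cast
states that the truncation is intended. A standalone sketch (names
hypothetical):

	typedef unsigned char u8;

	static u8 mask_example(unsigned int pfec, u8 w)
	{
		/*
		 * ~w is an int, e.g. ~0x01 == 0xfffffffe; only the low
		 * byte is wanted, so make the narrowing explicit.
		 */
		u8 wf = (pfec & 0x2) ? (u8)~w : 0;

		return wf;
	}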
@@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
#define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
	((y) + EXTENDED_Ebias) & 0x7fff; }
#define exponent16(x)       (*(short *)&((x)->exp))
#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (y); }
#define setexponent16(x,y)  { (*(short *)&((x)->exp)) = (u16)(y); }
#define addexponent(x,y)    { (*(short *)&((x)->exp)) += (y); }
#define stdexp(x)           { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }

@@ -18,7 +18,7 @@
#include "control_w.h"

#define MAKE_REG(s, e, l, h) { l, h, \
	((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
	(u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }

FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0

@@ -597,12 +597,12 @@ struct trap_array_entry {

static struct trap_array_entry trap_array[] = {
	{ debug,                       xen_xendebug,                    true },
	{ int3,                        xen_xenint3,                     true },
	{ double_fault,                xen_double_fault,                true },
#ifdef CONFIG_X86_MCE
	{ machine_check,               xen_machine_check,               true },
#endif
	{ nmi,                         xen_xennmi,                      true },
	{ int3,                        xen_int3,                        false },
	{ overflow,                    xen_overflow,                    false },
#ifdef CONFIG_IA32_EMULATION
	{ entry_INT80_compat,          xen_entry_INT80_compat,          false },

@@ -30,7 +30,6 @@ xen_pv_trap divide_error
xen_pv_trap debug
xen_pv_trap xendebug
xen_pv_trap int3
xen_pv_trap xenint3
xen_pv_trap xennmi
xen_pv_trap overflow
xen_pv_trap bounds

@@ -30,7 +30,9 @@

#include "internal.h"

#ifdef CONFIG_DMI
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
#endif

/*
 * POLICY: If *anything* doesn't work, put it on the blacklist.

@@ -74,7 +76,9 @@ int __init acpi_blacklisted(void)
	}

	(void)early_acpi_osi_init();
#ifdef CONFIG_DMI
	dmi_check_system(acpi_rev_dmi_table);
#endif

	return blacklisted;
}

@@ -1218,7 +1218,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
				 struct block_device *bdev)
{
	sock_shutdown(nbd);
	kill_bdev(bdev);
	__invalidate_device(bdev, true);
	nbd_bdev_reset(bdev);
	if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
@@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct sprd_clk_desc *desc;
	int ret;

	match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
	if (!match) {

@@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev)
	}

	desc = match->data;
	sprd_clk_regmap_init(pdev, desc);
	ret = sprd_clk_regmap_init(pdev, desc);
	if (ret)
		return ret;

	return sprd_clk_probe(&pdev->dev, desc->hw_clks);
}

@@ -2214,9 +2214,9 @@ static struct div_nmp pllu_nmp = {
};

static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
	{ 12000000, 480000000, 40, 1, 0, 0 },
	{ 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
	{ 38400000, 480000000, 25, 2, 0, 0 },
	{ 12000000, 480000000, 40, 1, 1, 0 },
	{ 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
	{ 38400000, 480000000, 25, 2, 1, 0 },
	{        0,         0,  0, 0, 0, 0 },
};

@@ -3343,6 +3343,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
	{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
	{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
	{ TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
	{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
	{ TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
	{ TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
	{ TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },

@@ -3367,7 +3368,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
	{ TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
	{ TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
	{ TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
	{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
	{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
	/* This MUST be the last entry. */
	{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },

@@ -1164,7 +1164,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
	struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

	/* Someone calling slave DMA on a generic channel? */
	if (rchan->mid_rid < 0 || !sg_len) {
	if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
		dev_warn(chan->device->dev,
			 "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, rchan->mid_rid);

@@ -981,8 +981,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT)
	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

@@ -1124,8 +1128,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT)
	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

@@ -366,16 +366,16 @@ static int suspend_test_thread(void *arg)
	for (;;) {
		/* Needs to be set first to avoid missing a wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
		if (kthread_should_park())
			break;
		}
		schedule();
	}

	pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
		cpu, nb_suspend, nb_shallow_sleep, nb_err);

	kthread_parkme();

	return nb_err;
}

@@ -440,8 +440,10 @@ static int suspend_tests(void)


	/* Stop and destroy all threads, get return status. */
	for (i = 0; i < nb_threads; ++i)
	for (i = 0; i < nb_threads; ++i) {
		err += kthread_park(threads[i]);
		err += kthread_stop(threads[i]);
	}
out:
	cpuidle_resume_and_unlock();
	kfree(threads);

@@ -946,9 +946,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
	}

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= IRQF_TRIGGER_RISING;
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= IRQF_TRIGGER_FALLING;
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;
	irqflags |= IRQF_SHARED;
@ -1741,6 +1741,18 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
|
|||
|
||||
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
|
||||
if (!entry) {
|
||||
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
|
||||
if (ret)
|
||||
goto err_unlock;
|
||||
|
||||
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
|
||||
if (ret)
|
||||
goto err_unmap;
|
||||
} else if (entry->size != size) {
|
||||
/* the same gfn with different size: unmap and re-map */
|
||||
gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
|
||||
__gvt_cache_remove_entry(vgpu, entry);
|
||||
|
||||
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
|
||||
if (ret)
|
||||
goto err_unlock;
|
||||
|
|
|
@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector)
|
|||
return;
|
||||
|
||||
if (connector->state)
|
||||
__drm_atomic_helper_connector_destroy_state(connector->state);
|
||||
nouveau_conn_atomic_destroy_state(connector, connector->state);
|
||||
__drm_atomic_helper_connector_reset(connector, &asyc->state);
|
||||
asyc->dither.mode = DITHERING_MODE_AUTO;
|
||||
asyc->dither.depth = DITHERING_DEPTH_AUTO;
|
||||
|
|
|
@@ -14586,7 +14586,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
 	clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }

-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
 	struct rsm_map_table *rmt;
 	u64 val;

@@ -14595,6 +14595,9 @@ static void init_rxe(struct hfi1_devdata *dd)
 	write_csr(dd, RCV_ERR_MASK, ~0ull);

 	rmt = alloc_rsm_map_table(dd);
+	if (!rmt)
+		return -ENOMEM;
+
 	/* set up QOS, including the QPN map table */
 	init_qos(dd, rmt);
 	init_user_fecn_handling(dd, rmt);

@@ -14621,6 +14624,7 @@ static void init_rxe(struct hfi1_devdata *dd)
 	val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
 		RCV_BYPASS_HDR_SIZE_SHIFT);
 	write_csr(dd, RCV_BYPASS, val);
+	return 0;
 }

 static void init_other(struct hfi1_devdata *dd)

@@ -15163,7 +15167,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
 		goto bail_cleanup;

 	/* set initial RXE CSRs */
-	init_rxe(dd);
+	ret = init_rxe(dd);
+	if (ret)
+		goto bail_cleanup;
+
 	/* set initial TXE CSRs */
 	init_txe(dd);
 	/* set initial non-RXE, non-TXE CSRs */
@@ -54,6 +54,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <rdma/opa_addr.h>
+#include <linux/nospec.h>

 #include "hfi.h"
 #include "common.h"

@@ -1596,6 +1597,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
 	sl = rdma_ah_get_sl(ah_attr);
 	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
 		return -EINVAL;
+	sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));

 	sc5 = ibp->sl_to_sc[sl];
 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
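array_index_nospec() clamps an already-bounds-checked index so it stays in range even under speculative execution, closing the Spectre-v1 window between the check and the table load. A minimal sketch of the pattern (table/lookup are illustrative, not from the patch):

    #include <linux/nospec.h>

    static int lookup(const u8 *table, size_t nr_entries, size_t idx)
    {
    	if (idx >= nr_entries)
    		return -EINVAL;
    	/* idx is forced into [0, nr_entries) even speculatively */
    	idx = array_index_nospec(idx, nr_entries);
    	return table[idx];
    }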
@@ -467,6 +467,7 @@ struct mlx5_umr_wr {
 	u64				length;
 	int				access_flags;
 	u32				mkey;
+	u8				ignore_free_state:1;
 };

 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
@@ -548,14 +548,17 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return;

 	c = order2idx(dev, mr->order);
-	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
+
+	if (unreg_umr(dev, mr)) {
+		mr->allocated_from_cache = false;
+		destroy_mkey(dev, mr);
+		ent = &cache->ent[c];
+		if (ent->cur < ent->limit)
+			queue_work(cache->wq, &ent->work);
 		return;
 	}

-	if (unreg_umr(dev, mr))
-		return;
-
 	ent = &cache->ent[c];
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);

@@ -1408,9 +1411,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		return 0;

 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-			      MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
 	umrwr.wr.opcode = MLX5_IB_WR_UMR;
+	umrwr.pd = dev->umrc.pd;
 	umrwr.mkey = mr->mmkey.key;
+	umrwr.ignore_free_state = 1;

 	return mlx5_ib_post_send_wait(dev, &umrwr);
 }

@@ -1615,10 +1620,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		mr->sig = NULL;
 	}

-	mlx5_free_priv_descs(mr);
-
-	if (!allocated_from_cache)
+	if (!allocated_from_cache) {
 		destroy_mkey(dev, mr);
+		mlx5_free_priv_descs(mr);
+	}
 }

 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -1501,7 +1501,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	}

 	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-	MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
 	memcpy(rss_key, ucmd.rx_hash_key, len);
 	break;
 }

@@ -3717,10 +3716,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,

 	memset(umr, 0, sizeof(*umr));

-	if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-		umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-	else
-		umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+	if (!umrwr->ignore_free_state) {
+		if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+			/* fail if free */
+			umr->flags = MLX5_UMR_CHECK_FREE;
+		else
+			/* fail if not free */
+			umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+	}

 	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
 	if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
@@ -724,7 +724,7 @@ static int at24_probe(struct i2c_client *client)
 	nvmem_config.name = dev_name(dev);
 	nvmem_config.dev = dev;
 	nvmem_config.read_only = !writable;
-	nvmem_config.root_only = true;
+	nvmem_config.root_only = !(pdata.flags & AT24_FLAG_IRUGO);
 	nvmem_config.owner = THIS_MODULE;
 	nvmem_config.compat = true;
 	nvmem_config.base_dev = dev;
@@ -2038,8 +2038,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
 				 * delayed. Allowing the transfer to take place
 				 * avoids races and keeps things simple.
 				 */
-				if ((err != -ETIMEDOUT) &&
-				    (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+				if (err != -ETIMEDOUT) {
 					state = STATE_SENDING_DATA;
 					continue;
 				}
@@ -76,7 +76,7 @@
 #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK		GENMASK(7, 6)
 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK		BIT(8)
 #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD		BIT(9)
-#define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK		GENMASK(10, 13)
+#define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK		GENMASK(13, 10)
 #define MESON_MX_SDIO_IRQC_SOFT_RESET			BIT(15)
 #define MESON_MX_SDIO_IRQC_FORCE_HALT			BIT(30)
 #define MESON_MX_SDIO_IRQC_HALT_HOLE			BIT(31)
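GENMASK(h, l) takes the high bit first; with the arguments swapped it compiles silently but builds a wrong mask. The corrected constant spelled out (per the include/linux/bits.h definition):

    #include <linux/bits.h>

    /* bits 13..10 set: 0b11_1100_0000_0000 == 0x3c00 */
    #define DAT_MASK	GENMASK(13, 10)
    /* GENMASK(10, 13) would silently produce garbage because h < l */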
@@ -400,6 +400,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
 	    (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
 		return MICRON_ON_DIE_UNSUPPORTED;

+	/*
+	 * It seems that there are devices which do not support ECC officially.
+	 * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices supports
+	 * enabling the ECC feature but don't reflect that to the READ_ID table.
+	 * So we have to guarantee that we disable the ECC feature directly
+	 * after we did the READ_ID table command. Later we can evaluate the
+	 * ECC_ENABLE support.
+	 */
 	ret = micron_nand_on_die_ecc_setup(chip, true);
 	if (ret)
 		return MICRON_ON_DIE_UNSUPPORTED;

@@ -408,13 +416,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
 	if (ret)
 		return MICRON_ON_DIE_UNSUPPORTED;

-	if (!(id[4] & MICRON_ID_ECC_ENABLED))
-		return MICRON_ON_DIE_UNSUPPORTED;
-
 	ret = micron_nand_on_die_ecc_setup(chip, false);
 	if (ret)
 		return MICRON_ON_DIE_UNSUPPORTED;

+	if (!(id[4] & MICRON_ID_ECC_ENABLED))
+		return MICRON_ON_DIE_UNSUPPORTED;
+
 	ret = nand_readid_op(chip, 0, id, sizeof(id));
 	if (ret)
 		return MICRON_ON_DIE_UNSUPPORTED;
@@ -4700,8 +4700,12 @@ int be_update_queues(struct be_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	int status;

-	if (netif_running(netdev))
+	if (netif_running(netdev)) {
+		/* device cannot transmit now, avoid dev_watchdog timeouts */
+		netif_carrier_off(netdev);
+
 		be_close(netdev);
+	}

 	be_cancel_worker(adapter);
@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
 	have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
 							&prio_map);

-	if (!have_dscp) {
-		err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
-					MLXSW_REG_QPTS_TRUST_STATE_PCP);
-		if (err)
-			netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
-		return err;
-	}
-
 	mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
 					    &dscp_map);
 	err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,

@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
 		return err;
 	}

+	if (!have_dscp) {
+		err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+					MLXSW_REG_QPTS_TRUST_STATE_PCP);
+		if (err)
+			netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+		return err;
+	}
+
 	err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
 					     MLXSW_REG_QPTS_TRUST_STATE_DSCP);
 	if (err) {
@@ -730,8 +730,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
 		cpu_pm_pmu_setup(armpmu, cmd);
 		break;
 	case CPU_PM_EXIT:
-		cpu_pm_pmu_setup(armpmu, cmd);
 	case CPU_PM_ENTER_FAILED:
+		cpu_pm_pmu_setup(armpmu, cmd);
 		armpmu->start(armpmu);
 		break;
 	default:
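In the old switch, CPU_PM_EXIT ran the setup and then fell through, so a CPU_PM_ENTER_FAILED notification restarted the PMU without ever reprogramming it. Placing the call under both labels makes the two cases share the whole body; the fixed shape, reduced to a sketch:

    switch (cmd) {
    case CPU_PM_EXIT:		/* deliberate label sharing: both cases */
    case CPU_PM_ENTER_FAILED:	/* must set up state before restarting */
    	cpu_pm_pmu_setup(armpmu, cmd);
    	armpmu->start(armpmu);
    	break;
    default:
    	break;
    }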
@@ -1688,6 +1688,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,

 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
 		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

 	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
 		   dev_info.comptag, dev_info.destid, dev_info.hopcount);

@@ -1819,6 +1820,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)

 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
 		return -EFAULT;
+	dev_info.name[sizeof(dev_info.name) - 1] = '\0';

 	mport = priv->md->mport;
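Fixed-size name buffers copied from userspace carry no termination guarantee, so writing '\0' into the last byte right after copy_from_user() is the standard defence before the buffer is used as a C string. A sketch with an illustrative struct (demo_req is not from the patch):

    #include <linux/uaccess.h>

    struct demo_req {
    	char name[32];		/* userspace need not NUL-terminate this */
    };

    static int demo_ioctl(void __user *arg)
    {
    	struct demo_req r;

    	if (copy_from_user(&r, arg, sizeof(r)))
    		return -EFAULT;
    	r.name[sizeof(r.name) - 1] = '\0';	/* force termination */
    	return 0;
    }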
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
 	char msg_format;
 	char msg_no;

+	/*
+	 * intrc values ENODEV, ENOLINK and EPERM
+	 * will be optained from sleep_on to indicate that no
+	 * IO operation can be started
+	 */
+	if (cqr->intrc == -ENODEV)
+		return 1;
+
+	if (cqr->intrc == -ENOLINK)
+		return 1;
+
+	if (cqr->intrc == -EPERM)
+		return 1;
+
 	sense = dasd_get_sense(&cqr->irb);
 	if (!sense)
 		return 0;

@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
 	lcu->flags &= ~NEED_UAC_UPDATE;
 	spin_unlock_irqrestore(&lcu->lock, flags);

-	do {
-		rc = dasd_sleep_on(cqr);
-		if (rc && suborder_not_supported(cqr))
-			return -EOPNOTSUPP;
-	} while (rc && (cqr->retries > 0));
-	if (rc) {
+	rc = dasd_sleep_on(cqr);
+	if (rc && !suborder_not_supported(cqr)) {
 		spin_lock_irqsave(&lcu->lock, flags);
 		lcu->flags |= NEED_UAC_UPDATE;
 		spin_unlock_irqrestore(&lcu->lock, flags);
@@ -11,6 +11,7 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

 #include <linux/kthread.h>
+#include <linux/bug.h>
 #include "zfcp_ext.h"
 #include "zfcp_reqlist.h"

@@ -238,6 +239,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 	struct zfcp_erp_action *erp_action;
 	struct zfcp_scsi_dev *zfcp_sdev;

+	if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+			 need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+			 need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+			 need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+		return NULL;
+
 	switch (need) {
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2565,12 +2565,14 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
 	struct sysinfo s;
 	u64 consistent_dma_mask;
+	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+	int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;

 	if (ioc->is_mcpu_endpoint)
 		goto try_32bit;

 	if (ioc->dma_mask)
-		consistent_dma_mask = DMA_BIT_MASK(64);
+		consistent_dma_mask = DMA_BIT_MASK(dma_mask);
 	else
 		consistent_dma_mask = DMA_BIT_MASK(32);

@@ -2578,11 +2580,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 		const uint64_t required_mask =
 		    dma_get_required_mask(&pdev->dev);
 		if ((required_mask > DMA_BIT_MASK(32)) &&
-		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
 		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
 			ioc->base_add_sg_single = &_base_add_sg_single_64;
 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-			ioc->dma_mask = 64;
+			ioc->dma_mask = dma_mask;
 			goto out;
 		}
 	}

@@ -2609,7 +2611,7 @@ static int
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
 				      struct pci_dev *pdev)
 {
-	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
 			return -ENODEV;
 	}

@@ -4545,7 +4547,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 		total_sz += sz;
 	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

-	if (ioc->dma_mask == 64) {
+	if (ioc->dma_mask > 32) {
 		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
 			pr_warn(MPT3SAS_FMT
 				"no suitable consistent DMA mask for %s\n",
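DMA_BIT_MASK(n) expands to an n-bit all-ones mask (with n == 64 special-cased to avoid an undefined full-width shift), so the 63 chosen for SAS3/SAS35 controllers simply keeps the top address bit clear. Quick reference values:

    #include <linux/dma-mapping.h>

    /* DMA_BIT_MASK(32) == 0x00000000ffffffffULL
     * DMA_BIT_MASK(63) == 0x7fffffffffffffffULL
     * DMA_BIT_MASK(64) == 0xffffffffffffffffULL */
    u64 streaming_mask = DMA_BIT_MASK(63);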
@@ -357,8 +357,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);

-	if (((dev_addr + size - 1 <= dma_mask)) ||
-	    range_straddles_page_boundary(phys, size))
+	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+		     range_straddles_page_boundary(phys, size)))
 		xen_destroy_contiguous_region(phys, order);

 	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
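WARN_ON(cond) logs a backtrace when the condition is true and evaluates to the condition itself, so it can guard a branch in a single expression, as the hunk above does. A minimal illustration (region and its release helper are hypothetical names):

    /* runs the cleanup only when the sanity check passed,
     * and leaves a splat in the log when it did not */
    if (!WARN_ON(region == NULL))
    	release_demo_region(region);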
@@ -369,6 +369,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 	struct buffer_head *bh;
 	struct object_info root_obj;
 	unsigned char *b_data;
+	unsigned int blocksize;
 	struct adfs_sb_info *asb;
 	struct inode *root;
 	int ret = -EINVAL;

@@ -420,8 +421,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
 		goto error_free_bh;
 	}

+	blocksize = 1 << dr->log2secsize;
 	brelse(bh);
-	if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+	if (sb_set_blocksize(sb, blocksize)) {
 		bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
 		if (!bh) {
 			adfs_error(sb, "couldn't read superblock on "
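The root cause was reading dr, which points into bh->b_data, after brelse() released the buffer. The rule in miniature: copy what you need out of the buffer first, then release it (same names as the hunk):

    unsigned int blocksize;

    blocksize = 1 << dr->log2secsize;	/* dr points into bh->b_data */
    brelse(bh);				/* bh, and therefore dr, is gone */
    if (sb_set_blocksize(sb, blocksize))	/* safe: uses the saved copy */
    	bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);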
@@ -2249,6 +2249,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 	int ret = 0;
 	int i;
 	u64 *i_qgroups;
+	bool committing = false;
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	struct btrfs_root *quota_root;
 	struct btrfs_qgroup *srcgroup;

@@ -2256,7 +2257,25 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 	u32 level_size = 0;
 	u64 nums;

-	mutex_lock(&fs_info->qgroup_ioctl_lock);
+	/*
+	 * There are only two callers of this function.
+	 *
+	 * One in create_subvol() in the ioctl context, which needs to hold
+	 * the qgroup_ioctl_lock.
+	 *
+	 * The other one in create_pending_snapshot() where no other qgroup
+	 * code can modify the fs as they all need to either start a new trans
+	 * or hold a trans handler, thus we don't need to hold
+	 * qgroup_ioctl_lock.
+	 * This would avoid long and complex lock chain and make lockdep happy.
+	 */
+	spin_lock(&fs_info->trans_lock);
+	if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
+		committing = true;
+	spin_unlock(&fs_info->trans_lock);
+
+	if (!committing)
+		mutex_lock(&fs_info->qgroup_ioctl_lock);
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
 		goto out;

@@ -2420,7 +2439,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
 unlock:
 	spin_unlock(&fs_info->qgroup_lock);
 out:
-	mutex_unlock(&fs_info->qgroup_ioctl_lock);
+	if (!committing)
+		mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	return ret;
 }
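The committing flag decides once, up front, whether this call path owns qgroup_ioctl_lock, and the unlock path repeats exactly the same test; the two sides must stay in sync or the mutex is leaked or double-unlocked. The pattern reduced to its skeleton (names illustrative):

    bool need_lock = !committing;	/* decided once, before the critical section */

    if (need_lock)
    	mutex_lock(&fs_info->qgroup_ioctl_lock);
    /* ... work that may bail out via 'goto out' from many places ... */
    if (need_lock)
    	mutex_unlock(&fs_info->qgroup_ioctl_lock);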
@@ -6272,68 +6272,21 @@ static int changed_extent(struct send_ctx *sctx,
 {
 	int ret = 0;

-	if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
-		if (result == BTRFS_COMPARE_TREE_CHANGED) {
-			struct extent_buffer *leaf_l;
-			struct extent_buffer *leaf_r;
-			struct btrfs_file_extent_item *ei_l;
-			struct btrfs_file_extent_item *ei_r;
-
-			leaf_l = sctx->left_path->nodes[0];
-			leaf_r = sctx->right_path->nodes[0];
-			ei_l = btrfs_item_ptr(leaf_l,
-					      sctx->left_path->slots[0],
-					      struct btrfs_file_extent_item);
-			ei_r = btrfs_item_ptr(leaf_r,
-					      sctx->right_path->slots[0],
-					      struct btrfs_file_extent_item);
-
-			/*
-			 * We may have found an extent item that has changed
-			 * only its disk_bytenr field and the corresponding
-			 * inode item was not updated. This case happens due to
-			 * very specific timings during relocation when a leaf
-			 * that contains file extent items is COWed while
-			 * relocation is ongoing and its in the stage where it
-			 * updates data pointers. So when this happens we can
-			 * safely ignore it since we know it's the same extent,
-			 * but just at different logical and physical locations
-			 * (when an extent is fully replaced with a new one, we
-			 * know the generation number must have changed too,
-			 * since snapshot creation implies committing the current
-			 * transaction, and the inode item must have been updated
-			 * as well).
-			 * This replacement of the disk_bytenr happens at
-			 * relocation.c:replace_file_extents() through
-			 * relocation.c:btrfs_reloc_cow_block().
-			 */
-			if (btrfs_file_extent_generation(leaf_l, ei_l) ==
-			    btrfs_file_extent_generation(leaf_r, ei_r) &&
-			    btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
-			    btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
-			    btrfs_file_extent_compression(leaf_l, ei_l) ==
-			    btrfs_file_extent_compression(leaf_r, ei_r) &&
-			    btrfs_file_extent_encryption(leaf_l, ei_l) ==
-			    btrfs_file_extent_encryption(leaf_r, ei_r) &&
-			    btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
-			    btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
-			    btrfs_file_extent_type(leaf_l, ei_l) ==
-			    btrfs_file_extent_type(leaf_r, ei_r) &&
-			    btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
-			    btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
-			    btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
-			    btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
-			    btrfs_file_extent_offset(leaf_l, ei_l) ==
-			    btrfs_file_extent_offset(leaf_r, ei_r) &&
-			    btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
-			    btrfs_file_extent_num_bytes(leaf_r, ei_r))
-				return 0;
-		}
-
-		inconsistent_snapshot_error(sctx, result, "extent");
-		return -EIO;
-	}
+	/*
+	 * We have found an extent item that changed without the inode item
+	 * having changed. This can happen either after relocation (where the
+	 * disk_bytenr of an extent item is replaced at
+	 * relocation.c:replace_file_extents()) or after deduplication into a
+	 * file in both the parent and send snapshots (where an extent item can
+	 * get modified or replaced with a new one). Note that deduplication
+	 * updates the inode item, but it only changes the iversion (sequence
+	 * field in the inode item) of the inode, so if a file is deduplicated
+	 * the same amount of times in both the parent and send snapshots, its
+	 * iversion becames the same in both snapshots, whence the inode item is
+	 * the same on both snapshots.
+	 */
+	if (sctx->cur_ino != sctx->cmp_key->objectid)
+		return 0;

 	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
 		if (result != BTRFS_COMPARE_TREE_DELETED)
@@ -2027,6 +2027,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 		}
 	} else {
 		spin_unlock(&fs_info->trans_lock);
+		/*
+		 * The previous transaction was aborted and was already removed
+		 * from the list of transactions at fs_info->trans_list. So we
+		 * abort to prevent writing a new superblock that reflects a
+		 * corrupt state (pointing to trees with unwritten nodes/leafs).
+		 */
+		if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+			ret = -EROFS;
+			goto cleanup_transaction;
+		}
 	}

 	extwriter_counter_dec(cur_trans, trans->type);
@@ -5040,8 +5040,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)

 	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
 			 BTRFS_BLOCK_GROUP_RAID10 |
-			 BTRFS_BLOCK_GROUP_RAID5 |
-			 BTRFS_BLOCK_GROUP_DUP)) {
+			 BTRFS_BLOCK_GROUP_RAID5)) {
 		max_errors = 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
 		max_errors = 2;
@@ -526,7 +526,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
 					   long long release_count,
 					   long long ordered_count)
 {
-	smp_mb__before_atomic();
+	/*
+	 * Makes sure operations that setup readdir cache (update page
+	 * cache and i_size) are strongly ordered w.r.t. the following
+	 * atomic64_set() operations.
+	 */
+	smp_mb();
 	atomic64_set(&ci->i_complete_seq[0], release_count);
 	atomic64_set(&ci->i_complete_seq[1], ordered_count);
 }
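smp_mb__before_atomic() only orders against the atomic read-modify-write family (atomic_inc() and friends); atomic64_set() is a plain store, so it needs the full barrier the fix switches to. The distinction in miniature (counter, seq, and value are illustrative):

    /* Correct: an RMW atomic follows the lighter barrier. */
    smp_mb__before_atomic();
    atomic_inc(&counter);

    /* atomic64_set() is a simple store, not an RMW, so only a full
     * barrier orders earlier writes before it. */
    smp_mb();
    atomic64_set(&seq, value);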
@@ -79,7 +79,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 	const char *ns_field = " pool_namespace=";
 	char buf[128];
 	size_t len, total_len = 0;
-	int ret;
+	ssize_t ret;

 	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

@@ -103,11 +103,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 	if (pool_ns)
 		total_len += strlen(ns_field) + pool_ns->len;

-	if (!size) {
-		ret = total_len;
-	} else if (total_len > size) {
-		ret = -ERANGE;
-	} else {
+	ret = total_len;
+	if (size >= total_len) {
 		memcpy(val, buf, len);
 		ret = len;
 		if (pool_name) {

@@ -817,8 +814,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
 		if (err)
 			return err;
 		err = -ENODATA;
-		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
+		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
 			err = vxattr->getxattr_cb(ci, value, size);
+			if (size && size < err)
+				err = -ERANGE;
+		}
 		return err;
 	}
@@ -563,10 +563,10 @@ static bool
 server_unresponsive(struct TCP_Server_Info *server)
 {
 	/*
-	 * We need to wait 2 echo intervals to make sure we handle such
+	 * We need to wait 3 echo intervals to make sure we handle such
 	 * situations right:
 	 * 1s  client sends a normal SMB request
-	 * 2s  client gets a response
+	 * 3s  client gets a response
 	 * 30s echo workqueue job pops, and decides we got a response recently
 	 *     and don't need to send another
 	 * ...

@@ -575,9 +575,9 @@ server_unresponsive(struct TCP_Server_Info *server)
 	 */
 	if ((server->tcpStatus == CifsGood ||
 	    server->tcpStatus == CifsNeedNegotiate) &&
-	    time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
+	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
 		cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
-			 server->hostname, (2 * server->echo_interval) / HZ);
+			 server->hostname, (3 * server->echo_interval) / HZ);
 		cifs_reconnect(server);
 		wake_up(&server->response_q);
 		return true;
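server->lstrp records when the last response arrived; widening the cutoff from two to three echo intervals stops a request sent just before an echo from being misread as a dead server. Jiffies deadlines are compared with time_after(), which survives counter wraparound where a plain '>' would not. A minimal sketch with illustrative names:

    #include <linux/jiffies.h>

    static bool peer_unresponsive(unsigned long last_response,
    			      unsigned long echo_interval)
    {
    	/* wraparound-safe deadline check */
    	return time_after(jiffies, last_response + 3 * echo_interval);
    }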
@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
 	if (req->uc_opcode == CODA_OPEN_BY_FD) {
 		struct coda_open_by_fd_out *outp =
 			(struct coda_open_by_fd_out *)req->uc_data;
-		if (!outp->oh.result)
+		if (!outp->oh.result) {
 			outp->fh = fget(outp->fd);
+			if (!outp->fh)
+				return -EBADF;
+		}
 	}

 	wake_up(&req->uc_sleep);
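fget() resolves a descriptor to its struct file and takes a reference, returning NULL for a closed or invalid fd -- the case the hunk now reports as -EBADF instead of storing a NULL pointer. The reference must later be dropped with fput(); a minimal sketch:

    #include <linux/file.h>

    static int demo_use_fd(int fd)
    {
    	struct file *filp = fget(fd);

    	if (!filp)
    		return -EBADF;	/* fd was not open */
    	/* ... use filp ... */
    	fput(filp);		/* drop the reference taken by fget() */
    	return 0;
    }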
@@ -326,7 +326,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
 #ifdef CONFIG_X86_IO_APIC
 extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+	return -1;
+}
 #endif
 /*
  * This function undoes the effect of one call to acpi_register_gsi().
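The macro stub discarded its arguments at preprocessing time, so the compiler saw callers' trigger/polarity variables as never read and flagged them as uninitialized; a static inline keeps the arguments evaluated and type-checked. A side-by-side sketch (names are illustrative, not the kernel's):

    /* Macro stub: 'trig' and 'pol' vanish before the compiler runs,
     * inviting -Wuninitialized false positives at call sites. */
    #define get_override_irq_stub(gsi, trig, pol)	(-1)

    /* Inline stub: parameters are real, evaluated, and type-checked. */
    static inline int get_override_irq(u32 gsi, int *trig, int *pol)
    {
    	return -1;
    }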
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
 #ifndef _CODA_HEADER_
 #define _CODA_HEADER_

-#if defined(__linux__)
 typedef unsigned long long u_quad_t;
-#endif
+
 #include <uapi/linux/coda.h>
 #endif
@@ -19,6 +19,17 @@ struct venus_comm {
 	struct mutex	    vc_mutex;
 };

+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+	struct list_head	uc_chain;
+	caddr_t			uc_data;
+	u_short			uc_flags;
+	u_short			uc_inSize;  /* Size is at most 5000 bytes */
+	u_short			uc_outSize;
+	u_short			uc_opcode;  /* copied from data to save lookup */
+	int			uc_unique;
+	wait_queue_head_t	uc_sleep;   /* process' wait queue */
+};
+
 static inline struct venus_comm *coda_vcp(struct super_block *sb)
 {
@ -7,19 +7,6 @@
|
|||
#define CODA_PSDEV_MAJOR 67
|
||||
#define MAX_CODADEVS 5 /* how many do we allow */
|
||||
|
||||
|
||||
/* messages between coda filesystem in kernel and Venus */
|
||||
struct upc_req {
|
||||
struct list_head uc_chain;
|
||||
caddr_t uc_data;
|
||||
u_short uc_flags;
|
||||
u_short uc_inSize; /* Size is at most 5000 bytes */
|
||||
u_short uc_outSize;
|
||||
u_short uc_opcode; /* copied from data to save lookup */
|
||||
int uc_unique;
|
||||
wait_queue_head_t uc_sleep; /* process' wait queue */
|
||||
};
|
||||
|
||||
#define CODA_REQ_ASYNC 0x1
|
||||
#define CODA_REQ_READ 0x2
|
||||
#define CODA_REQ_WRITE 0x4
|
||||
|
|
ipc/mqueue.c

@@ -389,7 +389,6 @@ static void mqueue_evict_inode(struct inode *inode)
 {
 	struct mqueue_inode_info *info;
 	struct user_struct *user;
-	unsigned long mq_bytes, mq_treesize;
 	struct ipc_namespace *ipc_ns;
 	struct msg_msg *msg, *nmsg;
 	LIST_HEAD(tmp_msg);

@@ -412,16 +411,18 @@ static void mqueue_evict_inode(struct inode *inode)
 		free_msg(msg);
 	}

-	/* Total amount of bytes accounted for the mqueue */
-	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
-		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
-		sizeof(struct posix_msg_tree_node);
-
-	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
-				  info->attr.mq_msgsize);
-
 	user = info->user;
 	if (user) {
+		unsigned long mq_bytes, mq_treesize;
+
+		/* Total amount of bytes accounted for the mqueue */
+		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+			sizeof(struct posix_msg_tree_node);
+
+		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+					  info->attr.mq_msgsize);
+
 		spin_lock(&mq_lock);
 		user->mq_bytes -= mq_bytes;
 		/*
@@ -3399,8 +3399,7 @@ static bool finished_loading(const char *name)
 	sched_annotate_sleep();
 	mutex_lock(&module_mutex);
 	mod = find_module_all(name, strlen(name), true);
-	ret = !mod || mod->state == MODULE_STATE_LIVE
-		|| mod->state == MODULE_STATE_GOING;
+	ret = !mod || mod->state == MODULE_STATE_LIVE;
 	mutex_unlock(&module_mutex);

 	return ret;

@@ -3570,8 +3569,7 @@ static int add_unformed_module(struct module *mod)
 	mutex_lock(&module_mutex);
 	old = find_module_all(mod->name, strlen(mod->name), true);
 	if (old != NULL) {
-		if (old->state == MODULE_STATE_COMING
-		    || old->state == MODULE_STATE_UNFORMED) {
+		if (old->state != MODULE_STATE_LIVE) {
 			/* Wait in case it fails to load. */
 			mutex_unlock(&module_mutex);
 			err = wait_event_interruptible(module_wq,
@@ -1648,6 +1648,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 	return  keep_regs;
 }

+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
 				     int filter_hash,
 				     bool inc)

@@ -1776,15 +1781,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
 		}

 		/*
-		 * If the rec had TRAMP enabled, then it needs to
-		 * be cleared. As TRAMP can only be enabled iff
-		 * there is only a single ops attached to it.
-		 * In otherwords, always disable it on decrementing.
-		 * In the future, we may set it if rec count is
-		 * decremented to one, and the ops that is left
-		 * has a trampoline.
+		 * The TRAMP needs to be set only if rec count
+		 * is decremented to one, and the ops that is
+		 * left has a trampoline. As TRAMP can only be
+		 * enabled if there is only a single ops attached
+		 * to it.
 		 */
-		rec->flags &= ~FTRACE_FL_TRAMP;
+		if (ftrace_rec_count(rec) == 1 &&
+		    ftrace_find_tramp_ops_any(rec))
+			rec->flags |= FTRACE_FL_TRAMP;
+		else
+			rec->flags &= ~FTRACE_FL_TRAMP;

 		/*
 		 * flags will be cleared in ftrace_check_record()

@@ -1977,11 +1984,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }

-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
 enum ftrace_bug_type ftrace_bug_type;
 const void *ftrace_expected;
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
  * Deal with the various forms of allocator arguments. See comments above
  * the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
  */
-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc_GFP		 (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
 #define alloc000(alloc, arg, sz) alloc(sz)
 #define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
 #define free0(free, arg, ptr)	  free(ptr)
 #define free1(free, arg, ptr)	  free(arg, ptr)

-/* Wrap around to 8K */
-#define TEST_SIZE (9 << PAGE_SHIFT)
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)

 #define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)	\
 static int __init test_ ## func (void *arg)				\
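__GFP_NOWARN suppresses the allocation-failure warning that a deliberately absurd request would otherwise print (and the WARN-induced taint that goes with it), which is exactly what a negative test wants. A minimal sketch (try_huge_alloc is an illustrative name):

    #include <linux/slab.h>

    static void *try_huge_alloc(size_t huge_size)
    {
    	/* expected to fail quietly: no splat in dmesg, no taint */
    	return kmalloc(huge_size, GFP_KERNEL | __GFP_NOWARN);
    }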
@@ -35,7 +35,7 @@ static __init int memset16_selftest(void)
 fail:
 	kfree(p);
 	if (i < 256)
-		return (i << 24) | (j << 16) | k;
+		return (i << 24) | (j << 16) | k | 0x8000;
 	return 0;
 }

@@ -71,7 +71,7 @@ static __init int memset32_selftest(void)
 fail:
 	kfree(p);
 	if (i < 256)
-		return (i << 24) | (j << 16) | k;
+		return (i << 24) | (j << 16) | k | 0x8000;
 	return 0;
 }

@@ -107,7 +107,7 @@ static __init int memset64_selftest(void)
 fail:
 	kfree(p);
 	if (i < 256)
-		return (i << 24) | (j << 16) | k;
+		return (i << 24) | (j << 16) | k | 0x8000;
 	return 0;
 }
mm/cma.c

@@ -282,6 +282,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	 */
 	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
 			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+	if (fixed && base & (alignment - 1)) {
+		ret = -EINVAL;
+		pr_err("Region at %pa must be aligned to %pa bytes\n",
+			&base, &alignment);
+		goto err;
+	}
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);

@@ -312,6 +318,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	if (limit == 0 || limit > memblock_end)
 		limit = memblock_end;

+	if (base + size > limit) {
+		ret = -EINVAL;
+		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+			&size, &base, &limit);
+		goto err;
+	}
+
 	/* Reserve memory */
 	if (fixed) {
 		if (memblock_is_region_reserved(base, size) ||

mm/vmscan.c

@@ -671,7 +671,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;

-	if (!mem_cgroup_is_root(memcg))
+	/*
+	 * The root memcg might be allocated even though memcg is disabled
+	 * via "cgroup_disable=memory" boot parameter. This could make
+	 * mem_cgroup_is_root() return false, then just run memcg slab
+	 * shrink, but skip global shrink. This may result in premature
+	 * oom.
+	 */
+	if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

 	if (!down_read_trylock(&shrinker_rwsem))
@@ -784,6 +784,7 @@ int conf_write(const char *name)
 	const char *str;
 	char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
 	char *env;
+	int i;

 	dirname[0] = 0;
 	if (name && name[0]) {

@@ -860,6 +861,9 @@ int conf_write(const char *name)
 	}
 	fclose(out);

+	for_all_symbols(i, sym)
+		sym->flags &= ~SYMBOL_WRITTEN;
+
 	if (*tmpname) {
 		strcat(dirname, basename);
 		strcat(dirname, ".old");
@@ -275,6 +275,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
 	return v;
 }

+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
 /*
  * Initialize a policy database structure.
 */

@@ -322,8 +324,10 @@ static int policydb_init(struct policydb *p)
 out:
 	hashtab_destroy(p->filename_trans);
 	hashtab_destroy(p->range_tr);
-	for (i = 0; i < SYM_NUM; i++)
+	for (i = 0; i < SYM_NUM; i++) {
+		hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
 		hashtab_destroy(p->symtab[i].table);
+	}
 	return rc;
 }
@@ -143,10 +143,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
 	if (!acomp)
 		return -ENODEV;
 	if (!acomp->ops) {
-		request_module("i915");
-		/* 60s timeout */
-		wait_for_completion_timeout(&bind_complete,
-					    msecs_to_jiffies(60 * 1000));
+		if (!IS_ENABLED(CONFIG_MODULES) ||
+		    !request_module("i915")) {
+			/* 60s timeout */
+			wait_for_completion_timeout(&bind_complete,
+						    msecs_to_jiffies(60 * 1000));
+		}
 	}
 	if (!acomp->ops) {
 		dev_info(bus->dev, "couldn't bind with audio component\n");
@@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
 		if (sym->type != STT_FUNC)
 			continue;
 		sym->pfunc = sym->cfunc = sym;
-		coldstr = strstr(sym->name, ".cold.");
+		coldstr = strstr(sym->name, ".cold");
 		if (!coldstr)
 			continue;
@@ -19,6 +19,7 @@ static struct version version;
 static struct option version_options[] = {
 	OPT_BOOLEAN(0, "build-options", &version.build_options,
 		    "display the build options"),
+	OPT_END(),
 };

 static const char * const version_usage[] = {
@@ -181,8 +181,7 @@ int cg_find_unified_root(char *root, size_t len)
 		strtok(NULL, delim);
 		strtok(NULL, delim);

-		if (strcmp(fs, "cgroup") == 0 &&
-		    strcmp(type, "cgroup2") == 0) {
+		if (strcmp(type, "cgroup2") == 0) {
 			strncpy(root, mount, len);
 			return 0;
 		}