Merge android-4.19.35 (3e16663) into msm-4.19

* refs/heads/tmp-3e16663:
  Linux 4.19.35
  KVM: x86: nVMX: fix x2APIC VTPR read intercept
  KVM: x86: nVMX: close leak of L0's x2APIC MSRs (CVE-2019-3887)
  ACPICA: AML interpreter: add region addresses in global list during initialization
  arm64: dts: rockchip: Fix vcc_host1_5v GPIO polarity on rk3328-rock64
  arm64: dts: rockchip: fix vcc_host1_5v pin assign on rk3328-rock64
  dm integrity: fix deadlock with overlapping I/O
  dm table: propagate BDI_CAP_STABLE_WRITES to fix sporadic checksum errors
  dm: revert 8f50e35815 ("dm: limit the max bio size as BIO_MAX_PAGES * PAGE_SIZE")
  dm integrity: change memcmp to strncmp in dm_integrity_ctr
  PCI: pciehp: Ignore Link State Changes after powering off a slot
  PCI: Add function 1 DMA alias quirk for Marvell 9170 SATA controller
  x86/perf/amd: Remove need to check "running" bit in NMI handler
  x86/perf/amd: Resolve NMI latency issues for active PMCs
  x86/perf/amd: Resolve race condition when disabling PMC
  x86/asm: Use stricter assembly constraints in bitops
  x86/asm: Remove dead __GNUC__ conditionals
  xtensa: fix return_address
  sched/fair: Do not re-read ->h_load_next during hierarchical load calculation
  xen: Prevent buffer overflow in privcmd ioctl
  arm64: backtrace: Don't bother trying to unwind the userspace stack
  arm64: dts: rockchip: fix rk3328 rgmii high tx error rate
  arm64: futex: Fix FUTEX_WAKE_OP atomic ops with non-zero result value
  ARM: dts: at91: Fix typo in ISC_D0 on PC9
  ARM: dts: am335x-evm: Correct the regulators for the audio codec
  ARM: dts: am335x-evmsk: Correct the regulators for the audio codec
  ARM: dts: rockchip: fix rk3288 cpu opp node reference
  virtio: Honour 'may_reduce_num' in vring_create_virtqueue
  genirq: Initialize request_mutex if CONFIG_SPARSE_IRQ=n
  genirq: Respect IRQCHIP_SKIP_SET_WAKE in irq_chip_set_wake_parent()
  block: fix the return errno for direct IO
  block: do not leak memory in bio_copy_user_iov()
  riscv: Fix syscall_get_arguments() and syscall_set_arguments()
  btrfs: prop: fix vanished compression property after failed set
  btrfs: prop: fix zstd compression parameter validation
  Btrfs: do not allow trimming when a fs is mounted with the nologreplay option
  ASoC: fsl_esai: fix channel swap issue when stream starts
  ASoC: intel: Fix crash at suspend/resume after failed codec registration
  mm: writeback: use exact memcg dirty counts
  include/linux/bitrev.h: fix constant bitrev
  kvm: svm: fix potential get_num_contig_pages overflow
  drm/udl: add a release method and delay modeset teardown
  drm/i915/gvt: do not deliver a workload if its creation fails
  alarmtimer: Return correct remaining time
  parisc: also set iaoq_b in instruction_pointer_set()
  parisc: regs_return_value() should return gpr28
  parisc: Detect QEMU earlier in boot process
  arm64: dts: rockchip: fix rk3328 sdmmc0 write errors
  mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()
  ALSA: hda - Add two more machines to the power_save_blacklist
  ALSA: hda/realtek - Add quirk for Tuxedo XC 1509
  ALSA: hda/realtek: Enable headset MIC of Acer TravelMate B114-21 with ALC233
  ALSA: seq: Fix OOB-reads from strlcpy
  ACPICA: Namespace: remove address node from global list after method termination
  ACPICA: Clear status of GPEs before enabling them
  hwmon: (w83773g) Select REGMAP_I2C to fix build error
  tty: ldisc: add sysctl to prevent autoloading of ldiscs
  tty: mark Siemens R3964 line discipline as BROKEN
  arm64: kaslr: Reserve size of ARM64_MEMSTART_ALIGN in linear region
  netfilter: nfnetlink_cttimeout: fetch timeouts for udplite and gre, too
  netfilter: nfnetlink_cttimeout: pass default timeout policy to obj_to_nlattr
  Revert "clk: meson: clean-up clock registration"
  lib/string.c: implement a basic bcmp
  x86/vdso: Drop implicit common-page-size linker flag
  kbuild: clang: choose GCC_TOOLCHAIN_DIR not on LD
  kbuild: deb-pkg: fix bindeb-pkg breakage when O= is used
  net/mlx5e: Update xon formula
  net/mlx5e: Update xoff formula
  net: mlx5: Add a missing check on idr_find, free buf
  r8169: disable default rx interrupt coalescing on RTL8168
  net: core: netif_receive_skb_list: unlist skb before passing to pt->func
  net: ip6_gre: fix possible use-after-free in ip6erspan_rcv
  net: ip_gre: fix possible use-after-free in erspan_rcv
  bnxt_en: Reset device on RX buffer errors.
  bnxt_en: Improve RX consumer index validity check.
  nfp: disable netpoll on representors
  nfp: validate the return code from dev_queue_xmit()
  net/mlx5e: Add a lock on tir list
  net/mlx5e: Fix error handling when refreshing TIRs
  vrf: check accept_source_route on the original netdevice
  tcp: fix a potential NULL pointer dereference in tcp_sk_exit
  tcp: Ensure DCTCP reacts to losses
  sctp: initialize _pad of sockaddr_in before copying to user memory
  r8169: disable ASPM again
  qmi_wwan: add Olicard 600
  openvswitch: fix flow actions reallocation
  net/sched: fix ->get helper of the matchall cls
  net/sched: act_sample: fix divide by zero in the traffic path
  net: rds: force to destroy connection if t_sock is NULL in rds_tcp_kill_sock().
  netns: provide pure entropy for net_hash_mix()
  net/mlx5: Decrease default mr cache size
  net-gro: Fix GRO flush when receiving a GSO packet.
  net: ethtool: not call vzalloc for zero sized memory request
  kcm: switch order of device registration to fix a crash
  ipv6: sit: reset ip header pointer in ipip6_rcv
  ipv6: Fix dangling pointer when ipv6 fragment
  ip6_tunnel: Match to ARPHRD_TUNNEL6 for dev type
  ibmvnic: Fix completion structure initialization
  hv_netvsc: Fix unwanted wakeup after tx_disable
  powerpc/tm: Limit TM code inside PPC_TRANSACTIONAL_MEM
  drm/i915/gvt: do not let pin count of shadow mm go negative
  kvm: nVMX: NMI-window and interrupt-window exiting should wake L2 from HLT
  sched/fair: remove printk while schedule is in progress
  ANDROID: cuttlefish_defconfig: Enable CONFIG_FUSE_FS

Conflicts:
	drivers/tty/Kconfig
	kernel/sched/fair.c

Change-Id: I274ed1c395d53d718a0ad33f37f48afd423db510
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Committed by Ivaylo Georgiev on 2019-05-16 05:00:47 -07:00
commit 853ed1ac0e
105 changed files with 1039 additions and 455 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 34
SUBLEVEL = 35
EXTRAVERSION =
NAME = "People's Front"
@ -486,12 +486,8 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE)
CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif


@ -57,6 +57,24 @@
enable-active-high;
};
/* TPS79501 */
v1_8d_reg: fixedregulator-v1_8d {
compatible = "regulator-fixed";
regulator-name = "v1_8d";
vin-supply = <&vbat>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
/* TPS79501 */
v3_3d_reg: fixedregulator-v3_3d {
compatible = "regulator-fixed";
regulator-name = "v3_3d";
vin-supply = <&vbat>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
matrix_keypad: matrix_keypad0 {
compatible = "gpio-matrix-keypad";
debounce-delay-ms = <5>;
@ -499,10 +517,10 @@
status = "okay";
/* Regulators */
AVDD-supply = <&vaux2_reg>;
IOVDD-supply = <&vaux2_reg>;
DRVDD-supply = <&vaux2_reg>;
DVDD-supply = <&vbat>;
AVDD-supply = <&v3_3d_reg>;
IOVDD-supply = <&v3_3d_reg>;
DRVDD-supply = <&v3_3d_reg>;
DVDD-supply = <&v1_8d_reg>;
};
};


@ -73,6 +73,24 @@
enable-active-high;
};
/* TPS79518 */
v1_8d_reg: fixedregulator-v1_8d {
compatible = "regulator-fixed";
regulator-name = "v1_8d";
vin-supply = <&vbat>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
/* TPS78633 */
v3_3d_reg: fixedregulator-v3_3d {
compatible = "regulator-fixed";
regulator-name = "v3_3d";
vin-supply = <&vbat>;
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
leds {
pinctrl-names = "default";
pinctrl-0 = <&user_leds_s0>;
@ -501,10 +519,10 @@
status = "okay";
/* Regulators */
AVDD-supply = <&vaux2_reg>;
IOVDD-supply = <&vaux2_reg>;
DRVDD-supply = <&vaux2_reg>;
DVDD-supply = <&vbat>;
AVDD-supply = <&v3_3d_reg>;
IOVDD-supply = <&v3_3d_reg>;
DRVDD-supply = <&v3_3d_reg>;
DVDD-supply = <&v1_8d_reg>;
};
};


@ -70,7 +70,7 @@
compatible = "arm,cortex-a12";
reg = <0x501>;
resets = <&cru SRST_CORE1>;
operating-points = <&cpu_opp_table>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
@ -80,7 +80,7 @@
compatible = "arm,cortex-a12";
reg = <0x502>;
resets = <&cru SRST_CORE2>;
operating-points = <&cpu_opp_table>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;
@ -90,7 +90,7 @@
compatible = "arm,cortex-a12";
reg = <0x503>;
resets = <&cru SRST_CORE3>;
operating-points = <&cpu_opp_table>;
operating-points-v2 = <&cpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
clock-latency = <40000>;
clocks = <&cru ARMCLK>;


@ -518,7 +518,7 @@
#define PIN_PC9__GPIO PINMUX_PIN(PIN_PC9, 0, 0)
#define PIN_PC9__FIQ PINMUX_PIN(PIN_PC9, 1, 3)
#define PIN_PC9__GTSUCOMP PINMUX_PIN(PIN_PC9, 2, 1)
#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 2, 1)
#define PIN_PC9__ISC_D0 PINMUX_PIN(PIN_PC9, 3, 1)
#define PIN_PC9__TIOA4 PINMUX_PIN(PIN_PC9, 4, 2)
#define PIN_PC10 74
#define PIN_PC10__GPIO PINMUX_PIN(PIN_PC10, 0, 0)


@ -45,8 +45,7 @@
vcc_host1_5v: vcc_otg_5v: vcc-host1-5v-regulator {
compatible = "regulator-fixed";
enable-active-high;
gpio = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
gpio = <&gpio0 RK_PA2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&usb20_host_drv>;
regulator-name = "vcc_host1_5v";
@ -238,7 +237,7 @@
usb2 {
usb20_host_drv: usb20-host-drv {
rockchip,pins = <0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>;
rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>;
};
};


@ -1356,11 +1356,11 @@
sdmmc0 {
sdmmc0_clk: sdmmc0-clk {
rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_4ma>;
rockchip,pins = <1 RK_PA6 1 &pcfg_pull_none_8ma>;
};
sdmmc0_cmd: sdmmc0-cmd {
rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_4ma>;
rockchip,pins = <1 RK_PA4 1 &pcfg_pull_up_8ma>;
};
sdmmc0_dectn: sdmmc0-dectn {
@ -1372,14 +1372,14 @@
};
sdmmc0_bus1: sdmmc0-bus1 {
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>;
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>;
};
sdmmc0_bus4: sdmmc0-bus4 {
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_4ma>,
<1 RK_PA1 1 &pcfg_pull_up_4ma>,
<1 RK_PA2 1 &pcfg_pull_up_4ma>,
<1 RK_PA3 1 &pcfg_pull_up_4ma>;
rockchip,pins = <1 RK_PA0 1 &pcfg_pull_up_8ma>,
<1 RK_PA1 1 &pcfg_pull_up_8ma>,
<1 RK_PA2 1 &pcfg_pull_up_8ma>,
<1 RK_PA3 1 &pcfg_pull_up_8ma>;
};
sdmmc0_gpio: sdmmc0-gpio {
@ -1553,50 +1553,50 @@
rgmiim1_pins: rgmiim1-pins {
rockchip,pins =
/* mac_txclk */
<1 RK_PB4 2 &pcfg_pull_none_12ma>,
<1 RK_PB4 2 &pcfg_pull_none_8ma>,
/* mac_rxclk */
<1 RK_PB5 2 &pcfg_pull_none_2ma>,
<1 RK_PB5 2 &pcfg_pull_none_4ma>,
/* mac_mdio */
<1 RK_PC3 2 &pcfg_pull_none_2ma>,
<1 RK_PC3 2 &pcfg_pull_none_4ma>,
/* mac_txen */
<1 RK_PD1 2 &pcfg_pull_none_12ma>,
<1 RK_PD1 2 &pcfg_pull_none_8ma>,
/* mac_clk */
<1 RK_PC5 2 &pcfg_pull_none_2ma>,
<1 RK_PC5 2 &pcfg_pull_none_4ma>,
/* mac_rxdv */
<1 RK_PC6 2 &pcfg_pull_none_2ma>,
<1 RK_PC6 2 &pcfg_pull_none_4ma>,
/* mac_mdc */
<1 RK_PC7 2 &pcfg_pull_none_2ma>,
<1 RK_PC7 2 &pcfg_pull_none_4ma>,
/* mac_rxd1 */
<1 RK_PB2 2 &pcfg_pull_none_2ma>,
<1 RK_PB2 2 &pcfg_pull_none_4ma>,
/* mac_rxd0 */
<1 RK_PB3 2 &pcfg_pull_none_2ma>,
<1 RK_PB3 2 &pcfg_pull_none_4ma>,
/* mac_txd1 */
<1 RK_PB0 2 &pcfg_pull_none_12ma>,
<1 RK_PB0 2 &pcfg_pull_none_8ma>,
/* mac_txd0 */
<1 RK_PB1 2 &pcfg_pull_none_12ma>,
<1 RK_PB1 2 &pcfg_pull_none_8ma>,
/* mac_rxd3 */
<1 RK_PB6 2 &pcfg_pull_none_2ma>,
<1 RK_PB6 2 &pcfg_pull_none_4ma>,
/* mac_rxd2 */
<1 RK_PB7 2 &pcfg_pull_none_2ma>,
<1 RK_PB7 2 &pcfg_pull_none_4ma>,
/* mac_txd3 */
<1 RK_PC0 2 &pcfg_pull_none_12ma>,
<1 RK_PC0 2 &pcfg_pull_none_8ma>,
/* mac_txd2 */
<1 RK_PC1 2 &pcfg_pull_none_12ma>,
<1 RK_PC1 2 &pcfg_pull_none_8ma>,
/* mac_txclk */
<0 RK_PB0 1 &pcfg_pull_none>,
<0 RK_PB0 1 &pcfg_pull_none_8ma>,
/* mac_txen */
<0 RK_PB4 1 &pcfg_pull_none>,
<0 RK_PB4 1 &pcfg_pull_none_8ma>,
/* mac_clk */
<0 RK_PD0 1 &pcfg_pull_none>,
<0 RK_PD0 1 &pcfg_pull_none_4ma>,
/* mac_txd1 */
<0 RK_PC0 1 &pcfg_pull_none>,
<0 RK_PC0 1 &pcfg_pull_none_8ma>,
/* mac_txd0 */
<0 RK_PC1 1 &pcfg_pull_none>,
<0 RK_PC1 1 &pcfg_pull_none_8ma>,
/* mac_txd3 */
<0 RK_PC7 1 &pcfg_pull_none>,
<0 RK_PC7 1 &pcfg_pull_none_8ma>,
/* mac_txd2 */
<0 RK_PC6 1 &pcfg_pull_none>;
<0 RK_PC6 1 &pcfg_pull_none_8ma>;
};
rmiim1_pins: rmiim1-pins {


@ -412,6 +412,7 @@ CONFIG_F2FS_FS_ENCRYPTION=y
# CONFIG_DNOTIFY is not set
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y


@ -30,8 +30,8 @@ do { \
" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
"2: stlxr %w0, %w3, %2\n" \
" cbnz %w0, 1b\n" \
" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
@ -50,30 +50,30 @@ do { \
static inline int
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
int oldval = 0, ret, tmp;
int oldval, ret, tmp;
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("mov %w0, %w4",
__futex_atomic_op("mov %w3, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op("add %w0, %w1, %w4",
__futex_atomic_op("add %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("orr %w0, %w1, %w4",
__futex_atomic_op("orr %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and %w0, %w1, %w4",
__futex_atomic_op("and %w3, %w1, %w4",
ret, oldval, uaddr, tmp, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("eor %w0, %w1, %w4",
__futex_atomic_op("eor %w3, %w1, %w4",
ret, oldval, uaddr, tmp, oparg);
break;
default:


@ -102,10 +102,16 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
int skip;
int skip = 0;
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (regs) {
if (user_mode(regs))
return;
skip = 1;
}
if (!tsk)
tsk = current;
@ -126,7 +132,6 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.graph = tsk->curr_ret_stack;
#endif
skip = !!regs;
printk("Call trace:\n");
do {
/* skip until specified stack frame */
@ -176,15 +181,13 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs)) {
dump_backtrace(regs, tsk);
if (!user_mode(regs))
dump_instr(KERN_EMERG, regs);
}
return ret;
}


@ -22,13 +22,14 @@ unsigned long profile_pc(struct pt_regs *);
static inline unsigned long regs_return_value(struct pt_regs *regs)
{
return regs->gr[20];
return regs->gr[28];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->iaoq[0] = val;
regs->iaoq[0] = val;
regs->iaoq[1] = val + 4;
}
/* Query offset/name of register from its name/offset */


@ -210,12 +210,6 @@ void __cpuidle arch_cpu_idle(void)
static int __init parisc_idle_init(void)
{
const char *marker;
/* check QEMU/SeaBIOS marker in PAGE0 */
marker = (char *) &PAGE0->pad0;
running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
if (!running_on_qemu)
cpu_idle_poll_ctrl(1);


@ -399,6 +399,9 @@ void __init start_parisc(void)
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
/* check QEMU/SeaBIOS marker in PAGE0 */
running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0);
cpunum = smp_processor_id();
init_cpu_topology();


@ -755,12 +755,25 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
}
else
/* Fall through, for non-TM restore */
} else
#endif
if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
goto badframe;
{
/*
* Fall through, for non-TM restore
*
* Unset MSR[TS] on the thread regs since MSR from user
* context does not have MSR active, and recheckpoint was
* not called since restore_tm_sigcontexts() was not called
* also.
*
* If not unsetting it, the code can RFID to userspace with
* MSR[TS] set, but without CPU in the proper state,
* causing a TM bad thing.
*/
current->thread.regs->msr &= ~MSR_TS_MASK;
if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
goto badframe;
}
if (restore_altstack(&uc->uc_stack))
goto badframe;


@ -78,10 +78,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
if (i == 0) {
args[0] = regs->orig_a0;
args++;
i++;
n--;
} else {
i--;
}
memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
memcpy(args, &regs->a1 + i, n * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
@ -93,10 +94,11 @@ static inline void syscall_set_arguments(struct task_struct *task,
if (i == 0) {
regs->orig_a0 = args[0];
args++;
i++;
n--;
}
memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
} else {
i--;
}
memcpy(&regs->a1 + i, args, n * sizeof(regs->a1));
}
#endif /* _ASM_RISCV_SYSCALL_H */
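The corrected memcpy() calls rely on C pointer arithmetic already scaling by the element size: &regs->a1 + i is the address of the i-th register after a1, whereas the old &regs->a1 + i * sizeof(regs->a1) overshot by a factor of sizeof(unsigned long). A minimal standalone sketch of the same indexing (hypothetical demo_regs layout, not the kernel's struct pt_regs):

#include <stdio.h>
#include <string.h>

struct demo_regs { unsigned long orig_a0, a1, a2, a3, a4, a5, a6, a7; };

int main(void)
{
	struct demo_regs regs = { 0, 11, 22, 33, 44, 55, 66, 77 };
	unsigned long args[3];

	/* Fetch three values starting one register past a1; the +1 already
	 * advances the pointer by sizeof(unsigned long) bytes. */
	memcpy(args, &regs.a1 + 1, sizeof(args));
	printf("%lu %lu %lu\n", args[0], args[1], args[2]); /* 22 33 44 */
	return 0;
}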


@ -425,6 +425,7 @@ CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y


@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
-z max-page-size=4096 -z common-page-size=4096
-z max-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
-z max-page-size=4096 -z common-page-size=4096
-z max-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)


@ -3,10 +3,14 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>
#include "../perf_event.h"
static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@ -429,6 +433,132 @@ static void amd_pmu_cpu_dead(int cpu)
}
}
/*
* When a PMC counter overflows, an NMI is used to process the event and
* reset the counter. NMI latency can result in the counter being updated
* before the NMI can run, which can result in what appear to be spurious
* NMIs. This function is intended to wait for the NMI to run and reset
* the counter to avoid possible unhandled NMI messages.
*/
#define OVERFLOW_WAIT_COUNT 50
static void amd_pmu_wait_on_overflow(int idx)
{
unsigned int i;
u64 counter;
/*
* Wait for the counter to be reset if it has overflowed. This loop
* should exit very, very quickly, but just in case, don't wait
* forever...
*/
for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
rdmsrl(x86_pmu_event_addr(idx), counter);
if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
break;
/* Might be in IRQ context, so can't sleep */
udelay(1);
}
}
static void amd_pmu_disable_all(void)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
x86_pmu_disable_all();
/*
* This shouldn't be called from NMI context, but add a safeguard here
* to return, since if we're in NMI context we can't wait for an NMI
* to reset an overflowed counter value.
*/
if (in_nmi())
return;
/*
* Check each counter for overflow and wait for it to be reset by the
* NMI if it has overflowed. This relies on the fact that all active
counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
amd_pmu_wait_on_overflow(idx);
}
}
static void amd_pmu_disable_event(struct perf_event *event)
{
x86_pmu_disable_event(event);
/*
* This can be called from NMI context (via x86_pmu_stop). The counter
* may have overflowed, but either way, we'll never see it get reset
* by the NMI if we're already in the NMI. And the NMI latency support
* below will take care of any pending NMI that might have been
* generated by the overflow.
*/
if (in_nmi())
return;
amd_pmu_wait_on_overflow(event->hw.idx);
}
/*
* Because of NMI latency, if multiple PMC counters are active or other sources
* of NMIs are received, the perf NMI handler can handle one or more overflowed
* PMC counters outside of the NMI associated with the PMC overflow. If the NMI
* doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
* back-to-back NMI support won't be active. This PMC handler needs to take into
* account that this can occur, otherwise this could result in unknown NMI
messages being issued. Examples of this include PMC overflow while in the NMI
* handler when multiple PMCs are active or PMC overflow while handling some
* other source of an NMI.
*
* Attempt to mitigate this by using the number of active PMCs to determine
* whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
* any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
* number of active PMCs or 2. The value of 2 is used in case an NMI does not
* arrive at the LAPIC in time to be collapsed into an already pending NMI.
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int active, handled;
/*
* Obtain the active count before calling x86_pmu_handle_irq() since
* it is possible that x86_pmu_handle_irq() may make a counter
* inactive (through x86_pmu_stop).
*/
active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
/* Process any counter overflows */
handled = x86_pmu_handle_irq(regs);
/*
* If a counter was handled, record the number of possible remaining
* NMIs that can occur.
*/
if (handled) {
this_cpu_write(perf_nmi_counter,
min_t(unsigned int, 2, active));
return handled;
}
if (!this_cpu_read(perf_nmi_counter))
return NMI_DONE;
this_cpu_dec(perf_nmi_counter);
return NMI_HANDLED;
}
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
@ -621,11 +751,11 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
static __initconst const struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.handle_irq = amd_pmu_handle_irq,
.disable_all = amd_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
.enable = x86_pmu_enable_event,
.disable = x86_pmu_disable_event,
.disable = amd_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_K7_EVNTSEL0,
@ -728,7 +858,7 @@ void amd_pmu_enable_virt(void)
cpuc->perf_ctr_virt_mask = 0;
/* Reload all events */
x86_pmu_disable_all();
amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
@ -746,7 +876,7 @@ void amd_pmu_disable_virt(void)
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
/* Reload all events */
x86_pmu_disable_all();
amd_pmu_disable_all();
x86_pmu_enable_all(0);
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);


@ -1328,8 +1328,9 @@ void x86_pmu_stop(struct perf_event *event, int flags)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
if (test_bit(hwc->idx, cpuc->active_mask)) {
x86_pmu.disable(event);
__clear_bit(hwc->idx, cpuc->active_mask);
cpuc->events[hwc->idx] = NULL;
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
@ -1426,16 +1427,8 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
apic_write(APIC_LVTPC, APIC_DM_NMI);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask)) {
/*
* Though we deactivated the counter some cpus
* might still deliver spurious interrupts still
* in flight. Catch them:
*/
if (__test_and_clear_bit(idx, cpuc->running))
handled++;
if (!test_bit(idx, cpuc->active_mask))
continue;
}
event = cpuc->events[idx];


@ -36,22 +36,17 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define RLONG_ADDR(x) "m" (*(volatile long *) (x))
#define WBYTE_ADDR(x) "+m" (*(volatile char *) (x))
#define ADDR BITOP_ADDR(addr)
#define ADDR RLONG_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK_ADDR(nr, addr) WBYTE_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
/**
@ -79,7 +74,7 @@ set_bit(long nr, volatile unsigned long *addr)
: "memory");
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -94,7 +89,7 @@ set_bit(long nr, volatile unsigned long *addr)
*/
static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory");
asm volatile(__ASM_SIZE(bts) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@ -116,8 +111,7 @@ clear_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -137,7 +131,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad
static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr));
asm volatile(__ASM_SIZE(btr) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
@ -145,7 +139,7 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
bool negative;
asm volatile(LOCK_PREFIX "andb %2,%1"
CC_SET(s)
: CC_OUT(s) (negative), ADDR
: CC_OUT(s) (negative), WBYTE_ADDR(addr)
: "ir" ((char) ~(1 << nr)) : "memory");
return negative;
}
@ -161,13 +155,9 @@ static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile
* __clear_bit() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
@ -182,7 +172,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *
*/
static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr));
asm volatile(__ASM_SIZE(btc) " %1,%0" : : ADDR, "Ir" (nr) : "memory");
}
/**
@ -202,8 +192,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
: "iq" ((u8)CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
: : RLONG_ADDR(addr), "Ir" (nr) : "memory");
}
}
@ -249,8 +238,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *
asm(__ASM_SIZE(bts) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -290,8 +279,8 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long
asm volatile(__ASM_SIZE(btr) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr));
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -302,8 +291,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon
asm volatile(__ASM_SIZE(btc) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit), ADDR
: "Ir" (nr) : "memory");
: CC_OUT(c) (oldbit)
: ADDR, "Ir" (nr) : "memory");
return oldbit;
}
@ -335,7 +324,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l
asm volatile(__ASM_SIZE(bt) " %2,%1"
CC_SET(c)
: CC_OUT(c) (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
: "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
return oldbit;
}


@ -179,14 +179,7 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
* No 3D Now!
*/
#if (__GNUC__ >= 4)
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
#else
#define memcpy(t, f, n) \
(__builtin_constant_p((n)) \
? __constant_memcpy((t), (f), (n)) \
: __memcpy((t), (f), (n)))
#endif
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
@ -282,12 +275,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
{
int d0, d1;
#if __GNUC__ == 4 && __GNUC_MINOR__ == 0
/* Workaround for broken gcc 4.0 */
register unsigned long eax asm("%eax") = pattern;
#else
unsigned long eax = pattern;
#endif
switch (count % 4) {
case 0:
@ -321,15 +309,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
#define memset(s, c, count) \
(__builtin_constant_p(c) \
? __constant_c_x_memset((s), (0x01010101UL * (unsigned char)(c)), \
(count)) \
: __memset((s), (c), (count)))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET16


@ -32,21 +32,6 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
({ \
size_t __len = (len); \
void *__ret; \
if (__builtin_constant_p(len) && __len >= 64) \
__ret = __memcpy((dst), (src), __len); \
else \
__ret = __builtin_memcpy((dst), (src), __len); \
__ret; \
})
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);


@ -206,6 +206,9 @@ xen_single_call(unsigned int call,
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
return -EINVAL;
asm volatile(CALL_NOSPEC
: __HYPERCALL_5PARAM
: [thunk_target] "a" (&hypercall_page[call])


@ -6398,11 +6398,11 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
return ret;
}
static int get_num_contig_pages(int idx, struct page **inpages,
unsigned long npages)
static unsigned long get_num_contig_pages(unsigned long idx,
struct page **inpages, unsigned long npages)
{
unsigned long paddr, next_paddr;
int i = idx + 1, pages = 1;
unsigned long i = idx + 1, pages = 1;
/* find the number of contiguous pages starting from idx */
paddr = __sme_page_pa(inpages[idx]);
@ -6421,12 +6421,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_launch_update_data params;
struct sev_data_launch_update_data *data;
struct page **inpages;
int i, ret, pages;
int ret;
if (!sev_guest(kvm))
return -ENOTTY;

View file

@ -11582,6 +11582,17 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
return 0;
}
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
int msr;
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap[word] = ~0;
msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
}
}
/*
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
@ -11623,39 +11634,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
return false;
msr_bitmap_l1 = (unsigned long *)kmap(page);
if (nested_cpu_has_apic_reg_virt(vmcs12)) {
/*
* L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
* just lets the processor take the value from the virtual-APIC page;
* take those 256 bits directly from the L1 bitmap.
*/
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap_l0[word] = msr_bitmap_l1[word];
msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
}
} else {
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap_l0[word] = ~0;
msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
}
}
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_TASKPRI),
MSR_TYPE_W);
/*
* To keep the control flow simple, pay eight 8-byte writes (sixteen
* 4-byte writes on 32-bit systems) up front to enable intercepts for
* the x2APIC MSR range and selectively disable them below.
*/
enable_x2apic_msr_intercepts(msr_bitmap_l0);
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12)) {
/*
* L0 need not intercept reads for MSRs between 0x800
* and 0x8ff, it just lets the processor take the value
* from the virtual-APIC page; take those 256 bits
* directly from the L1 bitmap.
*/
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
unsigned word = msr / BITS_PER_LONG;
msr_bitmap_l0[word] = msr_bitmap_l1[word];
}
}
if (nested_cpu_has_vid(vmcs12)) {
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_EOI),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_SELF_IPI),
MSR_TYPE_W);
X2APIC_MSR(APIC_TASKPRI),
MSR_TYPE_R | MSR_TYPE_W);
if (nested_cpu_has_vid(vmcs12)) {
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_EOI),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap_l1, msr_bitmap_l0,
X2APIC_MSR(APIC_SELF_IPI),
MSR_TYPE_W);
}
}
if (spec_ctrl)
@ -12836,11 +12852,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
nested_cache_shadow_vmcs12(vcpu, vmcs12);
/*
* If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
* by event injection, halt vcpu.
* If we're entering a halted L2 vcpu and the L2 vcpu won't be
* awakened by event injection or by an NMI-window VM-exit or
* by an interrupt-window VM-exit, halt the vcpu.
*/
if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
!(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
!((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
(vmcs12->guest_rflags & X86_EFLAGS_IF))) {
vmx->nested.nested_run_pending = 0;
return kvm_vcpu_halt(vcpu);
}


@ -253,10 +253,14 @@ static int return_address_cb(struct stackframe *frame, void *data)
return 1;
}
/*
* level == 0 is for the return address from the caller of this function,
* not from this function itself.
*/
unsigned long return_address(unsigned level)
{
struct return_addr_data r = {
.skip = level + 1,
.skip = level,
};
walk_stackframe(stack_pointer(NULL), return_address_cb, &r);
return r.addr;


@ -1253,8 +1253,11 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
}
}
if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
if (!map_data)
__free_page(page);
break;
}
len -= bytes;
offset = 0;


@ -523,6 +523,10 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
status = acpi_ut_add_address_range(obj_desc->region.space_id,
obj_desc->region.address,
obj_desc->region.length, node);
/* Now the address and length are valid for this opregion */
obj_desc->region.flags |= AOPOBJ_DATA_VALID;


@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
ACPI_FUNCTION_TRACE(ev_enable_gpe);
/* Enable the requested GPE */
/* Clear the GPE status */
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status))
return_ACPI_STATUS(status);
/* Enable the requested GPE */
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}


@ -186,6 +186,10 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
}
}
if (obj_desc->common.type == ACPI_TYPE_REGION) {
acpi_ut_remove_address_range(obj_desc->region.space_id, node);
}
/* Clear the Node entry in all cases */
node->object = NULL;


@ -343,7 +343,7 @@ config XILINX_HWICAP
config R3964
tristate "Siemens R3964 line discipline"
depends on TTY
depends on TTY && BROKEN
---help---
This driver allows synchronous communication with devices using the
Siemens R3964 packet protocol. Unless you are dealing with special


@ -65,20 +65,15 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
/* Populate regmap */
for (clkid = 0; clkid < data->num_clks; clkid++)
/*
* Populate regmap and register all clks
*/
for (clkid = 0; clkid < data->num_clks; clkid++) {
data->clks[clkid]->map = regmap;
/* Register all clks */
for (clkid = 0; clkid < data->hw_data->num; clkid++) {
if (!data->hw_data->hws[clkid])
continue;
ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
if (ret) {
dev_err(dev, "Clock registration failed\n");
if (ret)
return ret;
}
}
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,


@ -1940,7 +1940,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
*/
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
atomic_dec(&mm->pincount);
atomic_dec_if_positive(&mm->pincount);
}
/**


@ -1389,8 +1389,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_runtime_pm_put(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
if (ret) {
if (vgpu_is_vm_unhealthy(ret))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
intel_vgpu_destroy_workload(workload);
return ERR_PTR(ret);
}


@ -51,6 +51,7 @@ static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
.load = udl_driver_load,
.unload = udl_driver_unload,
.release = udl_driver_release,
/* gem hooks */
.gem_free_object_unlocked = udl_gem_free_object,


@ -104,6 +104,7 @@ void udl_urb_completion(struct urb *urb);
int udl_driver_load(struct drm_device *dev, unsigned long flags);
void udl_driver_unload(struct drm_device *dev);
void udl_driver_release(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);


@ -378,6 +378,12 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
udl_modeset_cleanup(dev);
kfree(udl);
}
void udl_driver_release(struct drm_device *dev)
{
udl_modeset_cleanup(dev);
drm_dev_fini(dev);
kfree(dev);
}


@ -1755,6 +1755,7 @@ config SENSORS_VT8231
config SENSORS_W83773G
tristate "Nuvoton W83773G"
depends on I2C
select REGMAP_I2C
help
If you say yes here you get support for the Nuvoton W83773G hardware
monitoring chip.


@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
range2->logical_sector + range2->n_sectors > range2->logical_sector;
range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
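The corrected return statement is the standard half-open interval overlap test; the old second comparison checked range2 against itself, which is trivially true for any non-empty range, so the predicate degenerated to a single comparison. A self-contained sketch of the intended check with generic names (hypothetical helper, not part of dm-integrity):

#include <stdbool.h>

typedef unsigned long long sector_t;	/* stand-in for the kernel type */

/* [a, a + a_len) and [b, b + b_len) overlap iff each interval starts
 * before the other one ends. */
static bool sectors_overlap(sector_t a, sector_t a_len,
			    sector_t b, sector_t b_len)
{
	return a < b + b_len && a + a_len > b;
}

For example, sectors_overlap(0, 8, 8, 8) is false (the ranges merely touch), while sectors_overlap(0, 9, 8, 8) is true.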
@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
if (!ranges_overlap(range, last_range))
break;
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
journal_watermark = val;
else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
sync_msec = val;
else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
if (ic->meta_dev) {
dm_put_device(ti, ic->meta_dev);
ic->meta_dev = NULL;
@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)


@ -1883,6 +1883,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
return true;
}
static int device_requires_stable_pages(struct dm_target *ti,
struct dm_dev *dev, sector_t start,
sector_t len, void *data)
{
struct request_queue *q = bdev_get_queue(dev->bdev);
return q && bdi_cap_stable_pages_required(q->backing_dev_info);
}
/*
* If any underlying device requires stable pages, a table must require
* them as well. Only targets that support iterate_devices are considered:
* don't want error, zero, etc to require stable pages.
*/
static bool dm_table_requires_stable_pages(struct dm_table *t)
{
struct dm_target *ti;
unsigned i;
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
return true;
}
return false;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@ -1945,6 +1975,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
dm_table_verify_integrity(t);
/*
* Some devices don't use blk_integrity but still want stable pages
* because they do their own checksumming.
*/
if (dm_table_requires_stable_pages(t))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
else
q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
/*
* Determine whether or not this queue's I/O timings contribute
* to the entropy pool, Only request-based targets use this.


@ -1007,15 +1007,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
return -EINVAL;
}
/*
* BIO based queue uses its own splitting. When multipage bvecs
* is switched on, size of the incoming bio may be too big to
* be handled in some targets, such as crypt.
*
* When these targets are ready for the big bio, we can remove
* the limit.
*/
ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
ti->max_io_len = (uint32_t) len;
return 0;
}


@ -1092,6 +1092,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@ -1544,15 +1546,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
cons = rxcmp->rx_cmp_opaque;
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
rx_buf = &rxr->rx_buf_ring[cons];
data = rx_buf->data;
data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@ -1569,11 +1573,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
bnxt_sched_reset(bp, rxr);
}
goto next_rx;
}


@ -1888,6 +1888,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@ -4569,7 +4570,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
init_completion(&adapter->init_done);
reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@ -4624,7 +4625,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@ -4703,6 +4703,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
init_completion(&adapter->init_done);
adapter->resetting = false;
adapter->mac_change_pending = false;


@ -122,7 +122,9 @@ static int port_set_buffer(struct mlx5e_priv *priv,
return err;
}
/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
* minimum speed value is 40Gbps
*/
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
if (err) {
mlx5_core_warn(priv->mdev, "cannot get port speed\n");
return 0;
}
if (err)
speed = SPEED_40000;
speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
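A worked illustration of how the integer expression tracks the formula in the comment (example numbers only, not taken from the patch): with cable_len = 7 m, a 40 Gb/s link (speed = 40000, reported in Mb/s) and mtu = 1500 B, it evaluates to (301 + 216*7/100) * 40000/1000 + 272*1500/100 = 316 * 40 + 4080 = 16720 bytes, which matches (301 + 2.16*7) * 40 + 2.72*1500 ≈ 16725 from the "(301 + 2.16 * len) * speed + 2.72 * MTU" comment above.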
@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
u32 xoff, unsigned int mtu)
u32 xoff, unsigned int max_mtu)
{
int i;
@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
(xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
(xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
port_buffer->buffer[i].xon =
port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy()
* mtu: device's MTU
* max_mtu: netdev's max_mtu
* pfc_en: <input> current pfc configuration
* buffer: <input> current prio to buffer mapping
* xoff: <input> xoff value
@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* Return 0 if no error.
* Set change to true if buffer configuration is modified.
*/
static int update_buffer_lossy(unsigned int mtu,
static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
err = update_xoff_threshold(port_buffer, xoff, mtu);
err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
err = update_xoff_threshold(&port_buffer, xoff, mtu);
err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
&port_buffer, &update_buffer);
err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
err = update_xoff_threshold(&port_buffer, xoff, mtu);
err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
err = update_xoff_threshold(&port_buffer, xoff, mtu);
err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}


@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
int err = -ENOMEM;
int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
if (!in) {
err = -ENOMEM;
goto out;
}
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@ -168,6 +176,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}


@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
void *cmd;
int ret;
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
rcu_read_unlock();
if (!flow) {
WARN_ONCE(1, "Received NULL pointer for handle\n");
return -EINVAL;
}
buf = kzalloc(size, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
cmd = (buf + 1);
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
rcu_read_unlock();
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
if (ret < 0)
kfree(buf);
return ret;
}


@ -162,26 +162,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
.mr_cache[16] = {
.size = 8,
.limit = 4
},
.mr_cache[17] = {
.size = 8,
.limit = 4
},
.mr_cache[18] = {
.size = 8,
.limit = 4
},
.mr_cache[19] = {
.size = 4,
.limit = 2
},
.mr_cache[20] = {
.size = 4,
.limit = 2
},
},
};


@ -225,7 +225,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
return ret;
return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@ -329,6 +329,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
netdev->priv_flags |= IFF_DISABLE_NETPOLL;
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_HW_TC;


@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@ -5417,7 +5418,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
RTL_W16(tp, IntrMitigate, 0x5151);
RTL_W16(tp, IntrMitigate, 0x5100);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@ -7324,6 +7325,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
/* Disable ASPM completely as it causes random devices to stop working,
* as well as full system hangs, for some PCIe device users.
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {


@ -970,6 +970,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
bool tx_disable; /* if true, do not wake up queue again */
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;


@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@ -716,7 +717,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
if (netif_tx_queue_stopped(txq) &&
if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@ -871,7 +872,8 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
if (atomic_read(&nvchan->queue_sends) < 1) {
if (atomic_read(&nvchan->queue_sends) < 1 &&
!net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;


@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
static void netvsc_tx_enable(struct netvsc_device *nvscdev,
struct net_device *ndev)
{
nvscdev->tx_disable = false;
virt_wmb(); /* ensure queue wake up mechanism is on */
netif_tx_wake_all_queues(ndev);
}
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
netif_tx_wake_all_queues(net);
netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
static void netvsc_tx_disable(struct netvsc_device *nvscdev,
struct net_device *ndev)
{
if (nvscdev) {
nvscdev->tx_disable = true;
virt_wmb(); /* ensure txq will not wake up after stop */
}
netif_tx_disable(ndev);
}
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
netif_tx_disable(net);
netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@ -918,7 +938,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
netif_tx_disable(ndev);
netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@ -1899,7 +1919,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
netif_tx_wake_all_queues(net);
netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@ -1909,7 +1929,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netif_tx_stop_all_queues(net);
netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
netif_tx_stop_all_queues(net);
netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);


@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */


@ -117,6 +117,10 @@ static void remove_board(struct slot *p_slot)
* removed from the slot/adapter.
*/
msleep(1000);
/* Ignore link or presence changes caused by power off */
atomic_and(~(PCI_EXP_SLTSTA_DLLSC | PCI_EXP_SLTSTA_PDC),
&ctrl->pending_events);
}
/* turn off Green LED */


@ -3852,6 +3852,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
quirk_dma_func1_alias);


@ -461,4 +461,28 @@ config OKL4_VTTY_CONSOLE
default y
help
Console support for OKL4 Microvisor virtual ttys.
config LDISC_AUTOLOAD
bool "Automatically load TTY Line Disciplines"
default y
help
Historically the kernel has always automatically loaded any
line discipline that is in a kernel module when a user asks
for it to be loaded with the TIOCSETD ioctl, or through other
means. This is not always the best thing to do on systems
where you know you will not be using some of the more
"ancient" line disciplines, so prevent the kernel from doing
this unless the request is coming from a process with the
CAP_SYS_MODULE permissions.
Say 'Y' here if you trust your userspace users to do the right
thing, or if you have only provided the line disciplines that
you know you will be using, or if you wish to continue to use
the traditional method of on-demand loading of these modules
by any user.
This functionality can be changed at runtime with the
dev.tty.ldisc_autoload sysctl, this configuration option will
only set the default value of this functionality.
endif # TTY
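For reference, a minimal userspace sketch (not part of this merge) that exercises the new knob. The /proc/sys/dev/tty/ldisc_autoload path is inferred from the dev/tty/ldisc_autoload ctl_table registered by tty_sysctl_init() further down; writing it requires root, which stands in for CAP_SYS_MODULE here.

/*
 * Illustration only: read and optionally clear dev.tty.ldisc_autoload
 * via procfs. Path assumed from the sysctl table added in this series.
 */
#include <stdio.h>

#define LDISC_AUTOLOAD_SYSCTL "/proc/sys/dev/tty/ldisc_autoload"

static int read_ldisc_autoload(void)
{
	FILE *f = fopen(LDISC_AUTOLOAD_SYSCTL, "r");
	int val = -1;

	if (!f)
		return -1;		/* kernel without this patch */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

static int write_ldisc_autoload(int enable)
{
	FILE *f = fopen(LDISC_AUTOLOAD_SYSCTL, "w");

	if (!f)
		return -1;		/* not root, or sysctl missing */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	printf("ldisc_autoload = %d\n", read_ldisc_autoload());
	/* write_ldisc_autoload(0);  restrict autoloading to CAP_SYS_MODULE */
	return 0;
}

With the value set to 0, a TIOCSETD request for a not-yet-loaded line discipline fails with EPERM unless the caller has CAP_SYS_MODULE, matching the tty_ldisc_get() check added below.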


@ -512,6 +512,8 @@ static const struct file_operations hung_up_tty_fops = {
static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
extern void tty_sysctl_init(void);
/**
* tty_wakeup - request more data
* @tty: terminal
@ -3340,6 +3342,7 @@ void console_sysfs_notify(void)
*/
int __init tty_init(void)
{
tty_sysctl_init();
cdev_init(&tty_cdev, &tty_fops);
if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) ||
register_chrdev_region(MKDEV(TTYAUX_MAJOR, 0), 1, "/dev/tty") < 0)


@ -156,6 +156,13 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* takes tty_ldiscs_lock to guard against ldisc races
*/
#if defined(CONFIG_LDISC_AUTOLOAD)
#define INITIAL_AUTOLOAD_STATE 1
#else
#define INITIAL_AUTOLOAD_STATE 0
#endif
static int tty_ldisc_autoload = INITIAL_AUTOLOAD_STATE;
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@ -170,6 +177,8 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
*/
ldops = get_ldops(disc);
if (IS_ERR(ldops)) {
if (!capable(CAP_SYS_MODULE) && !tty_ldisc_autoload)
return ERR_PTR(-EPERM);
request_module("tty-ldisc-%d", disc);
ldops = get_ldops(disc);
if (IS_ERR(ldops))
@ -835,3 +844,41 @@ void tty_ldisc_deinit(struct tty_struct *tty)
tty_ldisc_put(tty->ldisc);
tty->ldisc = NULL;
}
static int zero;
static int one = 1;
static struct ctl_table tty_table[] = {
{
.procname = "ldisc_autoload",
.data = &tty_ldisc_autoload,
.maxlen = sizeof(tty_ldisc_autoload),
.mode = 0644,
.proc_handler = proc_dointvec,
.extra1 = &zero,
.extra2 = &one,
},
{ }
};
static struct ctl_table tty_dir_table[] = {
{
.procname = "tty",
.mode = 0555,
.child = tty_table,
},
{ }
};
static struct ctl_table tty_root_table[] = {
{
.procname = "dev",
.mode = 0555,
.child = tty_dir_table,
},
{ }
};
void tty_sysctl_init(void)
{
register_sysctl_table(tty_root_table);
}


@ -1086,6 +1086,8 @@ struct virtqueue *vring_create_virtqueue(
GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
if (queue)
break;
if (!may_reduce_num)
return NULL;
}
if (!num)


@ -296,10 +296,10 @@ static void blkdev_bio_end_io(struct bio *bio)
struct blkdev_dio *dio = bio->bi_private;
bool should_dirty = dio->should_dirty;
if (dio->multi_bio && !atomic_dec_and_test(&dio->ref)) {
if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_status = bio->bi_status;
} else {
if (bio->bi_status && !dio->bio.bi_status)
dio->bio.bi_status = bio->bi_status;
if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
if (!dio->is_sync) {
struct kiocb *iocb = dio->iocb;
ssize_t ret;


@ -496,6 +496,16 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/*
* If the fs is mounted with nologreplay, which requires it to be
* mounted in RO mode as well, we can not allow discard on free space
* inside block groups, because log trees refer to extents that are not
* pinned in a block group's free space cache (pinning the extents is
* precisely the first phase of replaying a log tree).
*/
if (btrfs_test_opt(fs_info, NOLOGREPLAY))
return -EROFS;
rcu_read_lock();
list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
dev_list) {
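For illustration only (not from the patch): a small userspace program issuing the FITRIM ioctl. On a btrfs filesystem mounted with ro,nologreplay, the check added above makes this fail with EROFS instead of trimming extents that an unreplayed log tree may still reference.

/*
 * Illustration only: trim the whole filesystem under the given
 * mountpoint. Expect EROFS when the fs is mounted with nologreplay.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range = {
		.start  = 0,
		.len    = (__u64)-1,	/* whole filesystem */
		.minlen = 0,
	};
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, FITRIM, &range) < 0)
		fprintf(stderr, "FITRIM: %s\n", strerror(errno));	/* EROFS with nologreplay */
	else
		printf("trimmed %llu bytes\n", (unsigned long long)range.len);

	close(fd);
	return 0;
}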


@ -366,11 +366,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
static int prop_compression_validate(const char *value, size_t len)
{
if (!strncmp("lzo", value, len))
if (!strncmp("lzo", value, 3))
return 0;
else if (!strncmp("zlib", value, len))
else if (!strncmp("zlib", value, 4))
return 0;
else if (!strncmp("zstd", value, len))
else if (!strncmp("zstd", value, 4))
return 0;
return -EINVAL;
@ -396,7 +396,7 @@ static int prop_compression_apply(struct inode *inode,
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
} else if (!strncmp("zlib", value, 4)) {
type = BTRFS_COMPRESS_ZLIB;
} else if (!strncmp("zstd", value, len)) {
} else if (!strncmp("zstd", value, 4)) {
type = BTRFS_COMPRESS_ZSTD;
btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
} else {


@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
#define __constant_bitrev32(x) \
({ \
u32 __x = x; \
__x = (__x >> 16) | (__x << 16); \
__x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
__x; \
u32 ___x = x; \
___x = (___x >> 16) | (___x << 16); \
___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
___x; \
})
#define __constant_bitrev16(x) \
({ \
u16 __x = x; \
__x = (__x >> 8) | (__x << 8); \
__x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
__x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
__x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
__x; \
u16 ___x = x; \
___x = (___x >> 8) | (___x << 8); \
___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
___x; \
})
#define __constant_bitrev8x4(x) \
({ \
u32 __x = x; \
__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
__x; \
u32 ___x = x; \
___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
___x; \
})
#define __constant_bitrev8(x) \
({ \
u8 __x = x; \
__x = (__x >> 4) | (__x << 4); \
__x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
__x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
__x; \
u8 ___x = x; \
___x = (___x >> 4) | (___x << 4); \
___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
___x; \
})
#define bitrev32(x) \


@ -559,7 +559,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
/* idx can be of type enum memcg_stat_item or node_stat_item */
/*
* idx can be of type enum memcg_stat_item or node_stat_item.
* Keep in sync with memcg_exact_page_state().
*/
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{


@ -776,6 +776,8 @@ struct mlx5_pagefault {
};
struct mlx5_td {
/* protects tirs list changes while tirs refresh */
struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};


@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
enum grep_conntrack {
GRE_CT_UNREPLIED,
GRE_CT_REPLIED,
GRE_CT_MAX
};
struct netns_proto_gre {
struct nf_proto_net nf;
rwlock_t keymap_lock;
struct list_head keymap_list;
unsigned int gre_timeouts[GRE_CT_MAX];
};
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);


@ -143,6 +143,9 @@ extern void * memscan(void *,int,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_BCMP
extern int bcmp(const void *,const void *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif


@ -63,7 +63,7 @@ struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
* expected. The caller should query virtqueue_get_ring_size to learn
* expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,


@ -651,7 +651,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
/*
* Functions provided by ip_sockglue.c


@ -57,6 +57,7 @@ struct net {
*/
spinlock_t rules_mod_lock;
u32 hash_mix;
atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */


@ -2,16 +2,10 @@
#ifndef __NET_NS_HASH_H__
#define __NET_NS_HASH_H__
#include <asm/cache.h>
struct net;
#include <net/net_namespace.h>
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
#else
return 0;
#endif
return net->hash_mix;
}
#endif


@ -554,6 +554,7 @@ int __init early_irq_init(void)
alloc_masks(&desc[i], node);
raw_spin_lock_init(&desc[i].lock);
lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
mutex_init(&desc[i].request_mutex);
desc_set_defaults(i, &desc[i], node, NULL, NULL);
}
return arch_early_irq_init();


@ -8842,10 +8842,10 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
if (cfs_rq->last_h_load_update == now)
return;
cfs_rq->h_load_next = NULL;
WRITE_ONCE(cfs_rq->h_load_next, NULL);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
cfs_rq->h_load_next = se;
WRITE_ONCE(cfs_rq->h_load_next, se);
if (cfs_rq->last_h_load_update == now)
break;
}
@ -8855,7 +8855,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
cfs_rq->last_h_load_update = now;
}
while ((se = cfs_rq->h_load_next) != NULL) {
while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) {
load = cfs_rq->h_load;
load = div64_ul(load * se->avg.load_avg,
cfs_rq_load_avg(cfs_rq) + 1);
@ -9050,8 +9050,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
mcc->cpu = cpu;
#ifdef CONFIG_SCHED_DEBUG
raw_spin_unlock_irqrestore(&mcc->lock, flags);
printk_deferred("CPU%d: update max cpu_capacity %lu\n",
cpu, capacity);
printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
cpu, capacity);
goto skip_unlock;
#endif
}


@ -597,7 +597,7 @@ static ktime_t alarm_timer_remaining(struct k_itimer *timr, ktime_t now)
{
struct alarm *alarm = &timr->it.alarm.alarmtimer;
return ktime_sub(now, alarm->node.expires);
return ktime_sub(alarm->node.expires, now);
}
/**


@ -865,6 +865,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
EXPORT_SYMBOL(memcmp);
#endif
#ifndef __HAVE_ARCH_BCMP
/**
* bcmp - returns 0 if and only if the buffers have identical contents.
* @a: pointer to first buffer.
* @b: pointer to second buffer.
* @len: size of buffers.
*
* The sign or magnitude of a non-zero return value has no particular
* meaning, and architectures may implement their own more efficient bcmp(). So
* while this particular implementation is a simple (tail) call to memcmp, do
* not rely on anything but whether the return value is zero or non-zero.
*/
#undef bcmp
int bcmp(const void *a, const void *b, size_t len)
{
return memcmp(a, b, len);
}
EXPORT_SYMBOL(bcmp);
#endif
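A userspace illustration of the comment above, assuming the glibc bcmp() from <strings.h> rather than the kernel symbol being added: only the zero/non-zero distinction of bcmp() is defined, whereas memcmp() additionally orders the buffers by the sign of its result.

/* Illustration only: bcmp() equality check vs memcmp() ordering. */
#include <stdio.h>
#include <string.h>
#include <strings.h>		/* bcmp() in POSIX/glibc */

int main(void)
{
	const char a[] = "abc";
	const char b[] = "abd";

	/* memcmp(): the sign tells you a sorts before b here */
	printf("memcmp: %d\n", memcmp(a, b, 3));

	/* bcmp(): only "zero" vs "non-zero" carries meaning */
	printf("bcmp (different): %s\n", bcmp(a, b, 3) ? "non-zero" : "zero");
	printf("bcmp (identical): %s\n", bcmp(a, a, 3) ? "non-zero" : "zero");

	return 0;
}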
#ifndef __HAVE_ARCH_MEMSCAN
/**
* memscan - Find a character in an area of memory.


@ -734,6 +734,21 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
if (!pmd_none(*pmd)) {
if (write) {
if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_huge_zero_pmd(*pmd));
goto out_unlock;
}
entry = pmd_mkyoung(*pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, addr, pmd, entry, 1))
update_mmu_cache_pmd(vma, addr, pmd);
}
goto out_unlock;
}
entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pmd_mkdevmap(entry);
@ -745,11 +760,16 @@ static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
if (pgtable) {
pgtable_trans_huge_deposit(mm, pmd, pgtable);
mm_inc_nr_ptes(mm);
pgtable = NULL;
}
set_pmd_at(mm, addr, pmd, entry);
update_mmu_cache_pmd(vma, addr, pmd);
out_unlock:
spin_unlock(ptl);
if (pgtable)
pte_free(mm, pgtable);
}
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
@ -800,6 +820,20 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
spinlock_t *ptl;
ptl = pud_lock(mm, pud);
if (!pud_none(*pud)) {
if (write) {
if (pud_pfn(*pud) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_huge_zero_pud(*pud));
goto out_unlock;
}
entry = pud_mkyoung(*pud);
entry = maybe_pud_mkwrite(pud_mkdirty(entry), vma);
if (pudp_set_access_flags(vma, addr, pud, entry, 1))
update_mmu_cache_pud(vma, addr, pud);
}
goto out_unlock;
}
entry = pud_mkhuge(pfn_t_pud(pfn, prot));
if (pfn_t_devmap(pfn))
entry = pud_mkdevmap(entry);
@ -809,6 +843,8 @@ static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
}
set_pud_at(mm, addr, pud, entry);
update_mmu_cache_pud(vma, addr, pud);
out_unlock:
spin_unlock(ptl);
}


@ -3897,6 +3897,22 @@ struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
return &memcg->cgwb_domain;
}
/*
* idx can be of type enum memcg_stat_item or node_stat_item.
* Keep in sync with memcg_exact_page().
*/
static unsigned long memcg_exact_page_state(struct mem_cgroup *memcg, int idx)
{
long x = atomic_long_read(&memcg->stat[idx]);
int cpu;
for_each_online_cpu(cpu)
x += per_cpu_ptr(memcg->stat_cpu, cpu)->count[idx];
if (x < 0)
x = 0;
return x;
}
/**
* mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
* @wb: bdi_writeback in question
@ -3922,10 +3938,10 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
*pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
*pdirty = memcg_exact_page_state(memcg, NR_FILE_DIRTY);
/* this should eventually include NR_UNSTABLE_NFS */
*pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
*pwriteback = memcg_exact_page_state(memcg, NR_WRITEBACK);
*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
(1 << LRU_ACTIVE_FILE));
*pheadroom = PAGE_COUNTER_MAX;


@ -4965,8 +4965,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
if (pt_prev->list_func != NULL)
pt_prev->list_func(head, pt_prev, orig_dev);
else
list_for_each_entry_safe(skb, next, head, list)
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
}
static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)


@ -1863,11 +1863,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
WARN_ON_ONCE(!ret);
gstrings.len = ret;
data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
if (gstrings.len && !data)
return -ENOMEM;
__ethtool_get_strings(dev, gstrings.string_set, data);
if (gstrings.len) {
data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
if (!data)
return -ENOMEM;
__ethtool_get_strings(dev, gstrings.string_set, data);
} else {
data = NULL;
}
ret = -EFAULT;
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@ -1963,11 +1968,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
data = vzalloc(array_size(n_stats, sizeof(u64)));
if (n_stats && !data)
return -ENOMEM;
ops->get_ethtool_stats(dev, &stats, data);
if (n_stats) {
data = vzalloc(array_size(n_stats, sizeof(u64)));
if (!data)
return -ENOMEM;
ops->get_ethtool_stats(dev, &stats, data);
} else {
data = NULL;
}
ret = -EFAULT;
if (copy_to_user(useraddr, &stats, sizeof(stats)))
@ -2007,16 +2016,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return -EFAULT;
stats.n_stats = n_stats;
data = vzalloc(array_size(n_stats, sizeof(u64)));
if (n_stats && !data)
return -ENOMEM;
if (dev->phydev && !ops->get_ethtool_phy_stats) {
ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
if (ret < 0)
return ret;
if (n_stats) {
data = vzalloc(array_size(n_stats, sizeof(u64)));
if (!data)
return -ENOMEM;
if (dev->phydev && !ops->get_ethtool_phy_stats) {
ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
if (ret < 0)
goto out;
} else {
ops->get_ethtool_phy_stats(dev, &stats, data);
}
} else {
ops->get_ethtool_phy_stats(dev, &stats, data);
data = NULL;
}
ret = -EFAULT;


@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
refcount_set(&net->count, 1);
refcount_set(&net->passive, 1);
get_random_bytes(&net->hash_mix, sizeof(u32));
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);


@ -3832,7 +3832,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
unsigned int delta_truesize;
struct sk_buff *lp;
if (unlikely(p->len + len >= 65536))
if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
return -E2BIG;
lp = NAPI_GRO_CB(p)->last;


@ -260,7 +260,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
struct net *net = dev_net(skb->dev);
struct metadata_dst *tun_dst = NULL;
struct erspan_base_hdr *ershdr;
struct erspan_metadata *pkt_md;
struct ip_tunnel_net *itn;
struct ip_tunnel *tunnel;
const struct iphdr *iph;
@ -283,9 +282,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
pkt_md = (struct erspan_metadata *)(ershdr + 1);
if (__iptunnel_pull_header(skb,
len,
htons(ETH_P_TEB),
@ -293,8 +289,9 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
goto drop;
if (tunnel->collect_md) {
struct erspan_metadata *pkt_md, *md;
struct ip_tunnel_info *info;
struct erspan_metadata *md;
unsigned char *gh;
__be64 tun_id;
__be16 flags;
@ -307,6 +304,14 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (!tun_dst)
return PACKET_REJECT;
/* skb can be uncloned in __iptunnel_pull_header, so
* old pkt_md is no longer valid and we need to reset
* it
*/
gh = skb_network_header(skb) +
skb_network_header_len(skb);
pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
sizeof(*ershdr));
md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
md->version = ver;
md2 = &md->u.md2;


@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
ip_local_deliver_finish);
}
static inline bool ip_rcv_options(struct sk_buff *skb)
static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt;
const struct iphdr *iph;
struct net_device *dev = skb->dev;
/* It looks as overkill, because not all
IP options require packet mangling.
@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
}
}
if (ip_options_rcv_srr(skb))
if (ip_options_rcv_srr(skb, dev))
goto drop;
}
@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
}
#endif
if (iph->ihl > 5 && ip_rcv_options(skb))
if (iph->ihl > 5 && ip_rcv_options(skb, dev))
goto drop;
rt = skb_rtable(skb);


@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
}
}
int ip_options_rcv_srr(struct sk_buff *skb)
int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
{
struct ip_options *opt = &(IPCB(skb)->opt);
int srrspace, srrptr;
@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
orefdst = skb->_skb_refdst;
skb_dst_set(skb, NULL);
err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev);
err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
rt2 = skb_rtable(skb);
if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
skb_dst_drop(skb);


@ -66,11 +66,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
"parameter for clamping alpha on loss");
static struct tcp_congestion_ops dctcp_reno;
static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@ -211,21 +206,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
}
}
static void dctcp_react_to_loss(struct sock *sk)
{
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
ca->loss_cwnd = tp->snd_cwnd;
tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}
static void dctcp_state(struct sock *sk, u8 new_state)
{
if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
struct dctcp *ca = inet_csk_ca(sk);
/* If this extension is enabled, we clamp dctcp_alpha to
* max on packet loss; the motivation is that dctcp_alpha
* is an indicator to the extend of congestion and packet
* loss is an indicator of extreme congestion; setting
* this in practice turned out to be beneficial, and
* effectively assumes total congestion which reduces the
* window by half.
*/
ca->dctcp_alpha = DCTCP_MAX_ALPHA;
}
if (new_state == TCP_CA_Recovery &&
new_state != inet_csk(sk)->icsk_ca_state)
dctcp_react_to_loss(sk);
/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
* one loss-adjustment per RTT.
*/
}
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@ -237,6 +234,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
case CA_EVENT_ECN_NO_CE:
dctcp_ce_state_1_to_0(sk);
break;
case CA_EVENT_LOSS:
dctcp_react_to_loss(sk);
break;
default:
/* Don't care for the rest. */
break;


@ -2494,7 +2494,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
{
int cpu;
module_put(net->ipv4.tcp_congestion_control->owner);
if (net->ipv4.tcp_congestion_control)
module_put(net->ipv4.tcp_congestion_control->owner);
for_each_possible_cpu(cpu)
inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));


@ -540,11 +540,10 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
return PACKET_REJECT;
}
static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
struct tnl_ptk_info *tpi)
static int ip6erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
int gre_hdr_len)
{
struct erspan_base_hdr *ershdr;
struct erspan_metadata *pkt_md;
const struct ipv6hdr *ipv6h;
struct erspan_md2 *md2;
struct ip6_tnl *tunnel;
@ -563,18 +562,16 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
if (unlikely(!pskb_may_pull(skb, len)))
return PACKET_REJECT;
ershdr = (struct erspan_base_hdr *)skb->data;
pkt_md = (struct erspan_metadata *)(ershdr + 1);
if (__iptunnel_pull_header(skb, len,
htons(ETH_P_TEB),
false, false) < 0)
return PACKET_REJECT;
if (tunnel->parms.collect_md) {
struct erspan_metadata *pkt_md, *md;
struct metadata_dst *tun_dst;
struct ip_tunnel_info *info;
struct erspan_metadata *md;
unsigned char *gh;
__be64 tun_id;
__be16 flags;
@ -587,6 +584,14 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len,
if (!tun_dst)
return PACKET_REJECT;
/* skb can be uncloned in __iptunnel_pull_header, so
* old pkt_md is no longer valid and we need to reset
* it
*/
gh = skb_network_header(skb) +
skb_network_header_len(skb);
pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
sizeof(*ershdr));
info = &tun_dst->u.tun_info;
md = ip_tunnel_info_opts(info);
md->version = ver;
@ -623,7 +628,7 @@ static int gre_rcv(struct sk_buff *skb)
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
tpi.proto == htons(ETH_P_ERSPAN2))) {
if (ip6erspan_rcv(skb, hdr_len, &tpi) == PACKET_RCVD)
if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
return 0;
goto out;
}


@ -587,7 +587,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
unsigned int mtu, hlen, left, len, nexthdr_offset;
int hroom, troom;
__be32 frag_id;
int ptr, offset = 0, err = 0;
@ -598,6 +598,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
goto fail;
hlen = err;
nexthdr = *prevhdr;
nexthdr_offset = prevhdr - skb_network_header(skb);
mtu = ip6_skb_dst_mtu(skb);
@ -632,6 +633,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
(err = skb_checksum_help(skb)))
goto fail;
prevhdr = skb_network_header(skb) + nexthdr_offset;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_has_frag_list(skb)) {
unsigned int first_len = skb_pagelen(skb);


@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
eiph->daddr, eiph->saddr, 0, 0,
IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
if (!IS_ERR(rt))
ip_rt_put(rt);
goto out;
@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
} else {
if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
skb2->dev) ||
skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
goto out;
}


@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
!net_eq(tunnel->net, dev_net(tunnel->dev))))
goto out;
/* skb can be uncloned in iptunnel_pull_header, so
* old iph is no longer valid
*/
iph = (const struct iphdr *)skb_mac_header(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)


@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
if (err)
goto fail;
err = sock_register(&kcm_family_ops);
if (err)
goto sock_register_fail;
err = register_pernet_device(&kcm_net_ops);
if (err)
goto net_ops_fail;
err = sock_register(&kcm_family_ops);
if (err)
goto sock_register_fail;
err = kcm_proc_init();
if (err)
goto proc_init_fail;
@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
return 0;
proc_init_fail:
unregister_pernet_device(&kcm_net_ops);
net_ops_fail:
sock_unregister(PF_KCM);
sock_register_fail:
unregister_pernet_device(&kcm_net_ops);
net_ops_fail:
proto_unregister(&kcm_proto);
fail:
@ -2090,8 +2090,8 @@ static int __init kcm_init(void)
static void __exit kcm_exit(void)
{
kcm_proc_exit();
unregister_pernet_device(&kcm_net_ops);
sock_unregister(PF_KCM);
unregister_pernet_device(&kcm_net_ops);
proto_unregister(&kcm_proto);
destroy_workqueue(kcm_wq);


@ -43,24 +43,12 @@
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
enum grep_conntrack {
GRE_CT_UNREPLIED,
GRE_CT_REPLIED,
GRE_CT_MAX
};
static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_UNREPLIED] = 30*HZ,
[GRE_CT_REPLIED] = 180*HZ,
};
static unsigned int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
struct nf_proto_net nf;
rwlock_t keymap_lock;
struct list_head keymap_list;
unsigned int gre_timeouts[GRE_CT_MAX];
};
static inline struct netns_proto_gre *gre_pernet(struct net *net)
{
@ -408,6 +396,8 @@ static int __init nf_ct_proto_gre_init(void)
{
int ret;
BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
ret = register_pernet_subsys(&proto_gre_net_ops);
if (ret < 0)
goto out_pernet;


@ -392,7 +392,8 @@ static int cttimeout_default_set(struct net *net, struct sock *ctnl,
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event,
const struct nf_conntrack_l4proto *l4proto)
const struct nf_conntrack_l4proto *l4proto,
const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@ -421,7 +422,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
if (!nest_parms)
goto nla_put_failure;
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
@ -444,6 +445,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
unsigned int *timeouts = NULL;
struct sk_buff *skb2;
int ret, err;
__u16 l3num;
@ -456,12 +458,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find_get(l3num, l4num);
/* This protocol is not supported, skip. */
if (l4proto->l4proto != l4num) {
err = -EOPNOTSUPP;
err = -EOPNOTSUPP;
if (l4proto->l4proto != l4num)
goto err;
switch (l4proto->l4proto) {
case IPPROTO_ICMP:
timeouts = &net->ct.nf_ct_proto.icmp.timeout;
break;
case IPPROTO_TCP:
timeouts = net->ct.nf_ct_proto.tcp.timeouts;
break;
case IPPROTO_UDP: /* fallthrough */
case IPPROTO_UDPLITE:
timeouts = net->ct.nf_ct_proto.udp.timeouts;
break;
case IPPROTO_DCCP:
#ifdef CONFIG_NF_CT_PROTO_DCCP
timeouts = net->ct.nf_ct_proto.dccp.dccp_timeout;
#endif
break;
case IPPROTO_ICMPV6:
timeouts = &net->ct.nf_ct_proto.icmpv6.timeout;
break;
case IPPROTO_SCTP:
#ifdef CONFIG_NF_CT_PROTO_SCTP
timeouts = net->ct.nf_ct_proto.sctp.timeouts;
#endif
break;
case IPPROTO_GRE:
#ifdef CONFIG_NF_CT_PROTO_GRE
if (l4proto->net_id) {
struct netns_proto_gre *net_gre;
net_gre = net_generic(net, *l4proto->net_id);
timeouts = net_gre->gre_timeouts;
}
#endif
break;
case 255:
timeouts = &net->ct.nf_ct_proto.generic.timeout;
break;
default:
WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
break;
}
if (!timeouts)
goto err;
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
err = -ENOMEM;
@ -472,7 +517,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
l4proto);
l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
err = -ENOMEM;


@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
struct sw_flow_actions *acts;
int new_acts_size;
int req_size = NLA_ALIGN(attr_len);
size_t req_size = NLA_ALIGN(attr_len);
int next_offset = offsetof(struct sw_flow_actions, actions) +
(*sfa)->actions_len;
if (req_size <= (ksize(*sfa) - next_offset))
goto out;
new_acts_size = ksize(*sfa) * 2;
new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {


@ -600,7 +600,7 @@ static void rds_tcp_kill_sock(struct net *net)
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
if (net != c_net || !tc->t_sock)
if (net != c_net)
continue;
if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
list_move_tail(&tc->t_tcp_node, &tmp_list);


@ -43,8 +43,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
struct psample_group *psample_group;
u32 psample_group_num, rate;
struct tc_sample *parm;
u32 psample_group_num;
struct tcf_sample *s;
bool exists = false;
int ret, err;
@ -80,6 +80,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return -EEXIST;
}
rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
if (!rate) {
NL_SET_ERR_MSG(extack, "invalid sample rate");
tcf_idr_release(*a, bind);
return -EINVAL;
}
psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
psample_group = psample_group_get(net, psample_group_num);
if (!psample_group) {
@ -91,7 +97,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&s->tcf_lock);
s->tcf_action = parm->action;
s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
s->rate = rate;
s->psample_group_num = psample_group_num;
RCU_INIT_POINTER(s->psample_group, psample_group);


@ -126,6 +126,11 @@ static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
struct cls_mall_head *head = rtnl_dereference(tp->root);
if (head && head->handle == handle)
return head;
return NULL;
}


@ -600,6 +600,7 @@ static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
{
/* No address mapping for V4 sockets */
memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
}


@ -81,7 +81,7 @@ else
cp System.map "$tmpdir/boot/System.map-$version"
cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
fi
cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
# Only some architectures with OF support have this target


@ -1252,7 +1252,7 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
/* fill the info fields */
if (client_info->name[0])
strlcpy(client->name, client_info->name, sizeof(client->name));
strscpy(client->name, client_info->name, sizeof(client->name));
client->filter = client_info->filter;
client->event_lost = client_info->event_lost;
@ -1530,7 +1530,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
/* set queue name */
if (!info->name[0])
snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
strlcpy(q->name, info->name, sizeof(q->name));
strscpy(q->name, info->name, sizeof(q->name));
snd_use_lock_free(&q->use_lock);
return 0;
@ -1592,7 +1592,7 @@ static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
queuefree(q);
return -EPERM;
}
strlcpy(q->name, info->name, sizeof(q->name));
strscpy(q->name, info->name, sizeof(q->name));
queuefree(q);
return 0;

Some files were not shown because too many files have changed in this diff.