This is the 4.19.126 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl7XQQYACgkQONu9yGCS
aT4vwQ/9EZxtWUPh/JSsl+eImHuZjCwa/gzdLh0kUvr4Tgqxv+3KkTZ+7/TyPvID
UgbxxY6qtIP3o4W3kysLAFbOJl+I4IWkEpfCn7vKLzV0BxHjx5Krodo38zT/Ll8S
Vqi62nRpiYiqD0qrr/dZSnlY1SUyMYnQ04NKKyZokyj392ErEE2TWNGhN4m3369A
2Dm46WDKckMudkUElXvu2rQkIpVMJACr/aUaFWmmGsfZt+TGQtjRozlKkkq1vokW
WJEdCVjQwmeWW5T/OZdfM5VmuqspgtU4BhAmzxTVHGGWw+MIEcNU7LIz3s7cpBdr
7ykY4NcXxvPO5Mn/P5usOZFT/TncZQ65ZqxAEgPoF089D0uXkVTOV9dCLqPzej+g
/druvsu6bJqsbi8sd5mftXi5KKH/VDPrxnkEEvhIcuc9GCAKCQjtYz8Vtmkek30U
Mz/UcqhtUTzOJU6yZg7zV/JQ6jrzrXm4VFDdiUHoNe3LuWtFsExMXhokV9TBsScY
LtDYfe9qIq345BHsKah46VKEIa0Sb53eJFKRrEUK+4EVNr8Rp13afdXPlweX41O+
ecBlHfpRsi6MB2/fY6lBlE0uHIYSIlV78wV0wHC4czbROCYY2XSCCS2MoEXu5kD4
KMqXE6nM4tYqgV3arc2nHzth7GaEnbyCPSMMOq+2on6XB4LCRQc=
=rO6H
-----END PGP SIGNATURE-----

Merge 4.19.126 into android-4.19-stable

Changes in 4.19.126
    ax25: fix setsockopt(SO_BINDTODEVICE)
    dpaa_eth: fix usage as DSA master, try 3
    net: dsa: mt7530: fix roaming from DSA user ports
    __netif_receive_skb_core: pass skb by reference
    net: inet_csk: Fix so_reuseport bind-address cache in tb->fast*
    net: ipip: fix wrong address family in init error path
    net/mlx5: Add command entry handling completion
    net: qrtr: Fix passing invalid reference to qrtr_local_enqueue()
    net: revert "net: get rid of an signed integer overflow in ip_idents_reserve()"
    net sched: fix reporting the first-time use timestamp
    r8152: support additional Microsoft Surface Ethernet Adapter variant
    sctp: Don't add the shutdown timer if its already been added
    sctp: Start shutdown on association restart if in SHUTDOWN-SENT state and socket is closed
    net/mlx5e: Update netdev txq on completions during closure
    net/mlx5: Annotate mutex destroy for root ns
    net: sun: fix missing release regions in cas_init_one().
    net/mlx4_core: fix a memory leak bug.
    mlxsw: spectrum: Fix use-after-free of split/unsplit/type_set in case reload fails
    ARM: dts: rockchip: fix phy nodename for rk3228-evb
    arm64: dts: rockchip: fix status for &gmac2phy in rk3328-evb.dts
    arm64: dts: rockchip: swap interrupts interrupt-names rk3399 gpu node
    ARM: dts: rockchip: swap clock-names of gpu nodes
    ARM: dts: rockchip: fix pinctrl sub nodename for spi in rk322x.dtsi
    gpio: tegra: mask GPIO IRQs during IRQ shutdown
    ALSA: usb-audio: add mapping for ASRock TRX40 Creator
    net: microchip: encx24j600: add missed kthread_stop
    gfs2: move privileged user check to gfs2_quota_lock_check
    cachefiles: Fix race between read_waiter and read_copier involving op->to_do
    usb: dwc3: pci: Enable extcon driver for Intel Merrifield
    usb: gadget: legacy: fix redundant initialization warnings
    net: freescale: select CONFIG_FIXED_PHY where needed
    IB/i40iw: Remove bogus call to netdev_master_upper_dev_get()
    riscv: stacktrace: Fix undefined reference to `walk_stackframe'
    cifs: Fix null pointer check in cifs_read
    samples: bpf: Fix build error
    Input: usbtouchscreen - add support for BonXeon TP
    Input: evdev - call input_flush_device() on release(), not flush()
    Input: xpad - add custom init packet for Xbox One S controllers
    Input: dlink-dir685-touchkeys - fix a typo in driver name
    Input: i8042 - add ThinkPad S230u to i8042 reset list
    Input: synaptics-rmi4 - really fix attn_data use-after-free
    Input: synaptics-rmi4 - fix error return code in rmi_driver_probe()
    ARM: 8970/1: decompressor: increase tag size
    ARM: 8843/1: use unified assembler in headers
    ARM: uaccess: consolidate uaccess asm to asm/uaccess-asm.h
    ARM: uaccess: integrate uaccess_save and uaccess_restore
    ARM: uaccess: fix DACR mismatch with nested exceptions
    gpio: exar: Fix bad handling for ida_simple_get error path
    IB/qib: Call kobject_put() when kobject_init_and_add() fails
    ARM: dts/imx6q-bx50v3: Set display interface clock parents
    ARM: dts: bcm2835-rpi-zero-w: Fix led polarity
    ARM: dts: bcm: HR2: Fix PPI interrupt types
    mmc: block: Fix use-after-free issue for rpmb
    RDMA/pvrdma: Fix missing pci disable in pvrdma_pci_probe()
    ALSA: hwdep: fix a left shifting 1 by 31 UB bug
    ALSA: hda/realtek - Add a model for Thinkpad T570 without DAC workaround
    ALSA: usb-audio: mixer: volume quirk for ESS Technology Asus USB DAC
    exec: Always set cap_ambient in cap_bprm_set_creds
    ALSA: usb-audio: Quirks for Gigabyte TRX40 Aorus Master onboard audio
    ALSA: hda/realtek - Add new codec supported for ALC287
    libceph: ignore pool overlay and cache logic on redirects
    IB/ipoib: Fix double free of skb in case of multicast traffic in CM mode
    mm: remove VM_BUG_ON(PageSlab()) from page_mapcount()
    fs/binfmt_elf.c: allocate initialized memory in fill_thread_core_info()
    include/asm-generic/topology.h: guard cpumask_of_node() macro argument
    iommu: Fix reference count leak in iommu_group_alloc.
    parisc: Fix kernel panic in mem_init()
    mmc: core: Fix recursive locking issue in CQE recovery path
    RDMA/core: Fix double destruction of uobject
    mac80211: mesh: fix discovery timer re-arming issue / crash
    x86/dma: Fix max PFN arithmetic overflow on 32 bit systems
    copy_xstate_to_kernel(): don't leave parts of destination uninitialized
    xfrm: allow to accept packets with ipv6 NEXTHDR_HOP in xfrm_input
    xfrm: call xfrm_output_gso when inner_protocol is set in xfrm_output
    xfrm interface: fix oops when deleting a x-netns interface
    xfrm: fix a warning in xfrm_policy_insert_list
    xfrm: fix a NULL-ptr deref in xfrm_local_error
    xfrm: fix error in comment
    vti4: eliminated some duplicate code.
    ip_vti: receive ipip packet by calling ip_tunnel_rcv
    netfilter: nft_reject_bridge: enable reject with bridge vlan
    netfilter: ipset: Fix subcounter update skip
    netfilter: nfnetlink_cthelper: unbreak userspace helper support
    netfilter: nf_conntrack_pptp: prevent buffer overflows in debug code
    esp6: get the right proto for transport mode in esp6_gso_encap
    bnxt_en: Fix accumulation of bp->net_stats_prev.
    xsk: Add overflow check for u64 division, stored into u32
    qlcnic: fix missing release in qlcnic_83xx_interrupt_test.
    crypto: chelsio/chtls: properly set tp->lsndtime
    bonding: Fix reference count leak in bond_sysfs_slave_add.
    netfilter: nf_conntrack_pptp: fix compilation warning with W=1 build
    mm/vmalloc.c: don't dereference possible NULL pointer in __vunmap()
    Linux 4.19.126

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ic7ffeb4cbc4d3f1b49c60d97a5d113fcad1d098a
This commit is contained in:
commit beb44e8562

107 changed files with 722 additions and 426 deletions

Makefile | 2 +-
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 125
+SUBLEVEL = 126
 EXTRAVERSION =
 NAME = "People's Front"
@@ -46,7 +46,7 @@ SECTIONS
 	}
 	.table : ALIGN(4) {
 		_table_start = .;
-		LONG(ZIMAGE_MAGIC(2))
+		LONG(ZIMAGE_MAGIC(4))
 		LONG(ZIMAGE_MAGIC(0x5a534c4b))
 		LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
 		LONG(ZIMAGE_MAGIC(_kernel_bss_size))
@@ -75,7 +75,7 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
@@ -83,7 +83,7 @@
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x20>;
 			interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) |
-						  IRQ_TYPE_LEVEL_HIGH)>;
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&periph_clk>;
 		};
 
@@ -91,7 +91,7 @@
 			compatible = "arm,cortex-a9-twd-wdt";
 			reg = <0x20620 0x20>;
 			interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) |
-						  IRQ_TYPE_LEVEL_HIGH)>;
+						  IRQ_TYPE_EDGE_RISING)>;
 			clocks = <&periph_clk>;
 		};
 
@@ -25,7 +25,7 @@
 
 	leds {
 		act {
-			gpios = <&gpio 47 GPIO_ACTIVE_HIGH>;
+			gpios = <&gpio 47 GPIO_ACTIVE_LOW>;
 		};
 	};
 
@@ -65,13 +65,6 @@
 	};
 };
 
-&clks {
-	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
-	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
 &ldb {
 	status = "okay";
 
@@ -65,13 +65,6 @@
 	};
 };
 
-&clks {
-	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>;
-	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
-				 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
-};
-
 &ldb {
 	status = "okay";
 
@@ -53,17 +53,6 @@
 	};
 };
 
-&clks {
-	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
-			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
-			  <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
-			  <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>;
-	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
-				 <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
-				 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
-				 <&clks IMX6QDL_CLK_PLL2_PFD2_396M>;
-};
-
 &ldb {
 	fsl,dual-channel;
 	status = "okay";
@@ -391,3 +391,18 @@
 		#interrupt-cells = <1>;
 	};
 };
+
+&clks {
+	assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
+			  <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+			  <&clks IMX6QDL_CLK_IPU1_DI0_PRE_SEL>,
+			  <&clks IMX6QDL_CLK_IPU1_DI1_PRE_SEL>,
+			  <&clks IMX6QDL_CLK_IPU2_DI0_PRE_SEL>,
+			  <&clks IMX6QDL_CLK_IPU2_DI1_PRE_SEL>;
+	assigned-clock-parents = <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+				 <&clks IMX6QDL_CLK_PLL5_VIDEO_DIV>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>,
+				 <&clks IMX6QDL_CLK_PLL2_PFD0_352M>;
+};
@@ -128,7 +128,7 @@
 		assigned-clocks = <&cru SCLK_GPU>;
 		assigned-clock-rates = <100000000>;
 		clocks = <&cru SCLK_GPU>, <&cru SCLK_GPU>;
-		clock-names = "core", "bus";
+		clock-names = "bus", "core";
 		resets = <&cru SRST_GPU>;
 		status = "disabled";
 	};
@@ -46,7 +46,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 
-		phy: phy@0 {
+		phy: ethernet-phy@0 {
 			compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22";
 			reg = <0>;
 			clocks = <&cru SCLK_MAC_PHY>;
@@ -539,7 +539,7 @@
 			     "pp1",
 			     "ppmmu1";
 		clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
-		clock-names = "core", "bus";
+		clock-names = "bus", "core";
 		resets = <&cru SRST_GPU_A>;
 		status = "disabled";
 	};
@@ -944,7 +944,7 @@
 			};
 		};
 
-	spi-0 {
+	spi0 {
 		spi0_clk: spi0-clk {
 			rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_up>;
 		};
@@ -962,7 +962,7 @@
 			};
 		};
 
-	spi-1 {
+	spi1 {
 		spi1_clk: spi1-clk {
 			rockchip,pins = <0 23 RK_FUNC_2 &pcfg_pull_up>;
 		};
@@ -84,7 +84,7 @@
 		compatible = "arm,mali-400";
 		reg = <0x10090000 0x10000>;
 		clocks = <&cru ACLK_GPU>, <&cru ACLK_GPU>;
-		clock-names = "core", "bus";
+		clock-names = "bus", "core";
 		assigned-clocks = <&cru ACLK_GPU>;
 		assigned-clock-rates = <100000000>;
 		resets = <&cru SRST_GPU>;
@@ -21,11 +21,11 @@
 #endif
 
 #include <asm/ptrace.h>
-#include <asm/domain.h>
 #include <asm/opcodes-virt.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
 
 #define IOMEM(x)	(x)
 
@@ -374,9 +374,9 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
 9999:
 	.if	\inc == 1
-	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
+	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
 	.elseif	\inc == 4
-	\instr\cond\()\t\().w \reg, [\ptr, #\off]
+	\instr\t\cond\().w \reg, [\ptr, #\off]
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
@@ -415,9 +415,9 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.rept	\rept
 9999:
 	.if	\inc == 1
-	\instr\cond\()b\()\t \reg, [\ptr], #\inc
+	\instr\()b\t\cond \reg, [\ptr], #\inc
 	.elseif	\inc == 4
-	\instr\cond\()\t \reg, [\ptr], #\inc
+	\instr\t\cond \reg, [\ptr], #\inc
 	.else
 	.error	"Unsupported inc macro argument"
 	.endif
@@ -447,79 +447,6 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 	.size \name , . - \name
 	.endm
 
-	.macro	csdb
-#ifdef CONFIG_THUMB2_KERNEL
-	.inst.w	0xf3af8014
-#else
-	.inst	0xe320f014
-#endif
-	.endm
-
-	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
-#ifndef CONFIG_CPU_USE_DOMAINS
-	adds	\tmp, \addr, #\size - 1
-	sbcccs	\tmp, \tmp, \limit
-	bcs	\bad
-#ifdef CONFIG_CPU_SPECTRE
-	movcs	\addr, #0
-	csdb
-#endif
-#endif
-	.endm
-
-	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
-#ifdef CONFIG_CPU_SPECTRE
-	sub	\tmp, \limit, #1
-	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
-	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
-	subhss	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
-	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
-	csdb
-#endif
-	.endm
-
-	.macro	uaccess_disable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	/*
-	 * Whenever we re-enter userspace, the domains should always be
-	 * set appropriately.
-	 */
-	mov	\tmp, #DACR_UACCESS_DISABLE
-	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
-	.if	\isb
-	instr_sync
-	.endif
-#endif
-	.endm
-
-	.macro	uaccess_enable, tmp, isb=1
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	/*
-	 * Whenever we re-enter userspace, the domains should always be
-	 * set appropriately.
-	 */
-	mov	\tmp, #DACR_UACCESS_ENABLE
-	mcr	p15, 0, \tmp, c3, c0, 0
-	.if	\isb
-	instr_sync
-	.endif
-#endif
-	.endm
-
-	.macro	uaccess_save, tmp
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	mrc	p15, 0, \tmp, c3, c0, 0
-	str	\tmp, [sp, #SVC_DACR]
-#endif
-	.endm
-
-	.macro	uaccess_restore
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-	ldr	r0, [sp, #SVC_DACR]
-	mcr	p15, 0, r0, c3, c0, 0
-#endif
-	.endm
-
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
arch/arm/include/asm/uaccess-asm.h | 117 (new file)

@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_UACCESS_ASM_H__
+#define __ASM_UACCESS_ASM_H__
+
+#include <asm/asm-offsets.h>
+#include <asm/domain.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+
+	.macro	csdb
+#ifdef CONFIG_THUMB2_KERNEL
+	.inst.w	0xf3af8014
+#else
+	.inst	0xe320f014
+#endif
+	.endm
+
+	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcscc	\tmp, \tmp, \limit
+	bcs	\bad
+#ifdef CONFIG_CPU_SPECTRE
+	movcs	\addr, #0
+	csdb
+#endif
+#endif
+	.endm
+
+	.macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+	sub	\tmp, \limit, #1
+	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
+	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
+	subshs	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
+	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
+	csdb
+#endif
+	.endm
+
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS)
+#define DACR(x...)	x
+#else
+#define DACR(x...)
+#endif
+
+	/*
+	 * Save the address limit on entry to a privileged exception.
+	 *
+	 * If we are using the DACR for kernel access by the user accessors
+	 * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain
+	 * back to client mode, whether or not \disable is set.
+	 *
+	 * If we are using SW PAN, set the DACR user domain to no access
+	 * if \disable is set.
+	 */
+	.macro	uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
+	ldr	\tmp1, [\tsk, #TI_ADDR_LIMIT]
+	mov	\tmp2, #TASK_SIZE
+	str	\tmp2, [\tsk, #TI_ADDR_LIMIT]
+ DACR(	mrc	p15, 0, \tmp0, c3, c0, 0)
+ DACR(	str	\tmp0, [sp, #SVC_DACR])
+	str	\tmp1, [sp, #SVC_ADDR_LIMIT]
+	.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
+	/* kernel=client, user=no access */
+	mov	\tmp2, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp2, c3, c0, 0
+	instr_sync
+	.elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS)
+	/* kernel=client */
+	bic	\tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL)
+	orr	\tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT)
+	mcr	p15, 0, \tmp2, c3, c0, 0
+	instr_sync
+	.endif
+	.endm
+
+	/* Restore the user access state previously saved by uaccess_entry */
+	.macro	uaccess_exit, tsk, tmp0, tmp1
+	ldr	\tmp1, [sp, #SVC_ADDR_LIMIT]
+ DACR(	ldr	\tmp0, [sp, #SVC_DACR])
+	str	\tmp1, [\tsk, #TI_ADDR_LIMIT]
+ DACR(	mcr	p15, 0, \tmp0, c3, c0, 0)
+	.endm
+
+#undef DACR
+
+#endif /* __ASM_UACCESS_ASM_H__ */
@@ -29,13 +29,13 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPD32
-	ldcnel	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
+	ldclne	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
 	addeq	\base, \base, #32*4		@ step over unused register space
#else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	@ A_SIMD field
 	cmp	\tmp, #2			@ 32 x 64bit registers?
-	ldceql	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
+	ldcleq	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		@ step over unused register space
#endif
#endif
@@ -53,13 +53,13 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPD32
-	stcnel	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
+	stclne	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
 	addeq	\base, \base, #32*4		@ step over unused register space
#else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
 	and	\tmp, \tmp, #MVFR0_A_SIMD_MASK	@ A_SIMD field
 	cmp	\tmp, #2			@ 32 x 64bit registers?
-	stceql	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
+	stcleq	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		@ step over unused register space
#endif
#endif
@@ -30,6 +30,7 @@
 #include <asm/unistd.h>
 #include <asm/tls.h>
 #include <asm/system_info.h>
+#include <asm/uaccess-asm.h>
 
 #include "entry-header.S"
 #include <asm/entry-macro-multi.S>
@@ -182,15 +183,7 @@ ENDPROC(__und_invalid)
 	stmia	r7, {r2 - r6}
 
 	get_thread_info tsk
-	ldr	r0, [tsk, #TI_ADDR_LIMIT]
-	mov	r1, #TASK_SIZE
-	str	r1, [tsk, #TI_ADDR_LIMIT]
-	str	r0, [sp, #SVC_ADDR_LIMIT]
-
-	uaccess_save r0
-	.if \uaccess
-	uaccess_disable r0
-	.endif
+	uaccess_entry tsk, r0, r1, r2, \uaccess
 
 	.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -6,6 +6,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess-asm.h>
 #include <asm/v7m.h>
 
 @ Bad Abort numbers
@@ -217,9 +218,7 @@
 	blne	trace_hardirqs_off
#endif
 	.endif
-	ldr	r1, [sp, #SVC_ADDR_LIMIT]
-	uaccess_restore
-	str	r1, [tsk, #TI_ADDR_LIMIT]
+	uaccess_exit tsk, r0, r1
 
#ifndef CONFIG_THUMB2_KERNEL
 	@ ARM mode SVC restore
@@ -263,9 +262,7 @@
 	@ on the stack remains correct).
 	@
 	.macro	svc_exit_via_fiq
-	ldr	r1, [sp, #SVC_ADDR_LIMIT]
-	uaccess_restore
-	str	r1, [tsk, #TI_ADDR_LIMIT]
+	uaccess_exit tsk, r0, r1
#ifndef CONFIG_THUMB2_KERNEL
 	@ ARM mode restore
 	mov	r0, sp
@@ -7,7 +7,7 @@
ENTRY(	\name		)
UNWIND(	.fnstart	)
 	ands	ip, r1, #3
-	strneb	r1, [ip]		@ assert word-aligned
+	strbne	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
 	and	r3, r0, #31		@ Get bit offset
 	mov	r0, r0, lsr #5
@@ -32,7 +32,7 @@ ENDPROC(\name		)
ENTRY(	\name		)
UNWIND(	.fnstart	)
 	ands	ip, r1, #3
-	strneb	r1, [ip]		@ assert word-aligned
+	strbne	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
 	and	r3, r0, #31		@ Get bit offset
 	mov	r0, r0, lsr #5
@@ -62,7 +62,7 @@ ENDPROC(\name		)
ENTRY(	\name		)
UNWIND(	.fnstart	)
 	ands	ip, r1, #3
-	strneb	r1, [ip]		@ assert word-aligned
+	strbne	r1, [ip]		@ assert word-aligned
 	and	r2, r0, #31
 	mov	r0, r0, lsr #5
 	mov	r3, #1
@@ -89,7 +89,7 @@ ENDPROC(\name		)
ENTRY(	\name		)
UNWIND(	.fnstart	)
 	ands	ip, r1, #3
-	strneb	r1, [ip]		@ assert word-aligned
+	strbne	r1, [ip]		@ assert word-aligned
 	and	r3, r0, #31
 	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
@@ -86,7 +86,7 @@
 	assigned-clock-rate = <50000000>;
 	assigned-clocks = <&cru SCLK_MAC2PHY>;
 	assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>;
-
+	status = "okay";
 };
 
 &i2c1 {
@@ -1817,10 +1817,10 @@
 	gpu: gpu@ff9a0000 {
 		compatible = "rockchip,rk3399-mali", "arm,mali-t860";
 		reg = <0x0 0xff9a0000 0x0 0x10000>;
-		interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>,
-			     <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
-			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>;
-		interrupt-names = "gpu", "job", "mmu";
+		interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH 0>,
+			     <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH 0>,
+			     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH 0>;
+		interrupt-names = "job", "mmu", "gpu";
 		clocks = <&cru ACLK_GPU>;
 		power-domains = <&power RK3399_PD_GPU>;
 		status = "disabled";
@@ -607,7 +607,7 @@ void __init mem_init(void)
 			> BITS_PER_LONG);
 
 	high_memory = __va((max_pfn << PAGE_SHIFT));
-	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+	set_max_mapnr(max_low_pfn);
 	free_all_bootmem();
 
#ifdef CONFIG_PA11
@@ -75,7 +75,7 @@ static void notrace walk_stackframe(struct task_struct *task,
 
#else /* !CONFIG_FRAME_POINTER */
 
-static void notrace walk_stackframe(struct task_struct *task,
+void notrace walk_stackframe(struct task_struct *task,
 	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
 {
 	unsigned long sp, pc;
@@ -74,7 +74,7 @@
#define MAX_DMA_PFN   ((16UL * 1024 * 1024) >> PAGE_SHIFT)
 
 /* 4GB broken PCI/AGP hardware bus master zone */
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT))
 
#ifdef CONFIG_X86_32
 /* The maximum address that we can perform a DMA transfer to on this platform */
@@ -964,18 +964,31 @@ static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
 	return true;
 }
 
-/*
- * This is similar to user_regset_copyout(), but will not add offset to
- * the source data pointer or increment pos, count, kbuf, and ubuf.
- */
-static inline void
-__copy_xstate_to_kernel(void *kbuf, const void *data,
-			unsigned int offset, unsigned int size, unsigned int size_total)
+static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
 {
-	if (offset < size_total) {
-		unsigned int copy = min(size, size_total - offset);
-
-		memcpy(kbuf + offset, data, copy);
+	if (*pos < to) {
+		unsigned size = to - *pos;
+
+		if (size > *count)
+			size = *count;
+		memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
+		*kbuf += size;
+		*pos += size;
+		*count -= size;
+	}
+}
+
+static void copy_part(unsigned offset, unsigned size, void *from,
+		      void **kbuf, unsigned *pos, unsigned *count)
+{
+	fill_gap(offset, kbuf, pos, count);
+	if (size > *count)
+		size = *count;
+	if (size) {
+		memcpy(*kbuf, from, size);
+		*kbuf += size;
+		*pos += size;
+		*count -= size;
 	}
 }
 
@@ -988,8 +1001,9 @@ __copy_xstate_to_kernel(void *kbuf, const void *data,
  */
 int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
 {
-	unsigned int offset, size;
 	struct xstate_header header;
+	const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
+	unsigned count = size_total;
 	int i;
 
 	/*
@@ -1005,46 +1019,42 @@ int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
 	header.xfeatures = xsave->header.xfeatures;
 	header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
 
+	if (header.xfeatures & XFEATURE_MASK_FP)
+		copy_part(0, off_mxcsr,
+			  &xsave->i387, &kbuf, &offset_start, &count);
+	if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
+		copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
+			  &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
+	if (header.xfeatures & XFEATURE_MASK_FP)
+		copy_part(offsetof(struct fxregs_state, st_space), 128,
+			  &xsave->i387.st_space, &kbuf, &offset_start, &count);
+	if (header.xfeatures & XFEATURE_MASK_SSE)
+		copy_part(xstate_offsets[XFEATURE_MASK_SSE], 256,
+			  &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
+	/*
+	 * Fill xsave->i387.sw_reserved value for ptrace frame:
+	 */
+	copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
+		  xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
 	/*
 	 * Copy xregs_state->header:
 	 */
-	offset = offsetof(struct xregs_state, header);
-	size = sizeof(header);
-
-	__copy_xstate_to_kernel(kbuf, &header, offset, size, size_total);
+	copy_part(offsetof(struct xregs_state, header), sizeof(header),
+		  &header, &kbuf, &offset_start, &count);
 
-	for (i = 0; i < XFEATURE_MAX; i++) {
+	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
 		/*
 		 * Copy only in-use xstates:
 		 */
 		if ((header.xfeatures >> i) & 1) {
 			void *src = __raw_xsave_addr(xsave, 1 << i);
 
-			offset = xstate_offsets[i];
-			size = xstate_sizes[i];
-
-			/* The next component has to fit fully into the output buffer: */
-			if (offset + size > size_total)
-				break;
-
-			__copy_xstate_to_kernel(kbuf, src, offset, size, size_total);
+			copy_part(xstate_offsets[i], xstate_sizes[i],
+				  src, &kbuf, &offset_start, &count);
 		}
 
 	}
-
-	if (xfeatures_mxcsr_quirk(header.xfeatures)) {
-		offset = offsetof(struct fxregs_state, mxcsr);
-		size = MXCSR_AND_FLAGS_SIZE;
-
-		__copy_xstate_to_kernel(kbuf, &xsave->i387.mxcsr, offset, size, size_total);
-	}
-
-	/*
-	 * Fill xsave->i387.sw_reserved value for ptrace frame:
-	 */
-	offset = offsetof(struct fxregs_state, sw_reserved);
-	size = sizeof(xstate_fx_sw_bytes);
-
-	__copy_xstate_to_kernel(kbuf, xstate_fx_sw_bytes, offset, size, size_total);
-
+	fill_gap(size_total, &kbuf, &offset_start, &count);
+
 	return 0;
 }
@@ -686,7 +686,7 @@ int chtls_push_frames(struct chtls_sock *csk, int comp)
 			make_tx_data_wr(sk, skb, immdlen, len,
 					credits_needed, completion);
 			tp->snd_nxt += len;
-			tp->lsndtime = tcp_time_stamp(tp);
+			tp->lsndtime = tcp_jiffies32;
 			if (completion)
 				ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR;
 		} else {
@@ -148,8 +148,10 @@ static int gpio_exar_probe(struct platform_device *pdev)
 	mutex_init(&exar_gpio->lock);
 
 	index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
-	if (index < 0)
-		goto err_destroy;
+	if (index < 0) {
+		ret = index;
+		goto err_mutex_destroy;
+	}
 
 	sprintf(exar_gpio->name, "exar_gpio%d", index);
 	exar_gpio->gpio_chip.label = exar_gpio->name;
@@ -176,6 +178,7 @@ static int gpio_exar_probe(struct platform_device *pdev)
 
err_destroy:
 	ida_simple_remove(&ida_index, index);
+err_mutex_destroy:
 	mutex_destroy(&exar_gpio->lock);
 	return ret;
 }
@@ -357,6 +357,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
 	struct tegra_gpio_info *tgi = bank->tgi;
 	unsigned int gpio = d->hwirq;
 
+	tegra_gpio_irq_mask(d);
 	gpiochip_unlock_as_irq(&tgi->gc, gpio);
 }
 
@@ -158,9 +158,9 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 	uobj->context = NULL;
 
 	/*
-	 * For DESTROY the usecnt is held write locked, the caller is expected
-	 * to put it unlock and put the object when done with it. Only DESTROY
-	 * can remove the IDR handle.
+	 * For DESTROY the usecnt is not changed, the caller is expected to
+	 * manage it via uobj_put_destroy(). Only DESTROY can remove the IDR
+	 * handle.
 	 */
 	if (reason != RDMA_REMOVE_DESTROY)
 		atomic_set(&uobj->usecnt, 0);
@@ -192,7 +192,7 @@ static int uverbs_destroy_uobject(struct ib_uobject *uobj,
 /*
  * This calls uverbs_destroy_uobject() using the RDMA_REMOVE_DESTROY
  * sequence. It should only be used from command callbacks. On success the
- * caller must pair this with rdma_lookup_put_uobject(LOOKUP_WRITE). This
+ * caller must pair this with uobj_put_destroy(). This
  * version requires the caller to have already obtained an
  * LOOKUP_DESTROY uobject kref.
  */
@@ -203,6 +203,13 @@ int uobj_destroy(struct ib_uobject *uobj)
 
 	down_read(&ufile->hw_destroy_rwsem);
 
+	/*
+	 * Once the uobject is destroyed by RDMA_REMOVE_DESTROY then it is left
+	 * write locked as the callers put it back with UVERBS_LOOKUP_DESTROY.
+	 * This is because any other concurrent thread can still see the object
+	 * in the xarray due to RCU. Leaving it locked ensures nothing else will
+	 * touch it.
+	 */
 	ret = uverbs_try_lock_object(uobj, UVERBS_LOOKUP_WRITE);
 	if (ret)
 		goto out_unlock;
@@ -221,7 +228,7 @@ int uobj_destroy(struct ib_uobject *uobj)
 /*
  * uobj_get_destroy destroys the HW object and returns a handle to the uobj
  * with a NULL object pointer. The caller must pair this with
- * uverbs_put_destroy.
+ * uobj_put_destroy().
  */
 struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
 				      u32 id, struct ib_uverbs_file *ufile)
@@ -256,7 +263,7 @@ int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
 	if (IS_ERR(uobj))
 		return PTR_ERR(uobj);
 
-	rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+	uobj_put_destroy(uobj);
 	return success_res;
 }
 
@@ -1984,7 +1984,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
 	struct rtable *rt;
 	struct neighbour *neigh;
 	int rc = arpindex;
-	struct net_device *netdev = iwdev->netdev;
 	__be32 dst_ipaddr = htonl(dst_ip);
 	__be32 src_ipaddr = htonl(src_ip);
 
@@ -1994,9 +1993,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
 		return rc;
 	}
 
-	if (netif_is_bond_slave(netdev))
-		netdev = netdev_master_upper_dev_get(netdev);
-
 	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
 
 	rcu_read_lock();
@@ -2062,7 +2058,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 {
 	struct neighbour *neigh;
 	int rc = arpindex;
-	struct net_device *netdev = iwdev->netdev;
 	struct dst_entry *dst;
 	struct sockaddr_in6 dst_addr;
 	struct sockaddr_in6 src_addr;
@@ -2083,9 +2078,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
 		return rc;
 	}
 
-	if (netif_is_bond_slave(netdev))
-		netdev = netdev_master_upper_dev_get(netdev);
-
 	neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
 
 	rcu_read_lock();
@@ -756,7 +756,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
 		qib_dev_err(dd,
 			    "Skipping linkcontrol sysfs info, (err %d) port %u\n",
 			    ret, port_num);
-		goto bail;
+		goto bail_link;
 	}
 	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
 
@@ -766,7 +766,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
 		qib_dev_err(dd,
 			    "Skipping sl2vl sysfs info, (err %d) port %u\n",
 			    ret, port_num);
-		goto bail_link;
+		goto bail_sl;
 	}
 	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
 
@@ -776,7 +776,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
 		qib_dev_err(dd,
 			    "Skipping diag_counters sysfs info, (err %d) port %u\n",
 			    ret, port_num);
-		goto bail_sl;
+		goto bail_diagc;
 	}
 	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
 
@@ -789,7 +789,7 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
 			qib_dev_err(dd,
 				    "Skipping Congestion Control sysfs info, (err %d) port %u\n",
 				    ret, port_num);
-			goto bail_diagc;
+			goto bail_cc;
 		}
 
 		kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -871,6 +871,7 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
 						 &cc_table_bin_attr);
 			kobject_put(&ppd->pport_cc_kobj);
 		}
+		kobject_put(&ppd->diagc_kobj);
 		kobject_put(&ppd->sl2vl_kobj);
 		kobject_put(&ppd->pport_kobj);
 	}
@@ -833,7 +833,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
 		ret = -ENOMEM;
-		goto err_free_device;
+		goto err_disable_pdev;
 	}
 
 	ret = pci_request_regions(pdev, DRV_NAME);
@@ -377,8 +377,12 @@ struct ipoib_dev_priv {
 	struct ipoib_rx_buf *rx_ring;
 
 	struct ipoib_tx_buf *tx_ring;
+	/* cyclic ring variables for managing tx_ring, for UD only */
 	unsigned int	     tx_head;
 	unsigned int	     tx_tail;
+	/* cyclic ring variables for counting overall outstanding send WRs */
+	unsigned int	     global_tx_head;
+	unsigned int	     global_tx_tail;
 	struct ib_sge	     tx_sge[MAX_SKB_FRAGS + 1];
 	struct ib_ud_wr	     tx_wr;
 	struct ib_wc	     send_wc[MAX_SEND_CQE];
@@ -756,7 +756,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 		return;
 	}
 
-	if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size - 1) {
+	if ((priv->global_tx_head - priv->global_tx_tail) ==
+	    ipoib_sendq_size - 1) {
 		ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 			  tx->qp->qp_num);
 		netif_stop_queue(dev);
@@ -786,7 +787,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
 	} else {
 		netif_trans_update(dev);
 		++tx->tx_head;
-		++priv->tx_head;
+		++priv->global_tx_head;
 	}
 }
 
@@ -820,10 +821,11 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	netif_tx_lock(dev);
 
 	++tx->tx_tail;
-	++priv->tx_tail;
+	++priv->global_tx_tail;
 
 	if (unlikely(netif_queue_stopped(dev) &&
-		     (priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1 &&
+		     ((priv->global_tx_head - priv->global_tx_tail) <=
+		      ipoib_sendq_size >> 1) &&
 		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
 		netif_wake_queue(dev);
 
@@ -1233,8 +1235,9 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 		dev_kfree_skb_any(tx_req->skb);
 		netif_tx_lock_bh(p->dev);
 		++p->tx_tail;
-		++priv->tx_tail;
-		if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
+		++priv->global_tx_tail;
+		if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
+			     ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
@@ -406,9 +406,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	dev_kfree_skb_any(tx_req->skb);
 
 	++priv->tx_tail;
+	++priv->global_tx_tail;
 
 	if (unlikely(netif_queue_stopped(dev) &&
-		     ((priv->tx_head - priv->tx_tail) <= ipoib_sendq_size >> 1) &&
+		     ((priv->global_tx_head - priv->global_tx_tail) <=
+		      ipoib_sendq_size >> 1) &&
 		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
 		netif_wake_queue(dev);
 
@@ -633,7 +635,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	else
 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
 	/* increase the tx_head after send success, but use it for queue state */
-	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
+	if ((priv->global_tx_head - priv->global_tx_tail) ==
+	    ipoib_sendq_size - 1) {
 		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 		netif_stop_queue(dev);
 	}
@@ -661,6 +664,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
 		rc = priv->tx_head;
 		++priv->tx_head;
+		++priv->global_tx_head;
 	}
 	return rc;
 }
@@ -807,6 +811,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 				ipoib_dma_unmap_tx(priv, tx_req);
 				dev_kfree_skb_any(tx_req->skb);
 				++priv->tx_tail;
+				++priv->global_tx_tail;
 			}
 
 			for (i = 0; i < ipoib_recvq_size; ++i) {
@@ -1188,9 +1188,11 @@ static void ipoib_timeout(struct net_device *dev)
 
 	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
 		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
-	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
-		   netif_queue_stopped(dev),
-		   priv->tx_head, priv->tx_tail);
+	ipoib_warn(priv,
+		   "queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
+		   netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
+		   priv->global_tx_head, priv->global_tx_tail);
 
 	/* XXX reset QP, etc. */
 }
 
@@ -1705,7 +1707,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
 		goto out_rx_ring_cleanup;
 	}
 
-	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
+	/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
 
 	if (ipoib_transport_dev_init(dev, priv->ca)) {
 		pr_warn("%s: ipoib_transport_dev_init failed\n",
@@ -329,20 +329,6 @@ static int evdev_fasync(int fd, struct file *file, int on)
 	return fasync_helper(fd, file, on, &client->fasync);
 }
 
-static int evdev_flush(struct file *file, fl_owner_t id)
-{
-	struct evdev_client *client = file->private_data;
-	struct evdev *evdev = client->evdev;
-
-	mutex_lock(&evdev->mutex);
-
-	if (evdev->exist && !client->revoked)
-		input_flush_device(&evdev->handle, file);
-
-	mutex_unlock(&evdev->mutex);
-	return 0;
-}
-
 static void evdev_free(struct device *dev)
 {
 	struct evdev *evdev = container_of(dev, struct evdev, dev);
@@ -456,6 +442,10 @@ static int evdev_release(struct inode *inode, struct file *file)
 	unsigned int i;
 
 	mutex_lock(&evdev->mutex);
+
+	if (evdev->exist && !client->revoked)
+		input_flush_device(&evdev->handle, file);
+
 	evdev_ungrab(evdev, client);
 	mutex_unlock(&evdev->mutex);
 
@@ -1317,7 +1307,6 @@ static const struct file_operations evdev_fops = {
 	.compat_ioctl	= evdev_ioctl_compat,
#endif
 	.fasync		= evdev_fasync,
-	.flush		= evdev_flush,
 	.llseek		= no_llseek,
};
 
@@ -472,6 +472,16 @@
 	0x05, 0x20, 0x00, 0x01, 0x00
};
 
+/*
+ * This packet is required for Xbox One S (0x045e:0x02ea)
+ * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to
+ * initialize the controller that was previously used in
+ * Bluetooth mode.
+ */
+static const u8 xboxone_s_init[] = {
+	0x05, 0x20, 0x00, 0x0f, 0x06
+};
+
 /*
  * This packet is required for the Titanfall 2 Xbox One pads
 * (0x0e6f:0x0165) to finish initialization and for Hori pads
@@ -530,6 +540,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
 	XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
 	XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+	XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init),
+	XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init),
 	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
 	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
 	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
@@ -142,7 +142,7 @@ MODULE_DEVICE_TABLE(of, dir685_tk_of_match);
 
static struct i2c_driver dir685_tk_i2c_driver = {
 	.driver = {
-		.name	= "dlin-dir685-touchkeys",
+		.name	= "dlink-dir685-touchkeys",
 		.of_match_table = of_match_ptr(dir685_tk_of_match),
 	},
 	.probe = dir685_tk_probe,
@@ -208,7 +208,7 @@ static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
 
 	if (count) {
 		kfree(attn_data.data);
-		attn_data.data = NULL;
+		drvdata->attn_data.data = NULL;
 	}
 
 	if (!kfifo_is_empty(&drvdata->attn_fifo))
@@ -1213,7 +1213,8 @@ static int rmi_driver_probe(struct device *dev)
 	if (data->input) {
 		rmi_driver_set_input_name(rmi_dev, data->input);
 		if (!rmi_dev->xport->input) {
-			if (input_register_device(data->input)) {
+			retval = input_register_device(data->input);
+			if (retval) {
 				dev_err(dev, "%s: Failed to register input device.\n",
 					__func__);
 				goto err_destroy_functions;
@@ -666,6 +666,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
 },
 },
+{
+/* Lenovo ThinkPad Twist S230u */
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+DMI_MATCH(DMI_PRODUCT_NAME, "33474HU"),
+},
+},
 { }
 };

@@ -195,6 +195,7 @@ static const struct usb_device_id usbtouch_devices[] = {
 #endif

 #ifdef CONFIG_TOUCHSCREEN_USB_IRTOUCH
+{USB_DEVICE(0x255e, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
 {USB_DEVICE(0x595a, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
 {USB_DEVICE(0x6615, 0x0001), .driver_info = DEVTYPE_IRTOUCH},
 {USB_DEVICE(0x6615, 0x0012), .driver_info = DEVTYPE_IRTOUCH_HIRES},

@@ -393,7 +393,7 @@ struct iommu_group *iommu_group_alloc(void)
 NULL, "%d", group->id);
 if (ret) {
 ida_simple_remove(&iommu_group_ida, group->id);
-kfree(group);
+kobject_put(&group->kobj);
 return ERR_PTR(ret);
 }

@@ -2488,8 +2488,8 @@ static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp)
 struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev,
 struct mmc_rpmb_data, chrdev);

-put_device(&rpmb->dev);
 mmc_blk_put(rpmb->md);
+put_device(&rpmb->dev);

 return 0;
 }

@@ -109,7 +109,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
 case MMC_ISSUE_DCMD:
 if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
 if (recovery_needed)
-__mmc_cqe_recovery_notifier(mq);
+mmc_cqe_recovery_notifier(mrq);
 return BLK_EH_RESET_TIMER;
 }
 /* The request has gone already */
@@ -126,18 +126,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
 struct request_queue *q = req->q;
 struct mmc_queue *mq = q->queuedata;
 unsigned long flags;
-int ret;
+bool ignore_tout;

 spin_lock_irqsave(q->queue_lock, flags);
-
-if (mq->recovery_needed || !mq->use_cqe)
-ret = BLK_EH_RESET_TIMER;
-else
-ret = mmc_cqe_timed_out(req);
-
+ignore_tout = mq->recovery_needed || !mq->use_cqe;
 spin_unlock_irqrestore(q->queue_lock, flags);

-return ret;
+return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
 }

 static void mmc_mq_recovery_handler(struct work_struct *work)
@@ -153,8 +153,10 @@ int bond_sysfs_slave_add(struct slave *slave)

 err = kobject_init_and_add(&slave->kobj, &slave_ktype,
 &(slave->dev->dev.kobj), "bonding_slave");
-if (err)
+if (err) {
+kobject_put(&slave->kobj);
 return err;
+}

 for (a = slave_attrs; *a; ++a) {
 err = sysfs_create_file(&slave->kobj, &((*a)->attr));

@@ -685,11 +685,8 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
 /* Setup the MAC by default for the cpu port */
 mt7530_write(priv, MT7530_PMCR_P(port), PMCR_CPUP_LINK);

-/* Disable auto learning on the cpu port */
-mt7530_set(priv, MT7530_PSC_P(port), SA_DIS);
-
-/* Unknown unicast frame fordwarding to the cpu port */
-mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port)));
+/* Unknown multicast frame forwarding to the cpu port */
+mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port)));

 /* CPU port gets connected to all user ports of
 * the switch
@@ -1288,8 +1285,6 @@ mt7530_setup(struct dsa_switch *ds)
 /* Enable and reset MIB counters */
 mt7530_mib_reset(ds);

-mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK);
-
 for (i = 0; i < MT7530_NUM_PORTS; i++) {
 /* Disable forwarding by default on all ports */
 mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,

@@ -34,6 +34,7 @@
 #define MT7530_MFC 0x10
 #define BC_FFP(x) (((x) & 0xff) << 24)
 #define UNM_FFP(x) (((x) & 0xff) << 16)
+#define UNM_FFP_MASK UNM_FFP(~0)
 #define UNU_FFP(x) (((x) & 0xff) << 8)
 #define UNU_FFP_MASK UNU_FFP(~0)

@@ -7177,7 +7177,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
 bnxt_free_skbs(bp);

 /* Save ring stats before shutdown */
-if (bp->bnapi)
+if (bp->bnapi && irq_re_init)
 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
 if (irq_re_init) {
 bnxt_free_irq(bp);

@@ -76,6 +76,7 @@ config UCC_GETH
 depends on QUICC_ENGINE
 select FSL_PQ_MDIO
 select PHYLIB
+select FIXED_PHY
 ---help---
 This driver supports the Gigabit Ethernet mode of the QUICC Engine,
 which is available on some Freescale SOCs.
@@ -89,6 +90,7 @@ config GIANFAR
 depends on HAS_DMA
 select FSL_PQ_MDIO
 select PHYLIB
+select FIXED_PHY
 select CRC32
 ---help---
 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,

@@ -2,6 +2,7 @@ menuconfig FSL_DPAA_ETH
 tristate "DPAA Ethernet"
 depends on FSL_DPAA && FSL_FMAN
 select PHYLIB
+select FIXED_PHY
 select FSL_FMAN_MAC
 ---help---
 Data Path Acceleration Architecture Ethernet driver,
@@ -2796,7 +2796,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 }

 /* Do this here, so we can be verbose early */
-SET_NETDEV_DEV(net_dev, dev);
+SET_NETDEV_DEV(net_dev, dev->parent);
 dev_set_drvdata(dev, net_dev);

 priv = netdev_priv(net_dev);

@@ -2731,7 +2731,7 @@ void mlx4_opreq_action(struct work_struct *work)
 if (err) {
 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
 err);
-return;
+goto out;
 }
 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
@@ -835,6 +835,7 @@ static void cmd_work_handler(struct work_struct *work)
 int alloc_ret;
 int cmd_mode;

+complete(&ent->handling);
 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 down(sem);
 if (!ent->page_queue) {
@@ -953,6 +954,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 struct mlx5_cmd *cmd = &dev->cmd;
 int err;

+if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+cancel_work_sync(&ent->work)) {
+ent->ret = -ECANCELED;
+goto out_err;
+}
 if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
 wait_for_completion(&ent->done);
 } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -960,12 +966,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
 }

+out_err:
 err = ent->ret;

 if (err == -ETIMEDOUT) {
 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 mlx5_command_str(msg_to_opcode(ent->in)),
 msg_to_opcode(ent->in));
+} else if (err == -ECANCELED) {
+mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+mlx5_command_str(msg_to_opcode(ent->in)),
+msg_to_opcode(ent->in));
 }
 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
 err, deliv_status_to_str(ent->status), ent->status);
@@ -1001,6 +1012,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 ent->token = token;
 ent->polling = force_polling;

+init_completion(&ent->handling);
 if (!callback)
 init_completion(&ent->done);

@@ -1020,6 +1032,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
 err = wait_func(dev, ent);
 if (err == -ETIMEDOUT)
 goto out;
+if (err == -ECANCELED)
+goto out_free;

 ds = ent->ts2 - ent->ts1;
 op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -595,8 +595,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
 struct mlx5e_tx_wqe_info *wi;
+u32 nbytes = 0;
+u16 ci, npkts = 0;
 struct sk_buff *skb;
-u16 ci;
 int i;

 while (sq->cc != sq->pc) {
@@ -617,8 +618,11 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 }

 dev_kfree_skb_any(skb);
+npkts++;
+nbytes += wi->num_bytes;
 sq->cc += wi->num_wqebbs;
 }

+netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 }

 #ifdef CONFIG_MLX5_CORE_IPOIB

@@ -364,6 +364,12 @@ static void del_sw_ns(struct fs_node *node)

 static void del_sw_prio(struct fs_node *node)
 {
+struct mlx5_flow_root_namespace *root_ns;
+struct mlx5_flow_namespace *ns;
+
+fs_get_obj(ns, node);
+root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+mutex_destroy(&root_ns->chain_lock);
 kfree(node);
 }

@@ -3126,6 +3126,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
 mlxsw_sp_port_remove(mlxsw_sp, i);
 kfree(mlxsw_sp->port_to_module);
 kfree(mlxsw_sp->ports);
+mlxsw_sp->ports = NULL;
 }

 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
@@ -3174,6 +3175,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
 kfree(mlxsw_sp->port_to_module);
 err_port_to_module_alloc:
 kfree(mlxsw_sp->ports);
+mlxsw_sp->ports = NULL;
 return err;
 }

@@ -3228,6 +3230,14 @@ static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
 }
 }

+static struct mlxsw_sp_port *
+mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
+return mlxsw_sp->ports[local_port];
+return NULL;
+}
+
 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
 unsigned int count,
 struct netlink_ext_ack *extack)
@@ -3238,7 +3248,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
 int i;
 int err;

-mlxsw_sp_port = mlxsw_sp->ports[local_port];
+mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
 if (!mlxsw_sp_port) {
 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
 local_port);
@@ -3305,7 +3315,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
 unsigned int count;
 int i;

-mlxsw_sp_port = mlxsw_sp->ports[local_port];
+mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
 if (!mlxsw_sp_port) {
 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
 local_port);

@@ -1289,6 +1289,7 @@ static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
 if (mlxsw_sx_port_created(mlxsw_sx, i))
 mlxsw_sx_port_remove(mlxsw_sx, i);
 kfree(mlxsw_sx->ports);
+mlxsw_sx->ports = NULL;
 }

 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
@@ -1323,6 +1324,7 @@ static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
 if (mlxsw_sx_port_created(mlxsw_sx, i))
 mlxsw_sx_port_remove(mlxsw_sx, i);
 kfree(mlxsw_sx->ports);
+mlxsw_sx->ports = NULL;
 return err;
 }

@@ -1406,6 +1408,12 @@ static int mlxsw_sx_port_type_set(struct mlxsw_core *mlxsw_core, u8 local_port,
 u8 module, width;
 int err;

+if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) {
+dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n",
+local_port);
+return -EINVAL;
+}
+
 if (new_type == DEVLINK_PORT_TYPE_AUTO)
 return -EOPNOTSUPP;

@@ -1075,7 +1075,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
 if (unlikely(ret)) {
 netif_err(priv, probe, ndev, "Error %d initializing card encx24j600 card\n",
 ret);
-goto out_free;
+goto out_stop;
 }

 eidled = encx24j600_read_reg(priv, EIDLED);
@@ -1093,6 +1093,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)

 out_unregister:
 unregister_netdev(priv->ndev);
+out_stop:
+kthread_stop(priv->kworker_task);
 out_free:
 free_netdev(ndev);

@@ -1105,6 +1107,7 @@ static int encx24j600_spi_remove(struct spi_device *spi)
 struct encx24j600_priv *priv = dev_get_drvdata(&spi->dev);

 unregister_netdev(priv->ndev);
+kthread_stop(priv->kworker_task);

 free_netdev(priv->ndev);

@@ -3651,7 +3651,7 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
 ahw->diag_cnt = 0;
 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
 if (ret)
-goto fail_diag_irq;
+goto fail_mbx_args;

 if (adapter->flags & QLCNIC_MSIX_ENABLED)
 intrpt_id = ahw->intr_tbl[0].id;
@@ -3681,6 +3681,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)

 done:
 qlcnic_free_mbx_args(&cmd);

+fail_mbx_args:
 qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);

 fail_diag_irq:
@@ -4971,7 +4971,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 cas_cacheline_size)) {
 dev_err(&pdev->dev, "Could not set PCI cache "
 "line size\n");
-goto err_write_cacheline;
+goto err_out_free_res;
 }
 }
 #endif
@@ -5144,7 +5144,6 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 err_out_free_res:
 pci_release_regions(pdev);

-err_write_cacheline:
 /* Try to restore it in case the error occurred after we
 * set it.
 */

@@ -821,14 +821,21 @@ static const struct usb_device_id products[] = {
 .driver_info = 0,
 },

-/* Microsoft Surface 3 dock (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
 {
 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
 .driver_info = 0,
 },

-/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
+{
+USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
+USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+.driver_info = 0,
+},
+
+/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),

@@ -5344,6 +5344,7 @@ static const struct usb_device_id rtl8152_table[] = {
 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)},
 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)},
+{REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},
 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
@@ -112,6 +112,7 @@ static const struct property_entry dwc3_pci_intel_properties[] = {

 static const struct property_entry dwc3_pci_mrfld_properties[] = {
 PROPERTY_ENTRY_STRING("dr_mode", "otg"),
+PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
 PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
 {}
 };

@@ -1360,7 +1360,6 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)

 req->buf = dev->rbuf;
 req->context = NULL;
-value = -EOPNOTSUPP;
 switch (ctrl->bRequest) {

 case USB_REQ_GET_DESCRIPTOR:
@@ -1783,7 +1782,7 @@ static ssize_t
 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
 {
 struct dev_data *dev = fd->private_data;
-ssize_t value = len, length = len;
+ssize_t value, length = len;
 unsigned total;
 u32 tag;
 char *kbuf;

@@ -1766,7 +1766,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
 (!regset->active || regset->active(t->task, regset) > 0)) {
 int ret;
 size_t size = regset_size(t->task, regset);
-void *data = kmalloc(size, GFP_KERNEL);
+void *data = kzalloc(size, GFP_KERNEL);
 if (unlikely(!data))
 return 0;
 ret = regset->get(t->task, regset,
@@ -64,9 +64,9 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
 object = container_of(op->op.object, struct cachefiles_object, fscache);
 spin_lock(&object->work_lock);
 list_add_tail(&monitor->op_link, &op->to_do);
+fscache_enqueue_retrieval(op);
 spin_unlock(&object->work_lock);

-fscache_enqueue_retrieval(op);
 fscache_put_retrieval(op);
 return 0;
 }

@@ -3532,7 +3532,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
 * than it negotiated since it will refuse the read
 * then.
 */
-if ((tcon->ses) && !(tcon->ses->capabilities &
+if (!(tcon->ses->capabilities &
 tcon->ses->server->vals->cap_large_files)) {
 current_read_size = min_t(uint,
 current_read_size, CIFSMaxBufSize);

@@ -1043,8 +1043,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
 u32 x;
 int error = 0;

-if (capable(CAP_SYS_RESOURCE) ||
-sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
 return 0;

 error = gfs2_quota_hold(ip, uid, gid);

@@ -47,7 +47,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
 int ret;

 ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
-if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+if (capable(CAP_SYS_RESOURCE) ||
+sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 return 0;
 ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
 if (ret)
@@ -48,7 +48,7 @@
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
 #else
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+#define cpumask_of_node(node) ((void)(node), cpu_online_mask)
 #endif
 #endif
 #ifndef pcibus_to_node

@@ -902,6 +902,7 @@ struct mlx5_cmd_work_ent {
 struct delayed_work cb_timeout_work;
 void *context;
 int idx;
+struct completion handling;
 struct completion done;
 struct mlx5_cmd *cmd;
 struct work_struct work;

@@ -619,6 +619,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)

 extern void kvfree(const void *addr);

+/*
+ * Mapcount of compound page as a whole, does not include mapped sub-pages.
+ *
+ * Must be called only for compound pages or any their tail sub-pages.
+ */
 static inline int compound_mapcount(struct page *page)
 {
 VM_BUG_ON_PAGE(!PageCompound(page), page);
@@ -638,10 +643,16 @@ static inline void page_mapcount_reset(struct page *page)

 int __page_mapcount(struct page *page);

+/*
+ * Mapcount of 0-order page; when compound sub-page, includes
+ * compound_mapcount().
+ *
+ * Result is undefined for pages which cannot be mapped into userspace.
+ * For example SLAB or special types of pages. See function page_has_type().
+ * They use this place in struct page differently.
+ */
 static inline int page_mapcount(struct page *page)
 {
-VM_BUG_ON_PAGE(PageSlab(page), page);
-
 if (unlikely(PageCompound(page)))
 return __page_mapcount(page);
 return atomic_read(&page->_mapcount) + 1;
@@ -5,7 +5,7 @@

 #include <linux/netfilter/nf_conntrack_common.h>

-extern const char *const pptp_msg_name[];
+const char *pptp_msg_name(u_int16_t msg);

 /* state of the control session */
 enum pptp_ctrlsess_state {

@@ -67,7 +67,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm)
 {
 dtm->install = jiffies_to_clock_t(jiffies - stm->install);
 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse);
-dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse);
+dtm->firstuse = stm->firstuse ?
+jiffies_to_clock_t(jiffies - stm->firstuse) : 0;
 dtm->expires = jiffies_to_clock_t(stm->expires);
 }

@@ -95,7 +95,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,

 static inline void uobj_put_destroy(struct ib_uobject *uobj)
 {
-rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY);
 }

 static inline void uobj_put_read(struct ib_uobject *uobj)

@@ -304,7 +304,7 @@ enum xfrm_attr_type_t {
 XFRMA_PROTO, /* __u8 */
 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
 XFRMA_PAD,
-XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
+XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */
 XFRMA_SET_MARK, /* __u32 */
 XFRMA_SET_MARK_MASK, /* __u32 */
 XFRMA_IF_ID, /* __u32 */

@@ -1518,7 +1518,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 addr))
 return;

-area = find_vmap_area((unsigned long)addr)->vm;
+area = find_vm_area(addr);
 if (unlikely(!area)) {
 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 addr);
@@ -638,8 +638,10 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
 break;

 case SO_BINDTODEVICE:
-if (optlen > IFNAMSIZ)
-optlen = IFNAMSIZ;
+if (optlen > IFNAMSIZ - 1)
+optlen = IFNAMSIZ - 1;
+
+memset(devname, 0, sizeof(devname));

 if (copy_from_user(devname, optval, optlen)) {
 res = -EFAULT;

@@ -34,6 +34,12 @@ static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
 eth->h_proto = eth_hdr(oldskb)->h_proto;
 skb_pull(nskb, ETH_HLEN);
+
+if (skb_vlan_tag_present(oldskb)) {
+u16 vid = skb_vlan_tag_get(oldskb);
+
+__vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
+}
 }

 static int nft_bridge_iphdr_validate(struct sk_buff *skb)

@@ -3540,7 +3540,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
 * supported.
 */
 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
-req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
+CEPH_OSD_FLAG_IGNORE_OVERLAY |
+CEPH_OSD_FLAG_IGNORE_CACHE;
 req->r_tid = 0;
 __submit_request(req, false);
 goto out_unlock_osdc;
@@ -4778,11 +4778,12 @@ static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
 return 0;
 }

-static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
+static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 struct packet_type **ppt_prev)
 {
 struct packet_type *ptype, *pt_prev;
 rx_handler_func_t *rx_handler;
+struct sk_buff *skb = *pskb;
 struct net_device *orig_dev;
 bool deliver_exact = false;
 int ret = NET_RX_DROP;
@@ -4813,8 +4814,10 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
 preempt_enable();

-if (ret2 != XDP_PASS)
-return NET_RX_DROP;
+if (ret2 != XDP_PASS) {
+ret = NET_RX_DROP;
+goto out;
+}
 skb_reset_mac_len(skb);
 }

@@ -4936,6 +4939,13 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
 }

 out:
+/* The invariant here is that if *ppt_prev is not NULL
+ * then skb should also be non-NULL.
+ *
+ * Apparently *ppt_prev assignment above holds this invariant due to
+ * skb dereferencing near it.
+ */
+*pskb = skb;
 return ret;
 }

@@ -4945,7 +4955,7 @@ static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
 struct packet_type *pt_prev = NULL;
 int ret;

-ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
 if (pt_prev)
 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
 return ret;
@@ -5021,7 +5031,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
 struct packet_type *pt_prev = NULL;

 skb_list_del_init(skb);
-__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
+__netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
 if (!pt_prev)
 continue;
 if (pt_curr != pt_prev || od_curr != orig_dev) {
@@ -22,6 +22,7 @@
 #define MTK_HDR_XMIT_TAGGED_TPID_8100 1
 #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0)
 #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0)
+#define MTK_HDR_XMIT_SA_DIS BIT(6)

 static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 struct net_device *dev)
@@ -29,6 +30,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 struct dsa_port *dp = dsa_slave_to_port(dev);
 u8 *mtk_tag;
 bool is_vlan_skb = true;
+unsigned char *dest = eth_hdr(skb)->h_dest;
+bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+!is_broadcast_ether_addr(dest);

 /* Build the special tag after the MAC Source Address. If VLAN header
 * is present, it's required that VLAN header and special tag is
@@ -54,6 +58,10 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 MTK_HDR_XMIT_UNTAGGED;
 mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK;

+/* Disable SA learning for multicast frames */
+if (unlikely(is_multicast_skb))
+mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS;
+
 /* Tag control information is kept for 802.1Q */
 if (!is_vlan_skb) {
 mtk_tag[2] = 0;
@@ -68,6 +76,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
 {
 int port;
 __be16 *phdr, hdr;
+unsigned char *dest = eth_hdr(skb)->h_dest;
+bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+!is_broadcast_ether_addr(dest);

 if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
 return NULL;
@@ -93,6 +104,10 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
 if (!skb->dev)
 return NULL;

+/* Only unicast or broadcast frames are offloaded */
+if (likely(!is_multicast_skb))
+skb->offload_fwd_mark = 1;
+
 return skb;
 }

@@ -28,9 +28,10 @@
 #include <net/addrconf.h>

 #if IS_ENABLED(CONFIG_IPV6)
-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- * only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
+/* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses
+ * if IPv6 only, and any IPv4 addresses
+ * if not IPv6 only
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 * and 0.0.0.0 equals to 0.0.0.0 only
 */
@@ -38,7 +39,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 const struct in6_addr *sk2_rcv_saddr6,
 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
 bool sk1_ipv6only, bool sk2_ipv6only,
-bool match_wildcard)
+bool match_sk1_wildcard,
+bool match_sk2_wildcard)
 {
 int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
@@ -48,8 +50,8 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 if (!sk2_ipv6only) {
 if (sk1_rcv_saddr == sk2_rcv_saddr)
 return true;
-if (!sk1_rcv_saddr || !sk2_rcv_saddr)
-return match_wildcard;
+return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+(match_sk2_wildcard && !sk2_rcv_saddr);
 }
 return false;
 }
@@ -57,11 +59,11 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
 return true;

-if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
+if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
 return true;

-if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
+if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
 !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
 return true;

@@ -73,18 +75,19 @@ static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 }
 #endif

-/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
- * match_wildcard == false: addresses must be exactly the same, i.e.
+/* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
+ * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 * 0.0.0.0 only equals to 0.0.0.0
 */
 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
-bool sk2_ipv6only, bool match_wildcard)
+bool sk2_ipv6only, bool match_sk1_wildcard,
+bool match_sk2_wildcard)
 {
 if (!sk2_ipv6only) {
 if (sk1_rcv_saddr == sk2_rcv_saddr)
 return true;
-if (!sk1_rcv_saddr || !sk2_rcv_saddr)
-return match_wildcard;
+return (match_sk1_wildcard && !sk1_rcv_saddr) ||
+(match_sk2_wildcard && !sk2_rcv_saddr);
 }
 return false;
 }
@@ -100,10 +103,12 @@ bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
 sk2->sk_rcv_saddr,
 ipv6_only_sock(sk),
 ipv6_only_sock(sk2),
+match_wildcard,
 match_wildcard);
 #endif
 return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
-ipv6_only_sock(sk2), match_wildcard);
+ipv6_only_sock(sk2), match_wildcard,
+match_wildcard);
 }
 EXPORT_SYMBOL(inet_rcv_saddr_equal);

@@ -274,10 +279,10 @@ static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
 tb->fast_rcv_saddr,
 sk->sk_rcv_saddr,
 tb->fast_ipv6_only,
-ipv6_only_sock(sk), true);
+ipv6_only_sock(sk), true, false);
 #endif
 return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
-ipv6_only_sock(sk), true);
+ipv6_only_sock(sk), true, false);
 }

 /* Obtain a reference to a local port for the given sock,
@@ -50,32 +50,7 @@ static unsigned int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);

 static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-int encap_type)
-{
-struct ip_tunnel *tunnel;
-const struct iphdr *iph = ip_hdr(skb);
-struct net *net = dev_net(skb->dev);
-struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-
-tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-iph->saddr, iph->daddr, 0);
-if (tunnel) {
-if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
-goto drop;
-
-XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-
-return xfrm_input(skb, nexthdr, spi, encap_type);
-}
-
-return -EINVAL;
-drop:
-kfree_skb(skb);
-return 0;
-}
-
-static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
-int encap_type)
+int encap_type, bool update_skb_dev)
 {
 struct ip_tunnel *tunnel;
 const struct iphdr *iph = ip_hdr(skb);
@@ -90,6 +65,7 @@ static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,

 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

+if (update_skb_dev)
 skb->dev = tunnel->dev;

 return xfrm_input(skb, nexthdr, spi, encap_type);
@@ -101,20 +77,49 @@ static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
 return 0;
 }

-static int vti_rcv(struct sk_buff *skb)
+static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
+int encap_type)
 {
-XFRM_SPI_SKB_CB(skb)->family = AF_INET;
-XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-
-return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+return vti_input(skb, nexthdr, spi, encap_type, false);
 }

-static int vti_rcv_ipip(struct sk_buff *skb)
+static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
 {
 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

-return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
+}
+
+static int vti_rcv_proto(struct sk_buff *skb)
+{
+return vti_rcv(skb, 0, false);
+}
+
+static int vti_rcv_tunnel(struct sk_buff *skb)
+{
+struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id);
+const struct iphdr *iph = ip_hdr(skb);
+struct ip_tunnel *tunnel;
+
+tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+iph->saddr, iph->daddr, 0);
+if (tunnel) {
+struct tnl_ptk_info tpi = {
+.proto = htons(ETH_P_IP),
+};
+
+if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+goto drop;
+if (iptunnel_pull_header(skb, 0, tpi.proto, false))
+goto drop;
+return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false);
+}
+
+return -EINVAL;
+drop:
+kfree_skb(skb);
+return 0;
 }

 static int vti_rcv_cb(struct sk_buff *skb, int err)
@@ -478,31 +483,31 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
 }

 static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
-.handler = vti_rcv,
-.input_handler = vti_input,
+.handler = vti_rcv_proto,
+.input_handler = vti_input_proto,
 .cb_handler = vti_rcv_cb,
 .err_handler = vti4_err,
 .priority = 100,
 };

 static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
-.handler = vti_rcv,
-.input_handler = vti_input,
+.handler = vti_rcv_proto,
+.input_handler = vti_input_proto,
 .cb_handler = vti_rcv_cb,
 .err_handler = vti4_err,
 .priority = 100,
 };

 static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
-.handler = vti_rcv,
-.input_handler = vti_input,
+.handler = vti_rcv_proto,
+.input_handler = vti_input_proto,
 .cb_handler = vti_rcv_cb,
 .err_handler = vti4_err,
 .priority = 100,
 };

 static struct xfrm_tunnel ipip_handler __read_mostly = {
-.handler = vti_rcv_ipip,
+.handler = vti_rcv_tunnel,
 .err_handler = vti4_err,
 .priority = 0,
 };

@@ -704,7 +704,7 @@ static int __init ipip_init(void)

 rtnl_link_failed:
 #if IS_ENABLED(CONFIG_MPLS)
-xfrm4_tunnel_deregister(&mplsip_handler, AF_INET);
+xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS);
 xfrm_tunnel_mplsip_failed:

 #endif

@@ -165,8 +165,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
 break;
 default:
 pr_debug("unknown outbound packet 0x%04x:%s\n", msg,
-msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-pptp_msg_name[0]);
+pptp_msg_name(msg));
 /* fall through */
 case PPTP_SET_LINK_INFO:
 /* only need to NAT in case PAC is behind NAT box */
@@ -267,9 +266,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID);
 break;
 default:
-pr_debug("unknown inbound packet %s\n",
-msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] :
-pptp_msg_name[0]);
+pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg));
 /* fall through */
 case PPTP_START_SESSION_REQUEST:
 case PPTP_START_SESSION_REPLY:
@@ -484,18 +484,16 @@ u32 ip_idents_reserve(u32 hash, int segs)
 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
 u32 old = READ_ONCE(*p_tstamp);
 u32 now = (u32)jiffies;
-u32 new, delta = 0;
+u32 delta = 0;

 if (old != now && cmpxchg(p_tstamp, old, now) == old)
 delta = prandom_u32_max(now - old);

-/* Do not use atomic_add_return() as it makes UBSAN unhappy */
-do {
-old = (u32)atomic_read(p_id);
-new = old + delta + segs;
-} while (atomic_cmpxchg(p_id, old, new) != old);
-
-return new - segs;
+/* If UBSAN reports an error there, please make sure your compiler
+ * supports -fno-strict-overflow before reporting it that was a bug
+ * in UBSAN, and it has been fixed in GCC-8.
+ */
+return atomic_add_return(segs + delta, p_id) - segs;
 }
 EXPORT_SYMBOL(ip_idents_reserve);

@@ -121,9 +121,16 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
         struct ip_esp_hdr *esph;
         struct ipv6hdr *iph = ipv6_hdr(skb);
         struct xfrm_offload *xo = xfrm_offload(skb);
-        int proto = iph->nexthdr;
+        u8 proto = iph->nexthdr;
 
         skb_push(skb, -skb_network_offset(skb));
 
+        if (x->outer_mode->encap == XFRM_MODE_TRANSPORT) {
+                __be16 frag;
+
+                ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag);
+        }
+
         esph = ip_esp_hdr(skb);
         *skb_mac_header(skb) = IPPROTO_ESP;
 
|
|
|
@@ -1088,7 +1088,14 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
         mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
                                target_flags, mpath->dst, mpath->sn, da, 0,
                                ttl, lifetime, 0, ifmsh->preq_id++, sdata);
+
+        spin_lock_bh(&mpath->state_lock);
+        if (mpath->flags & MESH_PATH_DELETED) {
+                spin_unlock_bh(&mpath->state_lock);
+                goto enddiscovery;
+        }
         mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+        spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
         rcu_read_unlock();
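Reading aid for the hunk above (illustrative only): the fix re-checks MESH_PATH_DELETED under mpath->state_lock before mod_timer(), so a concurrent teardown can no longer race with the discovery path and re-arm the timer of a path that is being deleted. A user-space sketch of that check-under-the-teardown-lock pattern; the toy_path type and a pthread mutex are stand-ins, not the mac80211 API:

#include <pthread.h>
#include <stdbool.h>

struct toy_path {
        pthread_mutex_t lock;   /* also taken by the teardown path */
        bool deleted;
        bool timer_armed;
};

/* Re-arm only while holding the same lock the teardown path uses, and bail
 * out if the object has already been marked deleted.
 */
static bool toy_rearm_if_alive(struct toy_path *p)
{
        bool armed = false;

        pthread_mutex_lock(&p->lock);
        if (!p->deleted) {
                p->timer_armed = true;   /* stands in for mod_timer() */
                armed = true;
        }
        pthread_mutex_unlock(&p->lock);
        return armed;
}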
|
|
|
@@ -63,7 +63,7 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
         /* Don't lookup sub-counters at all */
         opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
         if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
-                opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
+                opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
         list_for_each_entry_rcu(e, &map->members, list) {
                 ret = ip_set_test(e->id, skb, par, opt);
                 if (ret <= 0)
|
|
|
@@ -71,24 +71,32 @@ EXPORT_SYMBOL_GPL(nf_nat_pptp_hook_expectfn);
 
 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
 /* PptpControlMessageType names */
-const char *const pptp_msg_name[] = {
-        "UNKNOWN_MESSAGE",
-        "START_SESSION_REQUEST",
-        "START_SESSION_REPLY",
-        "STOP_SESSION_REQUEST",
-        "STOP_SESSION_REPLY",
-        "ECHO_REQUEST",
-        "ECHO_REPLY",
-        "OUT_CALL_REQUEST",
-        "OUT_CALL_REPLY",
-        "IN_CALL_REQUEST",
-        "IN_CALL_REPLY",
-        "IN_CALL_CONNECT",
-        "CALL_CLEAR_REQUEST",
-        "CALL_DISCONNECT_NOTIFY",
-        "WAN_ERROR_NOTIFY",
-        "SET_LINK_INFO"
+static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = {
+        [0] = "UNKNOWN_MESSAGE",
+        [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST",
+        [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY",
+        [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST",
+        [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY",
+        [PPTP_ECHO_REQUEST] = "ECHO_REQUEST",
+        [PPTP_ECHO_REPLY] = "ECHO_REPLY",
+        [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST",
+        [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY",
+        [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST",
+        [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY",
+        [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT",
+        [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST",
+        [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY",
+        [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY",
+        [PPTP_SET_LINK_INFO] = "SET_LINK_INFO"
 };
+
+const char *pptp_msg_name(u_int16_t msg)
+{
+        if (msg > PPTP_MSG_MAX)
+                return pptp_msg_name_array[0];
+
+        return pptp_msg_name_array[msg];
+}
 EXPORT_SYMBOL(pptp_msg_name);
 #endif
 
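For orientation (illustrative aside, not part of the patch): the new pptp_msg_name() helper centralizes the bounds check that every debug call previously had to repeat as `msg <= PPTP_MSG_MAX ? ... : ...`, so an out-of-range control message type can no longer index past the end of the name table, and the designated initializers keep each string pinned to its message constant. A stand-alone sketch of the same idea, with hypothetical names and a shortened table:

#include <stdio.h>

#define MSG_MAX 15   /* stands in for PPTP_MSG_MAX */

static const char *const msg_name_array[MSG_MAX + 1] = {
        [0]  = "UNKNOWN_MESSAGE",
        [1]  = "START_SESSION_REQUEST",
        /* ... remaining entries elided for brevity ... */
        [15] = "SET_LINK_INFO",
};

/* Centralised bounds check: callers never index the table directly with an
 * attacker-controlled message type. The NULL check exists only because this
 * sketch leaves most slots unpopulated.
 */
static const char *msg_name(unsigned int msg)
{
        if (msg > MSG_MAX || !msg_name_array[msg])
                return msg_name_array[0];
        return msg_name_array[msg];
}

int main(void)
{
        printf("%s\n", msg_name(1));      /* START_SESSION_REQUEST */
        printf("%s\n", msg_name(4096));   /* UNKNOWN_MESSAGE, no out-of-bounds read */
        return 0;
}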
|
@@ -275,7 +283,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
         typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
 
         msg = ntohs(ctlh->messageType);
-        pr_debug("inbound control message %s\n", pptp_msg_name[msg]);
+        pr_debug("inbound control message %s\n", pptp_msg_name(msg));
 
         switch (msg) {
         case PPTP_START_SESSION_REPLY:

@@ -310,7 +318,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 pcid = pptpReq->ocack.peersCallID;
                 if (info->pns_call_id != pcid)
                         goto invalid;
-                pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg],
+                pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg),
                          ntohs(cid), ntohs(pcid));
 
                 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) {

@@ -327,7 +335,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                         goto invalid;
 
                 cid = pptpReq->icreq.callID;
-                pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+                pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                 info->cstate = PPTP_CALL_IN_REQ;
                 info->pac_call_id = cid;
                 break;

@@ -346,7 +354,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 if (info->pns_call_id != pcid)
                         goto invalid;
 
-                pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid));
+                pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid));
                 info->cstate = PPTP_CALL_IN_CONF;
 
                 /* we expect a GRE connection from PAC to PNS */

@@ -356,7 +364,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
         case PPTP_CALL_DISCONNECT_NOTIFY:
                 /* server confirms disconnect */
                 cid = pptpReq->disc.callID;
-                pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+                pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                 info->cstate = PPTP_CALL_NONE;
 
                 /* untrack this call id, unexpect GRE packets */

@@ -383,7 +391,7 @@ pptp_inbound_pkt(struct sk_buff *skb, unsigned int protoff,
 invalid:
         pr_debug("invalid %s: type=%d cid=%u pcid=%u "
                  "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-                 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+                 pptp_msg_name(msg),
                  msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
                  ntohs(info->pns_call_id), ntohs(info->pac_call_id));
         return NF_ACCEPT;

@@ -403,7 +411,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
         typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
 
         msg = ntohs(ctlh->messageType);
-        pr_debug("outbound control message %s\n", pptp_msg_name[msg]);
+        pr_debug("outbound control message %s\n", pptp_msg_name(msg));
 
         switch (msg) {
         case PPTP_START_SESSION_REQUEST:

@@ -425,7 +433,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 info->cstate = PPTP_CALL_OUT_REQ;
                 /* track PNS call id */
                 cid = pptpReq->ocreq.callID;
-                pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid));
+                pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid));
                 info->pns_call_id = cid;
                 break;
 

@@ -439,7 +447,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
                 pcid = pptpReq->icack.peersCallID;
                 if (info->pac_call_id != pcid)
                         goto invalid;
-                pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg],
+                pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg),
                          ntohs(cid), ntohs(pcid));
 
                 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) {

@@ -479,7 +487,7 @@ pptp_outbound_pkt(struct sk_buff *skb, unsigned int protoff,
 invalid:
         pr_debug("invalid %s: type=%d cid=%u pcid=%u "
                  "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n",
-                 msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0],
+                 pptp_msg_name(msg),
                  msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate,
                  ntohs(info->pns_call_id), ntohs(info->pac_call_id));
         return NF_ACCEPT;
@@ -106,7 +106,7 @@ nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
         if (help->helper->data_len == 0)
                 return -EINVAL;
 
-        nla_memcpy(help->data, nla_data(attr), sizeof(help->data));
+        nla_memcpy(help->data, attr, sizeof(help->data));
         return 0;
 }
 

@@ -242,6 +242,7 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
                         ret = -ENOMEM;
                         goto err2;
                 }
+                helper->data_len = size;
 
         helper->flags |= NF_CT_HELPER_F_USERSPACE;
         memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
@@ -718,7 +718,7 @@ static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
         }
         mutex_unlock(&qrtr_node_lock);
 
-        qrtr_local_enqueue(node, skb, type, from, to);
+        qrtr_local_enqueue(NULL, skb, type, from, to);
 
         return 0;
 }
@@ -1537,9 +1537,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
                         timeout = asoc->timeouts[cmd->obj.to];
                         BUG_ON(!timeout);
 
-                        timer->expires = jiffies + timeout;
-                        sctp_association_hold(asoc);
-                        add_timer(timer);
+                        /*
+                         * SCTP has a hard time with timer starts. Because we process
+                         * timer starts as side effects, it can be hard to tell if we
+                         * have already started a timer or not, which leads to BUG
+                         * halts when we call add_timer. So here, instead of just starting
+                         * a timer, if the timer is already started, and just mod
+                         * the timer with the shorter of the two expiration times
+                         */
+                        if (!timer_pending(timer))
+                                sctp_association_hold(asoc);
+                        timer_reduce(timer, jiffies + timeout);
                         break;
 
                 case SCTP_CMD_TIMER_RESTART:
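A note on the hunk above (my summary, not from the patch): timer_reduce() arms the timer when it is not pending and otherwise only moves an already-armed timer earlier, which is why the association reference is now taken only under !timer_pending() and why a repeated SCTP_CMD_TIMER_START for the same timer no longer trips the BUG in add_timer(). A user-space sketch of that start-or-shorten rule, with toy types and ignoring jiffies wrap-around:

#include <stdbool.h>

struct toy_timer {
        bool pending;
        unsigned long expires;
};

/* Start the timer if idle; if already armed, only pull the expiry earlier,
 * never push it later.
 */
static void toy_timer_reduce(struct toy_timer *t, unsigned long expires)
{
        if (!t->pending || expires < t->expires) {
                t->expires = expires;
                t->pending = true;
        }
}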
@@ -1871,12 +1871,13 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
         /* Update the content of current association. */
         sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
         sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
-        if (sctp_state(asoc, SHUTDOWN_PENDING) &&
+        if ((sctp_state(asoc, SHUTDOWN_PENDING) ||
+             sctp_state(asoc, SHUTDOWN_SENT)) &&
             (sctp_sstate(asoc->base.sk, CLOSING) ||
              sock_flag(asoc->base.sk, SOCK_DEAD))) {
-                /* if were currently in SHUTDOWN_PENDING, but the socket
-                 * has been closed by user, don't transition to ESTABLISHED.
-                 * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
+                /* If the socket has been closed by user, don't
+                 * transition to ESTABLISHED. Instead trigger SHUTDOWN
+                 * bundled with COOKIE_ACK.
                  */
                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
                 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
@@ -258,8 +258,8 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)
 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 {
         u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+        u64 npgs, addr = mr->addr, size = mr->len;
         unsigned int chunks, chunks_per_page;
-        u64 addr = mr->addr, size = mr->len;
         int err, i;
 
         if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {

@@ -285,6 +285,10 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         if ((addr + size) < addr)
                 return -EINVAL;
 
+        npgs = div_u64(size, PAGE_SIZE);
+        if (npgs > U32_MAX)
+                return -EINVAL;
+
         chunks = (unsigned int)div_u64(size, chunk_size);
         if (chunks == 0)
                 return -EINVAL;

@@ -303,7 +307,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         umem->props.size = size;
         umem->headroom = headroom;
         umem->chunk_size_nohr = chunk_size - headroom;
-        umem->npgs = size / PAGE_SIZE;
+        umem->npgs = (u32)npgs;
         umem->pgs = NULL;
         umem->user = NULL;
         INIT_LIST_HEAD(&umem->xsk_list);
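Side note on the xdp_umem_reg() hunks (not in the patch): the point of computing npgs in a 64-bit variable first is that a large mr->len could make `size / PAGE_SIZE` exceed the 32-bit umem->npgs field and be silently truncated. A minimal sketch of that validation, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

/* Mirrors the added check: compute the page count in 64 bits first and refuse
 * anything that would be truncated when stored in a 32-bit field.
 */
static int toy_validate_umem_len(uint64_t len, uint32_t *npgs_out)
{
        uint64_t npgs = len / TOY_PAGE_SIZE;

        if (npgs > UINT32_MAX)
                return -1;
        *npgs_out = (uint32_t)npgs;
        return 0;
}

int main(void)
{
        uint32_t npgs;

        /* a ~16 EiB request is rejected instead of wrapping to a tiny page count */
        printf("%d\n", toy_validate_umem_len(~0ULL, &npgs));
        return 0;
}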
@@ -407,7 +407,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                         dev_put(skb->dev);
 
                 spin_lock(&x->lock);
-                if (nexthdr <= 0) {
+                if (nexthdr < 0) {
                         if (nexthdr == -EBADMSG) {
                                 xfrm_audit_state_icvfail(x, skb,
                                                          x->type->proto);
@@ -780,7 +780,28 @@ static void __net_exit xfrmi_exit_net(struct net *net)
         rtnl_unlock();
 }
 
+static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
+{
+        struct net *net;
+        LIST_HEAD(list);
+
+        rtnl_lock();
+        list_for_each_entry(net, net_exit_list, exit_list) {
+                struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
+                struct xfrm_if __rcu **xip;
+                struct xfrm_if *xi;
+
+                for (xip = &xfrmn->xfrmi[0];
+                     (xi = rtnl_dereference(*xip)) != NULL;
+                     xip = &xi->next)
+                        unregister_netdevice_queue(xi->dev, &list);
+        }
+        unregister_netdevice_many(&list);
+        rtnl_unlock();
+}
+
 static struct pernet_operations xfrmi_net_ops = {
+        .exit_batch = xfrmi_exit_batch_net,
         .init = xfrmi_init_net,
         .exit = xfrmi_exit_net,
         .id = &xfrmi_net_id,
@@ -235,17 +235,19 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                 xfrm_state_hold(x);
 
                 if (skb_is_gso(skb)) {
-                        skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+                        if (skb->inner_protocol)
+                                return xfrm_output_gso(net, sk, skb);
 
-                        return xfrm_output2(net, sk, skb);
+                        skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
+                        goto out;
                 }
 
                 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
                         goto out;
-        }
-
-        if (skb_is_gso(skb))
-                return xfrm_output_gso(net, sk, skb);
+        } else {
+                if (skb_is_gso(skb))
+                        return xfrm_output_gso(net, sk, skb);
+        }
 
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 err = skb_checksum_help(skb);
@@ -283,7 +285,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
 
         if (skb->protocol == htons(ETH_P_IP))
                 proto = AF_INET;
-        else if (skb->protocol == htons(ETH_P_IPV6))
+        else if (skb->protocol == htons(ETH_P_IPV6) &&
+                 skb->sk->sk_family == AF_INET6)
                 proto = AF_INET6;
         else
                 return;
@@ -730,12 +730,7 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
                                    struct xfrm_policy *pol)
 {
-        u32 mark = policy->mark.v & policy->mark.m;
-
-        if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
-                return true;
-
-        if ((mark & pol->mark.m) == pol->mark.v &&
+        if (policy->mark.v == pol->mark.v &&
             policy->priority == pol->priority)
                 return true;
 
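Aside on the hunk above (my paraphrase of the change): after the fix, two policies are treated as the same entry only when their mark value and priority match exactly; the old masked `(mark & pol->mark.m)` comparison could pair up policies with different marks, which is what triggered the warning in xfrm_policy_insert_list(). A tiny sketch of the tightened predicate, with toy types rather than the kernel structures:

#include <stdbool.h>
#include <stdint.h>

struct toy_policy {
        uint32_t mark_v;     /* mark value */
        uint32_t mark_m;     /* mark mask, no longer consulted for this match */
        uint32_t priority;
};

/* Exact match on mark value and priority, as in the fixed helper. */
static bool toy_mark_match(const struct toy_policy *a, const struct toy_policy *b)
{
        return a->mark_v == b->mark_v && a->priority == b->priority;
}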
Some files were not shown because too many files have changed in this diff.