This is the 4.19.29 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlyJb/EACgkQONu9yGCS
 aT4y0g//b9t9/onhTaXcY/ByPmBAwqNgugi7eYcZqGDBp7aCDOBLF6eOwbhdvvuS
 ZTaZ5eWG3Twz3mZu9vveuskgMci2npDyLPgqBWGzW+Ef5r/xPd40diaI75ZUc68T
 gimWbQ0VANuXKklK6LysBUaVQWE3ilIy6qnnpj0DI3ipNDoE62Ry1LNthuKy+73J
 w6r7uwkb6X/CkXpNB/L4cDdpSy/CvhGQhd6p91lBuE4DfyPqEzslYCokD9aPXp9b
 Fedt/Re+8eULBNcgqPYxkS5pBrbHtqrGf00AMlzC8DkC+GZyDqSP2xjv6AiTfGJd
 uf0/Jvsv2OBnP4aYsbk+uB2z3plzPBgmXxa/1bm+yrGCMvbpi9mMx75HM2joAeVp
 tVN4ZN65kNgJkXCchJTHdQ3s6teOD8Par1czy570HyKBU6l1j3AGArGm+b4WGPWx
 dL+82coojMKxKNdTHfxUXES6QGKp716r3un6mCrKR0xET/SDayzDQMaSM8UOtArK
 ELzNeKzKTc5oBx6i+JfGmY8ZsedpNGCIPpsiuoSYAaon5ZzNbruzOAlDOThs157d
 YezDHZ9XMrx3kN/xYnqZD63x/5egq9REbZGWljeykbNkWcEY74jIkKwNLxqv3P64
 JsLp60owvjzwtzKycjZogNU//GGNTBdb+6pESq4MxJpPTteFWnc=
 =n9iV
 -----END PGP SIGNATURE-----

Merge 4.19.29 into android-4.19

Changes in 4.19.29
	media: uvcvideo: Fix 'type' check leading to overflow
	vti4: Fix a ipip packet processing bug in 'IPCOMP' virtual tunnel
	perf script: Fix crash with printing mixed trace point and other events
	perf core: Fix perf_proc_update_handler() bug
	perf tools: Handle TOPOLOGY headers with no CPU
	perf script: Fix crash when processing recorded stat data
	IB/{hfi1, qib}: Fix WC.byte_len calculation for UD_SEND_WITH_IMM
	iommu/amd: Call free_iova_fast with pfn in map_sg
	iommu/amd: Unmap all mapped pages in error path of map_sg
	riscv: fixup max_low_pfn with PFN_DOWN.
	ipvs: Fix signed integer overflow when setsockopt timeout
	iommu/amd: Fix IOMMU page flush when detach device from a domain
	clk: ti: Fix error handling in ti_clk_parse_divider_data()
	clk: qcom: gcc: Use active only source for CPUSS clocks
	xtensa: SMP: fix ccount_timer_shutdown
	riscv: Adjust mmap base address at a third of task size
	IB/ipoib: Fix for use-after-free in ipoib_cm_tx_start
	selftests: cpu-hotplug: fix case where CPUs offline > CPUs present
	xtensa: SMP: fix secondary CPU initialization
	xtensa: smp_lx200_defconfig: fix vectors clash
	xtensa: SMP: mark each possible CPU as present
	iomap: get/put the page in iomap_page_create/release()
	iomap: fix a use after free in iomap_dio_rw
	xtensa: SMP: limit number of possible CPUs by NR_CPUS
	net: altera_tse: fix msgdma_tx_completion on non-zero fill_level case
	net: hns: Fix for missing of_node_put() after of_parse_phandle()
	net: hns: Restart autoneg need return failed when autoneg off
	net: hns: Fix wrong read accesses via Clause 45 MDIO protocol
	net: stmmac: dwmac-rk: fix error handling in rk_gmac_powerup()
	netfilter: ebtables: compat: un-break 32bit setsockopt when no rules are present
	gpio: vf610: Mask all GPIO interrupts
	selftests: net: use LDLIBS instead of LDFLAGS
	selftests: timers: use LDLIBS instead of LDFLAGS
	nfs: Fix NULL pointer dereference of dev_name
	qed: Fix bug in tx promiscuous mode settings
	qed: Fix LACP pdu drops for VFs
	qed: Fix VF probe failure while FLR
	qed: Fix system crash in ll2 xmit
	qed: Fix stack out of bounds bug
	scsi: libfc: free skb when receiving invalid flogi resp
	scsi: scsi_debug: fix write_same with virtual_gb problem
	scsi: bnx2fc: Fix error handling in probe()
	scsi: 53c700: pass correct "dev" to dma_alloc_attrs()
	platform/x86: Fix unmet dependency warning for ACPI_CMPC
	platform/x86: Fix unmet dependency warning for SAMSUNG_Q10
	net: macb: Apply RXUBR workaround only to versions with errata
	x86/boot/compressed/64: Set EFER.LME=1 in 32-bit trampoline before returning to long mode
	cifs: fix computation for MAX_SMB2_HDR_SIZE
	x86/microcode/amd: Don't falsely trick the late loading mechanism
	arm64: kprobe: Always blacklist the KVM world-switch code
	apparmor: Fix aa_label_build() error handling for failed merges
	x86/kexec: Don't setup EFI info if EFI runtime is not enabled
	proc: fix /proc/net/* after setns(2)
	x86_64: increase stack size for KASAN_EXTRA
	mm, memory_hotplug: is_mem_section_removable do not pass the end of a zone
	mm, memory_hotplug: test_pages_in_a_zone do not pass the end of zone
	lib/test_kmod.c: potential double free in error handling
	fs/drop_caches.c: avoid softlockups in drop_pagecache_sb()
	autofs: drop dentry reference only when it is never used
	autofs: fix error return in autofs_fill_super()
	mm, memory_hotplug: fix off-by-one in is_pageblock_removable
	ARM: OMAP: dts: N950/N9: fix onenand timings
	ARM: dts: omap4-droid4: Fix typo in cpcap IRQ flags
	ARM: dts: sun8i: h3: Add ethernet0 alias to Beelink X2
	arm: dts: meson: Fix IRQ trigger type for macirq
	ARM: dts: meson8b: odroidc1: mark the SD card detection GPIO active-low
	ARM: dts: meson8m2: mxiii-plus: mark the SD card detection GPIO active-low
	ARM: dts: imx6sx: correct backward compatible of gpt
	arm64: dts: renesas: r8a7796: Enable DMA for SCIF2
	arm64: dts: renesas: r8a77965: Enable DMA for SCIF2
	soc: fsl: qbman: avoid race in clearing QMan interrupt
	pinctrl: mcp23s08: spi: Fix regmap allocation for mcp23s18
	wlcore: sdio: Fixup power on/off sequence
	bpftool: Fix prog dump by tag
	bpftool: fix percpu maps updating
	bpf: sock recvbuff must be limited by rmem_max in bpf_setsockopt()
	ARM: pxa: ssp: unneeded to free devm_ allocated data
	arm64: dts: add msm8996 compatible to gicv3
	batman-adv: release station info tidstats
	DTS: CI20: Fix bugs in ci20's device tree.
	usb: phy: fix link errors
	irqchip/gic-v4: Fix occasional VLPI drop
	irqchip/gic-v3-its: Gracefully fail on LPI exhaustion
	irqchip/mmp: Only touch the PJ4 IRQ & FIQ bits on enable/disable
	drm/amdgpu: Add missing power attribute to APU check
	drm/radeon: check if device is root before getting pci speed caps
	drm/amdgpu: Transfer fences to dmabuf importer
	net: stmmac: Fallback to Platform Data clock in Watchdog conversion
	net: stmmac: Send TSO packets always from Queue 0
	net: stmmac: Disable EEE mode earlier in XMIT callback
	irqchip/gic-v3-its: Fix ITT_entry_size accessor
	relay: check return of create_buf_file() properly
	bpf, selftests: fix handling of sparse CPU allocations
	bpf: fix lockdep false positive in percpu_freelist
	bpf: fix potential deadlock in bpf_prog_register
	bpf: Fix syscall's stackmap lookup potential deadlock
	drm/sun4i: tcon: Prepare and enable TCON channel 0 clock at init
	dmaengine: at_xdmac: Fix wrongfull report of a channel as in use
	vsock/virtio: fix kernel panic after device hot-unplug
	vsock/virtio: reset connected sockets on device removal
	dmaengine: dmatest: Abort test in case of mapping error
	selftests: netfilter: fix config fragment CONFIG_NF_TABLES_INET
	selftests: netfilter: add simple masq/redirect test cases
	netfilter: nf_nat: skip nat clash resolution for same-origin entries
	s390/qeth: release cmd buffer in error paths
	s390/qeth: fix use-after-free in error path
	s390/qeth: cancel close_dev work before removing a card
	perf symbols: Filter out hidden symbols from labels
	perf trace: Support multiple "vfs_getname" probes
	MIPS: Remove function size check in get_frame_info()
	Revert "scsi: libfc: Add WARN_ON() when deleting rports"
	i2c: omap: Use noirq system sleep pm ops to idle device for suspend
	drm/amdgpu: use spin_lock_irqsave to protect vm_manager.pasid_idr
	nvme: lock NS list changes while handling command effects
	nvme-pci: fix rapid add remove sequence
	fs: ratelimit __find_get_block_slow() failure message.
	qed: Fix EQ full firmware assert.
	qed: Consider TX tcs while deriving the max num_queues for PF.
	qede: Fix system crash on configuring channels.
	blk-iolatency: fix IO hang due to negative inflight counter
	nvme-pci: add missing unlock for reset error
	Input: wacom_serial4 - add support for Wacom ArtPad II tablet
	Input: elan_i2c - add id for touchpad found in Lenovo s21e-20
	iscsi_ibft: Fix missing break in switch statement
	scsi: aacraid: Fix missing break in switch statement
	x86/PCI: Fixup RTIT_BAR of Intel Denverton Trace Hub
	arm64: dts: zcu100-revC: Give wifi some time after power-on
	arm64: dts: hikey: Give wifi some time after power-on
	arm64: dts: hikey: Revert "Enable HS200 mode on eMMC"
	ARM: dts: exynos: Fix pinctrl definition for eMMC RTSN line on Odroid X2/U3
	ARM: dts: exynos: Add minimal clkout parameters to Exynos3250 PMU
	ARM: dts: exynos: Fix max voltage for buck8 regulator on Odroid XU3/XU4
	drm: disable uncached DMA optimization for ARM and arm64
	netfilter: xt_TEE: fix wrong interface selection
	netfilter: xt_TEE: add missing code to get interface index in checkentry.
	gfs2: Fix missed wakeups in find_insert_glock
	staging: erofs: add error handling for xattr submodule
	staging: erofs: fix fast symlink w/o xattr when fs xattr is on
	staging: erofs: fix memleak of inode's shared xattr array
	staging: erofs: fix race of initializing xattrs of a inode at the same time
	staging: erofs: keep corrupted fs from crashing kernel in erofs_namei()
	cifs: allow calling SMB2_xxx_free(NULL)
	ath9k: Avoid OF no-EEPROM quirks without qca,no-eeprom
	driver core: Postpone DMA tear-down until after devres release
	perf/x86/intel: Make cpuc allocations consistent
	perf/x86/intel: Generalize dynamic constraint creation
	x86: Add TSX Force Abort CPUID/MSR
	perf/x86/intel: Implement support for TSX Force Abort
	Linux 4.19.29

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

commit 2e568c979c
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2019-03-13 14:17:29 -07:00

 155 files changed, 2357 insertions(+), 618 deletions(-)

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 28
SUBLEVEL = 29
EXTRAVERSION =
NAME = "People's Front"

@@ -168,6 +168,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
clock-names = "clkout8";
clocks = <&cmu CLK_FIN_PLL>;
#clock-cells = <1>;
};
mipi_phy: video-phy {

@@ -49,7 +49,7 @@
};
emmc_pwrseq: pwrseq {
pinctrl-0 = <&sd1_cd>;
pinctrl-0 = <&emmc_rstn>;
pinctrl-names = "default";
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
@@ -161,12 +161,6 @@
cpu0-supply = <&buck2_reg>;
};
/* RSTN signal for eMMC */
&sd1_cd {
samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
};
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
@@ -184,6 +178,11 @@
samsung,pins = "gpx3-7";
samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
};
emmc_rstn: emmc-rstn {
samsung,pins = "gpk1-2";
samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
};
};
&ehci {

@@ -334,7 +334,7 @@
buck8_reg: BUCK8 {
regulator-name = "vdd_1.8v_ldo";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1500000>;
regulator-max-microvolt = <2000000>;
regulator-always-on;
regulator-boot-on;
};

@@ -462,7 +462,7 @@
};
gpt: gpt@2098000 {
compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_GPT_BUS>,

@@ -263,7 +263,7 @@
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
reg = <0xc9410000 0x10000
0xc1108108 0x4>;
interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
status = "disabled";
};

@@ -125,7 +125,6 @@
/* Realtek RTL8211F (0x001cc916) */
eth_phy: ethernet-phy@0 {
reg = <0>;
eee-broken-1000t;
interrupt-parent = <&gpio_intc>;
/* GPIOH_3 */
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -172,8 +171,7 @@
cap-sd-highspeed;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;

@@ -206,8 +206,7 @@
cap-sd-highspeed;
disable-wp;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
cd-inverted;
cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};

@@ -105,7 +105,7 @@
interrupts-extended = <
&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
&cpcap 48 1
&cpcap 48 0
>;
interrupt-names =
"id_ground", "id_float", "se0conn", "vbusvld",

@@ -370,6 +370,19 @@
compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
/*
* These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
* bootloader set values when booted with v4.19 using both N950
* and N9 devices (OneNAND Manufacturer: Samsung):
*
* gpmc cs0 before gpmc_cs_program_settings:
* cs0 GPMC_CS_CONFIG1: 0xfd001202
* cs0 GPMC_CS_CONFIG2: 0x00181800
* cs0 GPMC_CS_CONFIG3: 0x00030300
* cs0 GPMC_CS_CONFIG4: 0x18001804
* cs0 GPMC_CS_CONFIG5: 0x03171d1d
* cs0 GPMC_CS_CONFIG6: 0x97080000
*/
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
gpmc,device-width = <2>;
gpmc,mux-add-data = <2>;
gpmc,cs-on-ns = <0>;
gpmc,cs-rd-off-ns = <87>;
gpmc,cs-wr-off-ns = <87>;
gpmc,cs-rd-off-ns = <122>;
gpmc,cs-wr-off-ns = <122>;
gpmc,adv-on-ns = <0>;
gpmc,adv-rd-off-ns = <10>;
gpmc,adv-wr-off-ns = <10>;
gpmc,oe-on-ns = <15>;
gpmc,oe-off-ns = <87>;
gpmc,adv-rd-off-ns = <15>;
gpmc,adv-wr-off-ns = <15>;
gpmc,oe-on-ns = <20>;
gpmc,oe-off-ns = <122>;
gpmc,we-on-ns = <0>;
gpmc,we-off-ns = <87>;
gpmc,rd-cycle-ns = <112>;
gpmc,wr-cycle-ns = <112>;
gpmc,access-ns = <81>;
gpmc,we-off-ns = <122>;
gpmc,rd-cycle-ns = <148>;
gpmc,wr-cycle-ns = <148>;
gpmc,access-ns = <117>;
gpmc,page-burst-access-ns = <15>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
gpmc,clk-activation-ns = <5>;
gpmc,wr-data-mux-bus-ns = <30>;
gpmc,wr-access-ns = <81>;
gpmc,sync-clk-ps = <15000>;
gpmc,clk-activation-ns = <10>;
gpmc,wr-data-mux-bus-ns = <40>;
gpmc,wr-access-ns = <117>;
gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */
/*
* MTD partition table corresponding to Nokia's MeeGo 1.2

@@ -53,7 +53,7 @@
aliases {
serial0 = &uart0;
/* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
ethernet0 = &emac;
ethernet1 = &sdiowifi;
};

@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
iounmap(ssp->mmio_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
kfree(ssp);
return 0;
}

@@ -118,6 +118,7 @@
reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
clocks = <&pmic>;
clock-names = "ext_clock";
post-power-on-delay-ms = <10>;
power-off-delay-us = <10>;
};
@@ -300,7 +301,6 @@
dwmmc_0: dwmmc0@f723d000 {
cap-mmc-highspeed;
mmc-hs200-1_8v;
non-removable;
bus-width = <0x8>;
vmmc-supply = <&ldo19>;

@@ -399,7 +399,7 @@
};
intc: interrupt-controller@9bc0000 {
compatible = "arm,gic-v3";
compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
#redistributor-regions = <1>;

@@ -1161,6 +1161,9 @@
<&cpg CPG_CORE R8A7796_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
<&dmac2 0x13>, <&dmac2 0x12>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";

@@ -951,6 +951,9 @@
<&cpg CPG_CORE R8A77965_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x13>, <&dmac1 0x12>,
<&dmac2 0x13>, <&dmac2 0x12>;
dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";

@@ -101,6 +101,7 @@
sdio_pwrseq: sdio_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
post-power-on-delay-ms = <10>;
};
};

@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
addr < (unsigned long)__entry_text_end) ||
(addr >= (unsigned long)__idmap_text_start &&
addr < (unsigned long)__idmap_text_end) ||
(addr >= (unsigned long)__hyp_text_start &&
addr < (unsigned long)__hyp_text_end) ||
!!search_exception_tables(addr))
return true;
if (!is_kernel_in_hyp_mode()) {
if ((addr >= (unsigned long)__hyp_text_start &&
addr < (unsigned long)__hyp_text_end) ||
(addr >= (unsigned long)__hyp_idmap_text_start &&
if ((addr >= (unsigned long)__hyp_idmap_text_start &&
addr < (unsigned long)__hyp_idmap_text_end))
return true;
}

@@ -76,7 +76,7 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&pins_uart2>;
pinctrl-0 = <&pins_uart3>;
};
&uart4 {
@@ -196,9 +196,9 @@
bias-disable;
};
pins_uart2: uart2 {
function = "uart2";
groups = "uart2-data", "uart2-hwflow";
pins_uart3: uart3 {
function = "uart3";
groups = "uart3-data", "uart3-hwflow";
bias-disable;
};

@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
ip_end = (void *)ip + info->func_size;
for (i = 0; i < max_insns && ip < ip_end; i++) {
for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;

@@ -22,7 +22,7 @@
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP

@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
max_low_pfn = memblock_end_of_DRAM();
max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
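
A note on the unit fix above: memblock_end_of_DRAM() returns a physical
address, while max_low_pfn must hold a page frame number. A minimal sketch
of the conversion, assuming 4 KiB pages (macros simplified from
include/linux/pfn.h; the DRAM end value is hypothetical):

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_PHYS(x) ((unsigned long)(x) << PAGE_SHIFT)

    unsigned long dram_end = 0x88000000UL;          /* physical address */
    unsigned long max_low_pfn = PFN_DOWN(dram_end); /* 0x88000, a PFN */
    /* PFN_PHYS(max_low_pfn) recovers the address, as the ZONE_DMA32
     * hunk below now does explicitly. */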

@@ -29,7 +29,8 @@ static void __init zone_sizes_init(void)
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

@@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
movl %eax, %cr3
3:
/* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
pushl %ecx
movl $MSR_EFER, %ecx
rdmsr
btsl $_EFER_LME, %eax
wrmsr
popl %ecx
/* Enable PAE and LA57 (if required) paging modes */
movl $X86_CR4_PAE, %eax
cmpl $0, %edx
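
For readers who do not speak trampoline assembly: the added sequence is a
plain read-modify-write that sets EFER.LME. A rough C equivalent using the
kernel's MSR helpers would be the following sketch (illustrative only; the
trampoline must open-code it in asm because no C environment exists there):

    u64 efer;

    rdmsrl(MSR_EFER, efer);  /* EFER is MSR 0xC0000080 */
    efer |= EFER_LME;        /* request long mode on the next paging enable */
    wrmsrl(MSR_EFER, efer);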

@@ -6,7 +6,7 @@
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE

@@ -1970,7 +1970,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
*/
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
kfree(cpuc->shared_regs);
intel_cpuc_finish(cpuc);
kfree(cpuc);
}
@@ -1982,14 +1982,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
if (!cpuc)
return ERR_PTR(-ENOMEM);
/* only needed, if we have extra_regs */
if (x86_pmu.extra_regs) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
goto error;
}
cpuc->is_fake = 1;
if (intel_cpuc_prepare(cpuc, cpu))
goto error;
return cpuc;
error:
free_fake_cpuc(cpuc);

@@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int added)
intel_pmu_enable_all(added);
}
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
if (cpuc->tfa_shadow != val) {
cpuc->tfa_shadow = val;
wrmsrl(MSR_TSX_FORCE_ABORT, val);
}
}
static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
/*
* We're going to use PMC3, make sure TFA is set before we touch it.
*/
if (cntr == 3 && !cpuc->is_fake)
intel_set_tfa(cpuc, true);
}
static void intel_tfa_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
/*
* If we find PMC3 is no longer used when we enable the PMU, we can
* clear TFA.
*/
if (!test_bit(3, cpuc->active_mask))
intel_set_tfa(cpuc, false);
intel_pmu_enable_all(added);
}
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -2652,6 +2685,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
raw_spin_unlock(&excl_cntrs->lock);
}
static struct event_constraint *
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
{
WARN_ON_ONCE(!cpuc->constraint_list);
if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
struct event_constraint *cx;
/*
* grab pre-allocated constraint entry
*/
cx = &cpuc->constraint_list[idx];
/*
* initialize dynamic constraint
* with static constraint
*/
*cx = *c;
/*
* mark constraint as dynamic
*/
cx->flags |= PERF_X86_EVENT_DYNAMIC;
c = cx;
}
return c;
}
static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
int idx, struct event_constraint *c)
@@ -2682,27 +2744,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
* only needed when constraint has not yet
* been cloned (marked dynamic)
*/
if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
struct event_constraint *cx;
/*
* grab pre-allocated constraint entry
*/
cx = &cpuc->constraint_list[idx];
/*
* initialize dynamic constraint
* with static constraint
*/
*cx = *c;
/*
* mark constraint as dynamic, so we
* can free it later on
*/
cx->flags |= PERF_X86_EVENT_DYNAMIC;
c = cx;
}
c = dyn_constraint(cpuc, c, idx);
/*
* From here on, the constraint is dynamic.
@@ -3229,6 +3271,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
static bool allow_tsx_force_abort = true;
static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
/*
* Without TFA we must not use PMC3.
*/
if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
c = dyn_constraint(cpuc, c, idx);
c->idxmsk64 &= ~(1ULL << 3);
c->weight--;
}
return c;
}
/*
* Broadwell:
*
@@ -3282,7 +3344,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
return x86_event_sysfs_show(page, config, event);
}
struct intel_shared_regs *allocate_shared_regs(int cpu)
static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
int i;
@@ -3314,23 +3376,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
return c;
}
static int intel_pmu_cpu_prepare(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
goto err;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->constraint_list)
goto err_shared_regs;
}
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
if (!cpuc->excl_cntrs)
goto err_constraint_list;
@@ -3352,6 +3415,11 @@ static int intel_pmu_cpu_prepare(int cpu)
return -ENOMEM;
}
static int intel_pmu_cpu_prepare(int cpu)
{
return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
}
static void flip_smm_bit(void *data)
{
unsigned long set = *(unsigned long *)data;
@@ -3423,9 +3491,8 @@ static void intel_pmu_cpu_starting(int cpu)
}
}
static void free_excl_cntrs(int cpu)
static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_excl_cntrs *c;
c = cpuc->excl_cntrs;
@@ -3433,9 +3500,10 @@ static void free_excl_cntrs(int cpu)
if (c->core_id == -1 || --c->refcnt == 0)
kfree(c);
cpuc->excl_cntrs = NULL;
kfree(cpuc->constraint_list);
cpuc->constraint_list = NULL;
}
kfree(cpuc->constraint_list);
cpuc->constraint_list = NULL;
}
static void intel_pmu_cpu_dying(int cpu)
@@ -3443,9 +3511,8 @@ static void intel_pmu_cpu_dying(int cpu)
fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_cpu_dead(int cpu)
void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
pc = cpuc->shared_regs;
@@ -3455,7 +3522,12 @@ static void intel_pmu_cpu_dead(int cpu)
cpuc->shared_regs = NULL;
}
free_excl_cntrs(cpu);
free_excl_cntrs(cpuc);
}
static void intel_pmu_cpu_dead(int cpu)
{
intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3917,8 +3989,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
NULL
};
DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
NULL,
};
@@ -4374,6 +4449,15 @@ __init int intel_pmu_init(void)
x86_pmu.cpu_events = get_hsw_events_attrs();
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
x86_pmu.flags |= PMU_FL_TFA;
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
}
pr_cont("Skylake events, ");
name = "skylake";
break;
@@ -4515,7 +4599,7 @@ static __init int fixup_ht_bug(void)
hardlockup_detector_perf_restart();
for_each_online_cpu(c)
free_excl_cntrs(c);
free_excl_cntrs(&per_cpu(cpu_hw_events, c));
cpus_read_unlock();
pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");

@@ -242,6 +242,11 @@ struct cpu_hw_events {
struct intel_excl_cntrs *excl_cntrs;
int excl_thread_id; /* 0 or 1 */
/*
* SKL TSX_FORCE_ABORT shadow
*/
u64 tfa_shadow;
/*
* AMD specific bits
*/
@@ -679,6 +684,7 @@ do { \
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -887,7 +893,8 @@ struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event);
struct intel_shared_regs *allocate_shared_regs(int cpu);
extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
int intel_pmu_init(void);
@@ -1023,9 +1030,13 @@ static inline int intel_pmu_init(void)
return 0;
}
static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
{
return 0;
}
static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
{
return NULL;
}
static inline int is_ht_workaround_enabled(void)

@@ -340,6 +340,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */

@@ -629,6 +629,12 @@
#define MSR_IA32_TSC_DEADLINE 0x000006E0
#define MSR_TSX_FORCE_ABORT 0x0000010F
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181

@@ -7,7 +7,11 @@
#endif
#ifdef CONFIG_KASAN
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_STACK_ORDER 2
#else
#define KASAN_STACK_ORDER 1
#endif
#else
#define KASAN_STACK_ORDER 0
#endif
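
The order above feeds THREAD_SIZE_ORDER, so each step doubles the kernel
stack. A small self-contained sketch of the resulting sizes, assuming 4 KiB
pages and the x86_64 "2 + KASAN_STACK_ORDER" formula from
arch/x86/include/asm/page_64_types.h:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long thread_size(int kasan_stack_order)
    {
        return PAGE_SIZE << (2 + kasan_stack_order);
    }

    int main(void)
    {
        printf("plain:       %lu KiB\n", thread_size(0) / 1024); /* 16 */
        printf("KASAN:       %lu KiB\n", thread_size(1) / 1024); /* 32 */
        printf("KASAN_EXTRA: %lu KiB\n", thread_size(2) / 1024); /* 64 */
        return 0;
    }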

@@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
if (!p) {
return ret;
} else {
if (boot_cpu_data.microcode == p->patch_id)
if (boot_cpu_data.microcode >= p->patch_id)
return ret;
ret = UCODE_NEW;

@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
if (!current_ei->efi_memmap_size)
return 0;

@@ -641,6 +641,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
static void quirk_intel_th_dnv(struct pci_dev *dev)
{
struct resource *r = &dev->resource[4];
/*
* Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
* appears to be 4 MB in reality.
*/
if (r->end == r->start + 0x7ff) {
r->start = 0;
r->end = 0x3fffff;
r->flags |= IORESOURCE_UNSET;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)

@@ -33,6 +33,7 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"

@@ -280,12 +280,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
memw
1:
memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@@ -321,11 +322,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b

@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
for (i = 0; i < max_cpus; ++i)
for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
if (ncpus > NR_CPUS) {
ncpus = NR_CPUS;
pr_info("%s: limiting core count by %d\n", __func__, ncpus);
}
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
cpu_start_id = cpu;
system_flush_invalidate_dcache_range(
(unsigned long)&cpu_start_id, sizeof(cpu_start_id));
WRITE_ONCE(cpu_start_id, cpu);
/* Pairs with the third memw in the cpu_restart */
mb();
system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
cpu_start_ccount = ccount;
WRITE_ONCE(cpu_start_ccount, ccount);
while (time_before(jiffies, timeout)) {
do {
/*
* Pairs with the first two memws in the
* .Lboot_secondary.
*/
mb();
if (!cpu_start_ccount)
break;
}
ccount = READ_ONCE(cpu_start_ccount);
} while (ccount && time_before(jiffies, timeout));
if (cpu_start_ccount) {
if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
(void *)cpu, 1);
cpu_start_ccount = 0;
(void *)cpu, 1);
WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
sizeof(cpu_start_id));
if (cpu_start_id == -cpu) {
sizeof(cpu_start_id));
/* Pairs with the second memw in the cpu_restart */
mb();
if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}
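
The memw barriers plus READ_ONCE/WRITE_ONCE pairs above implement a
publish/acknowledge handshake on cpu_start_ccount. A rough C11 analog of
just the handshake, as a sketch (the kernel uses full mb() because it must
also order the dcache flush/invalidate calls):

    #include <stdatomic.h>

    static _Atomic unsigned long start_ccount; /* plays cpu_start_ccount */

    /* Boot CPU: publish a nonzero ccount, then wait for the ack (zero). */
    void publish(unsigned long ccount)
    {
        atomic_store_explicit(&start_ccount, ccount, memory_order_release);
        while (atomic_load_explicit(&start_ccount, memory_order_acquire))
            ; /* the real loop is bounded by a jiffies timeout */
    }

    /* Secondary CPU: wait for a value, consume it, acknowledge with 0. */
    unsigned long consume(void)
    {
        unsigned long v;

        while (!(v = atomic_load_explicit(&start_ccount, memory_order_acquire)))
            ;
        atomic_store_explicit(&start_ccount, 0, memory_order_release);
        return v;
    }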

@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
disable_irq(evt->irq);
disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;

@@ -72,6 +72,7 @@
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
@@ -568,6 +569,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
if (!enabled)
return;
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -577,7 +581,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
rqw = &iolat->rq_wait;
atomic_dec(&rqw->inflight);
if (!enabled || iolat->min_lat_nsec == 0)
if (iolat->min_lat_nsec == 0)
goto next;
iolatency_record_time(iolat, &bio->bi_issue, now,
issue_as_root);
@@ -721,10 +725,13 @@ int blk_iolatency_init(struct request_queue *q)
return 0;
}
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
/*
* return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
* return 0.
*/
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -733,9 +740,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
atomic_inc(&blkiolat->enabled);
return 1;
if (oldval && !val)
atomic_dec(&blkiolat->enabled);
return -1;
return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -768,6 +776,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
u64 lat_val = 0;
u64 oldval;
int ret;
int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@@ -803,7 +812,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
iolatency_set_min_lat_nsec(blkg, lat_val);
enable = iolatency_set_min_lat_nsec(blkg, lat_val);
if (enable) {
WARN_ON_ONCE(!blk_get_queue(blkg->q));
blkg_get(blkg);
}
if (oldval != iolat->min_lat_nsec) {
iolatency_clear_scaling(blkg);
}
@@ -811,6 +825,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
ret = 0;
out:
blkg_conf_finish(&ctx);
if (ret == 0 && enable) {
struct iolatency_grp *tmp = blkg_to_lat(blkg);
struct blk_iolatency *blkiolat = tmp->blkiolat;
blk_mq_freeze_queue(blkg->q);
if (enable == 1)
atomic_inc(&blkiolat->enabled);
else if (enable == -1)
atomic_dec(&blkiolat->enabled);
else
WARN_ON_ONCE(1);
blk_mq_unfreeze_queue(blkg->q);
blkg_put(blkg);
blk_put_queue(blkg->q);
}
return ret ?: nbytes;
}
@@ -910,8 +942,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct blk_iolatency *blkiolat = iolat->blkiolat;
int ret;
iolatency_set_min_lat_nsec(blkg, 0);
ret = iolatency_set_min_lat_nsec(blkg, 0);
if (ret == 1)
atomic_inc(&blkiolat->enabled);
if (ret == -1)
atomic_dec(&blkiolat->enabled);
iolatency_clear_scaling(blkg);
}

@@ -963,9 +963,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv->remove(dev);
device_links_driver_cleanup(dev);
dma_deconfigure(dev);
devres_release_all(dev);
dma_deconfigure(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)

@@ -131,8 +131,8 @@ static const char * const gcc_parent_names_6[] = {
"core_bi_pll_test_se",
};
static const char * const gcc_parent_names_7[] = {
"bi_tcxo",
static const char * const gcc_parent_names_7_ao[] = {
"bi_tcxo_ao",
"gpll0",
"gpll0_out_even",
"core_bi_pll_test_se",
@@ -144,6 +144,12 @@ static const char * const gcc_parent_names_8[] = {
"core_bi_pll_test_se",
};
static const char * const gcc_parent_names_8_ao[] = {
"bi_tcxo_ao",
"gpll0",
"core_bi_pll_test_se",
};
static const struct parent_map gcc_parent_map_10[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -226,7 +232,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
.parent_names = gcc_parent_names_7,
.parent_names = gcc_parent_names_7_ao,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -245,7 +251,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_rbcpr_clk_src",
.parent_names = gcc_parent_names_8,
.parent_names = gcc_parent_names_8_ao,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},

@@ -367,8 +367,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
if (!tmp)
if (!tmp) {
*table = ERR_PTR(-ENOMEM);
return -ENOMEM;
}
valid_div = 0;
*width = 0;
@@ -403,6 +405,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
{
struct clk_omap_divider *div;
struct clk_omap_reg *reg;
int ret;
if (!setup)
return NULL;
@@ -422,6 +425,12 @@
div->flags |= CLK_DIVIDER_POWER_OF_TWO;
div->table = _get_div_table_from_setup(setup, &div->width);
if (IS_ERR(div->table)) {
ret = PTR_ERR(div->table);
kfree(div);
return ERR_PTR(ret);
}
div->shift = setup->bit_shift;
div->latch = -EINVAL;

@@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc;
u32 error_mask;
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
__func__, atchan->status);
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
} else if ((atchan->status & AT_XDMAC_CIS_LIS)
|| (atchan->status & error_mask)) {
} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
|| (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
if (atchan->status & AT_XDMAC_CIS_RBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
if (atchan->status & AT_XDMAC_CIS_WBEIS)
if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
if (atchan->status & AT_XDMAC_CIS_ROIS)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock);
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
atchan->status = chan_status & chan_imr;
atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);
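
The rename above is not cosmetic: the interrupt handler now latches the
masked cause register into a field that only the tasklet consumes, instead
of overwriting the long-lived status bitmap that also carries channel-state
bits. In miniature, the top-half/bottom-half split looks like this (struct
and helper names here are placeholders, not the driver's):

    static irqreturn_t chan_irq(int irq, void *dev_id)
    {
        struct chan *c = dev_id;

        /* snapshot cause & mask exactly once, in hard-IRQ context */
        c->irq_status = read_cause(c) & read_mask(c);
        tasklet_schedule(&c->tasklet);
        return IRQ_HANDLED;
    }

    static void chan_tasklet(unsigned long data)
    {
        struct chan *c = (struct chan *)data;

        /* consume the snapshot; c->state (cyclic/paused bits) is untouched */
        if (c->irq_status & ERROR_BITS)
            report_error(c);
    }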

@@ -642,11 +642,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->to_cnt++;
}
@@ -661,11 +659,9 @@
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
um->bidi_cnt++;
}
@@ -693,12 +689,10 @@
}
if (!tx) {
dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
done->done = false;
@@ -707,12 +701,10 @@
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
failed_tests++;
continue;
goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@@ -725,16 +717,14 @@
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
failed_tests++;
continue;
goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
dmaengine_unmap_put(um);
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
failed_tests++;
continue;
goto error_unmap_continue;
}
dmaengine_unmap_put(um);
@@ -779,6 +769,12 @@
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
continue;
error_unmap_continue:
dmaengine_unmap_put(um);
failed_tests++;
}
ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime);
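
Every failure branch above now funnels through the single
error_unmap_continue label, so dmaengine_unmap_put() and the failed_tests
accounting can no longer be missed on a new error path. The idiom, reduced
to a skeleton with placeholder helpers:

    for (i = 0; i < iterations; i++) {
        res = acquire(i);

        if (try_map(res))
            goto error_cleanup_continue;
        if (try_submit(res))
            goto error_cleanup_continue;

        release(res);           /* success path */
        continue;

    error_cleanup_continue:
        release(res);           /* one cleanup point for all failures */
        failed++;
    }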

@@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_CHAP_TYPE:
rc = S_IRUGO;
break;
case ISCSI_BOOT_TGT_NAME:
if (tgt->tgt_name_len)
rc = S_IRUGO;

@@ -259,6 +259,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -298,6 +299,10 @@
if (ret < 0)
return ret;
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);

@@ -1443,7 +1443,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;

@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
static const struct dma_buf_ops amdgpu_dmabuf_ops;
@@ -188,6 +189,48 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
return ERR_PTR(ret);
}
static int
__reservation_object_make_exclusive(struct reservation_object *obj)
{
struct dma_fence **fences;
unsigned int count;
int r;
if (!reservation_object_get_list(obj)) /* no shared fences to convert */
return 0;
r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
if (r)
return r;
if (count == 0) {
/* Now that was unexpected. */
} else if (count == 1) {
reservation_object_add_excl_fence(obj, fences[0]);
dma_fence_put(fences[0]);
kfree(fences);
} else {
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences,
dma_fence_context_alloc(1), 0,
false);
if (!array)
goto err_fences_put;
reservation_object_add_excl_fence(obj, &array->base);
dma_fence_put(&array->base);
}
return 0;
err_fences_put:
while (count--)
dma_fence_put(fences[count]);
kfree(fences);
return -ENOMEM;
}
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: shared DMA buffer
@@ -219,16 +262,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
if (attach->dev->driver != adev->dev->driver) {
/*
* Wait for all shared fences to complete before we switch to future
* use of exclusive fence on this prime shared bo.
* We only create shared fences for internal use, but importers
* of the dmabuf rely on exclusive fences for implicitly
* tracking write hazards. As any of the current fences may
* correspond to a write, we need to convert all existing
* fences on the reservation object into a single exclusive
* fence.
*/
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false,
MAX_SCHEDULE_TIMEOUT);
if (unlikely(r < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
r = __reservation_object_make_exclusive(bo->tbo.resv);
if (r)
goto error_unreserve;
}
}
/* pin buffer into GTT */

@@ -3011,14 +3011,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
unsigned long flags;
spin_lock(&adev->vm_manager.pasid_lock);
spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
spin_unlock(&adev->vm_manager.pasid_lock);
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**

@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
enum pci_bus_speed speed_cap;
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->pm.dpm.priv = pi;
speed_cap = pcie_get_speed_cap(root);
if (!pci_is_root_bus(rdev->pdev->bus))
speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
} else {

@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
enum pci_bus_speed speed_cap;
enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
speed_cap = pcie_get_speed_cap(root);
if (!pci_is_root_bus(rdev->pdev->bus))
speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
} else {

@@ -672,6 +672,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
return PTR_ERR(tcon->sclk0);
}
}
clk_prepare_enable(tcon->sclk0);
if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -686,6 +687,7 @@
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
clk_disable_unprepare(tcon->sclk0);
clk_disable_unprepare(tcon->clk);
}

@@ -1498,8 +1498,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static int omap_i2c_runtime_suspend(struct device *dev)
static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1525,7 +1524,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
return 0;
}
static int omap_i2c_runtime_resume(struct device *dev)
static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1540,20 +1539,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops omap_i2c_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
omap_i2c_runtime_resume, NULL)
};
#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
#else
#define OMAP_I2C_PM_OPS NULL
#endif /* CONFIG_PM */
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
.pm = OMAP_I2C_PM_OPS,
.pm = &omap_i2c_pm_ops,
.of_match_table = of_match_ptr(omap_i2c_of_match),
},
};

@@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;

@@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;

@@ -248,7 +248,6 @@ struct ipoib_cm_tx {
struct list_head list;
struct net_device *dev;
struct ipoib_neigh *neigh;
struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
unsigned int tx_head;
unsigned int tx_tail;

@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
neigh->cm = tx;
tx->neigh = neigh;
tx->path = path;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);

@@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
{ "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },

@@ -187,6 +187,7 @@ enum {
MODEL_DIGITIZER_II = 0x5544, /* UD */
MODEL_GRAPHIRE = 0x4554, /* ET */
MODEL_PENPARTNER = 0x4354, /* CT */
MODEL_ARTPAD_II = 0x4B54, /* KT */
};
static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
break;
case MODEL_ARTPAD_II:
case MODEL_DIGITIZER_II:
wacom->dev->name = "Wacom Digitizer II";
wacom->dev->id.version = MODEL_DIGITIZER_II;

@@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
static void do_detach(struct iommu_dev_data *dev_data)
{
struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
/* decrease reference counters */
dev_data->domain->dev_iommu[iommu->index] -= 1;
dev_data->domain->dev_cnt -= 1;
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
/* Flush IOTLB */
domain_flush_tlb_pde(domain);
/* Wait for the flushes to finish */
domain_flush_complete(domain);
/* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1;
domain->dev_cnt -= 1;
}
/*
@@ -2555,13 +2562,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
if (--mapped_pages)
if (--mapped_pages == 0)
goto out_free_iova;
}
}
out_free_iova:
free_iova_fast(&dma_dom->iovad, address, npages);
free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
out_err:
return 0;
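
The free_iova_fast() change above fixes a unit mismatch: alloc_iova_fast()
and free_iova_fast() both work in page frame numbers, while `address` here
is a DMA address. A sketch of the correct pairing (signatures as in 4.19's
include/linux/iova.h; the surrounding variables are hypothetical):

    unsigned long pfn = alloc_iova_fast(&dma_dom->iovad, npages,
                                        dma_mask >> PAGE_SHIFT, true);
    dma_addr_t address = (dma_addr_t)pfn << PAGE_SHIFT;

    /* ... map npages and use the mapping ... */

    free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); /* PFN */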

@@ -1581,6 +1581,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
nr_irqs /= 2;
} while (nr_irqs > 0);
if (!nr_irqs)
err = -ENOSPC;
if (err)
goto out;
@@ -1951,6 +1954,29 @@ static void its_free_pending_table(struct page *pt)
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}
static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
{
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
do {
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
if (!clean) {
count--;
cpu_relax();
udelay(1);
}
} while (!clean && count);
return val;
}
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2024,6 +2050,30 @@ static void its_cpu_init_lpis(void)
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
if (gic_rdists->has_vlpis) {
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
/*
* It's possible for CPU to receive VLPIs before it is
* scheduled as a vPE, especially for the first CPU, and the
* VLPI with INTID larger than 2^(IDbits+1) will be considered
* as out of range and dropped by GIC.
* So we initialize IDbits to known value to avoid VLPI drop.
*/
val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
smp_processor_id(), val);
gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
/*
* Also clear Valid bit of GICR_VPENDBASER, in case some
* ancient programming gets left in and has possibility of
* corrupting memory.
*/
val = its_clear_vpend_valid(vlpi_base);
WARN_ON(val & GICR_VPENDBASER_Dirty);
}
/* Make sure the GIC has seen the above */
dsb(sy);
}
@@ -2644,26 +2694,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
static void its_vpe_deschedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
u32 count = 1000000; /* 1s! */
bool clean;
u64 val;
/* We're being scheduled out */
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
val &= ~GICR_VPENDBASER_Valid;
gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
val = its_clear_vpend_valid(vlpi_base);
do {
val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
clean = !(val & GICR_VPENDBASER_Dirty);
if (!clean) {
count--;
cpu_relax();
udelay(1);
}
} while (!clean && count);
if (unlikely(!clean && !count)) {
if (unlikely(val & GICR_VPENDBASER_Dirty)) {
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
vpe->idai = false;
vpe->pending_last = true;

@@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f
#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
struct icu_chip_data {
int nr_irqs;
unsigned int virq_base;
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
static const struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
.conf_mask = 0x7f,
.conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)

@@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
/* Make sure the terminal type MSB is not null, otherwise it
* could be confused with a unit.
/*
* Reject invalid terminal types that would cause issues:
*
* - The high byte must be non-zero, otherwise it would be
* confused with a unit.
*
* - Bit 15 must be 0, as we use it internally as a terminal
* direction flag.
*
* Other unknown types are accepted.
*/
type = get_unaligned_le16(&buffer[4]);
if ((type & 0xff00) == 0) {
if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL %d has invalid "
"type 0x%04x, skipping\n", udev->devnum,

@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
& 0xffff;
if (inuse) { /* Tx FIFO is not empty */
ready = priv->tx_prod - priv->tx_cons - inuse - 1;
ready = max_t(int,
priv->tx_prod - priv->tx_cons - inuse - 1, 0);
} else {
/* Check for buffered last packet */
status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
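
Why the clamp matters: the fill-level arithmetic is done on unsigned 32-bit
counters, so a snapshot in which inuse has already advanced past
tx_prod - tx_cons wraps to a huge bogus "ready" count instead of going
negative. Hypothetical values illustrating the difference (a sketch, not
driver state guaranteed to occur in this exact form):

    u32 tx_prod = 10, tx_cons = 8, inuse = 2;

    u32 ready_old = tx_prod - tx_cons - inuse - 1;                /* 0xffffffff */
    int ready_new = max_t(int, tx_prod - tx_cons - inuse - 1, 0); /* 0 */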

@@ -643,6 +643,7 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1214,6 +1215,8 @@ struct macb {
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
u32 rx_intr_mask;
};
#ifdef CONFIG_MACB_USE_HWSTAMP


@@ -56,8 +56,7 @@
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
| MACB_BIT(ISR_ROVR))
#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
| MACB_BIT(ISR_RLE) \
| MACB_BIT(TXERR))
@@ -1271,7 +1270,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
queue_writel(queue, IER, MACB_RX_INT_FLAGS);
queue_writel(queue, IER, bp->rx_intr_mask);
}
}
@@ -1289,7 +1288,7 @@ static void macb_hresp_error_task(unsigned long data)
u32 ctrl;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
queue_writel(queue, IDR, bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1319,7 +1318,7 @@ static void macb_hresp_error_task(unsigned long data)
/* Enable interrupts */
queue_writel(queue, IER,
MACB_RX_INT_FLAGS |
bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1373,14 +1372,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned int)(queue - bp->queues),
(unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
if (status & bp->rx_intr_mask) {
/* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
* now.
*/
queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
@@ -1413,8 +1412,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
/* There is a hardware issue under heavy load where DMA can
* stop, this causes endless "used buffer descriptor read"
* interrupts but it can be cleared by re-enabling RX. See
* the at91 manual, section 41.3.1 or the Zynq manual
* section 16.7.4 for details.
* the at91rm9200 manual, section 41.3.1 or the Zynq manual
* section 16.7.4 for details. RXUBR is only enabled for
* these two versions.
*/
if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
@@ -2264,7 +2264,7 @@ static void macb_init_hw(struct macb *bp)
/* Enable interrupts */
queue_writel(queue, IER,
MACB_RX_INT_FLAGS |
bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -3912,6 +3912,7 @@ static const struct macb_config sama5d4_config = {
};
static const struct macb_config emac_config = {
.caps = MACB_CAPS_NEEDS_RSTONUBR,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
};
@@ -3933,7 +3934,8 @@ static const struct macb_config zynqmp_config = {
};
static const struct macb_config zynq_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
MACB_CAPS_NEEDS_RSTONUBR,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4088,6 +4090,10 @@ static int macb_probe(struct platform_device *pdev)
macb_dma_desc_get_size(bp);
}
bp->rx_intr_mask = MACB_RX_INT_FLAGS;
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
mac = of_get_mac_address(np);
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
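Note: the macb hunks all serve one change: RXUBR is dropped from the static MACB_RX_INT_FLAGS, every enable/disable site switches to the per-device bp->rx_intr_mask, and probe() ORs RXUBR back in only for configs that declare MACB_CAPS_NEEDS_RSTONUBR (the at91 emac and zynq entries above), i.e. the revisions that actually have the errata.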


@@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
out_notify_fail:
(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
/* safe for ACPI FW */
of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
return ret;
}
@@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
set_bit(NIC_STATE_REMOVING, &priv->state);
(void)cancel_work_sync(&priv->service_task);
/* safe for ACPI FW */
of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
return 0;
}
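Note: to_of_node() yields NULL for an ACPI-backed fwnode and of_node_put() tolerates NULL, which is what the "safe for ACPI FW" comments are pointing out; on DT systems this drops the reference taken by of_parse_phandle() during probe.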


@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
*/
static int hns_nic_nway_reset(struct net_device *netdev)
{
int ret = 0;
struct phy_device *phy = netdev->phydev;
if (netif_running(netdev)) {
/* if autoneg is disabled, don't restart auto-negotiation */
if (phy && phy->autoneg == AUTONEG_ENABLE)
ret = genphy_restart_aneg(phy);
}
if (!netif_running(netdev))
return 0;
return ret;
if (!phy)
return -EOPNOTSUPP;
if (phy->autoneg != AUTONEG_ENABLE)
return -EINVAL;
return genphy_restart_aneg(phy);
}
static u32


@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
hns_mdio_cmd_write(mdio_dev, is_c45,
MDIO_C45_WRITE_ADDR, phy_id, devad);
MDIO_C45_READ, phy_id, devad);
}
/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start == 0 */


@@ -473,19 +473,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
u32 pq_flags)
unsigned long pq_flags)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
if (bitmap_weight((unsigned long *)&pq_flags,
if (bitmap_weight(&pq_flags,
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
goto err;
}
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
goto err;
}
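Note: this is the "stack out of bounds" fix from the changelog: bitmap_weight() operates on unsigned long words, so casting the address of a 4-byte u32 made it read 8 bytes from the stack on 64-bit targets. Typing pq_flags as unsigned long sizes the storage to what the bitmap helpers expect.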


@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
!!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
!!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & QED_ACCEPT_BCAST));
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
return rc;
}
if (p_params->update_ctl_frame_check) {
p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
}
/* Update mcast bins for VFs, PF doesn't use this functionality */
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 num_queues = 0;
/* Since the feature controls only queue-zones,
* make sure we have the contexts [rx, tx, xdp] to
* make sure we have the contexts [rx, xdp, tcs] to
* match.
*/
for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 cids;
cids = hwfn->pf_params.eth_pf_params.num_cons;
num_queues += min_t(u16, l2_queues, cids / 3);
cids /= (2 + info->num_tc);
num_queues += min_t(u16, l2_queues, cids);
}
/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
QED_ACCEPT_MCAST_UNMATCHED;
accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
QED_ACCEPT_MCAST_UNMATCHED;
} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
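Note: full promiscuous mode now also accepts unmatched unicast on the TX side, mirroring the RX flags just above; previously only unmatched multicast was accepted on TX, which is the tx promiscuous settings bug named in the changelog.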


@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
u8 update_ctl_frame_check;
u8 mac_chk_en;
u8 ethtype_chk_en;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,


@@ -2430,19 +2430,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
{
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
u8 flags = 0, nr_frags;
int rc = -EINVAL, i;
dma_addr_t mapping;
u16 vlan = 0;
u8 flags = 0;
if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
return -EINVAL;
}
if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
/* Cache the number of fragments from the SKB, since the SKB may be
* freed by the completion routine after calling qed_ll2_prepare_tx_packet()
*/
nr_frags = skb_shinfo(skb)->nr_frags;
if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1 + skb_shinfo(skb)->nr_frags);
1 + nr_frags);
return -EINVAL;
}
@@ -2464,7 +2469,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
}
memset(&pkt, 0, sizeof(pkt));
pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
pkt.num_of_bds = 1 + nr_frags;
pkt.vlan = vlan;
pkt.bd_flags = flags;
pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2475,12 +2480,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
pkt.remove_stag = true;
/* qed_ll2_prepare_tx_packet() may actually send the packet if
* there are no fragments in the skb and subsequently the completion
* routine may run and free the SKB, so do not dereference the SKB
* beyond this point unless it has fragments.
*/
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
if (rc)
goto err;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,


@@ -380,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
* @param p_hwfn
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
/**
* @file


@@ -402,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
/* Attempt to post pending requests */
spin_lock_bh(&p_hwfn->p_spq->lock);
rc = qed_spq_pend_post(p_hwfn);
spin_unlock_bh(&p_hwfn->p_spq->lock);
return rc;
}
@@ -745,7 +750,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
return 0;
}
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -883,7 +888,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_spq_entry *tmp;
struct qed_spq_entry *found = NULL;
int rc;
if (!p_hwfn)
return -EINVAL;
@@ -941,12 +945,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
*/
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
spin_lock_bh(&p_spq->lock);
rc = qed_spq_pend_post(p_hwfn);
spin_unlock_bh(&p_spq->lock);
return rc;
return 0;
}
int qed_consq_alloc(struct qed_hwfn *p_hwfn)


@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
params.check_mac = true;
/* Non-trusted VFs should enable control frame filtering */
params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
params.update_ctl_frame_check = 1;
params.mac_chk_en = !vf_info->is_trusted_configured;
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
}
if (flags->update_rx_mode_config ||
flags->update_tx_mode_config)
flags->update_tx_mode_config ||
params.update_ctl_frame_check)
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
}


@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vf_pf_resc_request *p_resc;
u8 retry_cnt = VF_ACQUIRE_THRESH;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
/* Re-try acquire in case of vf-pf hw channel timeout */
if (retry_cnt && rc == -EBUSY) {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"VF retrying to acquire due to VPC timeout\n");
retry_cnt--;
continue;
}
if (rc)
goto exit;


@@ -489,6 +489,9 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);


@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;
total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ?
fallback(dev, skb, NULL) % total_txq : 0;
}
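Note: with multiple traffic classes the TX ring array is QEDE_TSS_COUNT(edev) * num_tc entries long, so the stack's default pick has to be folded back into range; e.g. with 8 TSS queues and num_tc == 4 (illustrative numbers), total_txq is 32 and the fallback result is taken modulo 32.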
/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48


@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,


@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
}
ret = phy_power_on(bsp_priv, true);
if (ret)
if (ret) {
gmac_clk_enable(bsp_priv, false);
return ret;
}
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);


@@ -719,8 +719,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk)
return 0;
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}
return (usec * (clk / 1000000)) / 256;
}
@@ -729,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
if (!clk)
return 0;
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}
return (riwt * 256) / (clk / 1000000);
}
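Note: the fallback keeps coalescing usable when stmmac_clk is not provided and only clk_ref_rate is known. A quick userspace check of the conversion math (a sketch; the 125 MHz rate below is just an assumed example):

	#include <stdio.h>

	/* Mirror of the stmmac_usec2riwt() formula above. */
	static unsigned int usec2riwt(unsigned int usec, unsigned long clk)
	{
		return (usec * (clk / 1000000)) / 256;
	}

	int main(void)
	{
		/* 100 us at 125 MHz: 100 * 125 = 12500 cycles, /256 -> 48 */
		printf("%u\n", usec2riwt(100, 125000000UL));
		return 0;
	}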


@@ -3028,10 +3028,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q = &priv->tx_queue[queue];
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
/*
* There is no way to determine the number of
* TSO-capable queues. Always use Queue 0, because
* if TSO is supported then at least this one will
* be capable.
*/
skb_set_queue_mapping(skb, 0);
return stmmac_tso_xmit(skb, dev);
}
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3046,9 +3058,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
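Note: two fixes are folded into this pair of hunks: the EEE exit is hoisted above the TSO early path (its now-redundant copy is the one removed in this hunk), so stmmac_disable_eee_mode() also runs for TSO frames, and GSO TCP traffic is pinned to queue 0, the only queue guaranteed to be TSO-capable.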


@@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
ret = ath9k_eeprom_request(sc, eeprom_name);
if (ret)
return ret;
ah->ah_flags &= ~AH_USE_EEPROM;
ah->ah_flags |= AH_NO_EEP_SWAP;
}
mac = of_get_mac_address(np);
if (mac)
ether_addr_copy(common->macaddr, mac);
ah->ah_flags &= ~AH_USE_EEPROM;
ah->ah_flags |= AH_NO_EEP_SWAP;
return 0;
}
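Note: the AH_USE_EEPROM / AH_NO_EEP_SWAP quirks are now applied only inside the branch that actually requests a firmware EEPROM image, instead of unconditionally for every OF node at the end of the function.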


@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
}
sdio_claim_host(func);
/*
* To guarantee that the SDIO card is power cycled, as required for
* FW programming to succeed, let's do a brute force HW reset.
*/
mmc_hw_reset(card->host);
sdio_enable_func(func);
sdio_release_host(func);
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
int error;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Let runtime PM know the card is powered off */
error = pm_runtime_put(&card->dev);
if (error < 0 && error != -EBUSY) {
dev_err(&card->dev, "%s failed: %i\n", __func__, error);
return error;
}
pm_runtime_put(&card->dev);
return 0;
}


@@ -1182,6 +1182,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
* effects say only one namespace is affected.
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
mutex_lock(&ctrl->scan_lock);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1210,8 +1211,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
*/
if (effects & NVME_CMD_EFFECTS_LBCC)
nvme_update_formats(ctrl);
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
mutex_unlock(&ctrl->scan_lock);
}
if (effects & NVME_CMD_EFFECTS_CCC)
nvme_init_identify(ctrl);
if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3292,6 +3295,7 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_identify_ctrl(ctrl, &id))
return;
mutex_lock(&ctrl->scan_lock);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3300,6 +3304,7 @@ static void nvme_scan_work(struct work_struct *work)
}
nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
mutex_unlock(&ctrl->scan_lock);
kfree(id);
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3535,6 +3540,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->state = NVME_CTRL_NEW;
spin_lock_init(&ctrl->lock);
mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
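Note: the new scan_lock serializes the namespace scan worker against passthrough commands whose effects freeze the queues; without it a namespace could be added or removed between nvme_start_freeze() and nvme_unfreeze() and be left with a permanently frozen queue.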


@@ -148,6 +148,7 @@ struct nvme_ctrl {
enum nvme_ctrl_state state;
bool identified;
spinlock_t lock;
struct mutex scan_lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct request_queue *connect_q;


@@ -2260,6 +2260,27 @@ static void nvme_reset_work(struct work_struct *work)
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
mutex_lock(&dev->shutdown_lock);
result = nvme_pci_enable(dev);
if (result)
goto out_unlock;
result = nvme_pci_configure_admin_queue(dev);
if (result)
goto out_unlock;
result = nvme_alloc_admin_tags(dev);
if (result)
goto out_unlock;
/*
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_segments = NVME_MAX_SEGS;
mutex_unlock(&dev->shutdown_lock);
/*
* Introduce CONNECTING state from nvme-fc/rdma transports to mark the
* initializing procedure here.
@@ -2270,25 +2291,6 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
}
result = nvme_pci_enable(dev);
if (result)
goto out;
result = nvme_pci_configure_admin_queue(dev);
if (result)
goto out;
result = nvme_alloc_admin_tags(dev);
if (result)
goto out;
/*
* Limit the max command size to prevent iod->sg allocations going
* over a single page.
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_segments = NVME_MAX_SEGS;
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2352,6 +2354,8 @@ static void nvme_reset_work(struct work_struct *work)
nvme_start_ctrl(&dev->ctrl);
return;
out_unlock:
mutex_unlock(&dev->shutdown_lock);
out:
nvme_remove_dead_ctrl(dev, result);
}
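Note: the admin-queue bring-up (nvme_pci_enable() through nvme_alloc_admin_tags()) is apparently hoisted under shutdown_lock so that a concurrent nvme_dev_disable() from a rapid add/remove cycle cannot tear the device down mid-initialization; the new out_unlock label keeps the failure paths from leaving the lock held.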


@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
break;
case MCP_TYPE_S18:
one_regmap_config =
devm_kmemdup(dev, &mcp23x17_regmap,
sizeof(struct regmap_config), GFP_KERNEL);
if (!one_regmap_config)
return -ENOMEM;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
&mcp23x17_regmap);
one_regmap_config);
mcp->reg_shift = 1;
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s18";
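Note: the S18 case now follows the devm_kmemdup() pattern the other chip types already use, handing devm_regmap_init() a per-device copy of the const template instead of the shared mcp23x17_regmap, and checking the allocation before use.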


@@ -856,6 +856,7 @@ config TOSHIBA_WMI
config ACPI_CMPC
tristate "CMPC Laptop Extras"
depends on ACPI && INPUT
depends on BACKLIGHT_LCD_SUPPORT
depends on RFKILL || RFKILL=n
select BACKLIGHT_CLASS_DEVICE
help
@@ -1077,6 +1078,7 @@ config INTEL_OAKTRAIL
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
depends on ACPI
depends on BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
This driver provides support for backlight control on Samsung Q10
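Note: "select BACKLIGHT_CLASS_DEVICE" does not pull in that symbol's own BACKLIGHT_LCD_SUPPORT dependency, so both entries must state it explicitly; that is the "unmet direct dependencies" warning named in the changelog.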


@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>


@@ -565,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -1187,6 +1188,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
if (iob)
qeth_release_buffer(iob->channel, iob);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1852,6 +1855,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -1923,6 +1927,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -2110,6 +2115,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply = qeth_alloc_reply(card);
if (!reply) {
qeth_release_buffer(channel, iob);
return -ENOMEM;
}
reply->callback = reply_cb;
@@ -2448,11 +2454,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
return 0;
}
static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
@@ -2526,10 +2533,8 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
card->qdio.out_qs[i]->bufs[j] = NULL;
}
out_freeoutq:
while (i > 0) {
qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
}
while (i > 0)
qeth_free_output_queue(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
out_freepool:
@@ -2562,10 +2567,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
if (card->qdio.out_qs) {
for (i = 0; i < card->qdio.no_out_queues; ++i) {
qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
}
for (i = 0; i < card->qdio.no_out_queues; i++)
qeth_free_output_queue(card->qdio.out_qs[i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
}


@@ -854,6 +854,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}


@@ -2611,6 +2611,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);


@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if(memory == NULL) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
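Note: hostdata->dev is apparently not assigned until later in NCR_700_detect(), so the old call handed an uninitialized pointer to dma_alloc_attrs(); "dev" is the device argument the caller passed in.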


@@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
ADD : DELETE;
break;
}
case AifBuManagerEvent:
aac_handle_aif_bu(dev, aifcmd);
break;
case AifBuManagerEvent:
aac_handle_aif_bu(dev, aifcmd);
break;
}


@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
return NULL;
}
cmgr->hba = hba;
cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
GFP_KERNEL);
if (!cmgr->free_list) {
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
goto mem_err;
}
cmgr->hba = hba;
cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
mem_size = num_ios * sizeof(struct io_bdt *);
cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
if (!cmgr->io_bdt_pool) {
printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
goto mem_err;
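Note: two error-path fixes in one function: cmgr->hba is assigned before the first allocation can fail, so the mem_err unwind (which dereferences it) is safe, and io_bdt_pool is zero-initialized so a partially filled table is not freed as if fully populated.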


@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_frame_payload_op(fp) != ELS_LS_ACC) {
FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
goto err;
goto out;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
FC_LPORT_DBG(lport, "FLOGI bad response\n");
fc_lport_error(lport, fp);
goto err;
goto out;
}
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%hu\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto err;
goto out;
}
if (mfs <= lport->mfs) {
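Note: in fc_lport_flogi_resp() the out label frees the received frame before falling through to the unlock, while err only unlocks; retargeting these three gotos stops leaking the skb on every malformed-FLOGI early exit, which is exactly the "free skb when receiving invalid flogi resp" entry in the changelog.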


@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);


@@ -62,7 +62,7 @@
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
static const char *sdebug_version_date = "20180128";
static const char *sdebug_version_date = "20190125";
#define MY_NAME "scsi_debug"
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
static void *fake_store(unsigned long long lba)
static void *lba2fake_store(unsigned long long lba)
{
lba = do_div(lba, sdebug_store_sectors);
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
return ret;
}
/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
* arr into fake_store(lba,num) and return true. If comparison fails then
/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
* arr into lba2fake_store(lba,num) and return true. If comparison fails then
* return false. */
static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
if (ret) {
dif_errors++;
return ret;
@@ -3261,10 +3261,12 @@ static int resp_write_scat(struct scsi_cmnd *scp,
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
int ret;
unsigned long iflags;
unsigned long long i;
int ret;
u64 lba_off;
u32 lb_size = sdebug_sector_size;
u64 block, lbaa;
u8 *fs1p;
ret = check_device_access_params(scp, lba, num);
if (ret)
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
unmap_region(lba, num);
goto out;
}
lba_off = lba * sdebug_sector_size;
lbaa = lba;
block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
fs1p = fake_storep + (block * lb_size);
if (ndob) {
memset(fake_storep + lba_off, 0, sdebug_sector_size);
memset(fs1p, 0, lb_size);
ret = 0;
} else
ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
sdebug_sector_size);
ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
return DID_ERROR << 16;
} else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
} else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
"%s: %s: lb size=%u, IO sent=%d bytes\n",
my_name, "write same",
sdebug_sector_size, ret);
my_name, "write same", lb_size, ret);
/* Copy first sector to remaining blocks */
for (i = 1 ; i < num ; i++)
memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
fake_storep + lba_off,
sdebug_sector_size);
for (i = 1 ; i < num ; i++) {
lbaa = lba + i;
block = do_div(lbaa, sdebug_store_sectors);
memmove(fake_storep + (block * lb_size), fs1p, lb_size);
}
if (scsi_debug_lbp())
map_region(lba, num);
out:
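Note: the fix maps each virtual LBA into the bounded backing store with do_div(), so WRITE SAME wraps instead of writing past fake_storep when virtual_gb inflates the reported capacity, and the copy loop switches to memmove() because source and destination can overlap once LBAs wrap onto the same backing block. A userspace model of the wrap (a sketch; the macro mimics the kernel's do_div(), and the store size is an assumed example):

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for the kernel's do_div(): n becomes the quotient,
	 * the remainder is returned. */
	#define do_div(n, base) ({ uint32_t rem = (n) % (base); (n) /= (base); rem; })

	int main(void)
	{
		uint64_t lbaa = 1000000;         /* virtual LBA beyond the store */
		uint32_t store_sectors = 262144; /* assumed backing-store size */
		uint32_t block = do_div(lbaa, store_sectors);

		/* 1000000 = 3 * 262144 + 213568 -> block 213568 inside the store */
		printf("block=%u quotient=%llu\n", block,
		       (unsigned long long)lbaa);
		return 0;
	}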
