This is the 4.19.86 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3aL2UACgkQONu9yGCS
 aT73GA//VSRJjGzdohy0+NVK3Dk7tCb2GfXFyLfRasyCbpCVGudaN9IltPU20pmj
 U67BRp3jJg6AFRFDxJn4uyAxqcYF6VFp77BiBLiF6lZEv3+0xxOqdyFL2IY9Cyew
 5XGNWcjXAR/bZ0r/rRXw8GUBMmW/8oewW7Iay4YhriUWv/afucbMVK7cNgyj/qvP
 jSbHh4mp15BGg1aIanM7YSlJgXX2MimXwEceyHPQJgKpSx1CApI2uRMSNZw/RXeP
 hFox3Ord5o/K+dowtKW+eTXUMkbm+7Htsi0p+WvE69y6KjyBzh3CEXrQqJsLtd0Y
 1myphKOX42z0/hbysUZQV8AvY5jrZu/SPoH8quXD/MNxPvNe0OjO3UiMruAdohQh
 I3SjKZB+HprtsCGn4X6/PiHUxq8PCLwtMaa9IIRmtFOXeuxPPeQLdEoM8m2eCEiL
 DnwkDXVVtQhKymmYgWUxcAsFpXl+s3k5ZRFmWEDDTuwlyZRWMPuRaWEOH8YuIHzz
 QETCyodrOis90TFgG1XJDijzPpZtxZKuJ8HdGmO7J8BMDXi6r0aoTzBk8cPAAe3A
 TUqRnHoMKLLYC+9vxA90aThXsibL6DuD06beJy3H1XCSj2vKvkM/iLaL8R95JjAW
 XZaEv/SH9zoEynypd+b8tOHHdPSaZcTe3pd3SDmOPLpejOuSTJU=
 =VtIx
 -----END PGP SIGNATURE-----

Merge 4.19.86 into android-4.19

Changes in 4.19.86
	spi: mediatek: use correct mata->xfer_len when in fifo transfer
	i2c: mediatek: modify threshold passed to i2c_get_dma_safe_msg_buf()
	tee: optee: add missing of_node_put after of_device_is_available
	Revert "OPP: Protect dev_list with opp_table lock"
	net: cdc_ncm: Signedness bug in cdc_ncm_set_dgram_size()
	idr: Fix idr_get_next race with idr_remove
	mm/memory_hotplug: don't access uninitialized memmaps in shrink_pgdat_span()
	mm/memory_hotplug: fix updating the node span
	arm64: uaccess: Ensure PAN is re-enabled after unhandled uaccess fault
	fbdev: Ditch fb_edid_add_monspecs
	bpf, x32: Fix bug for BPF_ALU64 | BPF_NEG
	bpf, x32: Fix bug with ALU64 {LSH, RSH, ARSH} BPF_X shift by 0
	bpf, x32: Fix bug with ALU64 {LSH, RSH, ARSH} BPF_K shift by 0
	bpf, x32: Fix bug for BPF_JMP | {BPF_JSGT, BPF_JSLE, BPF_JSLT, BPF_JSGE}
	net: ovs: fix return type of ndo_start_xmit function
	net: xen-netback: fix return type of ndo_start_xmit function
	ARM: dts: dra7: Enable workaround for errata i870 in PCIe host mode
	ARM: dts: omap5: enable OTG role for DWC3 controller
	net: hns3: Fix for netdev not up problem when setting mtu
	net: hns3: Fix loss of coal configuration while doing reset
	f2fs: return correct errno in f2fs_gc
	ARM: dts: sun8i: h3-h5: ir register size should be the whole memory block
	ARM: dts: sun8i: h3: bpi-m2-plus: Fix address for external RGMII Ethernet PHY
	tcp: up initial rmem to 128KB and SYN rwin to around 64KB
	SUNRPC: Fix priority queue fairness
	ACPI / LPSS: Make acpi_lpss_find_device() also find PCI devices
	ACPI / LPSS: Resume BYT/CHT I2C controllers from resume_noirq
	f2fs: keep lazytime on remount
	IB/hfi1: Error path MAD response size is incorrect
	IB/hfi1: Ensure ucast_dlid access doesnt exceed bounds
	mt76x2: fix tx power configuration for VHT mcs 9
	mt76x2: disable WLAN core before probe
	mt76: fix handling ps-poll frames
	iommu/io-pgtable-arm: Fix race handling in split_blk_unmap()
	iommu/arm-smmu-v3: Fix unexpected CMD_SYNC timeout
	kvm: arm/arm64: Fix stage2_flush_memslot for 4 level page table
	arm64/numa: Report correct memblock range for the dummy node
	ath10k: fix vdev-start timeout on error
	rtlwifi: btcoex: Use proper enumerated types for Wi-Fi only interface
	ata: ahci_brcm: Allow using driver or DSL SoCs
	PM / devfreq: Fix devfreq_add_device() when drivers are built as modules.
	PM / devfreq: Fix handling of min/max_freq == 0
	PM / devfreq: stopping the governor before device_unregister()
	ath9k: fix reporting calculated new FFT upper max
	selftests/tls: Fix recv(MSG_PEEK) & splice() test cases
	usb: gadget: udc: fotg210-udc: Fix a sleep-in-atomic-context bug in fotg210_get_status()
	usb: dwc3: gadget: Check ENBLSLPM before sending ep command
	nl80211: Fix a GET_KEY reply attribute
	irqchip/irq-mvebu-icu: Fix wrong private data retrieval
	watchdog: core: fix null pointer dereference when releasing cdev
	watchdog: renesas_wdt: stop when unregistering
	watchdog: sama5d4: fix timeout-sec usage
	watchdog: w83627hf_wdt: Support NCT6796D, NCT6797D, NCT6798D
	KVM: PPC: Inform the userspace about TCE update failures
	printk: Do not miss new messages when replaying the log
	printk: CON_PRINTBUFFER console registration is a bit racy
	dmaengine: ep93xx: Return proper enum in ep93xx_dma_chan_direction
	dmaengine: timb_dma: Use proper enum in td_prep_slave_sg
	ALSA: hda: Fix mismatch for register mask and value in ext controller.
	ext4: fix build error when DX_DEBUG is defined
	clk: keystone: Enable TISCI clocks if K3_ARCH
	sunrpc: Fix connect metrics
	x86/PCI: Apply VMD's AERSID fixup generically
	mei: samples: fix a signedness bug in amt_host_if_call()
	cxgb4: Use proper enum in cxgb4_dcb_handle_fw_update
	cxgb4: Use proper enum in IEEE_FAUX_SYNC
	powerpc/pseries: Fix DTL buffer registration
	powerpc/pseries: Fix how we iterate over the DTL entries
	powerpc/xive: Move a dereference below a NULL test
	ARM: dts: at91: sama5d4_xplained: fix addressable nand flash size
	ARM: dts: at91: at91sam9x5cm: fix addressable nand flash size
	ARM: dts: at91: sama5d2_ptc_ek: fix bootloader env offsets
	mtd: rawnand: sh_flctl: Use proper enum for flctl_dma_fifo0_transfer
	PM / hibernate: Check the success of generating md5 digest before hibernation
	tools: PCI: Fix compilation warnings
	clocksource/drivers/sh_cmt: Fixup for 64-bit machines
	clocksource/drivers/sh_cmt: Fix clocksource width for 32-bit machines
	ice: Fix forward to queue group logic
	md: allow metadata updates while suspending an array - fix
	ixgbe: Fix ixgbe TX hangs with XDP_TX beyond queue limit
	i40e: Use proper enum in i40e_ndo_set_vf_link_state
	ixgbe: Fix crash with VFs and flow director on interface flap
	IB/mthca: Fix error return code in __mthca_init_one()
	IB/rxe: avoid srq memory leak
	RDMA/hns: Bugfix for reserved qp number
	RDMA/hns: Submit bad wr when post send wr exception
	RDMA/hns: Bugfix for CM test
	RDMA/hns: Limit the size of extend sge of sq
	IB/mlx4: Avoid implicit enumerated type conversion
	rpmsg: glink: smem: Support rx peak for size less than 4 bytes
	msm/gpu/a6xx: Force of_dma_configure to setup DMA for GMU
	OPP: Return error on error from dev_pm_opp_get_opp_count()
	ACPICA: Never run _REG on system_memory and system_IO
	cpuidle: menu: Fix wakeup statistics updates for polling state
	ASoC: qdsp6: q6asm-dai: checking NULL vs IS_ERR()
	powerpc/time: Use clockevents_register_device(), fixing an issue with large decrementer
	powerpc/64s/radix: Explicitly flush ERAT with local LPID invalidation
	ata: ep93xx: Use proper enums for directions
	qed: Avoid implicit enum conversion in qed_ooo_submit_tx_buffers
	media: rc: ir-rc6-decoder: enable toggle bit for Kathrein RCU-676 remote
	media: pxa_camera: Fix check for pdev->dev.of_node
	media: rcar-vin: fix redeclaration of symbol
	media: i2c: adv748x: Support probing a single output
	ALSA: hda/sigmatel - Disable automute for Elo VuPoint
	bnxt_en: return proper error when FW returns HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED
	KVM: PPC: Book3S PR: Exiting split hack mode needs to fixup both PC and LR
	USB: serial: cypress_m8: fix interrupt-out transfer length
	usb: dwc2: disable power_down on rockchip devices
	mtd: physmap_of: Release resources on error
	cpu/SMT: State SMT is disabled even with nosmt and without "=force"
	brcmfmac: reduce timeout for action frame scan
	brcmfmac: fix full timeout waiting for action frame on-channel tx
	qtnfmac: request userspace to do OBSS scanning if FW can not
	qtnfmac: pass sgi rate info flag to wireless core
	qtnfmac: inform wireless core about supported extended capabilities
	qtnfmac: drop error reports for out-of-bounds key indexes
	clk: samsung: Use NOIRQ stage for Exynos5433 clocks suspend/resume
	clk: samsung: exynos5420: Define CLK_SECKEY gate clock only or Exynos5420
	clk: samsung: Use clk_hw API for calling clk framework from clk notifiers
	i2c: brcmstb: Allow enabling the driver on DSL SoCs
	printk: Correct wrong casting
	NFSv4.x: fix lock recovery during delegation recall
	dmaengine: ioat: fix prototype of ioat_enumerate_channels
	media: ov5640: fix framerate update
	media: cec-gpio: select correct Signal Free Time
	gfs2: slow the deluge of io error messages
	i2c: omap: use core to detect 'no zero length' quirk
	i2c: qup: use core to detect 'no zero length' quirk
	i2c: tegra: use core to detect 'no zero length' quirk
	i2c: zx2967: use core to detect 'no zero length' quirk
	Input: st1232 - set INPUT_PROP_DIRECT property
	Input: silead - try firmware reload after unsuccessful resume
	soc: fsl: bman_portals: defer probe after bman's probe
	net: hns3: Fix for rx vlan id handle to support Rev 0x21 hardware
	tc-testing: fix build of eBPF programs
	remoteproc: Check for NULL firmwares in sysfs interface
	remoteproc: qcom: q6v5: Fix a race condition on fatal crash
	kexec: Allocate decrypted control pages for kdump if SME is enabled
	x86/olpc: Fix build error with CONFIG_MFD_CS5535=m
	dmaengine: rcar-dmac: set scatter/gather max segment size
	crypto: mxs-dcp - Fix SHA null hashes and output length
	crypto: mxs-dcp - Fix AES issues
	xfrm: use correct size to initialise sp->ovec
	ACPI / SBS: Fix rare oops when removing modules
	iwlwifi: mvm: don't send keys when entering D3
	xsk: proper AF_XDP socket teardown ordering
	x86/fsgsbase/64: Fix ptrace() to read the FS/GS base accurately
	mmc: renesas_sdhi_internal_dmac: Whitelist r8a774a1
	mmc: tmio: Fix SCC error detection
	mmc: renesas_sdhi_internal_dmac: set scatter/gather max segment size
	atmel_lcdfb: support native-mode display-timings
	fbdev: sbuslib: use checked version of put_user()
	fbdev: sbuslib: integer overflow in sbusfb_ioctl_helper()
	fbdev: fix broken menu dependencies
	reset: Fix potential use-after-free in __of_reset_control_get()
	bcache: account size of buckets used in uuid write to ca->meta_sectors_written
	bcache: recal cached_dev_sectors on detach
	platform/x86: mlx-platform: Properly use mlxplat_mlxcpld_msn201x_items
	media: dw9714: Fix error handling in probe function
	media: dw9807-vcm: Fix probe error handling
	media: cx18: Don't check for address of video_dev
	mtd: spi-nor: cadence-quadspi: Use proper enum for dma_[un]map_single
	mtd: devices: m25p80: Make sure WRITE_EN is issued before each write
	x86/intel_rdt: Introduce utility to obtain CDP peer
	x86/intel_rdt: CBM overlap should also check for overlap with CDP peer
	mmc: mmci: expand startbiterr to irqmask and error check
	s390/kasan: avoid vdso instrumentation
	s390/kasan: avoid instrumentation of early C code
	s390/kasan: avoid user access code instrumentation
	proc/vmcore: Fix i386 build error of missing copy_oldmem_page_encrypted()
	backlight: lm3639: Unconditionally call led_classdev_unregister
	mfd: ti_am335x_tscadc: Keep ADC interface on if child is wakeup capable
	printk: Give error on attempt to set log buffer length to over 2G
	media: isif: fix a NULL pointer dereference bug
	GFS2: Flush the GFS2 delete workqueue before stopping the kernel threads
	media: cx231xx: fix potential sign-extension overflow on large shift
	media: venus: vdec: fix decoded data size
	ALSA: hda/ca0132 - Fix input effect controls for desktop cards
	lightnvm: pblk: fix rqd.error return value in pblk_blk_erase_sync
	lightnvm: pblk: fix incorrect min_write_pgs
	lightnvm: pblk: guarantee emeta on line close
	lightnvm: pblk: fix write amplificiation calculation
	lightnvm: pblk: guarantee mw_cunits on read buffer
	lightnvm: do no update csecs and sos on 1.2
	lightnvm: pblk: fix error handling of pblk_lines_init()
	lightnvm: pblk: consider max hw sectors supported for max_write_pgs
	x86/kexec: Correct KEXEC_BACKUP_SRC_END off-by-one error
	bpf: btf: Fix a missing check bug
	net: fix generic XDP to handle if eth header was mangled
	gpio: syscon: Fix possible NULL ptr usage
	spi: fsl-lpspi: Prevent FIFO under/overrun by default
	pinctrl: gemini: Mask and set properly
	spi: spidev: Fix OF tree warning logic
	ARM: 8802/1: Call syscall_trace_exit even when system call skipped
	x86/mm: Do not warn about PCI BIOS W+X mappings
	orangefs: rate limit the client not running info message
	pinctrl: gemini: Fix up TVC clock group
	scsi: arcmsr: clean up clang warning on extraneous parentheses
	hwmon: (k10temp) Support all Family 15h Model 6xh and Model 7xh processors
	hwmon: (nct6775) Fix names of DIMM temperature sources
	hwmon: (pwm-fan) Silence error on probe deferral
	hwmon: (ina3221) Fix INA3221_CONFIG_MODE macros
	hwmon: (npcm-750-pwm-fan) Change initial pwm target to 255
	selftests: forwarding: Have lldpad_app_wait_set() wait for unknown, too
	net: sched: avoid writing on noop_qdisc
	netfilter: nft_compat: do not dump private area
	misc: cxl: Fix possible null pointer dereference
	mac80211: minstrel: fix using short preamble CCK rates on HT clients
	mac80211: minstrel: fix CCK rate group streams value
	mac80211: minstrel: fix sampling/reporting of CCK rates in HT mode
	spi: rockchip: initialize dma_slave_config properly
	mlxsw: spectrum_switchdev: Check notification relevance based on upper device
	ARM: dts: omap5: Fix dual-role mode on Super-Speed port
	tcp: start receiver buffer autotuning sooner
	ACPI / LPSS: Use acpi_lpss_* instead of acpi_subsys_* functions for hibernate
	PM / devfreq: Fix static checker warning in try_then_request_governor
	tools: PCI: Fix broken pcitest compilation
	powerpc/time: Fix clockevent_decrementer initalisation for PR KVM
	mmc: tmio: fix SCC error handling to avoid false positive CRC error
	x86/resctrl: Fix rdt_find_domain() return value and checks
	Linux 4.19.86

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib9de820e5026cadf4fab89e69d1324302cdae9c3
commit a36fc1fff6
Greg Kroah-Hartman, 2019-11-25 10:00:06 +0100

224 files changed, 1885 insertions(+), 1355 deletions(-)

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 85
SUBLEVEL = 86
EXTRAVERSION =
NAME = "People's Front"

@@ -92,13 +92,13 @@
reg = <0x40000 0xc0000>;
};
bootloaderenv@0x100000 {
label = "bootloader env";
bootloaderenvred@0x100000 {
label = "bootloader env redundant";
reg = <0x100000 0x40000>;
};
bootloaderenvred@0x140000 {
label = "bootloader env redundant";
bootloaderenv@0x140000 {
label = "bootloader env";
reg = <0x140000 0x40000>;
};

@@ -252,7 +252,7 @@
rootfs@800000 {
label = "rootfs";
reg = <0x800000 0x0f800000>;
reg = <0x800000 0x1f800000>;
};
};
};

@@ -100,7 +100,7 @@
rootfs@800000 {
label = "rootfs";
reg = <0x800000 0x1f800000>;
reg = <0x800000 0x0f800000>;
};
};
};

@@ -336,6 +336,7 @@
<0 0 0 2 &pcie1_intc 2>,
<0 0 0 3 &pcie1_intc 3>,
<0 0 0 4 &pcie1_intc 4>;
ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
status = "disabled";
pcie1_intc: interrupt-controller {
interrupt-controller;
@@ -387,6 +388,7 @@
<0 0 0 2 &pcie2_intc 2>,
<0 0 0 3 &pcie2_intc 3>,
<0 0 0 4 &pcie2_intc 4>;
ti,syscon-unaligned-access = <&scm_conf1 0x14 2>;
pcie2_intc: interrupt-controller {
interrupt-controller;
#address-cells = <0>;

@@ -703,6 +703,11 @@
vbus-supply = <&smps10_out1_reg>;
};
&dwc3 {
extcon = <&extcon_usb3>;
dr_mode = "otg";
};
&mcspi1 {
};

@@ -140,7 +140,7 @@
&external_mdio {
ext_rgmii_phy: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
reg = <1>;
};
};

@@ -816,7 +816,7 @@
clock-names = "apb", "ir";
resets = <&r_ccu RST_APB0_IR>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x01f02000 0x40>;
reg = <0x01f02000 0x400>;
status = "disabled";
};

@@ -296,16 +296,15 @@ __sys_trace:
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
b ret_slow_syscall
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
__sys_trace_return_nosave:
enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
__sys_trace_return_nosave:
enable_irq_notrace
__sys_trace_return:
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall

@@ -57,5 +57,6 @@ ENDPROC(__arch_clear_user)
.section .fixup,"ax"
.align 2
9: mov x0, x2 // return the original size
uaccess_disable_not_uao x2, x3
ret
.previous

@@ -75,5 +75,6 @@ ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
uaccess_disable_not_uao x3, x4
ret
.previous

@@ -77,5 +77,6 @@ ENDPROC(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
uaccess_disable_not_uao x3, x4
ret
.previous

@@ -74,5 +74,6 @@ ENDPROC(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
9998: sub x0, end, dst // bytes not copied
uaccess_disable_not_uao x3, x4
ret
.previous

@@ -432,7 +432,7 @@ static int __init dummy_numa_init(void)
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
0LLU, PFN_PHYS(max_pfn) - 1);
memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
for_each_memblock(memory, mblk) {
ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);

@@ -984,10 +984,14 @@ static void register_decrementer_clockevent(int cpu)
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
clockevents_register_device(dec);
/* Set values for KVM, see kvm_emulate_dec() */
decrementer_clockevent.mult = dec->mult;
decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
@@ -1035,18 +1039,7 @@ static void __init set_decrementer_max(void)
static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
decrementer_clockevent.max_delta_ns =
clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
decrementer_clockevent.max_delta_ticks = decrementer_max;
decrementer_clockevent.min_delta_ns =
clockevent_delta2ns(2, &decrementer_clockevent);
decrementer_clockevent.min_delta_ticks = 2;
register_decrementer_clockevent(cpu);
register_decrementer_clockevent(smp_processor_id());
}
void secondary_cpu_time_init(void)

@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
ulong pc = kvmppc_get_pc(vcpu);
ulong lr = kvmppc_get_lr(vcpu);
if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
}
}

@@ -401,7 +401,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
long ret;
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
return H_HARDWARE;
return H_TOO_HARD;
if (dir == DMA_NONE)
return H_SUCCESS;
@@ -449,15 +449,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
return H_TOO_HARD;
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
return H_HARDWARE;
return H_TOO_HARD;
if (mm_iommu_mapped_inc(mem))
return H_CLOSED;
return H_TOO_HARD;
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem);
return H_HARDWARE;
return H_TOO_HARD;
}
if (dir != DMA_NONE)

@@ -300,10 +300,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
&hpa)))
return H_HARDWARE;
return H_TOO_HARD;
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
return H_TOO_HARD;
ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
@@ -501,7 +501,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
rmap = (void *) vmalloc_to_phys(rmap);
if (WARN_ON_ONCE_RM(!rmap))
return H_HARDWARE;
return H_TOO_HARD;
/*
* Synchronize with the MMU notifier callbacks in

@@ -429,6 +429,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

@@ -149,7 +149,7 @@ static int dtl_start(struct dtl *dtl)
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
@@ -184,7 +184,7 @@ static void dtl_stop(struct dtl *dtl)
static u64 dtl_current_index(struct dtl *dtl)
{
return lppaca_of(dtl->cpu).dtl_idx;
return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

@@ -1009,12 +1009,13 @@ static void xive_ipi_eoi(struct irq_data *d)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
/* Handle possible race with unplug and drop stale IPIs */
if (!xc)
return;
DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
xive_do_queue_eoi(xc);
}

@@ -6,6 +6,7 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)

@@ -8,6 +8,7 @@
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2

@@ -23,6 +23,8 @@ KCOV_INSTRUMENT_early_nobss.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_early_nobss.o := n
KASAN_SANITIZE_early_nobss.o := n
#
# Passing null pointers is ok for smp code, since we access the lowcore here.
#

@@ -28,9 +28,10 @@ obj-y += vdso32_wrapper.o
extra-y += vdso32.lds
CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
# Disable gcov profiling and ubsan for VDSO code
# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so

@@ -28,9 +28,10 @@ obj-y += vdso64_wrapper.o
extra-y += vdso64.lds
CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
# Disable gcov profiling and ubsan for VDSO code
# Disable gcov profiling, ubsan and kasan for VDSO code
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
# Force dependency (incbin is bad)
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so

@@ -9,5 +9,9 @@ lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
# Instrumenting memory accesses to __user data (in different address space)
# produce false positives
KASAN_SANITIZE_uaccess.o := n
chkbss := mem.o
include $(srctree)/arch/s390/scripts/Makefile.chkbss

@@ -2771,8 +2771,7 @@ config OLPC
config OLPC_XO1_PM
bool "OLPC XO-1 Power Management"
depends on OLPC && MFD_CS5535 && PM_SLEEP
select MFD_CORE
depends on OLPC && MFD_CS5535=y && PM_SLEEP
---help---
Add support for poweroff and suspend of the OLPC XO-1 laptop.

@@ -67,7 +67,7 @@ struct kimage;
/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START (0UL)
#define KEXEC_BACKUP_SRC_END (640 * 1024UL) /* 640K */
#define KEXEC_BACKUP_SRC_END (640 * 1024UL - 1) /* 640K */
/*
* CPU does not save ss and sp on stack if execution is already

@@ -421,7 +421,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
struct list_head *l;
if (id < 0)
return ERR_PTR(id);
return ERR_PTR(-ENODEV);
list_for_each(l, &r->domains) {
d = list_entry(l, struct rdt_domain, list);

@@ -459,7 +459,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
r = &rdt_resources_all[resid];
d = rdt_find_domain(r, domid, NULL);
if (!d) {
if (IS_ERR_OR_NULL(d)) {
ret = -ENOENT;
goto out;
}

@@ -965,7 +965,78 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
}
/**
* rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
* rdt_cdp_peer_get - Retrieve CDP peer if it exists
* @r: RDT resource to which RDT domain @d belongs
* @d: Cache instance for which a CDP peer is requested
* @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
* Used to return the result.
* @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
* Used to return the result.
*
* RDT resources are managed independently and by extension the RDT domains
* (RDT resource instances) are managed independently also. The Code and
* Data Prioritization (CDP) RDT resources, while managed independently,
* could refer to the same underlying hardware. For example,
* RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
*
* When provided with an RDT resource @r and an instance of that RDT
* resource @d rdt_cdp_peer_get() will return if there is a peer RDT
* resource and the exact instance that shares the same hardware.
*
* Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
* If a CDP peer was found, @r_cdp will point to the peer RDT resource
* and @d_cdp will point to the peer RDT domain.
*/
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
struct rdt_resource **r_cdp,
struct rdt_domain **d_cdp)
{
struct rdt_resource *_r_cdp = NULL;
struct rdt_domain *_d_cdp = NULL;
int ret = 0;
switch (r->rid) {
case RDT_RESOURCE_L3DATA:
_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
break;
case RDT_RESOURCE_L3CODE:
_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
break;
case RDT_RESOURCE_L2DATA:
_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
break;
case RDT_RESOURCE_L2CODE:
_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
break;
default:
ret = -ENOENT;
goto out;
}
/*
* When a new CPU comes online and CDP is enabled then the new
* RDT domains (if any) associated with both CDP RDT resources
* are added in the same CPU online routine while the
* rdtgroup_mutex is held. It should thus not happen for one
* RDT domain to exist and be associated with its RDT CDP
* resource but there is no RDT domain associated with the
* peer RDT CDP resource. Hence the WARN.
*/
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
ret = -EINVAL;
}
out:
*r_cdp = _r_cdp;
*d_cdp = _d_cdp;
return ret;
}
/**
* __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
* @r: Resource to which domain instance @d belongs.
* @d: The domain instance for which @closid is being tested.
* @cbm: Capacity bitmask being tested.
@@ -984,8 +1055,8 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
*
* Return: false if CBM does not overlap, true if it does.
*/
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
unsigned long cbm, int closid, bool exclusive)
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
unsigned long cbm, int closid, bool exclusive)
{
enum rdtgrp_mode mode;
unsigned long ctrl_b;
@@ -1020,6 +1091,41 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
return false;
}
/**
* rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
* @r: Resource to which domain instance @d belongs.
* @d: The domain instance for which @closid is being tested.
* @cbm: Capacity bitmask being tested.
* @closid: Intended closid for @cbm.
* @exclusive: Only check if overlaps with exclusive resource groups
*
* Resources that can be allocated using a CBM can use the CBM to control
* the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
* for overlap. Overlap test is not limited to the specific resource for
* which the CBM is intended though - when dealing with CDP resources that
* share the underlying hardware the overlap check should be performed on
* the CDP resource sharing the hardware also.
*
* Refer to description of __rdtgroup_cbm_overlaps() for the details of the
* overlap test.
*
* Return: true if CBM overlap detected, false if there is no overlap
*/
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
unsigned long cbm, int closid, bool exclusive)
{
struct rdt_resource *r_cdp;
struct rdt_domain *d_cdp;
if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
return true;
if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
return false;
return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}
/**
* rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
*

@@ -40,6 +40,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/syscall.h>
#include <asm/mmu_context.h>
#include "tls.h"
@@ -343,6 +344,49 @@ static int set_segment_reg(struct task_struct *task,
return 0;
}
static unsigned long task_seg_base(struct task_struct *task,
unsigned short selector)
{
unsigned short idx = selector >> 3;
unsigned long base;
if (likely((selector & SEGMENT_TI_MASK) == 0)) {
if (unlikely(idx >= GDT_ENTRIES))
return 0;
/*
* There are no user segments in the GDT with nonzero bases
* other than the TLS segments.
*/
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return 0;
idx -= GDT_ENTRY_TLS_MIN;
base = get_desc_base(&task->thread.tls_array[idx]);
} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
/*
* If performance here mattered, we could protect the LDT
* with RCU. This is a slow path, though, so we can just
* take the mutex.
*/
mutex_lock(&task->mm->context.lock);
ldt = task->mm->context.ldt;
if (unlikely(idx >= ldt->nr_entries))
base = 0;
else
base = get_desc_base(ldt->entries + idx);
mutex_unlock(&task->mm->context.lock);
#else
base = 0;
#endif
}
return base;
}
#endif /* CONFIG_X86_32 */
static unsigned long get_flags(struct task_struct *task)
@@ -436,18 +480,16 @@ static unsigned long getreg(struct task_struct *task, unsigned long offset)
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct, fs_base): {
/*
* XXX: This will not behave as expected if called on
* current or if fsindex != 0.
*/
return task->thread.fsbase;
if (task->thread.fsindex == 0)
return task->thread.fsbase;
else
return task_seg_base(task, task->thread.fsindex);
}
case offsetof(struct user_regs_struct, gs_base): {
/*
* XXX: This will not behave as expected if called on
* current or if fsindex != 0.
*/
return task->thread.gsbase;
if (task->thread.gsindex == 0)
return task->thread.gsbase;
else
return task_seg_base(task, task->thread.gsindex);
}
#endif
}

@@ -19,7 +19,9 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <asm/e820/types.h>
#include <asm/pgtable.h>
/*
@@ -238,6 +240,29 @@ static unsigned long normalize_addr(unsigned long u)
return (signed long)(u << shift) >> shift;
}
static void note_wx(struct pg_state *st)
{
unsigned long npages;
npages = (st->current_address - st->start_address) / PAGE_SIZE;
#ifdef CONFIG_PCI_BIOS
/*
* If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
* Inform about it, but avoid the warning.
*/
if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
st->current_address <= PAGE_OFFSET + BIOS_END) {
pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
return;
}
#endif
/* Account the WX pages */
st->wx_pages += npages;
WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
(void *)st->start_address);
}
/*
* This function gets called on a break in a continuous series
* of PTE entries; the next one is different so we need to
@@ -273,14 +298,8 @@ static void note_page(struct seq_file *m, struct pg_state *st,
unsigned long delta;
int width = sizeof(unsigned long) * 2;
if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) {
WARN_ONCE(1,
"x86/mm: Found insecure W+X mapping at address %p/%pS\n",
(void *)st->start_address,
(void *)st->start_address);
st->wx_pages += (st->current_address -
st->start_address) / PAGE_SIZE;
}
if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
note_wx(st);
/*
* Now print the actual finished series

@@ -117,6 +117,8 @@ static bool is_simm32(s64 value)
#define IA32_JLE 0x7E
#define IA32_JG 0x7F
#define COND_JMP_OPCODE_INVALID (0xFF)
/*
* Map eBPF registers to IA32 32bit registers or stack scratch space.
*
@@ -698,19 +700,12 @@ static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
STACK_VAR(dst_hi));
}
/* xor ecx,ecx */
EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* sub dreg_lo,ecx */
EMIT2(0x2B, add_2reg(0xC0, dreg_lo, IA32_ECX));
/* mov dreg_lo,ecx */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
/* xor ecx,ecx */
EMIT2(0x31, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* sbb dreg_hi,ecx */
EMIT2(0x19, add_2reg(0xC0, dreg_hi, IA32_ECX));
/* mov dreg_hi,ecx */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, IA32_ECX));
/* neg dreg_lo */
EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
/* adc dreg_hi,0x0 */
EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
/* neg dreg_hi */
EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
@@ -729,9 +724,6 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
{
u8 *prog = *pprog;
int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@ -750,79 +742,23 @@ static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shld dreg_hi,dreg_lo,cl */
EMIT3(0x0F, 0xA5, add_2reg(0xC0, dreg_hi, dreg_lo));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* if ecx >= 32, mov dreg_lo into dreg_hi and clear dreg_lo */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */
if (is_imm8(jmp_label(jmp_label1, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* skip the next two instructions (4 bytes) when < 32 */
EMIT2(IA32_JB, 4);
/* < 32 */
/* shl dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
/* mov ebx,dreg_lo */
EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shr ebx,cl */
EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
/* or dreg_hi,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* shl dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
/* mov dreg_hi,dreg_lo */
EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -841,9 +777,6 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
{
u8 *prog = *pprog;
int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@ -862,79 +795,23 @@ static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* sar dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and set/clear dreg_hi depending on sign */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */
if (is_imm8(jmp_label(jmp_label1, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* skip the next two instructions (5 bytes) when < 32 */
EMIT2(IA32_JB, 5);
/* < 32 */
/* lshr dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* ashr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* ashr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* ashr dreg_hi,imm8 */
/* sar dreg_hi,31 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -953,9 +830,6 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
{
u8 *prog = *pprog;
int cnt = 0;
static int jmp_label1 = -1;
static int jmp_label2 = -1;
static int jmp_label3 = -1;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@ -974,77 +848,23 @@ static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* mov ecx,src_lo */
EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
/* shrd dreg_lo,dreg_hi,cl */
EMIT3(0x0F, 0xAD, add_2reg(0xC0, dreg_lo, dreg_hi));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* if ecx >= 32, mov dreg_hi to dreg_lo and clear dreg_hi */
/* cmp ecx,32 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
/* Jumps when >= 32 */
if (is_imm8(jmp_label(jmp_label1, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
/* skip the next two instructions (4 bytes) when < 32 */
EMIT2(IA32_JB, 4);
/* < 32 */
/* lshr dreg_lo,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* IA32_ECX = -IA32_ECX + 32 */
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 32 */
if (jmp_label1 == -1)
jmp_label1 = cnt;
/* cmp ecx,64 */
EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
/* Jumps when >= 64 */
if (is_imm8(jmp_label(jmp_label2, 2)))
EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
else
EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
/* >= 32 && < 64 */
/* sub ecx,32 */
EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
/* shr dreg_hi,cl */
EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
/* mov dreg_lo,dreg_hi */
EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
/* goto out; */
if (is_imm8(jmp_label(jmp_label3, 2)))
EMIT2(0xEB, jmp_label(jmp_label3, 2));
else
EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
/* >= 64 */
if (jmp_label2 == -1)
jmp_label2 = cnt;
/* xor dreg_lo,dreg_lo */
EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
/* xor dreg_hi,dreg_hi */
EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
if (jmp_label3 == -1)
jmp_label3 = cnt;
if (dstk) {
/* mov dword ptr [ebp+off],dreg_lo */
EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
@@ -1074,27 +894,10 @@ static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
}
/* Do LSH operation */
if (val < 32) {
/* shl dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
/* mov ebx,dreg_lo */
EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
/* shld dreg_hi,dreg_lo,imm8 */
EMIT4(0x0F, 0xA4, add_2reg(0xC0, dreg_hi, dreg_lo), val);
/* shl dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shr ebx,cl */
EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
/* or dreg_hi,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@ -1140,27 +943,10 @@ static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
/* Do RSH operation */
if (val < 32) {
/* shr dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* shrd dreg_lo,dreg_hi,imm8 */
EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* shr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@ -1205,27 +991,10 @@ static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
}
/* Do RSH operation */
if (val < 32) {
/* shr dreg_lo,imm8 */
EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
/* mov ebx,dreg_hi */
EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
/* shrd dreg_lo,dreg_hi,imm8 */
EMIT4(0x0F, 0xAC, add_2reg(0xC0, dreg_lo, dreg_hi), val);
/* ashr dreg_hi,imm8 */
EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
/* IA32_ECX = 32 - val */
/* mov ecx,val */
EMIT2(0xB1, val);
/* movzx ecx,ecx */
EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
/* neg ecx */
EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
/* add ecx,32 */
EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
/* shl ebx,cl */
EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
/* or dreg_lo,ebx */
EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
} else if (val >= 32 && val < 64) {
u32 value = val - 32;
@@ -1613,6 +1382,75 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
*pprog = prog;
}
static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
{
u8 jmp_cond;
/* Convert BPF opcode to x86 */
switch (op) {
case BPF_JEQ:
jmp_cond = IA32_JE;
break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = IA32_JNE;
break;
case BPF_JGT:
/* GT is unsigned '>', JA in x86 */
jmp_cond = IA32_JA;
break;
case BPF_JLT:
/* LT is unsigned '<', JB in x86 */
jmp_cond = IA32_JB;
break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = IA32_JAE;
break;
case BPF_JLE:
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = IA32_JBE;
break;
case BPF_JSGT:
if (!is_cmp_lo)
/* Signed '>', GT in x86 */
jmp_cond = IA32_JG;
else
/* GT is unsigned '>', JA in x86 */
jmp_cond = IA32_JA;
break;
case BPF_JSLT:
if (!is_cmp_lo)
/* Signed '<', LT in x86 */
jmp_cond = IA32_JL;
else
/* LT is unsigned '<', JB in x86 */
jmp_cond = IA32_JB;
break;
case BPF_JSGE:
if (!is_cmp_lo)
/* Signed '>=', GE in x86 */
jmp_cond = IA32_JGE;
else
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = IA32_JAE;
break;
case BPF_JSLE:
if (!is_cmp_lo)
/* Signed '<=', LE in x86 */
jmp_cond = IA32_JLE;
else
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = IA32_JBE;
break;
default: /* to silence GCC warning */
jmp_cond = COND_JMP_OPCODE_INVALID;
break;
}
return jmp_cond;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -2068,11 +1906,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X: {
case BPF_JMP | BPF_JLE | BPF_X: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
@@ -2099,6 +1933,40 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp;
}
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
if (sstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
STACK_VAR(src_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EBX),
STACK_VAR(src_hi));
}
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 10);
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
goto emit_cond_jmp_signed;
}
case BPF_JMP | BPF_JSET | BPF_X: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@ -2159,11 +2027,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K: {
case BPF_JMP | BPF_JLE | BPF_K: {
u32 hi;
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
@@ -2189,50 +2053,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
emit_cond_jmp: /* Convert BPF opcode to x86 */
switch (BPF_OP(code)) {
case BPF_JEQ:
jmp_cond = IA32_JE;
break;
case BPF_JSET:
case BPF_JNE:
jmp_cond = IA32_JNE;
break;
case BPF_JGT:
/* GT is unsigned '>', JA in x86 */
jmp_cond = IA32_JA;
break;
case BPF_JLT:
/* LT is unsigned '<', JB in x86 */
jmp_cond = IA32_JB;
break;
case BPF_JGE:
/* GE is unsigned '>=', JAE in x86 */
jmp_cond = IA32_JAE;
break;
case BPF_JLE:
/* LE is unsigned '<=', JBE in x86 */
jmp_cond = IA32_JBE;
break;
case BPF_JSGT:
/* Signed '>', GT in x86 */
jmp_cond = IA32_JG;
break;
case BPF_JSLT:
/* Signed '<', LT in x86 */
jmp_cond = IA32_JL;
break;
case BPF_JSGE:
/* Signed '>=', GE in x86 */
jmp_cond = IA32_JGE;
break;
case BPF_JSLE:
/* Signed '<=', LE in x86 */
jmp_cond = IA32_JLE;
break;
default: /* to silence GCC warning */
emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
}
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_imm8(jmp_offset)) {
EMIT2(jmp_cond, jmp_offset);
@@ -2242,7 +2065,66 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
}
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K: {
u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
if (dstk) {
EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
STACK_VAR(dst_lo));
EMIT3(0x8B,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
}
/* mov ecx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
hi = imm32 & (1 << 31) ? (u32)~0 : 0;
/* mov ebx,imm32 */
EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
/* cmp dreg_hi,sreg_hi */
EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
EMIT2(IA32_JNE, 10);
/* cmp dreg_lo,sreg_lo */
EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
/*
* For simplicity of branch offset computation,
* let's use fixed jump coding here.
*/
emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */
jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
EMIT2(0xEB, 6);
/* Check the condition for high 32-bit comparison */
jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
if (jmp_cond == COND_JMP_OPCODE_INVALID)
return -EFAULT;
jmp_offset = addrs[i + insn->off] - addrs[i];
if (is_simm32(jmp_offset)) {
EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
} else {
pr_err("cond_jmp gen bug %llx\n", jmp_offset);
return -EFAULT;
}
break;
}
case BPF_JMP | BPF_JA:

@@ -629,17 +629,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
if (is_vmd(pdev->bus))
if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
static void quirk_intel_th_dnv(struct pci_dev *dev)
{

@@ -266,9 +266,9 @@ static int get_e820_md5(struct e820_table *table, void *buf)
return ret;
}
static void hibernation_e820_save(void *buf)
static int hibernation_e820_save(void *buf)
{
get_e820_md5(e820_table_firmware, buf);
return get_e820_md5(e820_table_firmware, buf);
}
static bool hibernation_e820_mismatch(void *buf)
@@ -288,8 +288,9 @@ static bool hibernation_e820_mismatch(void *buf)
return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
static int hibernation_e820_save(void *buf)
{
return 0;
}
static bool hibernation_e820_mismatch(void *buf)
@@ -334,9 +335,7 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
rdr->magic = RESTORE_MAGIC;
hibernation_e820_save(rdr->e820_digest);
return 0;
return hibernation_e820_save(rdr->e820_digest);
}
/**

@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
@@ -83,6 +84,7 @@ struct lpss_device_desc {
size_t prv_size_override;
struct property_entry *properties;
void (*setup)(struct lpss_private_data *pdata);
bool resume_from_noirq;
};
static const struct lpss_device_desc lpss_dma_desc = {
@@ -292,12 +294,14 @@ static const struct lpss_device_desc byt_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
.resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_i2c_dev_desc = {
.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
.prv_offset = 0x800,
.setup = byt_i2c_setup,
.resume_from_noirq = true,
};
static const struct lpss_device_desc bsw_spi_dev_desc = {
@@ -512,12 +516,18 @@ static int match_hid_uid(struct device *dev, void *data)
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
struct device *dev;
struct hid_uid data = {
.hid = hid,
.uid = uid,
};
return bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
if (dev)
return dev;
return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
@@ -1024,7 +1034,7 @@ static int acpi_lpss_resume(struct device *dev)
}
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
static int acpi_lpss_do_suspend_late(struct device *dev)
{
int ret;
@@ -1035,12 +1045,62 @@ static int acpi_lpss_suspend_late(struct device *dev)
return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}
static int acpi_lpss_resume_early(struct device *dev)
static int acpi_lpss_suspend_late(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (pdata->dev_desc->resume_from_noirq)
return 0;
return acpi_lpss_do_suspend_late(dev);
}
static int acpi_lpss_suspend_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
if (pdata->dev_desc->resume_from_noirq) {
ret = acpi_lpss_do_suspend_late(dev);
if (ret)
return ret;
}
return acpi_subsys_suspend_noirq(dev);
}
static int acpi_lpss_do_resume_early(struct device *dev)
{
int ret = acpi_lpss_resume(dev);
return ret ? ret : pm_generic_resume_early(dev);
}
static int acpi_lpss_resume_early(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
if (pdata->dev_desc->resume_from_noirq)
return 0;
return acpi_lpss_do_resume_early(dev);
}
static int acpi_lpss_resume_noirq(struct device *dev)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
ret = acpi_subsys_resume_noirq(dev);
if (ret)
return ret;
if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
ret = acpi_lpss_do_resume_early(dev);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
@@ -1070,8 +1130,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.complete = acpi_subsys_complete,
.suspend = acpi_subsys_suspend,
.suspend_late = acpi_lpss_suspend_late,
.suspend_noirq = acpi_subsys_suspend_noirq,
.resume_noirq = acpi_subsys_resume_noirq,
.suspend_noirq = acpi_lpss_suspend_noirq,
.resume_noirq = acpi_lpss_resume_noirq,
.resume_early = acpi_lpss_resume_early,
.freeze = acpi_subsys_freeze,
.freeze_late = acpi_subsys_freeze_late,
@@ -1079,8 +1139,8 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
.thaw_noirq = acpi_subsys_thaw_noirq,
.poweroff = acpi_subsys_suspend,
.poweroff_late = acpi_lpss_suspend_late,
.poweroff_noirq = acpi_subsys_suspend_noirq,
.restore_noirq = acpi_subsys_resume_noirq,
.poweroff_noirq = acpi_lpss_suspend_noirq,
.restore_noirq = acpi_lpss_resume_noirq,
.restore_early = acpi_lpss_resume_early,
#endif
.runtime_suspend = acpi_lpss_runtime_suspend,

@@ -230,6 +230,8 @@
acpi_status acpi_ev_initialize_region(union acpi_operand_object *region_obj);
u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
/*
* evsci - SCI (System Control Interrupt) handling/dispatch
*/

@@ -395,9 +395,9 @@ struct acpi_simple_repair_info {
/* Info for running the _REG methods */
struct acpi_reg_walk_info {
acpi_adr_space_type space_id;
u32 function;
u32 reg_run_count;
acpi_adr_space_type space_id;
};
/*****************************************************************************

@@ -653,6 +653,19 @@ acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
ACPI_FUNCTION_TRACE(ev_execute_reg_methods);
/*
* These address spaces do not need a call to _REG, since the ACPI
* specification defines them as: "must always be accessible". Since
* they never change state (never become unavailable), no need to ever
* call _REG on them. Also, a data_table is not a "real" address space,
* so do not call _REG. September 2018.
*/
if ((space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) ||
(space_id == ACPI_ADR_SPACE_SYSTEM_IO) ||
(space_id == ACPI_ADR_SPACE_DATA_TABLE)) {
return_VOID;
}
info.space_id = space_id;
info.function = function;
info.reg_run_count = 0;
@@ -714,8 +727,8 @@ acpi_ev_reg_run(acpi_handle obj_handle,
}
/*
* We only care about regions.and objects that are allowed to have address
* space handlers
* We only care about regions and objects that are allowed to have
* address space handlers
*/
if ((node->type != ACPI_TYPE_REGION) && (node != acpi_gbl_root_node)) {
return (AE_OK);


@@ -16,9 +16,6 @@
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini")
/* Local prototypes */
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
/*******************************************************************************
*
* FUNCTION: acpi_ev_system_memory_region_setup
@@ -33,7 +30,6 @@ static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
* DESCRIPTION: Setup a system_memory operation region
*
******************************************************************************/
acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle,
u32 function,
@@ -313,7 +309,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*
******************************************************************************/
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
struct acpi_pnp_device_id *hid;


@@ -193,7 +193,6 @@ acpi_remove_address_space_handler(acpi_handle device,
*/
region_obj =
handler_obj->address_space.region_list;
}
/* Remove this Handler object from the list */


@@ -1132,6 +1132,7 @@ void acpi_os_wait_events_complete(void)
flush_workqueue(kacpid_wq);
flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);
struct acpi_hp_work {
struct work_struct work;


@@ -196,6 +196,7 @@ int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
hc->callback = NULL;
hc->context = NULL;
mutex_unlock(&hc->lock);
acpi_os_wait_events_complete();
return 0;
}
@@ -292,6 +293,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device)
hc = acpi_driver_data(device);
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
acpi_os_wait_events_complete();
kfree(hc);
device->driver_data = NULL;
return 0;


@@ -121,7 +121,8 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
ARCH_BCM_63XX
help
This option enables support for the AHCI SATA3 controller found on
Broadcom SoC's.


@@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* start of new transfer.
*/
drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
drv_data->dma_rx_data.name = "ep93xx-pata-rx";
drv_data->dma_rx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
@@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
return;
drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
drv_data->dma_tx_data.name = "ep93xx-pata-tx";
drv_data->dma_tx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
@@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure receive channel direction and source address */
memset(&conf, 0, sizeof(conf));
conf.direction = DMA_FROM_DEVICE;
conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
@@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure transmit channel direction and destination address */
memset(&conf, 0, sizeof(conf));
conf.direction = DMA_TO_DEVICE;
conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
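
The ep93xx PATA hunks above fix a type confusion: dma_slave_config.direction takes enum dma_transfer_direction (DMA_MEM_TO_DEV/DMA_DEV_TO_MEM), whereas DMA_TO_DEVICE/DMA_FROM_DEVICE belong to enum dma_data_direction used by the streaming mapping API. A short sketch of the two APIs side by side (hypothetical foo_* names, illustrative only):

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* dmaengine slave configuration wants a transfer direction... */
static int foo_cfg_rx(struct dma_chan *chan, dma_addr_t fifo)
{
        struct dma_slave_config conf = {
                .direction      = DMA_DEV_TO_MEM,       /* device -> memory */
                .src_addr       = fifo,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };

        return dmaengine_slave_config(chan, &conf);
}

/* ...while the streaming DMA-mapping API wants a data direction. */
static dma_addr_t foo_map_rx(struct device *dev, void *buf, size_t len)
{
        return dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
}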


@@ -73,6 +73,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-y += mediatek/


@@ -7,7 +7,7 @@ config COMMON_CLK_KEYSTONE
config TI_SCI_CLK
tristate "TI System Control Interface clock drivers"
depends on (ARCH_KEYSTONE || COMPILE_TEST) && OF
depends on (ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST) && OF
depends on TI_SCI_PROTOCOL
default ARCH_KEYSTONE
---help---


@@ -152,7 +152,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -280,7 +280,7 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
struct exynos_cpuclk *cpuclk, void __iomem *base)
{
const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
unsigned long div0, div1 = 0, mux_reg;
unsigned long flags;
@@ -432,7 +432,7 @@ int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
else
cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
cpuclk->alt_parent = __clk_lookup(alt_parent);
cpuclk->alt_parent = __clk_get_hw(__clk_lookup(alt_parent));
if (!cpuclk->alt_parent) {
pr_err("%s: could not lookup alternate parent %s\n",
__func__, alt_parent);


@@ -49,7 +49,7 @@ struct exynos_cpuclk_cfg_data {
*/
struct exynos_cpuclk {
struct clk_hw hw;
struct clk *alt_parent;
struct clk_hw *alt_parent;
void __iomem *ctrl_base;
spinlock_t *lock;
const struct exynos_cpuclk_cfg_data *cfg;


@@ -634,6 +634,7 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {
};
static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
};
@@ -1163,8 +1164,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0),
GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0),
GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
/* GEN Block */
GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0),
GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0),


@@ -5630,7 +5630,7 @@ static const struct of_device_id exynos5433_cmu_of_match[] = {
static const struct dev_pm_ops exynos5433_cmu_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_cmu_suspend, exynos5433_cmu_resume,
NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};


@@ -78,18 +78,17 @@ struct sh_cmt_info {
unsigned int channels_mask;
unsigned long width; /* 16 or 32 bit version of hardware block */
unsigned long overflow_bit;
unsigned long clear_bits;
u32 overflow_bit;
u32 clear_bits;
/* callbacks for CMSTR and CMCSR access */
unsigned long (*read_control)(void __iomem *base, unsigned long offs);
u32 (*read_control)(void __iomem *base, unsigned long offs);
void (*write_control)(void __iomem *base, unsigned long offs,
unsigned long value);
u32 value);
/* callbacks for CMCNT and CMCOR access */
unsigned long (*read_count)(void __iomem *base, unsigned long offs);
void (*write_count)(void __iomem *base, unsigned long offs,
unsigned long value);
u32 (*read_count)(void __iomem *base, unsigned long offs);
void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
struct sh_cmt_channel {
@@ -103,13 +102,13 @@ struct sh_cmt_channel {
unsigned int timer_bit;
unsigned long flags;
unsigned long match_value;
unsigned long next_match_value;
unsigned long max_match_value;
u32 match_value;
u32 next_match_value;
u32 max_match_value;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
u64 total_cycles;
bool cs_enabled;
};
@@ -160,24 +159,22 @@ struct sh_cmt_device {
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
return ioread16(base + (offs << 1));
}
static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
return ioread32(base + (offs << 2));
}
static void sh_cmt_write16(void __iomem *base, unsigned long offs,
unsigned long value)
static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
iowrite16(value, base + (offs << 1));
}
static void sh_cmt_write32(void __iomem *base, unsigned long offs,
unsigned long value)
static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
iowrite32(value, base + (offs << 2));
}
@@ -242,7 +239,7 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
return ch->cmt->info->read_control(ch->iostart, 0);
@@ -250,8 +247,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
unsigned long value)
static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
if (ch->iostart)
ch->cmt->info->write_control(ch->iostart, 0, value);
@@ -259,39 +255,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}
static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
unsigned long value)
static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}
static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
unsigned long value)
static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}
static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
unsigned long value)
static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
int *has_wrapped)
static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
unsigned long v1, v2, v3;
int o1, o2;
u32 v1, v2, v3;
u32 o1, o2;
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
@@ -311,7 +303,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
unsigned long flags, value;
unsigned long flags;
u32 value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->cmt->lock, flags);
@@ -418,11 +411,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
unsigned long new_match;
unsigned long value = ch->next_match_value;
unsigned long delay = 0;
unsigned long now = 0;
int has_wrapped;
u32 value = ch->next_match_value;
u32 new_match;
u32 delay = 0;
u32 now = 0;
u32 has_wrapped;
now = sh_cmt_get_counter(ch, &has_wrapped);
ch->flags |= FLAG_REPROGRAM; /* force reprogram */
@@ -619,9 +612,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
unsigned long flags, raw;
unsigned long value;
int has_wrapped;
unsigned long flags;
u32 has_wrapped;
u64 value;
u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
@@ -694,7 +688,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",


@@ -508,6 +508,16 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* duration predictor do a better job next time.
*/
measured_us = 9 * MAX_INTERESTING / 10;
} else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
dev->poll_time_limit) {
/*
* The CPU exited the "polling" state due to a time limit, so
* the idle duration prediction leading to the selection of that
* state was inaccurate. If a better prediction had been made,
* the CPU might have been woken up from idle by the next timer.
* Assume that to be the case.
*/
measured_us = data->next_timer_us;
} else {
/* measured value */
measured_us = cpuidle_get_last_residency(dev);


@@ -17,6 +17,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
{
u64 time_start = local_clock();
dev->poll_time_limit = false;
local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
@@ -27,8 +29,10 @@
continue;
loop_count = 0;
if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT)
if (local_clock() - time_start > POLL_IDLE_TIME_LIMIT) {
dev->poll_time_limit = true;
break;
}
}
}
current_clr_polling();
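
The two cpuidle hunks above cooperate: poll_idle() records that it gave up because of the time limit, and menu_update() then treats next_timer_us, rather than the measured residency, as the idle length, since the poll was cut short by the limit and not by a real wakeup. A stand-alone sketch of a bounded busy-wait that reports why it exited (plain C with hypothetical names, not the kernel's clock primitives):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define POLL_TIME_LIMIT_NS (16 * 1000)  /* illustrative bound */

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Busy-wait for a wakeup condition or the time limit; report which. */
static bool poll_until(volatile int *need_wakeup, bool *hit_limit)
{
        uint64_t start = now_ns();

        *hit_limit = false;
        while (!*need_wakeup) {
                if (now_ns() - start > POLL_TIME_LIMIT_NS) {
                        *hit_limit = true;  /* prediction was too long */
                        break;
                }
        }
        return !*hit_limit;
}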


@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64
/*
* Null hashes to align with hw behavior on imx6sl and ull
* these are flipped for consistency with hw output
*/
const uint8_t sha1_null_hash[] =
"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
const uint8_t sha256_null_hash[] =
"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ];
uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
@@ -209,6 +225,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
if (actx->fill % AES_BLOCK_SIZE) {
dev_err(sdcp->dev, "Invalid block size!\n");
ret = -EINVAL;
goto aes_done_run;
}
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +260,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
aes_done_run:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +287,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
int split = 0;
unsigned int i, len, clen, rem = 0;
unsigned int i, len, clen, rem = 0, tlen = 0;
int init = 0;
bool limit_hit = false;
actx->fill = 0;
@@ -289,6 +314,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
tlen += len;
limit_hit = tlen > req->nbytes;
if (limit_hit)
len = req->nbytes - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -305,13 +335,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* If we filled the buffer or this is the last SG,
* submit the buffer.
*/
if (actx->fill == out_off || sg_is_last(src)) {
if (actx->fill == out_off || sg_is_last(src) ||
limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
out_tmp = out_buf;
last_out_len = actx->fill;
while (dst && actx->fill) {
if (!split) {
dst_buf = sg_virt(dst);
@@ -334,6 +366,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
}
}
} while (len);
if (limit_hit)
break;
}
/* Copy the IV for CBC for chaining */
if (!rctx->ecb) {
if (rctx->enc)
memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
else
memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
AES_BLOCK_SIZE);
}
return ret;
@@ -513,8 +558,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0;
@@ -536,10 +579,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0;
desc->status = 0;
/*
* Align driver with hw behavior when generating null hashes
*/
if (rctx->init && rctx->fini && desc->size == 0) {
struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
const uint8_t *sha_buf =
(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
sha1_null_hash : sha256_null_hash;
memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
ret = 0;
goto done_run;
}
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, req->result,
halg->digestsize, DMA_FROM_DEVICE);
digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
@@ -547,9 +603,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx);
if (rctx->fini)
dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE);
done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -567,6 +624,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
@@ -621,11 +679,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0;
/* For some reason, the result is flipped. */
for (i = 0; i < halg->digestsize / 2; i++) {
swap(req->result[i],
req->result[halg->digestsize - i - 1]);
}
/* For some reason the result is flipped */
for (i = 0; i < halg->digestsize; i++)
req->result[i] = out_buf[halg->digestsize - i - 1];
}
return 0;


@@ -11,6 +11,7 @@
*/
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -221,6 +222,49 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
return ERR_PTR(-ENODEV);
}
/**
* try_then_request_governor() - Try to find the governor and request the
* module if is not found.
* @name: name of the governor
*
* Search the list of devfreq governors and request the module and try again
* if is not found. This can happen when both drivers (the governor driver
* and the driver that call devfreq_add_device) are built as modules.
* devfreq_list_lock should be held by the caller. Returns the matched
* governor's pointer or an error pointer.
*/
static struct devfreq_governor *try_then_request_governor(const char *name)
{
struct devfreq_governor *governor;
int err = 0;
if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
WARN(!mutex_is_locked(&devfreq_list_lock),
"devfreq_list_lock must be locked.");
governor = find_devfreq_governor(name);
if (IS_ERR(governor)) {
mutex_unlock(&devfreq_list_lock);
if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
DEVFREQ_NAME_LEN))
err = request_module("governor_%s", "simpleondemand");
else
err = request_module("governor_%s", name);
/* Restore previous state before return */
mutex_lock(&devfreq_list_lock);
if (err)
return ERR_PTR(err);
governor = find_devfreq_governor(name);
}
return governor;
}
static int devfreq_notify_transition(struct devfreq *devfreq,
struct devfreq_freqs *freqs, unsigned int state)
{
@@ -283,11 +327,11 @@ int update_devfreq(struct devfreq *devfreq)
max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
if (min_freq && freq < min_freq) {
if (freq < min_freq) {
freq = min_freq;
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
}
if (max_freq && freq > max_freq) {
if (freq > max_freq) {
freq = max_freq;
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
@@ -534,10 +578,6 @@ static void devfreq_dev_release(struct device *dev)
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
if (devfreq->governor)
devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
@@ -646,9 +686,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_unlock(&devfreq->lock);
mutex_lock(&devfreq_list_lock);
list_add(&devfreq->node, &devfreq_list);
governor = find_devfreq_governor(devfreq->governor_name);
governor = try_then_request_governor(devfreq->governor_name);
if (IS_ERR(governor)) {
dev_err(dev, "%s: Unable to find governor for the device\n",
__func__);
@@ -664,15 +703,17 @@ struct devfreq *devfreq_add_device(struct device *dev,
__func__);
goto err_init;
}
list_add(&devfreq->node, &devfreq_list);
mutex_unlock(&devfreq_list_lock);
return devfreq;
err_init:
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
device_unregister(&devfreq->dev);
devfreq_remove_device(devfreq);
devfreq = NULL;
err_dev:
if (devfreq)
@@ -693,6 +734,9 @@ int devfreq_remove_device(struct devfreq *devfreq)
if (!devfreq)
return -EINVAL;
if (devfreq->governor)
devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
device_unregister(&devfreq->dev);
return 0;
@@ -991,7 +1035,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&devfreq_list_lock);
governor = find_devfreq_governor(str_governor);
governor = try_then_request_governor(str_governor);
if (IS_ERR(governor)) {
ret = PTR_ERR(governor);
goto out;
@@ -1126,17 +1170,26 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
unsigned long value;
int ret;
unsigned long max;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
max = df->max_freq;
if (value && max && value > max) {
ret = -EINVAL;
goto unlock;
if (value) {
if (value > df->max_freq) {
ret = -EINVAL;
goto unlock;
}
} else {
unsigned long *freq_table = df->profile->freq_table;
/* Get minimum frequency according to sorting order */
if (freq_table[0] < freq_table[df->profile->max_state - 1])
value = freq_table[0];
else
value = freq_table[df->profile->max_state - 1];
}
df->min_freq = value;
@@ -1161,17 +1214,26 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
unsigned long value;
int ret;
unsigned long min;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
min = df->min_freq;
if (value && min && value < min) {
ret = -EINVAL;
goto unlock;
if (value) {
if (value < df->min_freq) {
ret = -EINVAL;
goto unlock;
}
} else {
unsigned long *freq_table = df->profile->freq_table;
/* Get maximum frequency according to sorting order */
if (freq_table[0] < freq_table[df->profile->max_state - 1])
value = freq_table[df->profile->max_state - 1];
else
value = freq_table[0];
}
df->max_freq = value;
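
try_then_request_governor() above is a find-or-load pattern: look the governor up, and on a miss drop the list lock (request_module() may sleep and must not be called with it held), load the module, retake the lock and retry the lookup. A hedged sketch of that shape, with hypothetical foo_* names standing in for the devfreq internals:

#include <linux/err.h>
#include <linux/kmod.h>
#include <linux/mutex.h>

struct foo_governor;                            /* opaque here */
static DEFINE_MUTEX(foo_lock);
struct foo_governor *foo_find(const char *name);        /* assumed helper */

/* Caller holds foo_lock, as in the patch. */
static struct foo_governor *foo_get(const char *name)
{
        struct foo_governor *gov = foo_find(name);

        if (IS_ERR(gov)) {
                int err;

                mutex_unlock(&foo_lock);        /* request_module() sleeps */
                err = request_module("governor_%s", name);
                mutex_lock(&foo_lock);          /* restore caller's state */
                if (err)
                        return ERR_PTR(err);
                gov = foo_find(name);           /* retry after module load */
        }
        return gov;
}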


@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
return 0;
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
return i;
}
/**


@@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -1814,6 +1815,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
dmac->dev->dma_parms = &dmac->parms;
dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
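
The rcar-dmac hunk attaches a device_dma_parameters backing store before calling dma_set_max_seg_size(); without dev->dma_parms set, the helper has nowhere to record the limit and it is silently lost. A sketch of the probe-time idiom (foo_* names and the mask value are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dmac {
        struct device *dev;
        struct device_dma_parameters parms;     /* storage for DMA limits */
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dmac *d;

        d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        d->dev = &pdev->dev;
        d->dev->dma_parms = &d->parms;          /* must precede the call */
        dma_set_max_seg_size(d->dev, 0x00ffffff);       /* illustrative mask */
        return 0;
}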


@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
td_desc->desc_list_len, DMA_MEM_TO_DEV);
td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}


@@ -122,7 +122,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
priv->data->set(chip, offset, val);
chip->set(chip, offset, val);
return 0;
}


@@ -1140,7 +1140,7 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, false);
of_dma_configure(gmu->dev, node, true);
/* Fow now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;


@@ -38,9 +38,9 @@
#define INA3221_WARN3 0x0c
#define INA3221_MASK_ENABLE 0x0f
#define INA3221_CONFIG_MODE_SHUNT BIT(1)
#define INA3221_CONFIG_MODE_BUS BIT(2)
#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
#define INA3221_CONFIG_MODE_SHUNT BIT(0)
#define INA3221_CONFIG_MODE_BUS BIT(1)
#define INA3221_CONFIG_MODE_CONTINUOUS BIT(2)
#define INA3221_RSHUNT_DEFAULT 10000


@@ -325,8 +325,9 @@ static int k10temp_probe(struct pci_dev *pdev,
data->pdev = pdev;
if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
boot_cpu_data.x86_model == 0x70)) {
if (boot_cpu_data.x86 == 0x15 &&
((boot_cpu_data.x86_model & 0xf0) == 0x60 ||
(boot_cpu_data.x86_model & 0xf0) == 0x70)) {
data->read_htcreg = read_htcreg_nb_f15;
data->read_tempreg = read_tempreg_nb_f15;
} else if (boot_cpu_data.x86 == 0x17) {
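
The k10temp hunk widens two exact model matches into model-range matches by masking off the low nibble, covering all family-15h models 0x60..0x7f. The masking idiom, reduced to a predicate (hypothetical helper name):

#include <linux/types.h>

/* Match whole model ranges (0x60..0x6f, 0x70..0x7f) instead of the
 * two exact models 0x60 and 0x70. */
static bool foo_is_fam15h_60h_70h(u8 family, u8 model)
{
        return family == 0x15 &&
               ((model & 0xf0) == 0x60 || (model & 0xf0) == 0x70);
}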


@@ -704,10 +704,10 @@ static const char *const nct6795_temp_label[] = {
"PCH_CHIP_TEMP",
"PCH_CPU_TEMP",
"PCH_MCH_TEMP",
"PCH_DIM0_TEMP",
"PCH_DIM1_TEMP",
"PCH_DIM2_TEMP",
"PCH_DIM3_TEMP",
"Agent0 Dimm0",
"Agent0 Dimm1",
"Agent1 Dimm0",
"Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
"PECI Agent 0 Calibration",
@@ -742,10 +742,10 @@ static const char *const nct6796_temp_label[] = {
"PCH_CHIP_TEMP",
"PCH_CPU_TEMP",
"PCH_MCH_TEMP",
"PCH_DIM0_TEMP",
"PCH_DIM1_TEMP",
"PCH_DIM2_TEMP",
"PCH_DIM3_TEMP",
"Agent0 Dimm0",
"Agent0 Dimm1",
"Agent1 Dimm0",
"Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
"PECI Agent 0 Calibration",


@@ -52,7 +52,7 @@
/* Define the Counter Register, value = 100 for match 100% */
#define NPCM7XX_PWM_COUNTER_DEFAULT_NUM 255
#define NPCM7XX_PWM_CMR_DEFAULT_NUM 127
#define NPCM7XX_PWM_CMR_DEFAULT_NUM 255
#define NPCM7XX_PWM_CMR_MAX 255
/* default all PWM channels PRESCALE2 = 1 */


@@ -221,8 +221,12 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL);
if (IS_ERR(ctx->pwm)) {
dev_err(&pdev->dev, "Could not get PWM\n");
return PTR_ERR(ctx->pwm);
ret = PTR_ERR(ctx->pwm);
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev, "Could not get PWM: %d\n", ret);
return ret;
}
platform_set_drvdata(pdev, ctx);
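
The pwm-fan hunk applies standard probe-deferral etiquette: -EPROBE_DEFER means the PWM provider has not probed yet and the core will retry, so the driver only logs (now with the error code) for genuine failures. A sketch, with an invented foo_* wrapper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int foo_get_pwm(struct device *dev, struct pwm_device **out)
{
        struct pwm_device *pwm = devm_pwm_get(dev, NULL);

        if (IS_ERR(pwm)) {
                int ret = PTR_ERR(pwm);

                /* Deferral is routine: stay quiet, probe will rerun. */
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "Could not get PWM: %d\n", ret);
                return ret;
        }

        *out = pwm;
        return 0;
}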


@@ -433,12 +433,13 @@ config I2C_BCM_KONA
If you do not need KONA I2C interface, say N.
config I2C_BRCMSTB
tristate "BRCM Settop I2C controller"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
tristate "BRCM Settop/DSL I2C controller"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_63XX || \
COMPILE_TEST
default y
help
If you say yes to this option, support will be included for the
I2C interface on the Broadcom Settop SoCs.
I2C interface on the Broadcom Settop/DSL SoCs.
If you do not need I2C interface, say N.


@@ -503,7 +503,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_rd_buf)
return -ENOMEM;
@@ -526,7 +526,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
return -ENOMEM;
@@ -549,7 +549,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0);
dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
if (!dma_wr_buf)
return -ENOMEM;
@@ -561,7 +561,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
return -ENOMEM;
}
dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 0);
dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1);
if (!dma_rd_buf) {
dma_unmap_single(i2c->dev, wpaddr,
msgs->len, DMA_TO_DEVICE);
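
The mtk-i2c hunks change the threshold passed to i2c_get_dma_safe_msg_buf() from 0 to 1: the threshold is the minimum msg->len eligible for a bounce buffer, and 0 is not a valid "bounce everything" value. A usage sketch, assuming the 4.19-era helper pair (the actual DMA programming is elided):

#include <linux/i2c.h>

static int foo_xfer_one(struct i2c_msg *msg)
{
        /* Minimum length worth bouncing is 1; 0 would be invalid. */
        u8 *buf = i2c_get_dma_safe_msg_buf(msg, 1);

        if (!buf)
                return -ENOMEM;

        /* ... hand buf to the DMA engine and run the transfer ... */

        /* true: data was transferred, so copy back for read messages */
        i2c_put_dma_safe_msg_buf(buf, msg, true);
        return 0;
}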


@@ -661,9 +661,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
dev_dbg(omap->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
if (msg->len == 0)
return -EINVAL;
omap->receiver = !!(msg->flags & I2C_M_RD);
omap_i2c_resize_fifo(omap, msg->len, omap->receiver);
@@ -1179,6 +1176,10 @@ static const struct i2c_algorithm omap_i2c_algo = {
.functionality = omap_i2c_func,
};
static const struct i2c_adapter_quirks omap_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
#ifdef CONFIG_OF
static struct omap_i2c_bus_platform_data omap2420_pdata = {
.rev = OMAP_I2C_IP_VERSION_1,
@@ -1453,6 +1454,7 @@ omap_i2c_probe(struct platform_device *pdev)
adap->class = I2C_CLASS_DEPRECATED;
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->quirks = &omap_i2c_quirks;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
adap->bus_recovery_info = &omap_i2c_bus_recovery_info;


@@ -1088,11 +1088,6 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
writel(I2C_MINI_CORE | I2C_N_VAL, qup->base + QUP_CONFIG);
for (idx = 0; idx < num; idx++) {
if (msgs[idx].len == 0) {
ret = -EINVAL;
goto out;
}
if (qup_i2c_poll_state_i2c_master(qup)) {
ret = -EIO;
goto out;
@@ -1520,9 +1515,6 @@ qup_i2c_determine_mode_v2(struct qup_i2c_dev *qup,
/* All i2c_msgs should be transferred using either dma or cpu */
for (idx = 0; idx < num; idx++) {
if (msgs[idx].len == 0)
return -EINVAL;
if (msgs[idx].flags & I2C_M_RD)
max_rx_len = max_t(unsigned int, max_rx_len,
msgs[idx].len);
@@ -1636,9 +1628,14 @@ static const struct i2c_algorithm qup_i2c_algo_v2 = {
* which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
*/
static const struct i2c_adapter_quirks qup_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
.max_read_len = QUP_READ_LIMIT,
};
static const struct i2c_adapter_quirks qup_i2c_quirks_v2 = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
{
clk_prepare_enable(qup->clk);
@@ -1701,6 +1698,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
is_qup_v1 = true;
} else {
qup->adap.algo = &qup_i2c_algo_v2;
qup->adap.quirks = &qup_i2c_quirks_v2;
is_qup_v1 = false;
if (acpi_match_device(qup_i2c_acpi_match, qup->dev))
goto nodma;


@@ -684,9 +684,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
tegra_i2c_flush_fifos(i2c_dev);
if (msg->len == 0)
return -EINVAL;
i2c_dev->msg_buf = msg->buf;
i2c_dev->msg_buf_remaining = msg->len;
i2c_dev->msg_err = I2C_ERR_NONE;
@@ -831,6 +828,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
/* payload size is only 12 bit */
static const struct i2c_adapter_quirks tegra_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
.max_read_len = 4096,
.max_write_len = 4096 - 12,
};


@@ -281,9 +281,6 @@ static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c,
int ret;
int i;
if (msg->len == 0)
return -EINVAL;
zx2967_i2c_flush_fifos(i2c);
i2c->cur_trans = msg->buf;
@@ -498,6 +495,10 @@ static const struct i2c_algorithm zx2967_i2c_algo = {
.functionality = zx2967_i2c_func,
};
static const struct i2c_adapter_quirks zx2967_i2c_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
static const struct of_device_id zx2967_i2c_of_match[] = {
{ .compatible = "zte,zx296718-i2c", },
{ },
@@ -568,6 +569,7 @@ static int zx2967_i2c_probe(struct platform_device *pdev)
strlcpy(i2c->adap.name, "zx2967 i2c adapter",
sizeof(i2c->adap.name));
i2c->adap.algo = &zx2967_i2c_algo;
i2c->adap.quirks = &zx2967_i2c_quirks;
i2c->adap.nr = pdev->id;
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = pdev->dev.of_node;

View file

@@ -1,5 +1,5 @@
/*
* Copyright(c) 2015-2017 Intel Corporation.
* Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -4829,7 +4829,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
int ret;
int pkey_idx;
int local_mad = 0;
u32 resp_len = 0;
u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
struct hfi1_ibport *ibp = to_iport(ibdev, port);
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);


@@ -665,7 +665,9 @@ struct hns_roce_caps {
u32 max_sq_sg; /* 2 */
u32 max_sq_inline; /* 32 */
u32 max_rq_sg; /* 2 */
u32 max_extend_sg;
int num_qps; /* 256k */
int reserved_qps;
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */


@@ -121,6 +121,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
if (wr->opcode == IB_WR_RDMA_READ) {
*bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n");
return -EINVAL;
}
@@ -1193,6 +1194,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1222,6 +1224,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
@@ -2266,6 +2269,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
wc->slid = 0;
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);


@@ -50,6 +50,7 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -78,6 +79,7 @@
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1


@@ -31,6 +31,7 @@
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
@@ -372,6 +373,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(hr_dev->dev,
"The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
hr_qp->sge.sge_shift = 4;
/* Get buf size, SQ and RQ are aligned to page_szie */
@@ -465,6 +476,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
hr_qp->sge.sge_cnt);
return -EINVAL;
}
}
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -1106,14 +1125,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0;
int reserved_from_bot;
int ret;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
/* A port include two SQP, six port total 12 */
/* In hw v1, a port include two SQP, six ports total 12 */
if (hr_dev->caps.max_sq_sg <= 2)
reserved_from_bot = SQP_NUM;
else
reserved_from_bot = hr_dev->caps.reserved_qps;
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
hr_dev->caps.num_qps - 1, SQP_NUM,
hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top);
if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",


@@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
if (mthca_cmd_init(mdev)) {
err = mthca_cmd_init(mdev);
if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}


@@ -31,6 +31,7 @@
* SOFTWARE.
*/
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
q->buf_size, &q->ip);
if (err)
if (err) {
vfree(q->buf);
kfree(q);
return err;
}
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
sizeof(uresp->srq_num)))
sizeof(uresp->srq_num))) {
rxe_queue_cleanup(q);
return -EFAULT;
}
}
return 0;


@@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
if (unlikely(!dlid))
v_warn("Null dlid in MAC address\n");
} else if (def_port != OPA_VNIC_INVALID_PORT) {
dlid = info->vesw.u_ucast_dlid[def_port];
if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
dlid = info->vesw.u_ucast_dlid[def_port];
}
}


@@ -558,20 +558,33 @@ static int __maybe_unused silead_ts_suspend(struct device *dev)
static int __maybe_unused silead_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
bool second_try = false;
int error, status;
silead_ts_set_power(client, SILEAD_POWER_ON);
retry:
error = silead_ts_reset(client);
if (error)
return error;
if (second_try) {
error = silead_ts_load_fw(client);
if (error)
return error;
}
error = silead_ts_startup(client);
if (error)
return error;
status = silead_ts_get_status(client);
if (status != SILEAD_STATUS_OK) {
if (!second_try) {
second_try = true;
dev_dbg(dev, "Reloading firmware after unsuccessful resume\n");
goto retry;
}
dev_err(dev, "Resume error, status: 0x%02x\n", status);
return -ENODEV;
}


@@ -195,6 +195,7 @@ static int st1232_ts_probe(struct i2c_client *client,
input_dev->id.bustype = BUS_I2C;
input_dev->dev.parent = &client->dev;
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
__set_bit(EV_SYN, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(EV_ABS, input_dev->evbit);


@@ -567,7 +567,7 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
atomic_t sync_nr;
u32 sync_nr;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -964,14 +964,13 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
.sync = {
.msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
.msiaddr = virt_to_phys(&smmu->sync_count),
},
};
arm_smmu_cmdq_build_cmd(cmd, &ent);
spin_lock_irqsave(&smmu->cmdq.lock, flags);
ent.sync.msidata = ++smmu->sync_nr;
arm_smmu_cmdq_build_cmd(cmd, &ent);
arm_smmu_cmdq_insert_cmd(smmu, cmd);
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
@@ -2196,7 +2195,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
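
The arm-smmu-v3 hunks replace the atomic sync_nr with a plain counter incremented inside the cmdq spinlock, so MSI sequence numbers are allocated in the same order the commands are queued; allocating outside the lock let a preempted CPU insert an older number after a newer one. The locking shape, reduced to a sketch with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_cmdq {
        spinlock_t lock;
        u32 seq;                /* plain counter: only touched under lock */
};

static u32 foo_issue(struct foo_cmdq *q)
{
        unsigned long flags;
        u32 seq;

        spin_lock_irqsave(&q->lock, flags);
        seq = ++q->seq;         /* allocate in queue order */
        /* ... build the command with seq and insert it ... */
        spin_unlock_irqrestore(&q->lock, flags);

        return seq;
}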


@@ -574,13 +574,12 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
} else if (unmap_idx >= 0) {
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
return size;
}
if (unmap_idx < 0)
return __arm_lpae_unmap(data, iova, size, lvl, tablep);
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
return size;
return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,


@@ -105,7 +105,7 @@ static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
struct mvebu_icu *icu = d->host_data;
struct mvebu_icu *icu = platform_msi_get_host_data(d);
unsigned int icu_group;
/* Check the count of the parameters in dt */


@@ -893,10 +893,8 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_rq rqd;
int ret = 0;
memset(&rqd, 0, sizeof(struct nvm_rq));
struct nvm_rq rqd = {NULL};
int ret;
pblk_setup_e_rq(pblk, &rqd, ppa);
@@ -904,19 +902,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
* with writes. Thus, there is no need to take the LUN semaphore.
*/
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
pblk_ppa_to_line(ppa),
pblk_ppa_to_pos(geo, ppa));
rqd.error = ret;
goto out;
}
out:
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -1788,6 +1773,17 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
emeta_buf->header.id = cpu_to_le32(line->id);
emeta_buf->header.type = cpu_to_le16(line->type);
emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
emeta_buf->header.crc = cpu_to_le32(
pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
}
emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
@@ -1805,8 +1801,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
spin_unlock(&l_mg->close_lock);
pblk_line_should_sync_meta(pblk);
}
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)


@@ -181,7 +181,8 @@ static int pblk_rwb_init(struct pblk *pblk)
unsigned int power_size, power_seg_sz;
int pgs_in_buffer;
pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
* geo->all_luns;
if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
buffer_size = write_buffer_size;
@@ -371,9 +372,11 @@ static int pblk_core_init(struct pblk *pblk)
atomic64_set(&pblk->nr_flush, 0);
pblk->nr_flush_rst = 0;
pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
pblk->min_write_pgs = geo->ws_opt;
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
@@ -1083,7 +1086,8 @@ static int pblk_lines_init(struct pblk *pblk)
if (!nr_free_chks) {
pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
return -EINTR;
ret = -EINTR;
goto fail_free_lines;
}
pblk_set_provision(pblk, nr_free_chks);


@@ -343,7 +343,6 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
{
int sz;
sz = snprintf(page, PAGE_SIZE,
"user:%lld gc:%lld pad:%lld WA:",
user, gc, pad);
@@ -355,7 +354,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
u32 wa_frac;
wa_int = (user + gc + pad) * 100000;
wa_int = div_u64(wa_int, user);
wa_int = div64_u64(wa_int, user);
wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",


@@ -418,6 +418,7 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
struct cache *ca;
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
@@ -429,6 +430,10 @@ static int __uuid_write(struct cache_set *c)
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
/* Only one bucket used for uuid write */
ca = PTR_CACHE(c, &k.key, 0);
atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
bkey_copy(&c->uuid_bucket, &k.key);
bkey_put(c, &k.key);
return 0;
@@ -1004,6 +1009,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);


@@ -8778,6 +8778,18 @@ static void md_start_sync(struct work_struct *ws)
*/
void md_check_recovery(struct mddev *mddev)
{
if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
/* Write superblock - thread that called mddev_suspend()
* holds reconfig_mutex for us.
*/
set_bit(MD_UPDATING_SB, &mddev->flags);
smp_mb__after_atomic();
if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
md_update_sb(mddev, 0);
clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
wake_up(&mddev->sb_wait);
}
if (mddev->suspended)
return;
@@ -8938,16 +8950,6 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
} else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
/* Write superblock - thread that called mddev_suspend()
* holds reconfig_mutex for us.
*/
set_bit(MD_UPDATING_SB, &mddev->flags);
smp_mb__after_atomic();
if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
md_update_sb(mddev, 0);
clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);


@@ -936,6 +936,17 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
/* Start bit, switch to receive state */
pin->ts = ts;
pin->state = CEC_ST_RX_START_BIT_LOW;
/*
* If a transmit is pending, then that transmit should
* use a signal free time of no more than
* CEC_SIGNAL_FREE_TIME_NEW_INITIATOR since it will
* have a new initiator due to the receive that is now
* starting.
*/
if (pin->tx_msg.len && pin->tx_signal_free_time >
CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
pin->tx_signal_free_time =
CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
break;
}
if (ktime_to_ns(pin->ts) == 0)
@@ -1158,6 +1169,15 @@ static int cec_pin_adap_transmit(struct cec_adapter *adap, u8 attempts,
{
struct cec_pin *pin = adap->pin;
/*
* If a receive is in progress, then this transmit should use
* a signal free time of max CEC_SIGNAL_FREE_TIME_NEW_INITIATOR
* since when it starts transmitting it will have a new initiator.
*/
if (pin->state != CEC_ST_IDLE &&
signal_free_time > CEC_SIGNAL_FREE_TIME_NEW_INITIATOR)
signal_free_time = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
pin->tx_signal_free_time = signal_free_time;
pin->tx_extra_bytes = 0;
pin->tx_msg = *msg;


@@ -569,7 +569,8 @@ static int adv748x_parse_dt(struct adv748x_state *state)
{
struct device_node *ep_np = NULL;
struct of_endpoint ep;
bool found = false;
bool out_found = false;
bool in_found = false;
for_each_endpoint_of_node(state->dev->of_node, ep_np) {
of_graph_parse_endpoint(ep_np, &ep);
@@ -592,10 +593,17 @@ static int adv748x_parse_dt(struct adv748x_state *state)
of_node_get(ep_np);
state->endpoints[ep.port] = ep_np;
found = true;
/*
* At least one input endpoint and one output endpoint shall
* be defined.
*/
if (ep.port < ADV748X_PORT_TXA)
in_found = true;
else
out_found = true;
}
return found ? 0 : -ENODEV;
return in_found && out_found ? 0 : -ENODEV;
}
static void adv748x_dt_cleanup(struct adv748x_state *state)
@@ -627,6 +635,17 @@ static int adv748x_probe(struct i2c_client *client,
state->i2c_clients[ADV748X_PAGE_IO] = client;
i2c_set_clientdata(client, state);
/*
* We can not use container_of to get back to the state with two TXs;
* Initialize the TXs's fields unconditionally on the endpoint
* presence to access them later.
*/
state->txa.state = state->txb.state = state;
state->txa.page = ADV748X_PAGE_TXA;
state->txb.page = ADV748X_PAGE_TXB;
state->txa.port = ADV748X_PORT_TXA;
state->txb.port = ADV748X_PORT_TXB;
/* Discover and process ports declared by the Device tree endpoints */
ret = adv748x_parse_dt(state);
if (ret) {


@@ -266,19 +266,10 @@ static int adv748x_csi2_init_controls(struct adv748x_csi2 *tx)
int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
{
struct device_node *ep;
int ret;
/* We can not use container_of to get back to the state with two TXs */
tx->state = state;
tx->page = is_txa(tx) ? ADV748X_PAGE_TXA : ADV748X_PAGE_TXB;
ep = state->endpoints[is_txa(tx) ? ADV748X_PORT_TXA : ADV748X_PORT_TXB];
if (!ep) {
adv_err(state, "No endpoint found for %s\n",
is_txa(tx) ? "txa" : "txb");
return -ENODEV;
}
if (!is_tx_enabled(tx))
return 0;
/* Initialise the virtual channel */
adv748x_csi2_set_virtual_channel(tx, 0);
@@ -288,7 +279,7 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
is_txa(tx) ? "txa" : "txb");
/* Ensure that matching is based upon the endpoint fwnodes */
tx->sd.fwnode = of_fwnode_handle(ep);
tx->sd.fwnode = of_fwnode_handle(state->endpoints[tx->port]);
/* Register internal ops for incremental subdev registration */
tx->sd.internal_ops = &adv748x_csi2_internal_ops;
@@ -321,6 +312,9 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
void adv748x_csi2_cleanup(struct adv748x_csi2 *tx)
{
if (!is_tx_enabled(tx))
return;
v4l2_async_unregister_subdev(&tx->sd);
media_entity_cleanup(&tx->sd.entity);
v4l2_ctrl_handler_free(&tx->ctrl_hdl);


@@ -82,6 +82,7 @@ struct adv748x_csi2 {
struct adv748x_state *state;
struct v4l2_mbus_framefmt format;
unsigned int page;
unsigned int port;
struct media_pad pads[ADV748X_CSI2_NR_PADS];
struct v4l2_ctrl_handler ctrl_hdl;
@@ -91,6 +92,7 @@ struct adv748x_csi2 {
#define notifier_to_csi2(n) container_of(n, struct adv748x_csi2, notifier)
#define adv748x_sd_to_csi2(sd) container_of(sd, struct adv748x_csi2, sd)
#define is_tx_enabled(_tx) ((_tx)->state->endpoints[(_tx)->port] != NULL)
enum adv748x_hdmi_pads {
ADV748X_HDMI_SINK,


@@ -169,7 +169,8 @@ static int dw9714_probe(struct i2c_client *client)
return 0;
err_cleanup:
dw9714_subdev_cleanup(dw9714_dev);
v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
media_entity_cleanup(&dw9714_dev->sd.entity);
dev_err(&client->dev, "Probe failed: %d\n", rval);
return rval;
}


@@ -218,7 +218,8 @@ static int dw9807_probe(struct i2c_client *client)
return 0;
err_cleanup:
dw9807_subdev_cleanup(dw9807_dev);
v4l2_ctrl_handler_free(&dw9807_dev->ctrls_vcm);
media_entity_cleanup(&dw9807_dev->sd.entity);
return rval;
}


@@ -2572,8 +2572,6 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
if (frame_rate < 0)
frame_rate = OV5640_15_FPS;
sensor->current_fr = frame_rate;
sensor->frame_interval = fi->interval;
mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
mode->vact, true);
if (!mode) {
@@ -2581,7 +2579,10 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
goto out;
}
if (mode != sensor->current_mode) {
if (mode != sensor->current_mode ||
frame_rate != sensor->current_fr) {
sensor->current_fr = frame_rate;
sensor->frame_interval = fi->interval;
sensor->current_mode = mode;
sensor->pending_mode_change = true;
}


@@ -1252,7 +1252,7 @@ static void cx18_cancel_out_work_orders(struct cx18 *cx)
{
int i;
for (i = 0; i < CX18_MAX_STREAMS; i++)
if (&cx->streams[i].video_dev)
if (cx->streams[i].video_dev.v4l2_dev)
cancel_work_sync(&cx->streams[i].out_work_order);
}

Some files were not shown because too many files have changed in this diff.