This is the 4.19.82 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3Ct0gACgkQONu9yGCS
 aT46EQ//SsO3zq9K1P9HVRCQh5+ZPrk2uynVQIlMunhvhix8+SA+UopfNWwqM30n
 aEUPHk9snHTiRm5VRCBip8ea3/uZCpLTAwm/L0OKSyHpZ/GDGIQxNP5svjMQePYp
 57mmhVEV387gHoJiXxi8OiOYuPagscw809UkMBTIgl1g3B+vicy6IYEjlvmwr+vy
 6ghqEDkrR+2+25n/yrPPfesL+rlpE4nB6QvNkYnDSzyJTTKP76Wh21bP4r0mV2RN
 U4X5irbdfTSEYcK5DbNTUgMsUEk1ixxY6vHy7yWSe8ED9oMHdfjzfUS15pjbyrxD
 GLXw3o7Lv/ES7HGZpG073QLIp9oJPtvhrFHvIwBicE2pvBE3++zmmCFdQMx/tY25
 sUUctWvpeizX0qD+7mH6VrMXZB7DbHTUGfxdtfPJfk3l+c4NtSxe6hPaOC1MDJaY
 qBj7tDCoeB1HkLSqKkCGlMu9v1/V6+8E0Gqb6DbPC3IRwezHLq0U/LDCoTf874Qs
 0Fx5t+9Fxk5oeG6ifEmaYP0xT9untUi5EfFAYGCdEGYv5GZskmQgi7BLrnzUCfqM
 T+oruajADfSfe0ylgcXp9vdVkVWx/arbftOH3IVAZhe6Qj1jq57sMmSaLii7QyCC
 Z+rRwGNCO3WhkobBbLpF75uLMYulMqtlsep0Y2JIxdhcticiQbE=
 =1W43
 -----END PGP SIGNATURE-----

Merge 4.19.82 into android-4.19

Changes in 4.19.82
	zram: fix race between backing_dev_show and backing_dev_store
	dm snapshot: introduce account_start_copy() and account_end_copy()
	dm snapshot: rework COW throttling to fix deadlock
	Btrfs: fix inode cache block reserve leak on failure to allocate data space
	Btrfs: fix memory leak due to concurrent append writes with fiemap
	btrfs: qgroup: Always free PREALLOC META reserve in btrfs_delalloc_release_extents()
	btrfs: tracepoints: Fix wrong parameter order for qgroup events
	wil6210: fix freeing of rx buffers in EDMA mode
	f2fs: flush quota blocks after turnning it off
	scsi: lpfc: Fix a duplicate 0711 log message number.
	sc16is7xx: Fix for "Unexpected interrupt: 8"
	powerpc/powernv: hold device_hotplug_lock when calling memtrace_offline_pages()
	f2fs: fix to recover inode's i_gc_failures during POR
	f2fs: fix to recover inode->i_flags of inode block during POR
	HID: i2c-hid: add Direkt-Tek DTLAPY133-1 to descriptor override
	usb: dwc2: fix unbalanced use of external vbus-supply
	tools/power turbostat: fix goldmont C-state limit decoding
	x86/cpu: Add Atom Tremont (Jacobsville)
	drm/msm/dpu: handle failures while initializing displays
	bcache: fix input overflow to writeback_rate_minimum
	PCI: Fix Switchtec DMA aliasing quirk dmesg noise
	Btrfs: fix deadlock on tree root leaf when finding free extent
	netfilter: ipset: Make invalid MAC address checks consistent
	HID: i2c-hid: Disable runtime PM for LG touchscreen
	HID: i2c-hid: Ignore input report if there's no data present on Elan touchpanels
	HID: i2c-hid: Add Odys Winbook 13 to descriptor override
	platform/x86: Add the VLV ISP PCI ID to atomisp2_pm
	platform/x86: Fix config space access for intel_atomisp2_pm
	ath10k: assign 'n_cipher_suites = 11' for WCN3990 to enable WPA3
	clk: boston: unregister clks on failure in clk_boston_setup()
	scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks
	staging: mt7621-pinctrl: use pinconf-generic for 'dt_node_to_map' and 'dt_free_map'
	HID: Add ASUS T100CHI keyboard dock battery quirks
	NFSv4: Ensure that the state manager exits the loop on SIGKILL
	HID: steam: fix boot loop with bluetooth firmware
	HID: steam: fix deadlock with input devices.
	samples: bpf: fix: seg fault with NULL pointer arg
	usb: dwc3: gadget: early giveback if End Transfer already completed
	usb: dwc3: gadget: clear DWC3_EP_TRANSFER_STARTED on cmd complete
	ALSA: usb-audio: Cleanup DSD whitelist
	usb: handle warm-reset port requests on hub resume
	rtc: pcf8523: set xtal load capacitance from DT
	arm64: Add MIDR encoding for HiSilicon Taishan CPUs
	arm64: kpti: Whitelist HiSilicon Taishan v110 CPUs
	mlxsw: spectrum: Set LAG port collector only when active
	scsi: lpfc: Correct localport timeout duration error
	CIFS: Respect SMB2 hdr preamble size in read responses
	cifs: add credits from unmatched responses/messages
	ALSA: hda/realtek - Apply ALC294 hp init also for S4 resume
	media: vimc: Remove unused but set variables
	ext4: disallow files with EXT4_JOURNAL_DATA_FL from EXT4_IOC_SWAP_BOOT
	exec: load_script: Do not exec truncated interpreter path
	net: dsa: mv88e6xxx: Release lock while requesting IRQ
	PCI/PME: Fix possible use-after-free on remove
	drm/amd/display: fix odm combine pipe reset
	power: supply: max14656: fix potential use-after-free
	iio: adc: meson_saradc: Fix memory allocation order
	iio: fix center temperature of bmc150-accel-core
	libsubcmd: Make _FORTIFY_SOURCE defines dependent on the feature
	perf tests: Avoid raising SEGV using an obvious NULL dereference
	perf map: Fix overlapped map handling
	perf script brstackinsn: Fix recovery from LBR/binary mismatch
	perf jevents: Fix period for Intel fixed counters
	perf tools: Propagate get_cpuid() error
	perf annotate: Propagate perf_env__arch() error
	perf annotate: Fix the signedness of failure returns
	perf annotate: Propagate the symbol__annotate() error return
	perf annotate: Return appropriate error code for allocation failures
	staging: rtl8188eu: fix null dereference when kzalloc fails
	RDMA/hfi1: Prevent memory leak in sdma_init
	RDMA/iwcm: Fix a lock inversion issue
	HID: hyperv: Use in-place iterator API in the channel callback
	nfs: Fix nfsi->nrequests count error on nfs_inode_remove_request
	arm64: ftrace: Ensure synchronisation in PLT setup for Neoverse-N1 #1542419
	tty: serial: owl: Fix the link time qualifier of 'owl_uart_exit()'
	tty: n_hdlc: fix build on SPARC
	gpio: max77620: Use correct unit for debounce times
	fs: cifs: mute -Wunused-const-variable message
	serial: mctrl_gpio: Check for NULL pointer
	efi/cper: Fix endianness of PCIe class code
	efi/x86: Do not clean dummy variable in kexec path
	MIPS: include: Mark __cmpxchg as __always_inline
	x86/xen: Return from panic notifier
	ocfs2: clear zero in unaligned direct IO
	fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()
	fs: ocfs2: fix a possible null-pointer dereference in ocfs2_write_end_nolock()
	fs: ocfs2: fix a possible null-pointer dereference in ocfs2_info_scan_inode_alloc()
	arm64: armv8_deprecated: Checking return value for memory allocation
	x86/cpu: Add Comet Lake to the Intel CPU models header
	sched/vtime: Fix guest/system mis-accounting on task switch
	perf/x86/amd: Change/fix NMI latency mitigation to use a timestamp
	drm/amdgpu: fix memory leak
	iio: imu: adis16400: release allocated memory on failure
	MIPS: include: Mark __xchg as __always_inline
	MIPS: fw: sni: Fix out of bounds init of o32 stack
	virt: vbox: fix memory leak in hgcm_call_preprocess_linaddr
	nbd: fix possible sysfs duplicate warning
	NFSv4: Fix leak of clp->cl_acceptor string
	s390/uaccess: avoid (false positive) compiler warnings
	tracing: Initialize iter->seq after zeroing in tracing_read_pipe()
	ARM: 8914/1: NOMMU: Fix exc_ret for XIP
	ALSA: hda/realtek: Reduce the Headphone static noise on XPS 9350/9360
	iwlwifi: exclude GEO SAR support for 3168
	nbd: verify socket is supported during setup
	USB: legousbtower: fix a signedness bug in tower_probe()
	thunderbolt: Use 32-bit writes when writing ring producer/consumer
	ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe()
	fuse: flush dirty data/metadata before non-truncate setattr
	fuse: truncate pending writes on O_TRUNC
	ALSA: bebob: Fix prototype of helper function to return negative value
	ALSA: hda/realtek - Fix 2 front mics of codec 0x623
	ALSA: hda/realtek - Add support for ALC623
	UAS: Revert commit 3ae62a42090f ("UAS: fix alignment of scatter/gather segments")
	USB: gadget: Reject endpoints with 0 maxpacket value
	usb-storage: Revert commit 747668dbc061 ("usb-storage: Set virt_boundary_mask to avoid SG overflows")
	USB: ldusb: fix ring-buffer locking
	USB: ldusb: fix control-message timeout
	usb: xhci: fix __le32/__le64 accessors in debugfs code
	USB: serial: whiteheat: fix potential slab corruption
	USB: serial: whiteheat: fix line-speed endianness
	scsi: target: cxgbit: Fix cxgbit_fw4_ack()
	HID: i2c-hid: add Trekstor Primebook C11B to descriptor override
	HID: Fix assumption that devices have inputs
	HID: fix error message in hid_open_report()
	nl80211: fix validation of mesh path nexthop
	s390/cmm: fix information leak in cmm_timeout_handler()
	s390/idle: fix cpu idle time calculation
	arm64: Ensure VM_WRITE|VM_SHARED ptes are clean by default
	rtlwifi: Fix potential overflow on P2P code
	dmaengine: qcom: bam_dma: Fix resource leak
	dmaengine: cppi41: Fix cppi41_dma_prep_slave_sg() when idle
	drm/amdgpu/powerplay/vega10: allow undervolting in p7
	NFS: Fix an RCU lock leak in nfs4_refresh_delegation_stateid()
	batman-adv: Avoid free/alloc race when handling OGM buffer
	llc: fix sk_buff leak in llc_sap_state_process()
	llc: fix sk_buff leak in llc_conn_service()
	rxrpc: Fix call ref leak
	rxrpc: rxrpc_peer needs to hold a ref on the rxrpc_local record
	rxrpc: Fix trace-after-put looking at the put peer record
	NFC: pn533: fix use-after-free and memleaks
	bonding: fix potential NULL deref in bond_update_slave_arr
	net: usb: sr9800: fix uninitialized local variable
	sch_netem: fix rcu splat in netem_enqueue()
	ALSA: timer: Simplify error path in snd_timer_open()
	ALSA: timer: Fix mutex deadlock at releasing card
	ALSA: usb-audio: DSD auto-detection for Playback Designs
	ALSA: usb-audio: Update DSD support quirks for Oppo and Rotel
	ALSA: usb-audio: Add DSD support for Gustard U16/X26 USB Interface
	powerpc/powernv: Fix CPU idle to be called with IRQs disabled
	Revert "ALSA: hda: Flush interrupts on disabling"
	Linux 4.19.82

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I79ced3dcffed0086af7d8a77116e8061915677a1
commit aa4d6b3489 (Greg Kroah-Hartman, 2019-11-06 13:21:58 +01:00)
160 changed files with 1405 additions and 742 deletions


@ -5136,6 +5136,10 @@
the unplug protocol
never -- do not unplug even if version check succeeds
xen_legacy_crash [X86,XEN]
Crash from Xen panic notifier, without executing late
panic() code such as dumping handler.
xen_nopvspin [X86,XEN]
Disables the ticketlock slowpath using Xen PV
optimizations.


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 81
SUBLEVEL = 82
EXTRAVERSION =
NAME = "People's Front"


@ -72,7 +72,7 @@ ENDPROC(__vet_atags)
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register
* r0 = cp#15 control register (exc_ret for M-class)
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
@ -141,7 +141,8 @@ __mmap_switched_data:
#ifdef CONFIG_CPU_CP15
.long cr_alignment @ r3
#else
.long 0 @ r3
M_CLASS(.long exc_ret) @ r3
AR_CLASS(.long 0) @ r3
#endif
.size __mmap_switched_data, . - __mmap_switched_data


@ -205,6 +205,8 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
bic r0, r0, #V7M_SCB_CCR_IC
#endif
str r0, [r12, V7M_SCB_CCR]
/* Pass exc_ret to __mmap_switched */
mov r0, r10
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)


@ -139,9 +139,8 @@ __v7m_setup_cont:
cpsie i
svc #0
1: cpsid i
ldr r0, =exc_ret
orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
str lr, [r0]
/* Calculate exc_ret */
orr r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR


@ -68,6 +68,7 @@
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_IMP_NVIDIA 0x4E
#define ARM_CPU_IMP_HISI 0x48
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@ -96,6 +97,8 @@
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
#define HISI_CPU_PART_TSV110 0xD01
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@ -114,6 +117,7 @@
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#ifndef __ASSEMBLY__


@ -43,11 +43,11 @@
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@ -91,8 +91,9 @@
#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)


@ -177,6 +177,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
struct insn_emulation *insn;
insn = kzalloc(sizeof(*insn), GFP_KERNEL);
if (!insn)
return;
insn->ops = ops;
insn->min = INSN_UNDEF;
@ -236,6 +239,8 @@ static void __init register_insn_emulation_sysctl(void)
insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
GFP_KERNEL);
if (!insns_sysctl)
return;
raw_spin_lock_irqsave(&insn_emulation_lock, flags);
list_for_each_entry(insn, &insn_emulation, node) {


@ -906,6 +906,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
{ /* sentinel */ }
};
char const *str = "kpti command line option";


@ -119,10 +119,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/*
* Ensure updated trampoline is visible to instruction
* fetch before we patch in the branch.
* fetch before we patch in the branch. Although the
* architecture doesn't require an IPI in this case,
* Neoverse-N1 erratum #1542419 does require one
* if the TLB maintenance in module_enable_ro() is
* skipped due to rodata_enabled. It doesn't seem worth
* it to make it conditional given that this is
* certainly not a fast-path.
*/
__flush_icache_range((unsigned long)&dst[0],
(unsigned long)&dst[1]);
flush_icache_range((unsigned long)&dst[0],
(unsigned long)&dst[1]);
}
addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */


@ -43,7 +43,7 @@
/* O32 stack has to be 8-byte aligned. */
static u64 o32_stk[4096];
#define O32_STK &o32_stk[sizeof(o32_stk)]
#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")


@ -73,8 +73,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
unsigned int size);
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
int size)
static __always_inline
unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
{
switch (size) {
case 1:
@ -146,8 +146,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size);
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
static __always_inline
unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
switch (size) {
case 1:


@ -70,6 +70,7 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
return 0;
}
/* called with device_hotplug_lock held */
static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
{
u64 end_pfn = start_pfn + nr_pages - 1;
@ -110,6 +111,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
/* Trace memory needs to be aligned to the size */
end_pfn = round_down(end_pfn - nr_pages, nr_pages);
lock_device_hotplug();
for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
/*
@ -118,7 +120,6 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
* we never try to remove memory that spans two iomem
* resources.
*/
lock_device_hotplug();
end_pfn = base_pfn + nr_pages;
for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
remove_memory(nid, pfn << PAGE_SHIFT, bytes);
@ -127,6 +128,7 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
return base_pfn << PAGE_SHIFT;
}
}
unlock_device_hotplug();
return 0;
}


@ -150,20 +150,25 @@ static int pnv_smp_cpu_disable(void)
return 0;
}
static void pnv_flush_interrupts(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (xive_enabled())
xive_flush_interrupt();
else
icp_opal_flush_interrupt();
} else {
icp_native_flush_interrupt();
}
}
static void pnv_smp_cpu_kill_self(void)
{
unsigned long srr1, unexpected_mask, wmask;
unsigned int cpu;
unsigned long srr1, wmask;
u64 lpcr_val;
/* Standard hot unplug procedure */
/*
* This hard disables local interurpts, ensuring we have no lazy
* irqs pending.
*/
WARN_ON(irqs_disabled());
hard_irq_disable();
WARN_ON(lazy_irq_pending());
idle_task_exit();
current->active_mm = NULL; /* for sanity */
@ -176,6 +181,27 @@ static void pnv_smp_cpu_kill_self(void)
if (cpu_has_feature(CPU_FTR_ARCH_207S))
wmask = SRR1_WAKEMASK_P8;
/*
* This turns the irq soft-disabled state we're called with, into a
* hard-disabled state with pending irq_happened interrupts cleared.
*
* PACA_IRQ_DEC - Decrementer should be ignored.
* PACA_IRQ_HMI - Can be ignored, processing is done in real mode.
* PACA_IRQ_DBELL, EE, PMI - Unexpected.
*/
hard_irq_disable();
if (generic_check_cpu_restart(cpu))
goto out;
unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
if (local_paca->irq_happened & unexpected_mask) {
if (local_paca->irq_happened & PACA_IRQ_EE)
pnv_flush_interrupts();
DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
cpu, local_paca->irq_happened);
}
local_paca->irq_happened = PACA_IRQ_HARD_DIS;
/*
* We don't want to take decrementer interrupts while we are
* offline, so clear LPCR:PECE1. We keep PECE2 (and
@ -201,6 +227,7 @@ static void pnv_smp_cpu_kill_self(void)
srr1 = pnv_cpu_offline(cpu);
WARN_ON_ONCE(!irqs_disabled());
WARN_ON(lazy_irq_pending());
/*
@ -216,13 +243,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
((srr1 & wmask) == SRR1_WAKEHVI)) {
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (xive_enabled())
xive_flush_interrupt();
else
icp_opal_flush_interrupt();
} else
icp_native_flush_interrupt();
pnv_flush_interrupts();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@ -270,7 +291,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
out:
DBG("CPU%d coming online...\n", cpu);
}


@ -84,7 +84,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
__rc; \
})
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
unsigned long spec = 0x010000UL;
int rc;
@ -114,7 +114,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
return rc;
}
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
unsigned long spec = 0x01UL;
int rc;


@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned long long now, idle_time, idle_enter, idle_exit;
unsigned int seq;
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_time = READ_ONCE(idle->idle_time);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
in_idle = 0;
now = get_tod_clock();
if (idle_enter) {
if (idle_exit) {
in_idle = idle_exit - idle_enter;
} else if (now > idle_enter) {
in_idle = now - idle_enter;
}
}
idle_time += in_idle;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
unsigned long long now, idle_enter, idle_exit;
unsigned long long now, idle_enter, idle_exit, in_idle;
unsigned int seq;
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
in_idle = 0;
now = get_tod_clock();
if (idle_enter) {
if (idle_exit) {
in_idle = idle_exit - idle_enter;
} else if (now > idle_enter) {
in_idle = now - idle_enter;
}
}
return cputime_to_nsecs(in_idle);
}
void arch_cpu_idle_enter(void)


@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
len = *lenp;
if (copy_from_user(buf, buffer,
len > sizeof(buf) ? sizeof(buf) : len))
len = min(*lenp, sizeof(buf));
if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
*ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += len;
}
*lenp = len;
*ppos += len;
return 0;
}


@ -4,12 +4,14 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/nmi.h>
#include "../perf_event.h"
static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;
static __initconst const u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
@ -640,11 +642,12 @@ static void amd_pmu_disable_event(struct perf_event *event)
* handler when multiple PMCs are active or PMC overflow while handling some
* other source of an NMI.
*
* Attempt to mitigate this by using the number of active PMCs to determine
* whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
* any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
* number of active PMCs or 2. The value of 2 is used in case an NMI does not
* arrive at the LAPIC in time to be collapsed into an already pending NMI.
* Attempt to mitigate this by creating an NMI window in which un-handled NMIs
* received during this window will be claimed. This prevents extending the
* window past when it is possible that latent NMIs should be received. The
* per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
* handled a counter. When an un-handled NMI is received, it will be claimed
* only if arriving within that window.
*/
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
@ -662,21 +665,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
handled = x86_pmu_handle_irq(regs);
/*
* If a counter was handled, record the number of possible remaining
* NMIs that can occur.
* If a counter was handled, record a timestamp such that un-handled
* NMIs will be claimed if arriving within that window.
*/
if (handled) {
this_cpu_write(perf_nmi_counter,
min_t(unsigned int, 2, active));
this_cpu_write(perf_nmi_tstamp,
jiffies + perf_nmi_window);
return handled;
}
if (!this_cpu_read(perf_nmi_counter))
if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
return NMI_DONE;
this_cpu_dec(perf_nmi_counter);
return NMI_HANDLED;
}
@ -908,6 +909,9 @@ static int __init amd_core_pmu_init(void)
if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
return 0;
/* Avoid calulating the value each time in the NMI handler */
perf_nmi_window = msecs_to_jiffies(100);
switch (boot_cpu_data.x86) {
case 0x15:
pr_cont("Fam15h ");


@ -6,7 +6,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
* "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* While adding a new CPUID for a new microarchitecture, add a new
* group to keep logically sorted out in chronological order. Within
@ -61,6 +61,9 @@
#define INTEL_FAM6_TIGERLAKE_L 0x8C
#define INTEL_FAM6_TIGERLAKE 0x8D
#define INTEL_FAM6_COMETLAKE 0xA5
#define INTEL_FAM6_COMETLAKE_L 0xA6
/* "Small Core" Processors (Atom) */
#define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
@ -80,6 +83,7 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */


@ -893,9 +893,6 @@ static void __init kexec_enter_virtual_mode(void)
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
/* clean DUMMY object */
efi_delete_dummy_variable();
#endif
}


@ -266,19 +266,41 @@ void xen_reboot(int reason)
BUG();
}
static int reboot_reason = SHUTDOWN_reboot;
static bool xen_legacy_crash;
void xen_emergency_restart(void)
{
xen_reboot(SHUTDOWN_reboot);
xen_reboot(reboot_reason);
}
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
if (!kexec_crash_loaded())
xen_reboot(SHUTDOWN_crash);
if (!kexec_crash_loaded()) {
if (xen_legacy_crash)
xen_reboot(SHUTDOWN_crash);
reboot_reason = SHUTDOWN_crash;
/*
* If panic_timeout==0 then we are supposed to wait forever.
* However, to preserve original dom0 behavior we have to drop
* into hypervisor. (domU behavior is controlled by its
* config file)
*/
if (panic_timeout == 0)
panic_timeout = -1;
}
return NOTIFY_DONE;
}
static int __init parse_xen_legacy_crash(char *arg)
{
xen_legacy_crash = true;
return 0;
}
early_param("xen_legacy_crash", parse_xen_legacy_crash);
static struct notifier_block xen_panic_block = {
.notifier_call = xen_panic_event,
.priority = INT_MIN


@ -228,8 +228,8 @@ static void nbd_put(struct nbd_device *nbd)
if (refcount_dec_and_mutex_lock(&nbd->refs,
&nbd_index_mutex)) {
idr_remove(&nbd_index_idr, nbd->index);
mutex_unlock(&nbd_index_mutex);
nbd_dev_remove(nbd);
mutex_unlock(&nbd_index_mutex);
}
}
@ -924,6 +924,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
int *err)
{
struct socket *sock;
*err = 0;
sock = sockfd_lookup(fd, err);
if (!sock)
return NULL;
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
return NULL;
}
return sock;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
bool netlink)
{
@ -933,7 +952,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
struct nbd_sock *nsock;
int err;
sock = sockfd_lookup(arg, &err);
sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;
@ -985,7 +1004,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
int i;
int err;
sock = sockfd_lookup(arg, &err);
sock = nbd_get_socket(nbd, arg, &err);
if (!sock)
return err;


@ -423,13 +423,14 @@ static void reset_bdev(struct zram *zram)
static ssize_t backing_dev_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct file *file;
struct zram *zram = dev_to_zram(dev);
struct file *file = zram->backing_dev;
char *p;
ssize_t ret;
down_read(&zram->init_lock);
if (!zram->backing_dev) {
file = zram->backing_dev;
if (!file) {
memcpy(buf, "none\n", 5);
up_read(&zram->init_lock);
return 5;


@ -73,31 +73,39 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
goto error;
goto fail_input;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
goto error;
goto fail_sys;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
goto error;
goto fail_cpu;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
if (err)
if (err) {
pr_err("failed to add DT provider: %d\n", err);
goto fail_clk_add;
}
return;
error:
fail_clk_add:
clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_CPU]);
fail_cpu:
clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_SYS]);
fail_sys:
clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_INPUT]);
fail_input:
kfree(onecell);
}


@ -703,6 +703,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
/* remove all transactions, including active transaction */
spin_lock_irqsave(&bchan->vc.lock, flag);
/*
* If we have transactions queued, then some might be committed to the
* hardware in the desc fifo. The only way to reset the desc fifo is
* to do a hardware reset (either by pipe or the entire block).
* bam_chan_init_hw() will trigger a pipe reset, and also reinit the
* pipe. If the pipe is left disabled (default state after pipe reset)
* and is accessed by a connected hardware engine, a fatal error in
* the BAM will occur. There is a small window where this could happen
* with bam_chan_init_hw(), but it is assumed that the caller has
* stopped activity on any attached hardware engine. Make sure to do
* this first so that the BAM hardware doesn't cause memory corruption
* by accessing freed resources.
*/
if (!list_empty(&bchan->desc_list)) {
async_desc = list_first_entry(&bchan->desc_list,
struct bam_async_desc, desc_node);
bam_chan_init_hw(bchan, async_desc->dir);
}
list_for_each_entry_safe(async_desc, tmp,
&bchan->desc_list, desc_node) {
list_add(&async_desc->vd.node, &bchan->vc.desc_issued);


@ -585,9 +585,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct dma_async_tx_descriptor *txd = NULL;
struct cppi41_dd *cdd = c->cdd;
struct cppi41_desc *d;
struct scatterlist *sg;
unsigned int i;
int error;
error = pm_runtime_get(cdd->ddev.dev);
if (error < 0) {
pm_runtime_put_noidle(cdd->ddev.dev);
return NULL;
}
if (cdd->is_suspended)
goto err_out_not_ready;
d = c->desc;
for_each_sg(sgl, sg, sg_len, i) {
@ -610,7 +623,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
d++;
}
return &c->txd;
txd = &c->txd;
err_out_not_ready:
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
return txd;
}
static void cppi41_compute_td_desc(struct cppi41_desc *d)


@ -393,7 +393,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,


@ -163,13 +163,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
case 0:
val = MAX77620_CNFG_GPIO_DBNC_None;
break;
case 1 ... 8:
case 1000 ... 8000:
val = MAX77620_CNFG_GPIO_DBNC_8ms;
break;
case 9 ... 16:
case 9000 ... 16000:
val = MAX77620_CNFG_GPIO_DBNC_16ms;
break;
case 17 ... 32:
case 17000 ... 32000:
val = MAX77620_CNFG_GPIO_DBNC_32ms;
break;
default:


@ -264,7 +264,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = amdgpu_bo_create_list_entry_array(&args->in, &info);
if (r)
goto error_free;
return r;
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
@ -277,8 +277,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
amdgpu_bo_list_put(list);
return r;
goto error_put_list;
}
handle = r;
@ -300,9 +299,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&fpriv->bo_list_lock);
if (IS_ERR(old)) {
amdgpu_bo_list_put(list);
r = PTR_ERR(old);
goto error_free;
goto error_put_list;
}
amdgpu_bo_list_put(old);
@ -319,8 +317,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
return 0;
error_put_list:
amdgpu_bo_list_put(list);
error_free:
if (info)
kvfree(info);
kvfree(info);
return r;
}


@ -1399,9 +1399,9 @@ bool dc_remove_plane_from_context(
* For head pipe detach surfaces from pipe for tail
* pipe just zero it out
*/
if (!pipe_ctx->top_pipe ||
(!pipe_ctx->top_pipe->top_pipe &&
if (!pipe_ctx->top_pipe || (!pipe_ctx->top_pipe->top_pipe &&
pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
pipe_ctx->top_pipe = NULL;
pipe_ctx->plane_state = NULL;
pipe_ctx->bottom_pipe = NULL;
} else {
@ -1803,8 +1803,6 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
memset(del_pipe, 0, sizeof(*del_pipe));
break;
}
}


@ -4751,9 +4751,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count - 1; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
for (i = 0; i < podn_vdd_dep->count; i++)
od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
} else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;


@ -442,35 +442,38 @@ static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
}
}
static void _dpu_kms_initialize_dsi(struct drm_device *dev,
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
struct drm_encoder *encoder = NULL;
int i, rc;
int i, rc = 0;
if (!(priv->dsi[0] || priv->dsi[1]))
return rc;
/*TODO: Support two independent DSI connectors */
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
if (IS_ERR_OR_NULL(encoder)) {
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
return;
return PTR_ERR(encoder);
}
priv->encoders[priv->num_encoders++] = encoder;
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (!priv->dsi[i]) {
DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
return;
}
if (!priv->dsi[i])
continue;
rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
if (rc) {
DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
i, rc);
continue;
break;
}
}
return rc;
}
/**
@ -481,16 +484,16 @@ static void _dpu_kms_initialize_dsi(struct drm_device *dev,
* @dpu_kms: Pointer to dpu kms structure
* Returns: Zero on success
*/
static void _dpu_kms_setup_displays(struct drm_device *dev,
static int _dpu_kms_setup_displays(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
{
_dpu_kms_initialize_dsi(dev, priv, dpu_kms);
/**
* Extend this function to initialize other
* types of displays
*/
return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@ -552,7 +555,9 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
* Create encoder and query display drivers to create
* bridges and connectors
*/
_dpu_kms_setup_displays(dev, priv, dpu_kms);
ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
if (ret)
goto fail;
max_crtc_count = min(catalog->mixer_count, priv->num_encoders);


@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid)
{
struct axff_device *axff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;


@ -975,6 +975,7 @@ int hid_open_report(struct hid_device *device)
__u8 *start;
__u8 *buf;
__u8 *end;
__u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
@ -1028,7 +1029,8 @@ int hid_open_report(struct hid_device *device)
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
while ((start = fetch_item(start, end, &item)) != NULL) {
while ((next = fetch_item(start, end, &item)) != NULL) {
start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
@ -1058,7 +1060,8 @@ int hid_open_report(struct hid_device *device)
}
}
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
hid_err(device, "item fetching failed at offset %u/%u\n",
size - (unsigned int)(end - start), size);
err:
kfree(parser->collection_stack);
alloc_err:


@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid)
{
struct drff_device *drff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;


@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid)
{
struct emsff_device *emsff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;


@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;


@ -136,13 +136,19 @@ static int holtekff_init(struct hid_device *hid)
{
struct holtekff_device *holtekff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;


@ -322,60 +322,24 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
const int packet_size = 0x100;
int ret;
struct hv_device *device = context;
u32 bytes_recvd;
u64 req_id;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = packet_size;
buffer = kmalloc(bufferlen, GFP_ATOMIC);
if (!buffer)
return;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer,
bufferlen, &bytes_recvd, &req_id);
switch (ret) {
case 0:
if (bytes_recvd <= 0) {
kfree(buffer);
return;
}
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
case VM_PKT_COMP:
break;
case VM_PKT_DATA_INBAND:
mousevsc_on_receive(device, desc);
break;
default:
pr_err("unhandled packet type %d, tid %llx len %d\n",
desc->type, req_id, bytes_recvd);
break;
}
foreach_vmbus_pkt(desc, device->channel) {
switch (desc->type) {
case VM_PKT_COMP:
break;
case -ENOBUFS:
kfree(buffer);
/* Handle large packet */
bufferlen = bytes_recvd;
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (!buffer)
return;
case VM_PKT_DATA_INBAND:
mousevsc_on_receive(device, desc);
break;
default:
pr_err("Unhandled packet type %d, tid %llx len %d\n",
desc->type, desc->trans_id, desc->len8 * 8);
break;
}
} while (1);
}
}
static int mousevsc_connect_to_vsp(struct hv_device *device)


@ -713,6 +713,7 @@
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
#define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
#define I2C_DEVICE_ID_LG_8001 0x8001
#define USB_VENDOR_ID_LOGITECH 0x046d
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e


@ -328,6 +328,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
USB_DEVICE_ID_SYMBOL_SCANNER_3),
HID_BATTERY_QUIRK_IGNORE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD),
HID_BATTERY_QUIRK_IGNORE },
{}
};


@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid)
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)


@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;


@ -1259,8 +1259,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@ -1272,6 +1272,13 @@ int lg4ff_init(struct hid_device *hid)
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;


@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;


@ -1867,8 +1867,8 @@ static void hidpp_ff_destroy(struct ff_device *ff)
static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
{
struct hid_device *hid = hidpp->hid_dev;
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
struct ff_device *ff;
@ -1877,6 +1877,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
int error, j, num_slots;
u8 version;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (!dev) {
hid_err(hid, "Struct input_dev not set!\n");
return -EINVAL;


@ -2249,9 +2249,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
static int sony_init_ff(struct sony_sc *sc)
{
struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *input_dev;
if (list_empty(&sc->hdev->inputs)) {
hid_err(sc->hdev, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
input_dev = hidinput->input;
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);


@ -283,11 +283,6 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
static int steam_input_open(struct input_dev *dev)
{
struct steam_device *steam = input_get_drvdata(dev);
int ret;
ret = hid_hw_open(steam->hdev);
if (ret)
return ret;
mutex_lock(&steam->mutex);
if (!steam->client_opened && lizard_mode)
@ -304,8 +299,6 @@ static void steam_input_close(struct input_dev *dev)
if (!steam->client_opened && lizard_mode)
steam_set_lizard_mode(steam, true);
mutex_unlock(&steam->mutex);
hid_hw_close(steam->hdev);
}
static enum power_supply_property steam_battery_props[] = {
@ -506,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
static int steam_register(struct steam_device *steam)
{
int ret;
bool client_opened;
/*
* This function can be called several times in a row with the
@ -518,9 +512,11 @@ static int steam_register(struct steam_device *steam)
* Unlikely, but getting the serial could fail, and it is not so
* important, so make up a serial number and go on.
*/
mutex_lock(&steam->mutex);
if (steam_get_serial(steam) < 0)
strlcpy(steam->serial_no, "XXXXXXXXXX",
sizeof(steam->serial_no));
mutex_unlock(&steam->mutex);
hid_info(steam->hdev, "Steam Controller '%s' connected",
steam->serial_no);
@ -535,14 +531,16 @@ static int steam_register(struct steam_device *steam)
}
mutex_lock(&steam->mutex);
if (!steam->client_opened) {
client_opened = steam->client_opened;
if (!client_opened)
steam_set_lizard_mode(steam, lizard_mode);
ret = steam_input_register(steam);
} else {
ret = 0;
}
mutex_unlock(&steam->mutex);
if (!client_opened)
ret = steam_input_register(steam);
else
ret = 0;
return ret;
}
@ -623,11 +621,6 @@ static void steam_client_ll_stop(struct hid_device *hdev)
static int steam_client_ll_open(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
int ret;
ret = hid_hw_open(steam->hdev);
if (ret)
return ret;
mutex_lock(&steam->mutex);
steam->client_opened = true;
@ -635,22 +628,28 @@ static int steam_client_ll_open(struct hid_device *hdev)
steam_input_unregister(steam);
return ret;
return 0;
}
static void steam_client_ll_close(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
unsigned long flags;
bool connected;
spin_lock_irqsave(&steam->lock, flags);
connected = steam->connected;
spin_unlock_irqrestore(&steam->lock, flags);
mutex_lock(&steam->mutex);
steam->client_opened = false;
if (connected)
steam_set_lizard_mode(steam, lizard_mode);
mutex_unlock(&steam->mutex);
hid_hw_close(steam->hdev);
if (steam->connected) {
steam_set_lizard_mode(steam, lizard_mode);
if (connected)
steam_input_register(steam);
}
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
@ -759,14 +758,15 @@ static int steam_probe(struct hid_device *hdev,
if (ret)
goto client_hdev_add_fail;
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev,
"%s:hid_hw_open\n",
__func__);
goto hid_hw_open_fail;
}
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
ret = hid_hw_open(hdev);
if (ret) {
hid_err(hdev,
"%s:hid_hw_open for wireless\n",
__func__);
goto hid_hw_open_fail;
}
hid_info(hdev, "Steam wireless receiver connected");
steam_request_conn_status(steam);
} else {
@ -781,8 +781,8 @@ static int steam_probe(struct hid_device *hdev,
return 0;
hid_hw_open_fail:
input_register_fail:
hid_hw_open_fail:
client_hdev_add_fail:
hid_hw_stop(hdev);
hid_hw_start_fail:
@ -809,8 +809,8 @@ static void steam_remove(struct hid_device *hdev)
cancel_work_sync(&steam->work_connect);
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver disconnected");
hid_hw_close(hdev);
}
hid_hw_close(hdev);
hid_hw_stop(hdev);
steam_unregister(steam);
}


@ -136,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *input_dev;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
input_dev = hidinput->input;
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;


@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid)
{
struct zpff_device *zpff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int i, error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)


@ -50,6 +50,7 @@
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
#define I2C_HID_QUIRK_BOGUS_IRQ BIT(4)
/* flags */
#define I2C_HID_STARTED 0
@ -177,6 +178,10 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_NO_RUNTIME_PM },
{ I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
{ USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
I2C_HID_QUIRK_NO_RUNTIME_PM },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
I2C_HID_QUIRK_BOGUS_IRQ },
{ 0, 0 }
};
@ -501,6 +506,12 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
if (ihid->quirks & I2C_HID_QUIRK_BOGUS_IRQ && ret_size == 0xffff) {
dev_warn_once(&ihid->client->dev, "%s: IRQ triggered but "
"there's no data\n", __func__);
return;
}
if ((ret_size > size) || (ret_size < 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);


@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
/*
* There are at least 2 Primebook C11B versions, the older
* version has a product-name of "Primebook C11B", and a
* bios version / release / firmware revision of:
* V2.1.2 / 05/03/2018 / 18.2
* The new version has "PRIMEBOOK C11B" as product-name and a
* bios version / release / firmware revision of:
* CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
* Only the older version needs this quirk, note the newer
* version will not match as it has a different product-name.
*/
.ident = "Trekstor Primebook C11B",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Direkt-Tek DTLAPY116-2",
.matches = {
@ -330,6 +349,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Direkt-Tek DTLAPY133-1",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Mediacom Flexbook Edge 11",
.matches = {
@ -338,6 +365,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "Odys Winbook 13",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WINBOOK 13"),
},
.driver_data = (void *)&sipodev_desc
},
{ } /* Terminate list */
};


@ -125,7 +125,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL 24
#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000

@ -1023,6 +1023,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
priv->data->param->regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (!irq)
return -EINVAL;
@ -1032,11 +1037,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
priv->data->param->regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
priv->clkin = devm_clk_get(&pdev->dev, "clkin");
if (IS_ERR(priv->clkin)) {
dev_err(&pdev->dev, "failed to get clkin\n");

@ -39,8 +39,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
return -ENOMEM;
adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
if (!adis->buffer)
if (!adis->buffer) {
kfree(adis->xfer);
adis->xfer = NULL;
return -ENOMEM;
}
rx = adis->buffer;
tx = rx + scan_count;

@ -2270,9 +2270,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
return ret;
}
mutex_unlock(&conn_id->handler_mutex);

@ -1518,8 +1518,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
}
ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
if (ret < 0)
if (ret < 0) {
kfree(tmp_sdma_rht);
goto bail;
}
dd->sdma_rht = tmp_sdma_rht;
dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);

@ -289,7 +289,9 @@ STORE(__cached_dev)
sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
dc->writeback_rate_p_term_inverse,
1, UINT_MAX);
d_strtoul_nonzero(writeback_rate_minimum);
sysfs_strtoul_clamp(writeback_rate_minimum,
dc->writeback_rate_minimum,
1, UINT_MAX);
sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

@ -19,7 +19,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>
#include "dm.h"
@ -106,8 +105,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
/* Maximum number of in-flight COW jobs. */
struct semaphore cow_count;
unsigned in_progress;
struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@ -158,8 +157,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@ -1207,7 +1206,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@ -1396,9 +1395,56 @@ static void snapshot_dtr(struct dm_target *ti)
dm_put_device(ti, s->origin);
WARN_ON(s->in_progress);
kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
s->in_progress++;
spin_unlock(&s->in_progress_wait.lock);
}
static void account_end_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
BUG_ON(!s->in_progress);
s->in_progress--;
if (likely(s->in_progress <= cow_threshold) &&
unlikely(waitqueue_active(&s->in_progress_wait)))
wake_up_locked(&s->in_progress_wait);
spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
if (unlikely(s->in_progress > cow_threshold)) {
spin_lock(&s->in_progress_wait.lock);
if (likely(s->in_progress > cow_threshold)) {
/*
* NOTE: this throttle doesn't account for whether
* the caller is servicing an IO that will trigger a COW
* so excess throttling may result for chunks not required
* to be COW'd. But if cow_threshold was reached, extra
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
if (unlock_origins)
up_read(&_origins_lock);
io_schedule();
remove_wait_queue(&s->in_progress_wait, &wait);
return false;
}
spin_unlock(&s->in_progress_wait.lock);
}
return true;
}
/*
* Flush a list of buffers.
*/
@ -1414,7 +1460,7 @@ static void flush_bios(struct bio *bio)
}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@ -1427,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
r = do_origin(s->origin, bio);
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@ -1594,7 +1640,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
rb_link_node(&pe->out_of_order_node, parent, p);
rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}
up(&s->cow_count);
account_end_copy(s);
}
/*
@ -1618,7 +1664,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
down(&s->cow_count);
account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@ -1638,7 +1684,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
down(&s->cow_count);
account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@ -1729,6 +1775,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return DM_MAPIO_KILL;
if (bio_data_dir(bio) == WRITE) {
while (unlikely(!wait_for_in_progress(s, false)))
; /* wait_for_in_progress() has slept */
}
mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) &&
@ -1877,7 +1928,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
mutex_unlock(&s->lock);
return do_origin(s->origin, bio);
return do_origin(s->origin, bio, false);
}
out_unlock:
@ -2214,15 +2265,24 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
/*
* Called on a write from the origin driver.
*/
static int do_origin(struct dm_dev *origin, struct bio *bio)
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
if (o) {
if (limit) {
struct dm_snapshot *s;
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
}
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
}
up_read(&_origins_lock);
return r;
@ -2335,7 +2395,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
return do_origin(o->dev, bio);
return do_origin(o->dev, bio, true);
}
static long origin_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
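
For readers following the dm-snapshot change above: the patch drops the semaphore-based COW limit in favour of an open-coded in_progress counter protected by the wait-queue lock, so a throttled writer can release _origins_lock before sleeping. Below is a hedged userspace sketch of that accounting pattern only, with a pthread mutex and condition variable standing in for the kernel wait queue; the names mirror the patch, but none of this is kernel code.

#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative analogue of account_start_copy()/account_end_copy()/
 * wait_for_in_progress(): a plain counter guarded by a lock, with a
 * condition variable standing in for the kernel wait queue.
 */
static unsigned int in_progress;
static const unsigned int cow_threshold = 4;    /* snapshot_cow_threshold */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_cv = PTHREAD_COND_INITIALIZER;

static void account_start(void)
{
    pthread_mutex_lock(&lock);
    in_progress++;
    pthread_mutex_unlock(&lock);
}

static void account_end(void)
{
    pthread_mutex_lock(&lock);
    in_progress--;
    if (in_progress <= cow_threshold)
        pthread_cond_broadcast(&wait_cv);   /* wake throttled writers */
    pthread_mutex_unlock(&lock);
}

/* Block the caller while too many copies are in flight. */
static void wait_for_in_progress(void)
{
    pthread_mutex_lock(&lock);
    while (in_progress > cow_threshold)
        pthread_cond_wait(&wait_cv, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    account_start();
    wait_for_in_progress();     /* returns immediately: 1 <= threshold */
    account_end();
    printf("in_progress = %u\n", in_progress);
    return 0;
}

The kernel version differs in one important way: wait_for_in_progress() returns false after sleeping so the caller can re-acquire _origins_lock and re-check, a step the condition-variable loop above hides.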

@ -204,13 +204,6 @@ static void *vimc_sen_process_frame(struct vimc_ent_device *ved,
{
struct vimc_sen_device *vsen = container_of(ved, struct vimc_sen_device,
ved);
const struct vimc_pix_map *vpix;
unsigned int frame_size;
/* Calculate the frame size */
vpix = vimc_pix_map_by_code(vsen->mbus_format.code);
frame_size = vsen->mbus_format.width * vpix->bpp *
vsen->mbus_format.height;
tpg_fill_plane_buffer(&vsen->tpg, 0, 0, vsen->frame);
return vsen->frame;

@ -4033,7 +4033,7 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
for (idx = 0; idx < old_arr->count; idx++) {
for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];

@ -456,10 +456,12 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
*/
irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
mutex_unlock(&chip->reg_lock);
err = request_threaded_irq(chip->irq, NULL,
mv88e6xxx_g1_irq_thread_fn,
IRQF_ONESHOT,
dev_name(chip->dev), chip);
mutex_lock(&chip->reg_lock);
if (err)
mv88e6xxx_g1_irq_free_common(chip);

@ -4410,9 +4410,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
if (err)
goto err_col_port_add;
err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
if (err)
goto err_col_port_enable;
mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
mlxsw_sp_port->local_port);
@ -4427,8 +4424,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_col_port_enable:
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
if (!lag->ref_count)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@ -4447,7 +4442,6 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
WARN_ON(lag->ref_count == 0);
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
/* Any VLANs configured on the port are no longer valid */
@ -4492,21 +4486,56 @@ static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool lag_tx_enabled)
static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
if (lag_tx_enabled)
return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
mlxsw_sp_port->lag_id);
else
return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
mlxsw_sp_port->lag_id);
int err;
err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
mlxsw_sp_port->lag_id);
if (err)
return err;
err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
if (err)
goto err_dist_port_add;
return 0;
err_dist_port_add:
mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
return err;
}
static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
mlxsw_sp_port->lag_id);
if (err)
return err;
err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
mlxsw_sp_port->lag_id);
if (err)
goto err_col_port_disable;
return 0;
err_col_port_disable:
mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
return err;
}
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
struct netdev_lag_lower_state_info *info)
{
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
if (info->tx_enabled)
return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
else
return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
}
static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
@ -4668,8 +4697,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
} else {
mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port,
false);
mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
mlxsw_sp_port_lag_leave(mlxsw_sp_port,
upper_dev);
}

@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
__le16 res;
__le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);

@ -532,7 +532,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
.n_cipher_suites = 8,
.n_cipher_suites = 11,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,

@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
struct ath6kl_urb_context *urb_context = NULL;
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return NULL;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context =
@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
{
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;

@ -234,9 +234,10 @@ static int wil_rx_refill_edma(struct wil6210_priv *wil)
struct wil_ring *ring = &wil->ring_rx;
u32 next_head;
int rc = 0;
u32 swtail = *ring->edma_rx_swtail.va;
ring->swtail = *ring->edma_rx_swtail.va;
for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
for (; next_head = wil_ring_next_head(ring),
(next_head != ring->swtail);
ring->swhead = next_head) {
rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
if (unlikely(rc)) {
@ -264,43 +265,26 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
struct wil_ring *ring)
{
struct device *dev = wil_to_dev(wil);
u32 next_tail;
u32 swhead = (ring->swhead + 1) % ring->size;
struct list_head *active = &wil->rx_buff_mgmt.active;
dma_addr_t pa;
u16 dmalen;
for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
ring->swtail = next_tail) {
struct wil_rx_enhanced_desc dd, *d = &dd;
struct wil_rx_enhanced_desc *_d =
(struct wil_rx_enhanced_desc *)
&ring->va[ring->swtail].rx.enhanced;
struct sk_buff *skb;
u16 buff_id;
while (!list_empty(active)) {
struct wil_rx_buff *rx_buff =
list_first_entry(active, struct wil_rx_buff, list);
struct sk_buff *skb = rx_buff->skb;
*d = *_d;
/* Extract the SKB from the rx_buff management array */
buff_id = __le16_to_cpu(d->mac.buff_id);
if (buff_id >= wil->rx_buff_mgmt.size) {
wil_err(wil, "invalid buff_id %d\n", buff_id);
continue;
}
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (unlikely(!skb)) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
} else {
pa = wil_rx_desc_get_addr_edma(&d->dma);
dmalen = le16_to_cpu(d->dma.length);
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
rx_buff->skb = NULL;
memcpy(&pa, skb->cb, sizeof(pa));
dma_unmap_single(dev, pa, wil->rx_buf_len,
DMA_FROM_DEVICE);
kfree_skb(skb);
}
/* Move the buffer from the active to the free list */
list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
&wil->rx_buff_mgmt.free);
list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
}
}

@ -843,15 +843,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
* firmware versions. Unfortunately, we don't have a TLV API
* flag to rely on, so rely on the major version which is in
* the first byte of ucode_ver. This was implemented
* initially on version 38 and then backported to 29 and 17.
* The intention was to have it in 36 as well, but not all
* 8000 family got this feature enabled. The 8000 family is
* the only one using version 36, so skip this version
* entirely.
* initially on version 38 and then backported to 17. It was
* also backported to 29, but only for 7265D devices. The
* intention was to have it in 36 as well, but not all 8000
* family got this feature enabled. The 8000 family is the
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
(IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
CSR_HW_REV_TYPE_7265D));
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)

@ -559,18 +559,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
rc = pn533_finalize_setup(priv);
if (rc)
goto error;
goto err_deregister;
usb_set_intfdata(interface, phy);
return 0;
err_deregister:
pn533_unregister_device(phy->priv);
error:
usb_kill_urb(phy->in_urb);
usb_kill_urb(phy->out_urb);
usb_kill_urb(phy->ack_urb);
usb_free_urb(phy->in_urb);
usb_free_urb(phy->out_urb);
usb_free_urb(phy->ack_urb);
usb_put_dev(phy->udev);
kfree(in_buf);
kfree(phy->ack_buffer);
return rc;
}

@ -437,6 +437,7 @@ static void pcie_pme_remove(struct pcie_device *srv)
pcie_pme_disable_interrupt(srv->port, data);
free_irq(srv->irq, srv);
cancel_work_sync(&data->work);
kfree(data);
}

@ -5083,8 +5083,8 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
pci_disable_device(pdev);
}
#define SWITCHTEC_QUIRK(vid) \
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
quirk_switchtec_ntb_dma_alias)
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */

@ -33,46 +33,45 @@
#define ISPSSPM0_IUNIT_POWER_ON 0x0
#define ISPSSPM0_IUNIT_POWER_OFF 0x3
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
u32 val;
u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
ISPSSPM0_IUNIT_POWER_OFF;
pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
/*
* MRFLD IUNIT DPHY is located in an always-power-on island
* MRFLD HW design need all CSI ports are disabled before
* powering down the IUNIT.
*/
pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
/* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
val, ISPSSPM0_ISPSSC_MASK);
/*
* There should be no IUNIT access while power-down is
* in progress HW sighting: 4567865
* Wait up to 50 ms for the IUNIT to shut down.
* And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
/* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (val == ISPSSPM0_IUNIT_POWER_OFF)
u32 tmp;
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
if (tmp == val)
break;
if (time_after(jiffies, timeout)) {
dev_err(&dev->dev, "IUNIT power-off timeout.\n");
dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
enable ? "on" : "off");
return -EBUSY;
}
usleep_range(1000, 2000);
}
return 0;
}
static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
pm_runtime_allow(&dev->dev);
pm_runtime_put_sync_suspend(&dev->dev);
@ -87,11 +86,40 @@ static void isp_remove(struct pci_dev *dev)
static int isp_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
u32 val;
pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
/*
* MRFLD IUNIT DPHY is located in an always-power-on island
* MRFLD HW design need all CSI ports are disabled before
* powering down the IUNIT.
*/
pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val);
val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
pci_write_config_dword(pdev, PCI_CSI_CONTROL, val);
/*
* We lose config space access when punit power gates
* the ISP. Can't use pci_set_power_state() because
* pmcsr won't actually change when we write to it.
*/
pci_save_state(pdev);
pdev->current_state = PCI_D3cold;
isp_set_power(pdev, false);
return 0;
}
static int isp_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
isp_set_power(pdev, true);
pdev->current_state = PCI_D0;
pci_restore_state(pdev);
return 0;
}
@ -99,6 +127,7 @@ static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
isp_pci_resume, NULL);
static const struct pci_device_id isp_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0f38), },
{ PCI_VDEVICE(INTEL, 0x22b8), },
{ 0, }
};
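
The isp_set_power() helper introduced above is a poll-until-state-or-timeout loop around the ISPSSPM0 status field. Here is a stand-alone sketch of that pattern using ordinary POSIX clock calls; the 50 ms budget matches the hunk, while the function names and the fake status read are purely illustrative.

#include <stdio.h>
#include <time.h>

/* Stand-in for reading the ISPSSPM0 power-state field; always "off" here. */
static unsigned int read_power_state(void)
{
    return 0x3;
}

/* Poll until the hardware reports 'wanted', or give up after timeout_ms. */
static int wait_for_power_state(unsigned int wanted, long timeout_ms)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (read_power_state() == wanted)
            return 0;

        clock_gettime(CLOCK_MONOTONIC, &now);
        if ((now.tv_sec - start.tv_sec) * 1000 +
            (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
            return -1;  /* the driver returns -EBUSY here */

        nanosleep(&(struct timespec){ .tv_nsec = 1000 * 1000 }, NULL);
    }
}

int main(void)
{
    printf("power off: %d\n", wait_for_power_state(0x3, 50));
    printf("power on:  %d\n", wait_for_power_state(0x0, 50));
    return 0;
}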

@ -240,6 +240,14 @@ static enum power_supply_property max14656_battery_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
};
static void stop_irq_work(void *data)
{
struct max14656_chip *chip = data;
cancel_delayed_work_sync(&chip->irq_work);
}
static int max14656_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@ -278,8 +286,6 @@ static int max14656_probe(struct i2c_client *client,
if (ret)
return -ENODEV;
INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
chip->detect_psy = devm_power_supply_register(dev,
&chip->psy_desc, &psy_cfg);
if (IS_ERR(chip->detect_psy)) {
@ -287,6 +293,13 @@ static int max14656_probe(struct i2c_client *client,
return -EINVAL;
}
INIT_DELAYED_WORK(&chip->irq_work, max14656_irq_worker);
ret = devm_add_action(dev, stop_irq_work, chip);
if (ret) {
dev_err(dev, "devm_add_action %d failed\n", ret);
return ret;
}
ret = devm_request_irq(dev, chip->irq, max14656_irq,
IRQF_TRIGGER_FALLING,
MAX14656_NAME, chip);

@ -97,8 +97,9 @@ static int pcf8523_voltage_low(struct i2c_client *client)
return !!(value & REG_CONTROL3_BLF);
}
static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
static int pcf8523_load_capacitance(struct i2c_client *client)
{
u32 load;
u8 value;
int err;
@ -106,14 +107,24 @@ static int pcf8523_select_capacitance(struct i2c_client *client, bool high)
if (err < 0)
return err;
if (!high)
value &= ~REG_CONTROL1_CAP_SEL;
else
load = 12500;
of_property_read_u32(client->dev.of_node, "quartz-load-femtofarads",
&load);
switch (load) {
default:
dev_warn(&client->dev, "Unknown quartz-load-femtofarads value: %d. Assuming 12500",
load);
/* fall through */
case 12500:
value |= REG_CONTROL1_CAP_SEL;
break;
case 7000:
value &= ~REG_CONTROL1_CAP_SEL;
break;
}
err = pcf8523_write(client, REG_CONTROL1, value);
if (err < 0)
return err;
return err;
}
@ -347,9 +358,10 @@ static int pcf8523_probe(struct i2c_client *client,
if (!pcf)
return -ENOMEM;
err = pcf8523_select_capacitance(client, true);
err = pcf8523_load_capacitance(client);
if (err < 0)
return err;
dev_warn(&client->dev, "failed to set xtal load capacitance: %d",
err);
err = pcf8523_set_pm(client, 0);
if (err < 0)

@ -1713,7 +1713,11 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
}
tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
wait_for_completion_timeout(&tport_unreg_cmp, 5);
if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
"6179 Unreg targetport %p timeout "
"reached.\n", phba->targetport);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;

@ -31,6 +31,8 @@
#define LPFC_NVMET_MRQ_AUTO 0
#define LPFC_NVMET_MRQ_MAX 16
#define LPFC_NVMET_WAIT_TMO (5 * MSEC_PER_SEC)
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;

@ -4161,7 +4161,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
/* If pCmd was set to NULL from abort path, do not call scsi_done */
if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0711 FCP cmd already NULL, sid: 0x%06x, "
"5688 FCP cmd already NULL, sid: 0x%06x, "
"did: 0x%06x, oxid: 0x%04x\n",
vport->fc_myDID,
(pnode) ? pnode->nlp_DID : 0,

@ -2,3 +2,4 @@ config PINCTRL_RT2880
bool "RT2800 pinctrl driver for RALINK/Mediatek SOCs"
depends on RALINK
select PINMUX
select GENERIC_PINCONF

@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
@ -73,48 +74,12 @@ static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
return 0;
}
static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
struct device_node *np_config,
struct pinctrl_map **map,
unsigned int *num_maps)
{
struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
struct property *prop;
const char *function_name, *group_name;
int ret;
int ngroups = 0;
unsigned int reserved_maps = 0;
for_each_node_with_property(np_config, "group")
ngroups++;
*map = NULL;
ret = pinctrl_utils_reserve_map(pctrldev, map, &reserved_maps,
num_maps, ngroups);
if (ret) {
dev_err(p->dev, "can't reserve map: %d\n", ret);
return ret;
}
of_property_for_each_string(np_config, "group", prop, group_name) {
ret = pinctrl_utils_add_map_mux(pctrldev, map, &reserved_maps,
num_maps, group_name,
function_name);
if (ret) {
dev_err(p->dev, "can't add map: %d\n", ret);
return ret;
}
}
return 0;
}
static const struct pinctrl_ops rt2880_pctrl_ops = {
.get_groups_count = rt2880_get_group_count,
.get_group_name = rt2880_get_group_name,
.get_group_pins = rt2880_get_group_pins,
.dt_node_to_map = rt2880_pinctrl_dt_node_to_map,
.dt_free_map = pinctrl_utils_free_map,
.dt_node_to_map = pinconf_generic_dt_node_to_map_all,
.dt_free_map = pinconf_generic_dt_free_map,
};
static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)

@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
}
padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
if (!padapter->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
if (!padapter->HalData) {
DBG_88E("Failed to allocate memory for HAL data\n");
goto free_adapter;
}
/* step read_chip_version */
rtw_hal_read_chip_version(padapter);

@ -1832,7 +1832,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
while (credits) {
struct sk_buff *p = cxgbit_sock_peek_wr(csk);
const u32 csum = (__force u32)p->csum;
u32 csum;
if (unlikely(!p)) {
pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
@ -1841,6 +1841,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
break;
}
csum = (__force u32)p->csum;
if (unlikely(credits < csum)) {
pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
csk, csk->tid,

@ -142,9 +142,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
iowrite16(value, ring_desc_base(ring) + offset);
/*
* The other 16-bits in the register is read-only and writes to it
* are ignored by the hardware so we can save one ioread32() by
* filling the read-only bits with zeroes.
*/
iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
/* See ring_iowrite_cons() above for explanation */
iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@ -196,7 +207,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
if (ring->is_tx)
ring_iowrite_prod(ring, ring->head);
else
ring_iowrite_cons(ring, ring->head);
}
}
@ -660,7 +674,7 @@ void tb_ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
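
The new ring_iowrite_cons()/ring_iowrite_prod() helpers above replace a 16-bit MMIO write with a full 32-bit write, relying on the other half of the register being read-only. A small hedged sketch of just the bit packing follows; the layout (consumer index in bits 15:0, producer index in bits 31:16) is taken from the helpers, but the function names below are not the driver's.

#include <stdint.h>
#include <stdio.h>

/*
 * Position a 16-bit ring index inside the 32-bit register image.  In the
 * hardware the other half of the register is read-only, so the zeroes
 * written there are simply ignored; only the packing is shown here.
 */
static uint32_t cons_to_reg(uint16_t cons)
{
    return cons;                    /* consumer index, bits 15:0 */
}

static uint32_t prod_to_reg(uint16_t prod)
{
    return (uint32_t)prod << 16;    /* producer index, bits 31:16 */
}

int main(void)
{
    printf("cons write: 0x%08x\n", cons_to_reg(0x0007));
    printf("prod write: 0x%08x\n", prod_to_reg(0x0042));
    return 0;
}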

@ -968,6 +968,11 @@ static int __init n_hdlc_init(void)
} /* end of init_module() */
#ifdef CONFIG_SPARC
#undef __exitdata
#define __exitdata
#endif
static const char hdlc_unregister_ok[] __exitdata =
KERN_INFO "N_HDLC: line discipline unregistered\n";
static const char hdlc_unregister_fail[] __exitdata =

@ -742,7 +742,7 @@ static int __init owl_uart_init(void)
return ret;
}
static void __init owl_uart_exit(void)
static void __exit owl_uart_exit(void)
{
platform_driver_unregister(&owl_uart_platform_driver);
uart_unregister_driver(&owl_uart_driver);

@ -328,6 +328,7 @@ struct sc16is7xx_port {
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work irq_work;
struct mutex efr_lock;
struct sc16is7xx_one p[0];
};
@ -499,6 +500,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= 4;
}
/* In an amazing feat of design, the Enhanced Features Register shares
* the address of the Interrupt Identification Register, and is
* switched in by writing a magic value (0xbf) to the Line Control
* Register. Any interrupt firing during this time will see the EFR
* where it expects the IIR to be, leading to "Unexpected interrupt"
* messages.
*
* Prevent this possibility by claiming a mutex while accessing the
* EFR, and claiming the same mutex from within the interrupt handler.
* This is similar to disabling the interrupt, but that doesn't work
* because the bulk of the interrupt processing is run as a workqueue
* job in thread context.
*/
mutex_lock(&s->efr_lock);
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
/* Open the LCR divisors for configuration */
@ -514,6 +530,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
@ -696,6 +714,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
mutex_lock(&s->efr_lock);
while (1) {
bool keep_polling = false;
int i;
@ -705,6 +725,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
if (!keep_polling)
break;
}
mutex_unlock(&s->efr_lock);
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
@ -899,6 +921,9 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
/* As above, claim the mutex while accessing the EFR. */
mutex_lock(&s->efr_lock);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
@ -920,6 +945,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 4 / 0xffff,
@ -1185,6 +1212,7 @@ static int sc16is7xx_probe(struct device *dev,
s->regmap = regmap;
s->devtype = devtype;
dev_set_drvdata(dev, s);
mutex_init(&s->efr_lock);
kthread_init_worker(&s->kworker);
kthread_init_work(&s->irq_work, sc16is7xx_ist);
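
The comment in the hunk above describes the hazard: the EFR is banked behind the IIR address, so an interrupt serviced mid-switch reads the wrong register. A minimal userspace sketch of the locking idea follows, with a pthread mutex standing in for efr_lock and a thread standing in for the kthread-based IRQ handler; the names here are made up for illustration.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t efr_lock = PTHREAD_MUTEX_INITIALIZER;
static int reg_bank;    /* 0 = normal register map, 1 = EFR visible */

/* Configuration path: temporarily switches the register bank. */
static void set_baud(void)
{
    pthread_mutex_lock(&efr_lock);
    reg_bank = 1;       /* magic LCR value exposes the EFR */
    /* ... program enhanced features, divisors, etc. ... */
    reg_bank = 0;       /* restore the normal register map */
    pthread_mutex_unlock(&efr_lock);
}

/* Threaded interrupt handler: must never observe reg_bank == 1. */
static void *irq_thread(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&efr_lock);
    printf("IIR is readable, bank = %d\n", reg_bank);
    pthread_mutex_unlock(&efr_lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, irq_thread, NULL);
    set_baud();
    pthread_join(t, NULL);
    return 0;
}

This approach only works because the driver's interrupt processing runs in thread context (a kthread worker) and may therefore sleep on the mutex, exactly as the comment notes.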

@ -60,6 +60,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
if (gpios == NULL)
return NULL;
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);

@ -107,6 +107,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@ -1111,6 +1113,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
/* Make sure a warm-reset request is handled by port_event */
if (type == HUB_RESUME &&
hub_port_warm_reset_required(hub, port1, portstatus))
set_bit(port1, hub->event_bits);
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after

@ -3568,6 +3568,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 port_status;
u32 speed;
u32 pcgctl;
u32 pwr;
switch (typereq) {
case ClearHubFeature:
@ -3616,8 +3617,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
if (pwr)
dwc2_vbus_supply_exit(hsotg);
break;
case USB_PORT_FEAT_INDICATOR:
@ -3827,8 +3831,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
if (!pwr)
dwc2_vbus_supply_init(hsotg);
break;
case USB_PORT_FEAT_RESET:
@ -3845,6 +3852,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
pwr = hprt0 & HPRT0_PWR;
/* Clear suspend bit if resetting from suspend state */
hprt0 &= ~HPRT0_SUSP;
@ -3858,6 +3866,8 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
dwc2_writel(hsotg, hprt0, HPRT0);
if (!pwr)
dwc2_vbus_supply_init(hsotg);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
@ -4400,6 +4410,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_bus *bus = hcd_to_bus(hcd);
unsigned long flags;
u32 hprt0;
int ret;
dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
@ -4416,12 +4427,16 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
dwc2_hcd_reinit(hsotg);
/* enable external vbus supply before resuming root hub */
spin_unlock_irqrestore(&hsotg->lock, flags);
ret = dwc2_vbus_supply_init(hsotg);
if (ret)
return ret;
spin_lock_irqsave(&hsotg->lock, flags);
hprt0 = dwc2_read_hprt0(hsotg);
/* Has vbus power been turned on in dwc2_core_host_init ? */
if (hprt0 & HPRT0_PWR) {
/* Enable external vbus supply before resuming root hub */
spin_unlock_irqrestore(&hsotg->lock, flags);
ret = dwc2_vbus_supply_init(hsotg);
if (ret)
return ret;
spin_lock_irqsave(&hsotg->lock, flags);
}
/* Initialize and connect root hub if one is not already attached */
if (bus->root_hub) {
@ -4443,6 +4458,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
u32 hprt0;
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
@ -4451,6 +4467,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
synchronize_irq(hcd->irq);
spin_lock_irqsave(&hsotg->lock, flags);
hprt0 = dwc2_read_hprt0(hsotg);
/* Ensure hcd is disconnected */
dwc2_hcd_disconnect(hsotg, true);
dwc2_hcd_stop(hsotg);
@ -4459,7 +4476,9 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_exit(hsotg);
/* keep balanced supply init/exit by checking HPRT0_PWR */
if (hprt0 & HPRT0_PWR)
dwc2_vbus_supply_exit(hsotg);
usleep_range(1000, 3000);
}

@ -375,19 +375,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (ret == 0) {
switch (DWC3_DEPCMD_CMD(cmd)) {
case DWC3_DEPCMD_STARTTRANSFER:
dep->flags |= DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_get_transfer_index(dep);
break;
case DWC3_DEPCMD_ENDTRANSFER:
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
break;
default:
/* nothing */
break;
}
if (ret == 0 && DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
dep->flags |= DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_get_transfer_index(dep);
}
if (unlikely(susphy)) {
@ -1410,7 +1400,10 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
dwc3_gadget_move_cancelled_request(req);
goto out0;
if (dep->flags & DWC3_EP_TRANSFER_STARTED)
goto out0;
else
goto out1;
}
dev_err(dwc->dev, "request %pK was not queued to %s\n",
request, ep->name);
@ -1418,6 +1411,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
}
out1:
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@ -2413,7 +2407,8 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
cmd = DEPEVT_PARAMETER_CMD(event->parameters);
if (cmd == DWC3_DEPCMD_ENDTRANSFER) {
dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~(DWC3_EP_END_TRANSFER_PENDING |
DWC3_EP_TRANSFER_STARTED);
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
}
break;

@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep)
if (ep->enabled)
goto out;
/* UDC drivers can't handle endpoints with maxpacket size 0 */
if (usb_endpoint_maxp(ep->desc) == 0) {
/*
* We should log an error message here, but we can't call
* dev_err() because there's no way to find the gadget
* given only ep.
*/
ret = -EINVAL;
goto out;
}
ret = ep->ops->enable(ep, ep->desc);
if (ret)
goto out;

@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s,
trb = &seg->trbs[i];
dma = seg->dma + i * sizeof(*trb);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_trb(trb->generic.field[0],
trb->generic.field[1],
trb->generic.field[2],
trb->generic.field[3]));
xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
le32_to_cpu(trb->generic.field[1]),
le32_to_cpu(trb->generic.field[2]),
le32_to_cpu(trb->generic.field[3])));
}
}
@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
xhci_decode_slot_context(slot_ctx->dev_info,
slot_ctx->dev_info2,
slot_ctx->tt_info,
slot_ctx->dev_state));
xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
le32_to_cpu(slot_ctx->dev_info2),
le32_to_cpu(slot_ctx->tt_info),
le32_to_cpu(slot_ctx->dev_state)));
return 0;
}
@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
seq_printf(s, "%pad: %s\n", &dma,
xhci_decode_ep_context(ep_ctx->ep_info,
ep_ctx->ep_info2,
ep_ctx->deq,
ep_ctx->tx_info));
xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
le32_to_cpu(ep_ctx->ep_info2),
le64_to_cpu(ep_ctx->deq),
le32_to_cpu(ep_ctx->tx_info)));
}
return 0;

@ -495,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = -EFAULT;
goto unlock_exit;
}
dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
@ -580,7 +580,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
USB_CTRL_SET_TIMEOUT * HZ);
USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",

@ -878,7 +878,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
if (result < sizeof(*get_version_reply)) {
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
dev_err(idev, "get version request failed: %d\n", result);

@ -571,6 +571,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
if (command_port->bulk_out_size < datasize + 1)
return -EIO;
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
@ -644,6 +648,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
speed_t baud;
port_settings.port = port->port_number + 1;
@ -704,11 +709,13 @@ static void firm_setup_port(struct tty_struct *tty)
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
port_settings.baud = tty_get_baud_rate(tty);
dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
baud = tty_get_baud_rate(tty);
port_settings.baud = cpu_to_le32(baud);
dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
tty_encode_baud_rate(tty, baud, baud);
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;

@ -87,7 +87,7 @@ struct whiteheat_simple {
struct whiteheat_port_settings {
__u8 port; /* port number (1 to N) */
__u32 baud; /* any value 7 - 460800, firmware calculates
__le32 baud; /* any value 7 - 460800, firmware calculates
best fit; arrives little endian */
__u8 bits; /* 5, 6, 7, or 8 */
__u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
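
The baud field above travels to the device in little-endian byte order, which is why the patch switches it to __le32 and fills it with cpu_to_le32(). A hedged plain-C illustration of the same conversion with a hand-rolled helper; the struct below is illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/*
 * Store a host-order 32-bit value as little-endian bytes, the layout a
 * wire-format field such as port_settings.baud must have.
 */
static void put_le32(uint8_t out[4], uint32_t v)
{
    out[0] = v & 0xff;
    out[1] = (v >> 8) & 0xff;
    out[2] = (v >> 16) & 0xff;
    out[3] = (v >> 24) & 0xff;
}

struct wire_port_settings {     /* illustrative, not the driver's struct */
    uint8_t port;
    uint8_t baud_le[4];         /* little-endian on the wire */
};

int main(void)
{
    struct wire_port_settings s = { .port = 1 };

    put_le32(s.baud_le, 115200);        /* 0x0001c200 -> 00 c2 01 00 */
    printf("baud bytes: %02x %02x %02x %02x\n",
           s.baud_le[0], s.baud_le[1], s.baud_le[2], s.baud_le[3]);
    return 0;
}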

@ -65,7 +65,6 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
int maxp;
/*
* Set the INQUIRY transfer length to 36. We don't use any of
@ -74,15 +73,6 @@ static int slave_alloc (struct scsi_device *sdev)
*/
sdev->inquiry_len = 36;
/*
* USB has unusual scatter-gather requirements: the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value. Fortunately this value is always a
* power of 2. Inform the block layer about this requirement.
*/
maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* Some host controllers may have alignment requirements.
* We'll play it safe by requiring 512-byte alignment always.

@ -796,29 +796,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
int maxp;
sdev->hostdata = devinfo;
/*
* We have two requirements here. We must satisfy the requirements
* of the physical HC and the demands of the protocol, as we
* definitely want no additional memory allocation in this path
* ruling out using bounce buffers.
*
* For a transmission on USB to continue we must never send
* a package that is smaller than maxpacket. Hence the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value.
* If the HC does not ensure that through SG,
* the upper layer must do that. We must assume nothing
* about the capabilities off the HC, so we use the most
* pessimistic requirement.
*/
maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.

@ -217,6 +217,8 @@ static int hgcm_call_preprocess_linaddr(
if (!bounce_buf)
return -ENOMEM;
*bounce_buf_ret = bounce_buf;
if (copy_in) {
ret = copy_from_user(bounce_buf, (void __user *)buf, len);
if (ret)
@ -225,7 +227,6 @@ static int hgcm_call_preprocess_linaddr(
memset(bounce_buf, 0, len);
}
*bounce_buf_ret = bounce_buf;
hgcm_call_add_pagelist_size(bounce_buf, len, extra);
return 0;
}
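
The hunk above moves the *bounce_buf_ret assignment ahead of the fallible copy, so the caller, which frees *bounce_buf_ret on its error path, can release the allocation even when the copy fails. A hedged sketch of that ownership rule with made-up names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend copy that can fail; stands in for copy_from_user(). */
static int copy_data(void *dst, const void *src, size_t len)
{
    if (!src)
        return -EFAULT;
    memcpy(dst, src, len);
    return 0;
}

/*
 * Hand ownership of the allocation to the caller *before* doing anything
 * that can fail, so a single cleanup path in the caller frees it whether
 * or not the copy succeeded.
 */
static int prepare_bounce_buf(const void *user_buf, size_t len, void **out)
{
    void *bounce = malloc(len);

    if (!bounce)
        return -ENOMEM;
    *out = bounce;              /* caller owns the buffer from here on */

    return copy_data(bounce, user_buf, len);
}

int main(void)
{
    void *buf = NULL;
    int ret = prepare_bounce_buf(NULL, 16, &buf);   /* forced failure */

    if (ret)
        printf("copy failed (%d), buffer is still freed below\n", ret);
    free(buf);                  /* one cleanup path, as in the caller */
    return 0;
}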

@ -14,13 +14,30 @@
#include <linux/err.h>
#include <linux/fs.h>
static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
static inline char *next_non_spacetab(char *first, const char *last)
{
for (; first <= last; first++)
if (!spacetab(*first))
return first;
return NULL;
}
static inline char *next_terminator(char *first, const char *last)
{
for (; first <= last; first++)
if (spacetab(*first) || !*first)
return first;
return NULL;
}
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
char *cp;
char *cp, *buf_end;
struct file *file;
int retval;
/* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@ -33,18 +50,40 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
/* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
if ((cp = strchr(bprm->buf, '\n')) == NULL)
cp = bprm->buf+BINPRM_BUF_SIZE-1;
/*
* This section handles parsing the #! line into separate
* interpreter path and argument strings. We must be careful
* because bprm->buf is not yet guaranteed to be NUL-terminated
* (though the buffer will have trailing NUL padding when the
* file size was smaller than the buffer size).
*
* We do not want to exec a truncated interpreter path, so either
* we find a newline (which indicates nothing is truncated), or
* we find a space/tab/NUL after the interpreter path (which
* itself may be preceded by spaces/tabs). Truncating the
* arguments is fine: the interpreter can re-read the script to
* parse them on its own.
*/
buf_end = bprm->buf + sizeof(bprm->buf) - 1;
cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
if (!cp) {
cp = next_non_spacetab(bprm->buf + 2, buf_end);
if (!cp)
return -ENOEXEC; /* Entire buf is spaces/tabs */
/*
* If there is no later space/tab/NUL we must assume the
* interpreter path is truncated.
*/
if (!next_terminator(cp, buf_end))
return -ENOEXEC;
cp = buf_end;
}
/* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
while (cp > bprm->buf) {
cp--;
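
A hedged, self-contained restatement of the parsing rule in the comment above, applied to an ordinary fixed-size buffer: a "#!" line is acceptable if a newline fits in the buffer, or if some space/tab/NUL terminates the interpreter path; otherwise the path may be truncated and must be rejected. The helper names echo the patch, but this is not the kernel code.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16     /* deliberately tiny so truncation is easy to show */

static int spacetab(char c) { return c == ' ' || c == '\t'; }

/* Return 1 if the "#!" line in buf can be parsed safely, 0 otherwise. */
static int shebang_ok(const char *buf, size_t len)
{
    const char *end = buf + len - 1;
    const char *cp;

    if (len < 2 || buf[0] != '#' || buf[1] != '!')
        return 0;
    if (memchr(buf, '\n', len))
        return 1;               /* whole line fits, nothing truncated */
    /* No newline: skip spaces/tabs in front of the interpreter path ... */
    for (cp = buf + 2; cp <= end && spacetab(*cp); cp++)
        ;
    if (cp > end)
        return 0;               /* buffer holds only spaces/tabs */
    /* ... then require some terminator after the path inside the buffer. */
    for (; cp <= end; cp++)
        if (spacetab(*cp) || *cp == '\0')
            return 1;           /* path complete, only args may be cut */
    return 0;                   /* interpreter path itself is truncated */
}

int main(void)
{
    char with_nl[BUF_SIZE] = "#!/bin/sh -e\n";
    char truncated[BUF_SIZE];

    memset(truncated, 'a', sizeof(truncated));
    memcpy(truncated, "#!/", 3);        /* path runs off the end of buf */

    printf("%d %d\n", shebang_ok(with_nl, sizeof(with_nl)),
           shebang_ok(truncated, sizeof(truncated)));   /* prints "1 0" */
    return 0;
}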

@ -2747,8 +2747,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
int nitems, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
bool qgroup_free);
void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
@ -3178,6 +3177,9 @@ void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *new,
struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,

Some files were not shown because too many files have changed in this diff.