This is the 4.19.119 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6pj8gACgkQONu9yGCS
 aT5mKxAAvzC4s4XHwDDckvvu57/sED2oEtp7MgmLuyK4Ih55GyyLGx9zg1A2z+rs
 wQSsVW+/WeurCj4CuVciakkCvgBeY494cbnghr2lohhJZ918/XnYmPODLJhlvtcV
 gZ4vxk5euNqpWGsmu+X+DRBG6QuU5GYf4ox39NZdtKm5I+kt5Lw44AHSNlFP0q3y
 drRFc49cqSxa4WkVRixJJOTQbSHARNWiayOG4uLb4zoZFvJOTDAp7+yX5LYD7lxY
 3FsQLVMSp7c/whppeGySVX0oJF/12weR9OQJZVxxhlMNggmGREwDxayBaPYqA4pa
 0OO83rO1aP9j2VK3HFiK4OwatKHcu0GvGV9I4rP3u8hWvJyUzTAfdcVUXHl6of12
 6hXG7F3f0TVY/OP6J2WepcQG5IbkiiAY1J0wlqbqo5MvOqESJZ/J0pGuFD1qzQ8n
 zaMnj2zhJHkJEfyP7Dvjo4y72eM9tWnFxKfm/PtuHWGovpP15rrsuHcs343U92Z7
 zQ/Ak10tA8FpSM7dXaTd98/3FkVdQbkImkEUOpWzPjiJFGyuk8j6/ZE9rCWtlNR1
 HP9cLgKB/PF/a3+kwtgGAhAHBVIA8trhSm1jRqEU7ki9sBQnV/2iR5b7UJ8xn4uA
 dl9HlxpiDYvIjRhHfMh6GXIhdO2T8coFzxKRztjxrM0dbaQeVKQ=
 =31Y5
 -----END PGP SIGNATURE-----

Merge 4.19.119 into android-4.19

Changes in 4.19.119
	ext4: fix extent_status fragmentation for plain files
	drm/msm: Use the correct dma_sync calls harder
	bpftool: Fix printing incorrect pointer in btf_dump_ptr
	crypto: mxs-dcp - make symbols 'sha1_null_hash' and 'sha256_null_hash' static
	vti4: removed duplicate log message.
	arm64: Add part number for Neoverse N1
	arm64: errata: Hide CTR_EL0.DIC on systems affected by Neoverse-N1 #1542419
	arm64: Fake the IminLine size on systems affected by Neoverse-N1 #1542419
	arm64: compat: Workaround Neoverse-N1 #1542419 for compat user-space
	arm64: Silence clang warning on mismatched value/register sizes
	watchdog: reset last_hw_keepalive time at start
	scsi: lpfc: Fix kasan slab-out-of-bounds error in lpfc_unreg_login
	scsi: lpfc: Fix crash in target side cable pulls hitting WAIT_FOR_UNREG
	ceph: return ceph_mdsc_do_request() errors from __get_parent()
	ceph: don't skip updating wanted caps when cap is stale
	pwm: rcar: Fix late Runtime PM enablement
	scsi: iscsi: Report unbind session event when the target has been removed
	ASoC: Intel: atom: Take the drv->lock mutex before calling sst_send_slot_map()
	nvme: fix deadlock caused by ANA update wrong locking
	kernel/gcov/fs.c: gcov_seq_next() should increase position index
	selftests: kmod: fix handling test numbers above 9
	ipc/util.c: sysvipc_find_ipc() should increase position index
	kconfig: qconf: Fix a few alignment issues
	s390/cio: avoid duplicated 'ADD' uevents
	loop: Better discard support for block devices
	Revert "powerpc/64: irq_work avoid interrupt when called with hardware irqs enabled"
	pwm: renesas-tpu: Fix late Runtime PM enablement
	pwm: bcm2835: Dynamically allocate base
	perf/core: Disable page faults when getting phys address
	ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN MPWIN895CL tablet
	xhci: Ensure link state is U3 after setting USB_SS_PORT_LS_U3
	drm/amd/display: Not doing optimize bandwidth if flip pending.
	tracing/selftests: Turn off timeout setting
	virtio-blk: improve virtqueue error to BLK_STS
	scsi: smartpqi: fix call trace in device discovery
	PCI/ASPM: Allow re-enabling Clock PM
	net: ipv6: add net argument to ip6_dst_lookup_flow
	net: ipv6_stub: use ip6_dst_lookup_flow instead of ip6_dst_lookup
	blktrace: Protect q->blk_trace with RCU
	blktrace: fix dereference after null check
	f2fs: fix to avoid memory leakage in f2fs_listxattr
	KVM: VMX: Zero out *all* general purpose registers after VM-Exit
	KVM: nVMX: Always sync GUEST_BNDCFGS when it comes from vmcs01
	KVM: Introduce a new guest mapping API
	kvm: fix compilation on aarch64
	kvm: fix compilation on s390
	kvm: fix compile on s390 part 2
	KVM: Properly check if "page" is valid in kvm_vcpu_unmap
	x86/kvm: Introduce kvm_(un)map_gfn()
	x86/kvm: Cache gfn to pfn translation
	x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
	x86/KVM: Clean up host's steal time structure
	cxgb4: fix adapter crash due to wrong MC size
	cxgb4: fix large delays in PTP synchronization
	ipv6: fix restrict IPV6_ADDRFORM operation
	macsec: avoid to set wrong mtu
	macvlan: fix null dereference in macvlan_device_event()
	net: bcmgenet: correct per TX/RX ring statistics
	net: netrom: Fix potential nr_neigh refcnt leak in nr_add_node
	net: stmmac: dwmac-meson8b: Add missing boundary to RGMII TX clock array
	net/x25: Fix x25_neigh refcnt leak when receiving frame
	sched: etf: do not assume all sockets are full blown
	tcp: cache line align MAX_TCP_HEADER
	team: fix hang in team_mode_get()
	vrf: Fix IPv6 with qdisc and xfrm
	net: dsa: b53: Lookup VID in ARL searches when VLAN is enabled
	net: dsa: b53: Fix ARL register definitions
	net: dsa: b53: Rework ARL bin logic
	net: dsa: b53: b53_arl_rw_op() needs to select IVL or SVL
	xfrm: Always set XFRM_TRANSFORMED in xfrm{4,6}_output_finish
	vrf: Check skb for XFRM_TRANSFORMED flag
	mlxsw: Fix some IS_ERR() vs NULL bugs
	KEYS: Avoid false positive ENOMEM error on key read
	ALSA: hda: Remove ASUS ROG Zenith from the blacklist
	ALSA: usb-audio: Add static mapping table for ALC1220-VB-based mobos
	ALSA: usb-audio: Add connector notifier delegation
	iio: core: remove extra semi-colon from devm_iio_device_register() macro
	iio: st_sensors: rely on odr mask to know if odr can be set
	iio: adc: stm32-adc: fix sleep in atomic context
	iio: xilinx-xadc: Fix ADC-B powerdown
	iio: xilinx-xadc: Fix clearing interrupt when enabling trigger
	iio: xilinx-xadc: Fix sequencer configuration for aux channels in simultaneous mode
	iio: xilinx-xadc: Make sure not exceed maximum samplerate
	fs/namespace.c: fix mountpoint reference counter race
	USB: sisusbvga: Change port variable from signed to unsigned
	USB: Add USB_QUIRK_DELAY_CTRL_MSG and USB_QUIRK_DELAY_INIT for Corsair K70 RGB RAPIDFIRE
	USB: early: Handle AMD's spec-compliant identifiers, too
	USB: core: Fix free-while-in-use bug in the USB S-Glibrary
	USB: hub: Fix handling of connect changes during sleep
	vmalloc: fix remap_vmalloc_range() bounds checks
	mm/hugetlb: fix a addressing exception caused by huge_pte_offset
	mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
	tools/vm: fix cross-compile build
	ALSA: usx2y: Fix potential NULL dereference
	ALSA: hda/realtek - Fix unexpected init_amp override
	ALSA: hda/realtek - Add new codec supported for ALC245
	ALSA: usb-audio: Fix usb audio refcnt leak when getting spdif
	ALSA: usb-audio: Filter out unsupported sample rates on Focusrite devices
	tpm/tpm_tis: Free IRQ if probing fails
	tpm: ibmvtpm: retry on H_CLOSED in tpm_ibmvtpm_send()
	KVM: s390: Return last valid slot if approx index is out-of-bounds
	KVM: Check validity of resolved slot when searching memslots
	KVM: VMX: Enable machine check support for 32bit targets
	tty: hvc: fix buffer overflow during hvc_alloc().
	tty: rocket, avoid OOB access
	usb-storage: Add unusual_devs entry for JMicron JMS566
	audit: check the length of userspace generated audit records
	ASoC: dapm: fixup dapm kcontrol widget
	iwlwifi: pcie: actually release queue memory in TVQM
	iwlwifi: mvm: beacon statistics shouldn't go backwards
	ARM: imx: provide v7_cpu_resume() only on ARM_CPU_SUSPEND=y
	powerpc/setup_64: Set cache-line-size based on cache-block-size
	staging: comedi: dt2815: fix writing hi byte of analog output
	staging: comedi: Fix comedi_device refcnt leak in comedi_open
	vt: don't hardcode the mem allocation upper bound
	vt: don't use kmalloc() for the unicode screen buffer
	staging: vt6656: Don't set RCR_MULTICAST or RCR_BROADCAST by default.
	staging: vt6656: Fix calling conditions of vnt_set_bss_mode
	staging: vt6656: Fix drivers TBTT timing counter.
	staging: vt6656: Fix pairwise key entry save.
	staging: vt6656: Power save stop wake_up_count wrap around.
	cdc-acm: close race between suspend() and acm_softint
	cdc-acm: introduce a cool down
	UAS: no use logging any details in case of ENODEV
	UAS: fix deadlock in error handling and PM flushing work
	usb: dwc3: gadget: Fix request completion check
	usb: f_fs: Clear OS Extended descriptor counts to zero in ffs_data_reset()
	xhci: prevent bus suspend if a roothub port detected an over-current condition
	serial: sh-sci: Make sure status register SCxSR is read in correct sequence
	xfs: Fix deadlock between AGI and AGF with RENAME_WHITEOUT
	s390/mm: fix page table upgrade vs 2ndary address mode accesses
	Linux 4.19.119

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I4b16db8472367d135a4ff68d2863c634bf093ef5
commit be3bb0daac
Author: Greg Kroah-Hartman
Date:   2020-04-29 17:26:17 +02:00

149 changed files with 1577 additions and 596 deletions

@@ -59,6 +59,7 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 118
SUBLEVEL = 119
EXTRAVERSION =
NAME = "People's Front"

@@ -89,8 +89,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
endif
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
obj-$(CONFIG_SOC_IMX1) += mach-imx1.o

@@ -505,6 +505,22 @@ config ARM64_ERRATUM_1463225
If unsure, say Y.
config ARM64_ERRATUM_1542419
bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
default y
help
This option adds a workaround for ARM Neoverse-N1 erratum
1542419.
Affected Neoverse-N1 cores could execute a stale instruction when
modified by another CPU. The workaround depends on a firmware
counterpart.
Workaround the issue by hiding the DIC feature from EL0. This
forces user-space to perform cache maintenance.
If unsure, say Y.
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y

@@ -22,6 +22,7 @@
#define CTR_L1IP_MASK 3
#define CTR_DMINLINE_SHIFT 16
#define CTR_IMINLINE_SHIFT 0
#define CTR_IMINLINE_MASK 0xf
#define CTR_ERG_SHIFT 20
#define CTR_CWG_SHIFT 24
#define CTR_CWG_MASK 15
@@ -29,7 +30,7 @@
#define CTR_DIC_SHIFT 29
#define CTR_CACHE_MINLINE_MASK \
(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
#define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)

@@ -53,8 +53,9 @@
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_WORKAROUND_1463225 33
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1542419 35
/* kabi: reserve 35 - 62 for future cpu capabilities */
/* kabi: reserve 36 - 62 for future cpu capabilities */
#define ARM64_NCAPS 62
#endif /* __ASM_CPUCAPS_H */

@@ -80,6 +80,7 @@
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define APM_CPU_PART_POTENZA 0x000
@@ -107,6 +108,7 @@
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)

@@ -643,6 +643,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
return false;
}
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
int scope)
{
u32 midr = read_cpuid_id();
bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return is_midr_in_range(midr, &range) && has_dic;
}
#ifdef CONFIG_HARDEN_EL2_VECTORS
static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -834,6 +846,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
.matches = needs_tx2_tvm_workaround,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
{
/* we depend on the firmware portion for correctness */
.desc = "ARM erratum 1542419 (kernel portion)",
.capability = ARM64_WORKAROUND_1542419,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_neoverse_n1_erratum_1542419,
.cpu_enable = cpu_enable_trap_ctr_access,
},
#endif
{
}

@@ -19,6 +19,7 @@
*/
#include <linux/compat.h>
#include <linux/cpufeature.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
@@ -28,6 +29,7 @@
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
static long
@@ -41,6 +43,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
if (fatal_signal_pending(current))
return 0;
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
/*
* The workaround requires an inner-shareable tlbi.
* We pick the reserved-ASID to minimise the impact.
*/
__tlbi(aside1is, __TLBI_VADDR(0, 0));
dsb(ish);
}
ret = __flush_cache_user_range(start, start + chunk);
if (ret)
return ret;

@@ -482,6 +482,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
/* Hide DIC so that we can trap the unnecessary maintenance...*/
val &= ~BIT(CTR_DIC_SHIFT);
/* ... and fake IminLine to reduce the number of traps. */
val &= ~CTR_IMINLINE_MASK;
val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
}
pt_regs_write_reg(regs, rt, val);
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);

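For context, here is a hypothetical EL0-side sketch (not part of this patch) of the CTR_EL0 read that ctr_read_handler() above emulates. With the workaround enabled, the trapped read returns a value with DIC cleared and IminLine faked to PAGE_SHIFT - 2, so a helper like this reports a "line size" of one page and cache-maintenance loops keyed off it take far fewer iterations:

/* Hypothetical user-space helper: derive the I-cache line size from
 * CTR_EL0. IminLine (bits [3:0]) is log2 of the line size in 4-byte
 * words, so the size in bytes is 4 << IminLine. With the workaround
 * active this MRS traps to EL1 and is emulated by the handler above.
 */
static inline unsigned long icache_line_size(void)
{
	unsigned long ctr;

	asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
	return 4UL << (ctr & 0xf);	/* 0xf is CTR_IMINLINE_MASK */
}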
@@ -518,6 +518,8 @@ static bool __init parse_cache_info(struct device_node *np,
lsizep = of_get_property(np, propnames[3], NULL);
if (bsizep == NULL)
bsizep = lsizep;
if (lsizep == NULL)
lsizep = bsizep;
if (lsizep != NULL)
lsize = be32_to_cpu(*lsizep);
if (bsizep != NULL)

@@ -492,35 +492,6 @@ static inline void clear_irq_work_pending(void)
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
void arch_irq_work_raise(void)
{
preempt_disable();
set_irq_work_pending_flag();
/*
* Non-nmi code running with interrupts disabled will replay
irq_happened before it re-enables interrupts, so set the
* decrementer there instead of causing a hardware exception
* which would immediately hit the masked interrupt handler
* and have the net effect of setting the decrementer in
* irq_happened.
*
* NMI interrupts can not check this when they return, so the
* decrementer hardware exception is raised, which will fire
* when interrupts are next enabled.
*
* BookE does not support this yet, it must audit all NMI
* interrupt handlers to ensure they call nmi_enter() so this
* check would be correct.
*/
if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
set_dec(1);
} else {
hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_DEC;
}
preempt_enable();
}
#else /* 32-bit */
DEFINE_PER_CPU(u8, irq_work_pending);
@@ -529,16 +500,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
#endif /* 32 vs 64 bit */
void arch_irq_work_raise(void)
{
/*
* 64-bit code that uses irq soft-mask can just cause an immediate
* interrupt here that gets soft masked, if this is called under
* local_irq_disable(). It might be possible to prevent that happening
* by noticing interrupts are disabled and setting decrementer pending
* to be replayed when irqs are enabled. The problem there is that
* tracing can call irq_work_raise, including in code that does low
* level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
* which could get tangled up if we're messing with the same state
* here.
*/
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
#endif /* 32 vs 64 bit */
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0

@@ -1666,6 +1666,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
start = slot + 1;
}
if (start >= slots->used_slots)
return slots->used_slots - 1;
if (gfn >= memslots[start].base_gfn &&
gfn < memslots[start].base_gfn + memslots[start].npages) {
atomic_set(&slots->lru_slot, start);

@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
{
mm_segment_t old_fs;
unsigned long asce, cr;
unsigned long flags;
old_fs = current->thread.mm_segment;
if (old_fs & 1)
return old_fs;
/* protect against a concurrent page table upgrade */
local_irq_save(flags);
current->thread.mm_segment |= 1;
asce = S390_lowcore.kernel_asce;
if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
__ctl_load(asce, 7, 7);
set_cpu_flag(CIF_ASCE_SECONDARY);
}
local_irq_restore(flags);
return old_fs;
}
EXPORT_SYMBOL(enable_sacf_uaccess);

@@ -72,8 +72,20 @@ static void __crst_table_upgrade(void *arg)
{
struct mm_struct *mm = arg;
if (current->active_mm == mm)
set_user_asce(mm);
/* we must change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment == USER_DS) {
__ctl_load(S390_lowcore.user_asce, 1, 1);
/* Mark user-ASCE present in CR1 */
clear_cpu_flag(CIF_ASCE_PRIMARY);
}
if (current->thread.mm_segment == USER_DS_SACF) {
__ctl_load(S390_lowcore.user_asce, 7, 7);
/* enable_sacf_uaccess does all or nothing */
WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
}
}
__tlb_flush_local();
}

@@ -622,10 +622,10 @@ struct kvm_vcpu_arch {
bool pvclock_set_guest_stopped_request;
struct {
u8 preempted;
u64 msr_val;
u64 last_steal;
struct gfn_to_hva_cache stime;
struct kvm_steal_time steal;
struct gfn_to_pfn_cache cache;
} st;
u64 tsc_offset;

@@ -7015,7 +7015,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
*/
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
#if defined(CONFIG_X86_MCE)
struct pt_regs regs = {
.cs = 3, /* Fake ring 3 no matter what the guest ran on */
.flags = X86_EFLAGS_IF,
@@ -10841,6 +10841,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
/*
* Clear all general purpose registers (except RSP, which is loaded by
* the CPU during VM-Exit) to prevent speculative use of the guest's
* values, even those that are saved/loaded via the stack. In theory,
* an L1 cache miss when restoring registers could lead to speculative
* execution with the guest's values. Zeroing XORs are dirt cheap,
* i.e. the extra paranoia is essentially free.
*/
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -10855,8 +10864,11 @@
"xor %%eax, %%eax \n\t"
"xor %%ebx, %%ebx \n\t"
"xor %%ecx, %%ecx \n\t"
"xor %%edx, %%edx \n\t"
"xor %%esi, %%esi \n\t"
"xor %%edi, %%edi \n\t"
"xor %%ebp, %%ebp \n\t"
"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
".pushsection .rodata \n\t"
".global vmx_return \n\t"
@@ -12125,13 +12137,9 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
set_cr4_guest_host_mask(vmx);
if (kvm_mpx_supported()) {
if (vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
else
vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
}
if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12195,6 +12203,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
if (vmx->nested.nested_run_pending) {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
vmcs12->vm_entry_intr_info_field);

@@ -2397,43 +2397,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
static void record_steal_time(struct kvm_vcpu *vcpu)
{
struct kvm_host_map map;
struct kvm_steal_time *st;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
/* -EAGAIN is returned in atomic context so we can just return. */
if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
&map, &vcpu->arch.st.cache, false))
return;
st = map.hva +
offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
/*
* Doing a TLB flush here, on the guest's behalf, can avoid
* expensive IPIs.
*/
if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb(vcpu, false);
if (vcpu->arch.st.steal.version & 1)
vcpu->arch.st.steal.version += 1; /* first time write, random junk */
vcpu->arch.st.preempted = 0;
vcpu->arch.st.steal.version += 1;
if (st->version & 1)
st->version += 1; /* first time write, random junk */
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
st->version += 1;
smp_wmb();
vcpu->arch.st.steal.steal += current->sched_info.run_delay -
st->steal += current->sched_info.run_delay -
vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
smp_wmb();
vcpu->arch.st.steal.version += 1;
st->version += 1;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
}
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -2575,11 +2577,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS,
sizeof(struct kvm_steal_time)))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
@@ -3272,18 +3269,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
struct kvm_host_map map;
struct kvm_steal_time *st;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
if (vcpu->arch.st.steal.preempted)
if (vcpu->arch.st.preempted)
return;
vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
&vcpu->arch.st.cache, true))
return;
kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal.preempted,
offsetof(struct kvm_steal_time, preempted),
sizeof(vcpu->arch.st.steal.preempted));
st = map.hva +
offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -8634,6 +8638,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
kvm_release_pfn(cache->pfn, cache->dirty, cache);
kvmclock_reset(vcpu);
@@ -9298,11 +9305,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
struct kvm_vcpu *vcpu;
int i;
/*
* memslots->generation has been incremented.
* mmio generation may have reached its maximum value.
*/
kvm_mmu_invalidate_mmio_sptes(kvm, gen);
/* Force re-initialization of steal_time cache */
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_kick(vcpu);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,

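To recap the pattern the steal-time code above moves to, a condensed sketch using the series' own helpers (gpa here is shorthand for the guest address held in vcpu->arch.st.msr_val):

/* Condensed sketch of the cached-map update pattern used by
 * record_steal_time() above: pin the guest page through the per-vCPU
 * gfn_to_pfn cache, update the fields through the host mapping, then
 * unmap and mark the page dirty.
 */
struct kvm_host_map map;
struct kvm_steal_time *st;

if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map, &vcpu->arch.st.cache, false))
	return;	/* -EAGAIN is returned in atomic context, so just skip */

st = map.hva + offset_in_page(gpa);
st->version += 1;	/* odd: update in progress */
smp_wmb();
st->steal += current->sched_info.run_delay - vcpu->arch.st.last_steal;
smp_wmb();
st->version += 1;	/* even: update complete */

kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);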
@@ -428,11 +428,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* information.
*/
struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
if (!blk_queue_discard(q)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -866,28 +867,47 @@ static void loop_config_discard(struct loop_device *lo)
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
/*
* If the backing device is a block device, mirror its zeroing
* capability. Set the discard sectors to the block device's zeroing
* capabilities because loop discards result in blkdev_issue_zeroout(),
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
struct request_queue *backingq;
backingq = bdev_get_queue(inode->i_bdev);
blk_queue_max_discard_sectors(q,
backingq->limits.max_write_zeroes_sectors);
blk_queue_max_write_zeroes_sectors(q,
backingq->limits.max_write_zeroes_sectors);
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
if ((!file->f_op->fallocate) ||
lo->lo_encrypt_key_size) {
} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
return;
} else {
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
if (q->limits.max_write_zeroes_sectors)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
else
blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}
static void loop_unprepare_queue(struct loop_device *lo)

@@ -277,9 +277,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
if (err == -ENOMEM || err == -ENOSPC)
switch (err) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
return BLK_STS_IOERR;
case -ENOMEM:
return BLK_STS_RESOURCE;
default:
return BLK_STS_IOERR;
}
}
if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))

@@ -1,5 +1,5 @@
/*
* Copyright (C) 2012 IBM Corporation
* Copyright (C) 2012-2020 IBM Corporation
*
* Author: Ashley Lai <ashleydlai@gmail.com>
*
@@ -140,6 +140,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return len;
}
/**
* ibmvtpm_crq_send_init - Send a CRQ initialize message
* @ibmvtpm: vtpm device struct
*
* Return:
* 0 on success.
* Non-zero on failure.
*/
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"%s failed rc=%d\n", __func__, rc);
return rc;
}
/**
* tpm_ibmvtpm_resume - Resume from suspend
*
* @dev: device struct
*
* Return: Always 0.
*/
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
int rc = 0;
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ,
ibmvtpm->vdev->unit_address);
} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
if (rc) {
dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
return rc;
}
rc = vio_enable_interrupts(ibmvtpm->vdev);
if (rc) {
dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
return rc;
}
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
dev_err(dev, "Error send_init rc=%d\n", rc);
return rc;
}
/**
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
@@ -153,6 +211,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
bool retry = true;
int rc, sig;
if (!ibmvtpm->rtce_buf) {
@@ -186,18 +245,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
*/
ibmvtpm->tpm_processing_cmd = true;
again:
rc = ibmvtpm_send_crq(ibmvtpm->vdev,
IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
count, ibmvtpm->rtce_dma_handle);
if (rc != H_SUCCESS) {
/*
* H_CLOSED can be returned after LPM resume. Call
* tpm_ibmvtpm_resume() to re-enable the CRQ then retry
* ibmvtpm_send_crq() once before failing.
*/
if (rc == H_CLOSED && retry) {
tpm_ibmvtpm_resume(ibmvtpm->dev);
retry = false;
goto again;
}
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
ibmvtpm->tpm_processing_cmd = false;
} else
rc = 0;
}
spin_unlock(&ibmvtpm->rtce_lock);
return rc;
return 0;
}
static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
@@ -275,26 +343,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
return rc;
}
/**
* ibmvtpm_crq_send_init - Send a CRQ initialize message
* @ibmvtpm: vtpm device struct
*
* Return:
* 0 on success.
* Non-zero on failure.
*/
static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_send_init failed rc=%d\n", rc);
return rc;
}
/**
* tpm_ibmvtpm_remove - ibm vtpm remove entry point
* @vdev: vio device struct
@@ -407,44 +455,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
}
/**
* tpm_ibmvtpm_resume - Resume from suspend
*
* @dev: device struct
*
* Return: Always 0.
*/
static int tpm_ibmvtpm_resume(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
int rc = 0;
do {
if (rc)
msleep(100);
rc = plpar_hcall_norets(H_ENABLE_CRQ,
ibmvtpm->vdev->unit_address);
} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
if (rc) {
dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
return rc;
}
rc = vio_enable_interrupts(ibmvtpm->vdev);
if (rc) {
dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
return rc;
}
rc = ibmvtpm_crq_send_init(ibmvtpm);
if (rc)
dev_err(dev, "Error send_init rc=%d\n", rc);
return rc;
}
static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
return (status == 0);

@@ -437,6 +437,9 @@ static void disable_interrupts(struct tpm_chip *chip)
u32 intmask;
int rc;
if (priv->irq == 0)
return;
rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
if (rc < 0)
intmask = 0;
@@ -984,9 +987,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (irq) {
tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
irq);
if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
dev_err(&chip->dev, FW_BUG
"TPM interrupt not working, polling instead\n");
disable_interrupts(chip);
}
} else {
tpm_tis_probe_irq(chip, intmask);
}

@@ -37,11 +37,11 @@
* Null hashes to align with hw behavior on imx6sl and ull
* these are flipped for consistency with hw output
*/
const uint8_t sha1_null_hash[] =
static const uint8_t sha1_null_hash[] =
"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
const uint8_t sha256_null_hash[] =
static const uint8_t sha256_null_hash[] =
"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"

@@ -996,6 +996,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
int i;
struct pipe_ctx *pipe;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
/* Must set to false to start with, due to OR in update function */
pipe->plane_state->status.is_flip_pending = false;
dc->hwss.update_pending_status(pipe);
if (pipe->plane_state->status.is_flip_pending)
return true;
}
return false;
}
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1003,6 +1023,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
if (is_flip_pending_in_pipes(dc, context))
return true;
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {

@@ -61,7 +61,7 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
if (get_dma_ops(dev)) {
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {
@@ -74,7 +74,7 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
struct device *dev = msm_obj->base.dev->dev;
if (get_dma_ops(dev)) {
if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
} else {

@@ -1308,8 +1308,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
static void stm32_adc_dma_buffer_done(void *data)
{
struct iio_dev *indio_dev = data;
struct stm32_adc *adc = iio_priv(indio_dev);
int residue = stm32_adc_dma_residue(adc);
iio_trigger_poll_chained(indio_dev->trig);
/*
* In DMA mode the trigger services of IIO are not used
* (e.g. no call to iio_trigger_poll).
* Calling irq handler associated to the hardware trigger is not
* relevant as the conversions have already been done. Data
* transfers are performed directly in DMA callback instead.
* This implementation avoids calling a trigger irq handler that
* may sleep in an atomic context (the DMA irq handler context).
*/
dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
while (residue >= indio_dev->scan_bytes) {
u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
iio_push_to_buffers(indio_dev, buffer);
residue -= indio_dev->scan_bytes;
adc->bufi += indio_dev->scan_bytes;
if (adc->bufi >= adc->rx_buf_sz)
adc->bufi = 0;
}
}
static int stm32_adc_dma_start(struct iio_dev *indio_dev)
@@ -1703,6 +1725,7 @@
{
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
irqreturn_t (*handler)(int irq, void *p) = NULL;
struct stm32_adc *adc;
int ret;
@@ -1785,9 +1808,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
if (ret < 0)
goto err_clk_disable;
if (!adc->dma_chan)
handler = &stm32_adc_trigger_handler;
ret = iio_triggered_buffer_setup(indio_dev,
&iio_pollfunc_store_time,
&stm32_adc_trigger_handler,
&iio_pollfunc_store_time, handler,
&stm32_adc_buffer_setup_ops);
if (ret) {
dev_err(&pdev->dev, "buffer setup failed\n");

@@ -103,6 +103,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_FLAGS_BUFFERED BIT(0)
/*
* The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does
* not have a hardware FIFO, which means an interrupt is generated for each
* conversion sequence. At a 1MSPS sample rate the CPU in ZYNQ7000 is so
* overloaded by the interrupts that it soft-locks up. For this reason the driver
* limits the maximum samplerate to 150kSPS. At this rate the CPU is fairly busy,
* but still responsive.
*/
#define XADC_MAX_SAMPLERATE 150000
static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
uint32_t val)
{
@@ -675,7 +685,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
spin_lock_irqsave(&xadc->lock, flags);
xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
if (state)
val |= XADC_AXI_INT_EOS;
else
@@ -723,13 +733,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
{
uint16_t val;
/* Powerdown the ADC-B when it is not needed. */
switch (seq_mode) {
case XADC_CONF1_SEQ_SIMULTANEOUS:
case XADC_CONF1_SEQ_INDEPENDENT:
val = XADC_CONF2_PD_ADC_B;
val = 0;
break;
default:
val = 0;
val = XADC_CONF2_PD_ADC_B;
break;
}
@@ -798,6 +809,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
if (ret)
goto err;
/*
* In simultaneous mode the upper and lower aux channels are sampled at
* the same time. In this mode the upper 8 bits in the sequencer
* register are don't care and the lower 8 bits control two channels
* each. As such we must set the bit if either the channel in the lower
* group or the upper group is enabled.
*/
if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
if (ret)
goto err;
@@ -824,11 +845,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
.postdisable = &xadc_postdisable,
};
static int xadc_read_samplerate(struct xadc *xadc)
{
unsigned int div;
uint16_t val16;
int ret;
ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
if (ret)
return ret;
div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
if (div < 2)
div = 2;
return xadc_get_dclk_rate(xadc) / div / 26;
}
static int xadc_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val, int *val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
unsigned int div;
uint16_t val16;
int ret;
@@ -881,41 +918,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = -((273150 << 12) / 503975);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
if (ret)
ret = xadc_read_samplerate(xadc);
if (ret < 0)
return ret;
div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
if (div < 2)
div = 2;
*val = xadc_get_dclk_rate(xadc) / div / 26;
*val = ret;
return IIO_VAL_INT;
default:
return -EINVAL;
}
}
static int xadc_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
static int xadc_write_samplerate(struct xadc *xadc, int val)
{
struct xadc *xadc = iio_priv(indio_dev);
unsigned long clk_rate = xadc_get_dclk_rate(xadc);
unsigned int div;
if (!clk_rate)
return -EINVAL;
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
if (val <= 0)
return -EINVAL;
/* Max. 150 kSPS */
if (val > 150000)
val = 150000;
if (val > XADC_MAX_SAMPLERATE)
val = XADC_MAX_SAMPLERATE;
val *= 26;
@@ -928,7 +955,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
* limit.
*/
div = clk_rate / val;
if (clk_rate / div / 26 > 150000)
if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
div++;
if (div < 2)
div = 2;
@@ -939,6 +966,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
div << XADC_CONF2_DIV_OFFSET);
}
static int xadc_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long info)
{
struct xadc *xadc = iio_priv(indio_dev);
if (info != IIO_CHAN_INFO_SAMP_FREQ)
return -EINVAL;
return xadc_write_samplerate(xadc, val);
}
static const struct iio_event_spec xadc_temp_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -1226,6 +1264,21 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
goto err_free_samplerate_trigger;
/*
* Make sure not to exceed the maximum samplerate since otherwise the
* resulting interrupt storm will soft-lock the system.
*/
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = xadc_read_samplerate(xadc);
if (ret < 0)
goto err_free_samplerate_trigger;
if (ret > XADC_MAX_SAMPLERATE) {
ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
if (ret < 0)
goto err_free_samplerate_trigger;
}
}
ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
if (ret)

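As a worked example of the samplerate relation enforced above (the 104 MHz figure is only an illustrative assumption, not taken from this patch):

/* Sketch of the relation behind xadc_read_samplerate() and
 * xadc_write_samplerate(): one conversion takes 26 ADCCLK cycles and
 * ADCCLK = DCLK / div with div clamped to >= 2, so
 * samplerate = dclk_rate / div / 26.
 * E.g. dclk_rate = 104 MHz with div = 27 gives ~148 kSPS (allowed),
 * while div = 26 gives ~154 kSPS, which exceeds XADC_MAX_SAMPLERATE
 * and is avoided by the div++ correction in the write path.
 */
static unsigned long xadc_samplerate(unsigned long dclk_rate, unsigned int div)
{
	if (div < 2)
		div = 2;
	return dclk_rate / div / 26;
}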
@@ -93,7 +93,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
struct st_sensor_odr_avl odr_out = {0, 0};
struct st_sensor_data *sdata = iio_priv(indio_dev);
if (!sdata->sensor_settings->odr.addr)
if (!sdata->sensor_settings->odr.mask)
return 0;
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);

@@ -408,16 +408,15 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct flowi6 fl6;
struct dst_entry *dst;
struct rt6_info *rt;
int ret;
memset(&fl6, 0, sizeof fl6);
fl6.daddr = dst_in->sin6_addr;
fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
if (ret < 0)
return ret;
dst = ipv6_stub->ipv6_dst_lookup_flow(addr->net, NULL, &fl6, NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
rt = (struct rt6_info *)dst;
if (ipv6_addr_any(&src_in->sin6_addr)) {

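The same conversion repeats in the rxe, mlx5, geneve, vxlan and vrf hunks below; the underlying change is that the old ipv6_stub->ipv6_dst_lookup() returned an int and filled a dst out-parameter, while ip6_dst_lookup_flow() (and its ipv6_stub->ipv6_dst_lookup_flow wrapper) returns the dst or an ERR_PTR directly. A minimal caller-side sketch, with net, sk and fl6 standing for the caller's context:

struct dst_entry *dst;

/* Old pattern:
 *	ret = ipv6_stub->ipv6_dst_lookup(net, sk, &dst, &fl6);
 *	if (ret < 0)
 *		return ret;
 * New pattern: no out-parameter that can be left half-initialised.
 */
dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL);
if (IS_ERR(dst))
	return PTR_ERR(dst);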
@@ -154,10 +154,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
memcpy(&fl6.daddr, daddr, sizeof(*daddr));
fl6.flowi6_proto = IPPROTO_UDP;
if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
recv_sockets.sk6->sk, &ndst, &fl6))) {
ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
recv_sockets.sk6->sk, &fl6,
NULL);
if (unlikely(IS_ERR(ndst))) {
pr_err_ratelimited("no route to %pI6\n", daddr);
goto put;
return NULL;
}
if (unlikely(ndst->error)) {

@@ -1253,6 +1253,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
reg |= ARLTBL_RW;
else
reg &= ~ARLTBL_RW;
if (dev->vlan_enabled)
reg &= ~ARLTBL_IVL_SVL_SELECT;
else
reg |= ARLTBL_IVL_SVL_SELECT;
b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
return b53_arl_op_wait(dev);
@@ -1262,6 +1266,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
u16 vid, struct b53_arl_entry *ent, u8 *idx,
bool is_valid)
{
DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
unsigned int i;
int ret;
@@ -1269,6 +1274,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
if (ret)
return ret;
bitmap_zero(free_bins, dev->num_arl_entries);
/* Read the bins */
for (i = 0; i < dev->num_arl_entries; i++) {
u64 mac_vid;
@@ -1280,13 +1287,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
b53_arl_to_entry(ent, mac_vid, fwd_entry);
if (!(fwd_entry & ARLTBL_VALID))
if (!(fwd_entry & ARLTBL_VALID)) {
set_bit(i, free_bins);
continue;
}
if ((mac_vid & ARLTBL_MAC_MASK) != mac)
continue;
if (dev->vlan_enabled &&
((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
continue;
*idx = i;
return 0;
}
if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
return -ENOSPC;
*idx = find_first_bit(free_bins, dev->num_arl_entries);
return -ENOENT;
}
@@ -1316,10 +1334,21 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
if (op)
return ret;
/* We could not find a matching MAC, so reset to a new entry */
if (ret) {
switch (ret) {
case -ENOSPC:
dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
addr, vid);
return is_valid ? ret : 0;
case -ENOENT:
/* We could not find a matching MAC, so reset to a new entry */
dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
addr, vid, idx);
fwd_entry = 0;
idx = 1;
break;
default:
dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
addr, vid, idx);
break;
}
memset(&ent, 0, sizeof(ent));

@@ -292,6 +292,7 @@
/* ARL Table Read/Write Register (8 bit) */
#define B53_ARLTBL_RW_CTRL 0x00
#define ARLTBL_RW BIT(0)
#define ARLTBL_IVL_SVL_SELECT BIT(6)
#define ARLTBL_START_DONE BIT(7)
/* MAC Address Index Register (48 bit) */
@@ -304,7 +305,7 @@
*
* BCM5325 and BCM5365 share most definitions below
*/
#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n))
#define B53_ARLTBL_MAC_VID_ENTRY(n) ((0x10 * (n)) + 0x10)
#define ARLTBL_MAC_MASK 0xffffffffffffULL
#define ARLTBL_VID_S 48
#define ARLTBL_VID_MASK_25 0xff
@@ -316,13 +317,16 @@
#define ARLTBL_VALID_25 BIT(63)
/* ARL Table Data Entry N Registers (32 bit) */
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08)
#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x18)
#define ARLTBL_DATA_PORT_ID_MASK 0x1ff
#define ARLTBL_TC(tc) ((3 & tc) << 11)
#define ARLTBL_AGE BIT(14)
#define ARLTBL_STATIC BIT(15)
#define ARLTBL_VALID BIT(16)
/* Maximum number of bin entries in the ARL for all switches */
#define B53_ARLTBL_MAX_BIN_ENTRIES 4
/* ARL Search Control Register (8 bit) */
#define B53_ARL_SRCH_CTL 0x50
#define B53_ARL_SRCH_CTL_25 0x20

@@ -998,6 +998,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
if (netif_running(dev))
bcmgenet_update_mib_counters(priv);
dev->netdev_ops->ndo_get_stats(dev);
for (i = 0; i < BCMGENET_STATS_LEN; i++) {
const struct bcmgenet_stats *s;
char *p;
@@ -3211,6 +3213,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
dev->stats.rx_packets = rx_packets;
dev->stats.rx_errors = rx_errors;
dev->stats.rx_missed_errors = rx_errors;
dev->stats.rx_dropped = rx_dropped;
return &dev->stats;
}

@@ -1065,9 +1065,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
}
}
static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type, unsigned long *region_size)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_meminfo mem_info;
@@ -1076,15 +1076,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
rc = cudbg_fill_meminfo(padap, &mem_info);
if (rc)
if (rc) {
cudbg_err->sys_err = rc;
return rc;
}
cudbg_t4_fwcache(pdbg_init, cudbg_err);
rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
if (rc)
if (rc) {
cudbg_err->sys_err = rc;
return rc;
}
return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
if (region_size)
*region_size = mem_info.avail[mc_idx].limit -
mem_info.avail[mc_idx].base;
return 0;
}
static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@@ -1092,7 +1100,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
unsigned long size = 0;
int rc;
rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
if (rc)
return rc;
return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
cudbg_err);

@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct adapter *adapter = (struct adapter *)container_of(ptp,
struct adapter, ptp_clock_info);
struct fw_ptp_cmd c;
struct adapter *adapter = container_of(ptp, struct adapter,
ptp_clock_info);
u64 ns;
int err;
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
FW_PTP_CMD_PORTID_V(0));
c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
c.u.ts.sc = FW_PTP_SC_GET_TIME;
err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
if (err < 0) {
dev_err(adapter->pdev_dev,
"PTP: %s error %d\n", __func__, -err);
return err;
}
ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
ns |= (u64)t4_read_reg(adapter,
T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
/* convert to timespec*/
ns = be64_to_cpu(c.u.ts.tm);
*ts = ns_to_timespec64(ns);
return err;
return 0;
}
/**

@@ -1896,6 +1896,9 @@
#define MAC_PORT_CFG2_A 0x818
#define MAC_PORT_PTP_SUM_LO_A 0x990
#define MAC_PORT_PTP_SUM_HI_A 0x994
#define MPS_CMN_CTL_A 0x9000
#define COUNTPAUSEMCRX_S 5

@@ -2217,12 +2217,11 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int ret;
ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
fl6);
if (ret < 0)
return ret;
dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
if (!(*out_ttl))
*out_ttl = ip6_dst_hoplimit(dst);
@@ -2428,7 +2427,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
struct ip_tunnel_key *tun_key = &e->tun_info.key;
struct net_device *out_dev;
struct net_device *out_dev = NULL;
struct neighbour *n = NULL;
struct flowi6 fl6 = {};
u8 nud_state, tos, ttl;

@@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
block = kzalloc(sizeof(*block), GFP_KERNEL);
if (!block)
return NULL;
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&block->resource_list);
block->afa = mlxsw_afa;
@@ -344,7 +344,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
mlxsw_afa_set_destroy(block->first_set);
err_first_set_create:
kfree(block);
return NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(mlxsw_afa_block_create);

@@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
* to be written using PEFA register to all indexes for all regions.
*/
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (!afa_block) {
err = -ENOMEM;
if (IS_ERR(afa_block)) {
err = PTR_ERR(afa_block);
goto err_afa_block;
}
err = mlxsw_afa_block_continue(afa_block);

@@ -442,7 +442,8 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
return ERR_PTR(-ENOMEM);
rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);

@@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
int err;
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
if (!afa_block)
return ERR_PTR(-ENOMEM);
if (IS_ERR(afa_block))
return afa_block;
err = mlxsw_afa_block_append_allocated_counter(afa_block,
counter_index);

@@ -125,6 +125,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
{ .div = 5, .val = 5, },
{ .div = 6, .val = 6, },
{ .div = 7, .val = 7, },
{ /* end of array */ }
};
clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);

@@ -801,7 +801,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
if (dst)
return dst;
}
if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) {
dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
NULL);
if (IS_ERR(dst)) {
netdev_dbg(dev, "no route to %pI6\n", &fl6->daddr);
return ERR_PTR(-ENETUNREACH);
}

@@ -3238,11 +3238,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev;
int err;
sci_t sci;
u8 icv_len = DEFAULT_ICV_LEN;
rx_handler_func_t *rx_handler;
u8 icv_len = DEFAULT_ICV_LEN;
struct net_device *real_dev;
int err, mtu;
sci_t sci;
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -3258,7 +3258,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
if (mtu < 0)
dev->mtu = 0;
else
dev->mtu = mtu;
rx_handler = rtnl_dereference(real_dev->rx_handler);
if (rx_handler && rx_handler != macsec_handle_frame)

@@ -1676,7 +1676,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct macvlan_dev,
list);
if (macvlan_sync_address(vlan->dev, dev->dev_addr))
if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
return NOTIFY_BAD;
break;

@@ -475,6 +475,9 @@ static const struct team_mode *team_mode_get(const char *kind)
struct team_mode_item *mitem;
const struct team_mode *mode = NULL;
if (!try_module_get(THIS_MODULE))
return NULL;
spin_lock(&mode_list_lock);
mitem = __find_mode(kind);
if (!mitem) {
@@ -490,6 +493,7 @@ static const struct team_mode *team_mode_get(const char *kind)
}
spin_unlock(&mode_list_lock);
module_put(THIS_MODULE);
return mode;
}

@@ -192,8 +192,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
fl6.flowi6_proto = iph->nexthdr;
fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst) || dst == dst_null)
goto err;
skb_dst_drop(skb);
@@ -478,7 +478,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
if (qdisc_tx_is_default(vrf_dev) ||
IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
return vrf_ip6_out_redirect(vrf_dev, skb);
@@ -692,7 +693,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;
if (qdisc_tx_is_default(vrf_dev))
if (qdisc_tx_is_default(vrf_dev) ||
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);
return vrf_ip_out_redirect(vrf_dev, skb);

@@ -1963,7 +1963,6 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct dst_entry *ndst;
struct flowi6 fl6;
int err;
if (!sock6)
return ERR_PTR(-EIO);
@@ -1986,10 +1985,9 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.fl6_dport = dport;
fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
&ndst, &fl6);
if (unlikely(err < 0)) {
ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
&fl6, NULL);
if (unlikely(IS_ERR(ndst))) {
netdev_dbg(dev, "no route to %pI6\n", daddr);
return ERR_PTR(-ENETUNREACH);
}

@@ -587,6 +587,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_stat_data {
struct iwl_mvm *mvm;
__le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
void *general;
@@ -630,6 +631,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
}
}
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
*/
if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
mvmvif->beacon_stats.accu_num_beacons +=
mvmvif->beacon_stats.num_beacons;
if (mvmvif->id != id)
return;
@@ -790,6 +798,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
flags = stats->flag;
}
data.flags = flags;
iwl_mvm_rx_stats_check_trigger(mvm, pkt);

@@ -1231,6 +1231,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
iwl_pcie_gen2_txq_unmap(trans, queue);
iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
trans_pcie->txq[queue] = NULL;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
}

@@ -402,7 +402,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
down_write(&ctrl->namespaces_rwsem);
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
unsigned nsid = le32_to_cpu(desc->nsids[n]);
@@ -413,7 +413,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (++n == nr_nsids)
break;
}
up_write(&ctrl->namespaces_rwsem);
up_read(&ctrl->namespaces_rwsem);
return 0;
}

@@ -67,6 +67,7 @@ struct pcie_link_state {
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
/* Exit latencies */
struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -164,8 +165,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
/* Don't enable Clock PM if the link is not Clock PM capable */
if (!link->clkpm_capable)
/*
* Don't enable Clock PM if the link is not Clock PM capable
* or Clock PM is disabled
*/
if (!link->clkpm_capable || link->clkpm_disable)
enable = 0;
/* Need nothing if the specified equals to current state */
if (link->clkpm_enabled == enable)
@@ -195,7 +199,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
-link->clkpm_capable = (blacklist) ? 0 : capable;
+link->clkpm_capable = capable;
+link->clkpm_disable = blacklist ? 1 : 0;
}
static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -1106,10 +1111,9 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
link->aspm_disable |= ASPM_STATE_L1;
pcie_config_aspm_link(link, policy_to_aspm_state(link));
-if (state & PCIE_LINK_STATE_CLKPM) {
-link->clkpm_capable = 0;
-pcie_set_clkpm(link, 0);
-}
+if (state & PCIE_LINK_STATE_CLKPM)
+link->clkpm_disable = 1;
+pcie_set_clkpm(link, policy_to_clkpm_state(link));
mutex_unlock(&aspm_lock);
if (sem)
up_read(&pci_bus_sem);


@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;


@@ -232,24 +232,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
+pm_runtime_enable(&pdev->dev);
ret = pwmchip_add(&rcar_pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
+pm_runtime_disable(&pdev->dev);
return ret;
}
-pm_runtime_enable(&pdev->dev);
return 0;
}
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+int ret;
+ret = pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
-return pwmchip_remove(&rcar_pwm->chip);
+return ret;
}
static const struct of_device_id rcar_pwm_of_table[] = {


@@ -423,16 +423,17 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
+pm_runtime_enable(&pdev->dev);
ret = pwmchip_add(&tpu->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip\n");
+pm_runtime_disable(&pdev->dev);
return ret;
}
dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
-pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -442,12 +443,10 @@ static int tpu_remove(struct platform_device *pdev)
int ret;
ret = pwmchip_remove(&tpu->chip);
-if (ret)
-return ret;
pm_runtime_disable(&pdev->dev);
-return 0;
+return ret;
}
#ifdef CONFIG_OF


@@ -827,8 +827,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
-dev_set_uevent_suppress(&sch->dev, 0);
-kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+if (dev_get_uevent_suppress(&sch->dev)) {
+dev_set_uevent_suppress(&sch->dev, 0);
+kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+}
/* make it known to the system */
ret = ccw_device_add(cdev);
if (ret) {
@@ -1036,8 +1038,11 @@ static int io_subchannel_probe(struct subchannel *sch)
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit.
*/
-dev_set_uevent_suppress(&sch->dev, 0);
-kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+if (dev_get_uevent_suppress(&sch->dev)) {
+/* should always be the case for the console */
+dev_set_uevent_suppress(&sch->dev, 0);
+kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+}
cdev = sch_get_cdev(sch);
rc = ccw_device_add(cdev);
if (rc) {


@@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
-}
-spin_unlock_irq(&vport->phba->hbalock);
+spin_unlock_irq(&vport->phba->hbalock);
-/* Remove original register reference. The host transport
-* won't reference this rport/remoteport any further.
-*/
-lpfc_nlp_put(ndlp);
+/* Remove original register reference. The host transport
+* won't reference this rport/remoteport any further.
+*/
+lpfc_nlp_put(ndlp);
+} else {
+spin_unlock_irq(&vport->phba->hbalock);
+}
rport_err:
return;


@@ -2472,6 +2472,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
if (phba->sli_rev == LPFC_SLI_REV4)
vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;


@@ -2010,7 +2010,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
-return;
+goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2022,6 +2022,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}


@@ -50,9 +50,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
struct sas_phy *phy = pqi_sas_phy->phy;
sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
-sas_phy_free(phy);
+if (pqi_sas_phy->added_to_port)
+list_del(&pqi_sas_phy->phy_list_entry);
+sas_phy_delete(phy);
kfree(pqi_sas_phy);
}


@@ -2594,8 +2594,10 @@ static int comedi_open(struct inode *inode, struct file *file)
}
cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
-if (!cfp)
+if (!cfp) {
+comedi_dev_put(dev);
return -ENOMEM;
+}
cfp->dev = dev;


@@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
int ret;
for (i = 0; i < insn->n; i++) {
/* FIXME: lo bit 0 chooses voltage output or current output */
lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
hi = (data[i] & 0xff0) >> 4;
@@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
if (ret)
return ret;
outb(hi, dev->iobase + DT2815_DATA);
devpriv->ao_readback[chan] = data[i];
}
return i;


@@ -143,7 +143,8 @@ void vnt_int_process_data(struct vnt_private *priv)
priv->wake_up_count =
priv->hw->conf.listen_interval;
---priv->wake_up_count;
+if (priv->wake_up_count)
+--priv->wake_up_count;
/* Turn on wake up to listen next beacon */
if (priv->wake_up_count == 1)


@@ -81,9 +81,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
case VNT_KEY_PAIRWISE:
key_mode |= mode;
key_inx = 4;
/* Don't save entry for pairwise key for station mode */
if (priv->op_mode == NL80211_IFTYPE_STATION)
clear_bit(entry, &priv->key_entry_inuse);
break;
default:
return -EINVAL;
@@ -107,7 +104,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
{
struct ieee80211_bss_conf *conf = &vif->bss_conf;
struct vnt_private *priv = hw->priv;
u8 *mac_addr = NULL;
u8 key_dec_mode = 0;
@@ -149,16 +145,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
}
-if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
key_dec_mode, true);
-} else {
-vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+else
+vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
key_dec_mode, true);
-vnt_set_keymode(hw, (u8 *)conf->bssid, key,
-VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
-}
return 0;
}


@@ -595,8 +595,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
priv->op_mode = vif->type;
vnt_set_bss_mode(priv);
/* LED blink on TX */
vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
@@ -683,7 +681,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->basic_rates = conf->basic_rates;
vnt_update_top_rates(priv);
vnt_set_bss_mode(priv);
dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
}
@@ -712,11 +709,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
priv->short_slot_time = false;
vnt_set_short_slot_time(priv);
vnt_update_ifs(priv);
vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
vnt_update_pre_ed_threshold(priv, false);
}
if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
BSS_CHANGED_ERP_SLOT))
vnt_set_bss_mode(priv);
if (changed & BSS_CHANGED_TXPOWER)
vnt_rf_setpower(priv, priv->current_rate,
conf->chandef.chan->hw_value);
@@ -740,12 +740,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
-vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
-conf->sync_tsf, priv->current_tsf);
vnt_mac_set_beacon_interval(priv, conf->beacon_int);
vnt_reset_next_tbtt(priv, conf->beacon_int);
+vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
+conf->sync_tsf, priv->current_tsf);
+vnt_update_next_tbtt(priv,
+conf->sync_tsf, conf->beacon_int);
} else {
vnt_clear_current_tsf(priv);
@@ -780,15 +783,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
{
struct vnt_private *priv = hw->priv;
u8 rx_mode = 0;
-int rc;
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
-rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
-MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
-if (!rc)
-rx_mode = RCR_MULTICAST | RCR_BROADCAST;
+vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
+MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
@@ -829,8 +828,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
break;
case DISABLE_KEY:
-if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+vnt_mac_disable_keyentry(priv, key->hw_key_idx);
+}
default:
break;
}


@@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
vtermnos[index] = vtermno;
cons_ops[index] = ops;
/* reserve all indices up to and including this index */
if (last_hvc < index)
last_hvc = index;
/* check if we need to re-register the kernel console */
hvc_check_console(index);
@@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
cons_ops[i] == hp->ops)
break;
-/* no matching slot, just use a counter */
-if (i >= MAX_NR_HVC_CONSOLES)
-i = ++last_hvc;
+if (i >= MAX_NR_HVC_CONSOLES) {
+/* find 'empty' slot for console */
+for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
+}
+/* no matching slot, just use a counter */
+if (i == MAX_NR_HVC_CONSOLES)
+i = ++last_hvc + MAX_NR_HVC_CONSOLES;
+}
hp->index = i;
-cons_ops[i] = ops;
-vtermnos[i] = vtermno;
+if (i < MAX_NR_HVC_CONSOLES) {
+cons_ops[i] = ops;
+vtermnos[i] = vtermno;
+}
list_add_tail(&(hp->next), &hvc_structs);
mutex_unlock(&hvc_structs_mutex);


@@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
tty_port_init(&info->port);
info->port.ops = &rocket_port_ops;
info->flags &= ~ROCKET_MODE_MASK;
-switch (pc104[board][line]) {
-case 422:
-info->flags |= ROCKET_MODE_RS422;
-break;
-case 485:
-info->flags |= ROCKET_MODE_RS485;
-break;
-case 232:
-default:
+if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
+switch (pc104[board][line]) {
+case 422:
+info->flags |= ROCKET_MODE_RS422;
+break;
+case 485:
+info->flags |= ROCKET_MODE_RS485;
+break;
+case 232:
+default:
+info->flags |= ROCKET_MODE_RS232;
+break;
+}
+else
info->flags |= ROCKET_MODE_RS232;
-break;
-}
info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {


@@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port)
tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
-char c = serial_port_in(port, SCxRDR);
+char c;
-status = serial_port_in(port, SCxSR);
+if (port->type == PORT_SCIF ||
+port->type == PORT_HSCIF) {
+status = serial_port_in(port, SCxSR);
+c = serial_port_in(port, SCxRDR);
+} else {
+c = serial_port_in(port, SCxRDR);
+status = serial_port_in(port, SCxSR);
+}
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
continue;


@@ -81,6 +81,7 @@
#include <linux/errno.h>
#include <linux/kd.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/major.h>
#include <linux/mm.h>
#include <linux/console.h>
@@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
/* allocate everything in one go */
memsize = cols * rows * sizeof(char32_t);
memsize += rows * sizeof(char32_t *);
-p = kmalloc(memsize, GFP_KERNEL);
+p = vmalloc(memsize);
if (!p)
return NULL;
@@ -366,7 +367,7 @@ static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
{
-kfree(vc->vc_uni_screen);
+vfree(vc->vc_uni_screen);
vc->vc_uni_screen = new_uniscr;
}
@@ -1209,7 +1210,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
return 0;
-if (new_screen_size > (4 << 20))
+if (new_screen_size > KMALLOC_MAX_SIZE)
return -EINVAL;
newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)


@@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb)
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
-if (retval && retval != -EPERM)
+if (retval && retval != -EPERM && retval != -ENODEV)
dev_err(&acm->control->dev,
"%s - usb_submit_urb failed: %d\n", __func__, retval);
else
dev_vdbg(&acm->control->dev,
"control resubmission terminated %d\n", retval);
}
static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
@@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
dev_err(&acm->data->dev,
"urb %d failed submission with %d\n",
index, res);
} else {
dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
}
set_bit(index, &acm->read_urbs_free);
return res;
@@ -472,6 +477,7 @@ static void acm_read_bulk_callback(struct urb *urb)
int status = urb->status;
bool stopped = false;
bool stalled = false;
bool cooldown = false;
dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
rb->index, urb->actual_length, status);
@@ -498,6 +504,14 @@ static void acm_read_bulk_callback(struct urb *urb)
__func__, status);
stopped = true;
break;
case -EOVERFLOW:
case -EPROTO:
dev_dbg(&acm->data->dev,
"%s - cooling babbling device\n", __func__);
usb_mark_last_busy(acm->dev);
set_bit(rb->index, &acm->urbs_in_error_delay);
cooldown = true;
break;
default:
dev_dbg(&acm->data->dev,
"%s - nonzero urb status received: %d\n",
@@ -519,9 +533,11 @@ static void acm_read_bulk_callback(struct urb *urb)
*/
smp_mb__after_atomic();
-if (stopped || stalled) {
+if (stopped || stalled || cooldown) {
if (stalled)
schedule_work(&acm->work);
else if (cooldown)
schedule_delayed_work(&acm->dwork, HZ / 2);
return;
}
@@ -563,14 +579,20 @@ static void acm_softint(struct work_struct *work)
struct acm *acm = container_of(work, struct acm, work);
if (test_bit(EVENT_RX_STALL, &acm->flags)) {
-if (!(usb_autopm_get_interface(acm->data))) {
+smp_mb(); /* against acm_suspend() */
+if (!acm->susp_count) {
for (i = 0; i < acm->rx_buflimit; i++)
usb_kill_urb(acm->read_urbs[i]);
usb_clear_halt(acm->dev, acm->in);
acm_submit_read_urbs(acm, GFP_KERNEL);
-usb_autopm_put_interface(acm->data);
+clear_bit(EVENT_RX_STALL, &acm->flags);
}
-clear_bit(EVENT_RX_STALL, &acm->flags);
}
if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
for (i = 0; i < ACM_NR; i++)
if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
acm_submit_read_urb(acm, i, GFP_NOIO);
}
if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
@@ -1365,6 +1387,7 @@ static int acm_probe(struct usb_interface *intf,
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
INIT_WORK(&acm->work, acm_softint);
INIT_DELAYED_WORK(&acm->dwork, acm_softint);
init_waitqueue_head(&acm->wioctl);
spin_lock_init(&acm->write_lock);
spin_lock_init(&acm->read_lock);
@@ -1574,6 +1597,7 @@ static void acm_disconnect(struct usb_interface *intf)
acm_kill_urbs(acm);
cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1616,6 +1640,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
acm_kill_urbs(acm);
cancel_work_sync(&acm->work);
cancel_delayed_work_sync(&acm->dwork);
acm->urbs_in_error_delay = 0;
return 0;
}


@@ -108,8 +108,11 @@ struct acm {
unsigned long flags;
# define EVENT_TTY_WAKEUP 0
# define EVENT_RX_STALL 1
# define ACM_ERROR_DELAY 3
unsigned long urbs_in_error_delay; /* these need to be restarted after a delay */
struct usb_cdc_line_coding line; /* bits, stop, parity */
-struct work_struct work; /* work queue entry for line discipline waking up */
+struct work_struct work; /* work queue entry for various purposes*/
struct delayed_work dwork; /* for cool downs needed in error recovery */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
unsigned int ctrlout; /* output control lines (DTR, RTS) */
struct async_icount iocount; /* counters for control line changes */


@@ -1197,6 +1197,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
/* Don't set the change_bits when the device
* was powered off.
*/
if (test_bit(port1, hub->power_bits))
set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -3052,6 +3057,15 @@ static int check_port_resume_type(struct usb_device *udev,
if (portchange & USB_PORT_STAT_C_ENABLE)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_ENABLE);
/*
* Whatever made this reset-resume necessary may have
* turned on the port1 bit in hub->change_bits. But after
* a successful reset-resume we want the bit to be clear;
* if it was on it would indicate that something happened
* following the reset-resume.
*/
clear_bit(port1, hub->change_bits);
}
return status;


@@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
int i, retval;
spin_lock_irqsave(&io->lock, flags);
-if (io->status) {
+if (io->status || io->count == 0) {
spin_unlock_irqrestore(&io->lock, flags);
return;
}
/* shut everything down */
io->status = -ECONNRESET;
io->count++; /* Keep the request alive until we're done */
spin_unlock_irqrestore(&io->lock, flags);
for (i = io->entries - 1; i >= 0; --i) {
@@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
__func__, retval);
}
spin_lock_irqsave(&io->lock, flags);
io->count--;
if (!io->count)
complete(&io->complete);
spin_unlock_irqrestore(&io->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_sg_cancel);


@@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
/* Corsair K70 RGB RAPDIFIRE */
{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
/* MIDI keyboard WORLDE MINI */
{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },


@@ -2280,14 +2280,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
{
-/*
-* For OUT direction, host may send less than the setup
-* length. Return true for all OUT requests.
-*/
-if (!req->direction)
-return true;
-return req->request.actual == req->request.length;
+return req->num_pending_sgs == 0;
}
static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
@@ -2311,8 +2304,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
req->request.actual = req->request.length - req->remaining;
-if (!dwc3_gadget_ep_request_completed(req) ||
-req->num_pending_sgs) {
+if (!dwc3_gadget_ep_request_completed(req)) {
__dwc3_gadget_kick_transfer(dep);
goto out;
}


@@ -735,19 +735,19 @@ static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
case COMP_USB_TRANSACTION_ERROR:
case COMP_STALL_ERROR:
default:
-if (ep_id == XDBC_EPID_OUT)
+if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
xdbc.flags |= XDBC_FLAGS_OUT_STALL;
-if (ep_id == XDBC_EPID_IN)
+if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
xdbc.flags |= XDBC_FLAGS_IN_STALL;
xdbc_trace("endpoint %d stalled\n", ep_id);
break;
}
-if (ep_id == XDBC_EPID_IN) {
+if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
-} else if (ep_id == XDBC_EPID_OUT) {
+} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
} else {
xdbc_trace("invalid endpoint id %d\n", ep_id);


@@ -120,8 +120,22 @@ struct xdbc_ring {
u32 cycle_state;
};
-#define XDBC_EPID_OUT 2
-#define XDBC_EPID_IN 3
+/*
+* These are the "Endpoint ID" (also known as "Context Index") values for the
+* OUT Transfer Ring and the IN Transfer Ring of a Debug Capability Context data
+* structure.
+* According to the "eXtensible Host Controller Interface for Universal Serial
+* Bus (xHCI)" specification, section "7.6.3.2 Endpoint Contexts and Transfer
+* Rings", these should be 0 and 1, and those are the values AMD machines give
+* you; but Intel machines seem to use the formula from section "4.5.1 Device
+* Context Index", which is supposed to be used for the Device Context only.
+* Luckily the values from Intel don't overlap with those from AMD, so we can
+* just test for both.
+*/
+#define XDBC_EPID_OUT 0
+#define XDBC_EPID_IN 1
+#define XDBC_EPID_OUT_INTEL 2
+#define XDBC_EPID_IN_INTEL 3
struct xdbc_state {
u16 vendor;


@@ -1737,6 +1737,10 @@ static void ffs_data_reset(struct ffs_data *ffs)
ffs->state = FFS_READ_DESCRIPTORS;
ffs->setup_state = FFS_NO_SETUP;
ffs->flags = 0;
ffs->ms_os_descs_ext_prop_count = 0;
ffs->ms_os_descs_ext_prop_name_len = 0;
ffs->ms_os_descs_ext_prop_data_len = 0;
}


@@ -1266,7 +1266,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, ports[wIndex], link_state);
spin_unlock_irqrestore(&xhci->lock, flags);
-msleep(20); /* wait device to enter */
+if (link_state == USB_SS_PORT_LS_U3) {
+int retries = 16;
+while (retries--) {
+usleep_range(4000, 8000);
+temp = readl(ports[wIndex]->addr);
+if ((temp & PORT_PLS_MASK) == XDEV_U3)
+break;
+}
+}
spin_lock_irqsave(&xhci->lock, flags);
temp = readl(ports[wIndex]->addr);
@@ -1472,6 +1481,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
}
if ((temp & PORT_RC))
reset_change = true;
if (temp & PORT_OC)
status = 1;
}
if (!status && !reset_change) {
xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
@@ -1537,6 +1548,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
port_index);
goto retry;
}
/* bail out if port detected a over-current condition */
if (t1 & PORT_OC) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port over-current detected\n");
return -EBUSY;
}
/* suspend ports in U0, or bail out for new connect changes */
if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
if ((t1 & PORT_CSC) && wake_enabled) {


@@ -1199,18 +1199,18 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
/* High level: Gfx (indexed) register access */
#ifdef INCL_SISUSB_CON
-int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
+int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data)
{
return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
-int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
+int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 *data)
{
return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
}
#endif
-int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data)
{
int ret;
@@ -1220,7 +1220,7 @@ int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 *data)
{
int ret;
@@ -1230,7 +1230,7 @@ int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
return ret;
}
-int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port, u8 idx,
u8 myand, u8 myor)
{
int ret;
@@ -1245,7 +1245,7 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
}
static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
-int port, u8 idx, u8 data, u8 mask)
+u32 port, u8 idx, u8 data, u8 mask)
{
int ret;
u8 tmp;
@@ -1258,13 +1258,13 @@ int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
return ret;
}
-int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor)
{
return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
}
-int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand)
{
return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
@@ -2787,8 +2787,8 @@ static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
struct sisusb_command *y, unsigned long arg)
{
-int retval, port, length;
-u32 address;
+int retval, length;
+u32 port, address;
/* All our commands require the device
* to be initialized.


@@ -812,17 +812,17 @@ static const struct SiS_VCLKData SiSUSB_VCLKData[] = {
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo);
-extern int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data);
-extern int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 * data);
-extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setreg(struct sisusb_usb_data *sisusb, u32 port, u8 data);
+extern int sisusb_getreg(struct sisusb_usb_data *sisusb, u32 port, u8 * data);
+extern int sisusb_setidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 data);
-extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_getidxreg(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 * data);
-extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand, u8 myor);
-extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregor(struct sisusb_usb_data *sisusb, u32 port,
u8 index, u8 myor);
-extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+extern int sisusb_setidxregand(struct sisusb_usb_data *sisusb, u32 port,
u8 idx, u8 myand);
void sisusb_delete(struct kref *kref);


@@ -81,6 +81,19 @@ static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
int status);
/*
* This driver needs its own workqueue, as we need to control memory allocation.
*
* In the course of error handling and power management uas_wait_for_pending_cmnds()
* needs to flush pending work items. In these contexts we cannot allocate memory
* by doing block IO as we would deadlock. For the same reason we cannot wait
* for anything allocating memory not heeding these constraints.
*
* So we have to control all work items that can be on the workqueue we flush.
* Hence we cannot share a queue and need our own.
*/
static struct workqueue_struct *workqueue;
static void uas_do_work(struct work_struct *work)
{
struct uas_dev_info *devinfo =
@@ -109,7 +122,7 @@ static void uas_do_work(struct work_struct *work)
if (!err)
cmdinfo->state &= ~IS_IN_WORK_LIST;
else
-schedule_work(&devinfo->work);
+queue_work(workqueue, &devinfo->work);
}
out:
spin_unlock_irqrestore(&devinfo->lock, flags);
@@ -134,7 +147,7 @@ static void uas_add_work(struct uas_cmd_info *cmdinfo)
lockdep_assert_held(&devinfo->lock);
cmdinfo->state |= IS_IN_WORK_LIST;
-schedule_work(&devinfo->work);
+queue_work(workqueue, &devinfo->work);
}
static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
@@ -190,6 +203,9 @@ static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
struct uas_cmd_info *ci = (void *)&cmnd->SCp;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
if (status == -ENODEV) /* too late */
return;
scmd_printk(KERN_INFO, cmnd,
"%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
prefix, status, cmdinfo->uas_tag,
@@ -1233,7 +1249,31 @@ static struct usb_driver uas_driver = {
.id_table = uas_usb_ids,
};
-module_usb_driver(uas_driver);
+static int __init uas_init(void)
+{
+int rv;
+workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
+if (!workqueue)
+return -ENOMEM;
+rv = usb_register(&uas_driver);
+if (rv) {
+destroy_workqueue(workqueue);
+return -ENOMEM;
+}
+return 0;
+}
+static void __exit uas_exit(void)
+{
+usb_deregister(&uas_driver);
+destroy_workqueue(workqueue);
+}
+module_init(uas_init);
+module_exit(uas_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(


@@ -2323,6 +2323,13 @@ UNUSUAL_DEV( 0x3340, 0xffff, 0x0000, 0x0000,
USB_SC_DEVICE,USB_PR_DEVICE,NULL,
US_FL_MAX_SECTORS_64 ),
/* Reported by Cyril Roelandt <tipecaml@gmail.com> */
UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
"JMicron",
"USB to ATA/ATAPI Bridge",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA ),
/* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
"iRiver",


@@ -264,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd)
if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
wd_data->last_keepalive = started_at;
wd_data->last_hw_keepalive = started_at;
watchdog_update_worker(wdd);
}


@@ -1972,8 +1972,12 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
}
/* want more caps from mds? */
-if (want & ~(cap->mds_wanted | cap->issued))
-goto ack;
+if (want & ~cap->mds_wanted) {
+if (want & ~(cap->mds_wanted | cap->issued))
+goto ack;
+if (!__cap_is_valid(cap))
+goto ack;
+}
/* things we might delay */
if ((cap->issued & ~retain) == 0 &&


@@ -151,6 +151,11 @@ static struct dentry *__get_parent(struct super_block *sb,
req->r_num_caps = 1;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err) {
ceph_mdsc_put_request(req);
return ERR_PTR(err);
}
inode = req->r_target_inode;
if (inode)
ihold(inode);


@@ -498,6 +498,30 @@ int ext4_ext_check_inode(struct inode *inode)
return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
static void ext4_cache_extents(struct inode *inode,
struct ext4_extent_header *eh)
{
struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
ext4_lblk_t prev = 0;
int i;
for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
unsigned int status = EXTENT_STATUS_WRITTEN;
ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
int len = ext4_ext_get_actual_len(ex);
if (prev && (prev != lblk))
ext4_es_cache_extent(inode, prev, lblk - prev, ~0,
EXTENT_STATUS_HOLE);
if (ext4_ext_is_unwritten(ex))
status = EXTENT_STATUS_UNWRITTEN;
ext4_es_cache_extent(inode, lblk, len,
ext4_ext_pblock(ex), status);
prev = lblk + len;
}
}
static struct buffer_head *
__read_extent_tree_block(const char *function, unsigned int line,
struct inode *inode, ext4_fsblk_t pblk, int depth,
@@ -532,26 +556,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
*/
if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
struct ext4_extent_header *eh = ext_block_hdr(bh);
-struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
-ext4_lblk_t prev = 0;
-int i;
-for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
-unsigned int status = EXTENT_STATUS_WRITTEN;
-ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
-int len = ext4_ext_get_actual_len(ex);
-if (prev && (prev != lblk))
-ext4_es_cache_extent(inode, prev,
-lblk - prev, ~0,
-EXTENT_STATUS_HOLE);
-if (ext4_ext_is_unwritten(ex))
-status = EXTENT_STATUS_UNWRITTEN;
-ext4_es_cache_extent(inode, lblk, len,
-ext4_ext_pblock(ex), status);
-prev = lblk + len;
-}
+ext4_cache_extents(inode, eh);
}
return bh;
errout:
@@ -899,6 +904,8 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
path[0].p_bh = NULL;
i = depth;
if (!(flags & EXT4_EX_NOCACHE) && depth == 0)
ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
ext_debug("depth %d: num %d, max %d\n",


@@ -560,6 +560,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = d_inode(dentry);
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
struct f2fs_xattr_entry *entry;
void *base_addr, *last_base_addr;
int error = 0;


@@ -3166,8 +3166,8 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
/* make certain new is below the root */
if (!is_path_reachable(new_mnt, new.dentry, &root))
goto out4;
-root_mp->m_count++; /* pin it so it won't go away */
lock_mount_hash();
+root_mp->m_count++; /* pin it so it won't go away */
detach_mnt(new_mnt, &parent_path);
detach_mnt(root_mnt, &root_parent);
if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {


@@ -250,7 +250,8 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
if (start < offset + dump->size) {
tsz = min(offset + (u64)dump->size - start, (u64)size);
buf = dump->buf + start - offset;
-if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
+if (remap_vmalloc_range_partial(vma, dst, buf, 0,
+tsz)) {
ret = -EFAULT;
goto out_unlock;
}
@@ -607,7 +608,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
-kaddr, tsz))
+kaddr, 0, tsz))
goto fail;
size -= tsz;


@@ -2949,7 +2949,8 @@ xfs_rename(
spaceres);
/*
-* Set up the target.
+* Check for expected errors before we dirty the transaction
+* so we can return an error without a transaction abort.
*/
if (target_ip == NULL) {
/*
@@ -2961,6 +2962,46 @@ xfs_rename(
if (error)
goto out_trans_cancel;
}
} else {
/*
* If target exists and it's a directory, check that whether
* it can be destroyed.
*/
if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
(!xfs_dir_isempty(target_ip) ||
(VFS_I(target_ip)->i_nlink > 2))) {
error = -EEXIST;
goto out_trans_cancel;
}
}
/*
* Directory entry creation below may acquire the AGF. Remove
* the whiteout from the unlinked list first to preserve correct
* AGI/AGF locking order. This dirties the transaction so failures
* after this point will abort and log recovery will clean up the
* mess.
*
* For whiteouts, we need to bump the link count on the whiteout
* inode. After this point, we have a real link, clear the tmpfile
* state flag from the inode so it doesn't accidentally get misused
* in future.
*/
if (wip) {
ASSERT(VFS_I(wip)->i_nlink == 0);
error = xfs_iunlink_remove(tp, wip);
if (error)
goto out_trans_cancel;
xfs_bumplink(tp, wip);
xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
VFS_I(wip)->i_state &= ~I_LINKABLE;
}
/*
* Set up the target.
*/
if (target_ip == NULL) {
/*
* If target does not exist and the rename crosses
* directories, adjust the target directory link count
@@ -2980,22 +3021,6 @@ xfs_rename(
goto out_trans_cancel;
}
} else { /* target_ip != NULL */
/*
* If target exists and it's a directory, check that both
* target and source are directories and that target can be
* destroyed, or that neither is a directory.
*/
if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
/*
* Make sure target dir is empty.
*/
if (!(xfs_dir_isempty(target_ip)) ||
(VFS_I(target_ip)->i_nlink > 2)) {
error = -EEXIST;
goto out_trans_cancel;
}
}
/*
* Link the source inode under the target name.
* If the source inode is a directory and we are moving
@@ -3086,32 +3111,6 @@ xfs_rename(
if (error)
goto out_trans_cancel;
/*
* For whiteouts, we need to bump the link count on the whiteout inode.
* This means that failures all the way up to this point leave the inode
* on the unlinked list and so cleanup is a simple matter of dropping
* the remaining reference to it. If we fail here after bumping the link
* count, we're shutting down the filesystem so we'll never see the
* intermediate state on disk.
*/
if (wip) {
ASSERT(VFS_I(wip)->i_nlink == 0);
error = xfs_bumplink(tp, wip);
if (error)
goto out_trans_cancel;
error = xfs_iunlink_remove(tp, wip);
if (error)
goto out_trans_cancel;
xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
/*
* Now we have a real link, clear the "I'm a tmpfile" state
* flag from the inode so it doesn't accidentally get misused in
* future.
*/
VFS_I(wip)->i_state &= ~I_LINKABLE;
}
xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
if (new_parent)


@@ -629,7 +629,7 @@ struct request_queue {
unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
-struct blk_trace *blk_trace;
+struct blk_trace __rcu *blk_trace;
struct mutex blk_trace_mutex;
#endif
/*


@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
**/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
-struct blk_trace *bt = (q)->blk_trace; \
+struct blk_trace *bt; \
+\
+rcu_read_lock(); \
+bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
-struct blk_trace *bt = q->blk_trace;
-if (likely(!bt))
-return false;
-return bt->act_mask & BLK_TC_NOTIFY;
+struct blk_trace *bt;
+bool ret;
+rcu_read_lock();
+bt = rcu_dereference(q->blk_trace);
+ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+rcu_read_unlock();
+return ret;
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,


@@ -599,7 +599,7 @@ void iio_device_unregister(struct iio_dev *indio_dev);
* 0 on success, negative error number on failure.
*/
#define devm_iio_device_register(dev, indio_dev) \
-__devm_iio_device_register((dev), (indio_dev), THIS_MODULE);
+__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
struct module *this_mod);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);


@@ -206,6 +206,32 @@ enum {
READING_SHADOW_PAGE_TABLES,
};
#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA)
struct kvm_host_map {
/*
* Only valid if the 'pfn' is managed by the host kernel (i.e. There is
* a 'struct page' for it. When using mem= kernel parameter some memory
* can be used as guest memory but they are not managed by host
* kernel).
* If 'pfn' is not managed by the host kernel, this field is
* initialized to KVM_UNMAPPED_PAGE.
*/
struct page *page;
void *hva;
kvm_pfn_t pfn;
kvm_pfn_t gfn;
};
/*
* Used to check if the mapping is valid or not. Never use 'kvm_host_map'
* directly to check for that.
*/
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
return !!map->hva;
}
/*
* Sometimes a large or cross-page mmio needs to be broken up into separate
* exits for userspace servicing.
@@ -682,6 +708,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);
void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -711,7 +738,13 @@ struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -966,7 +999,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
start = slot + 1;
}
-if (gfn >= memslots[start].base_gfn &&
+if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
gfn < memslots[start].base_gfn + memslots[start].npages) {
atomic_set(&slots->lru_slot, start);
return &memslots[start];


@@ -32,7 +32,7 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
+#include <linux/types.h>
/*
* Address types:
@@ -63,4 +63,11 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
struct gfn_to_pfn_cache {
u64 generation;
gfn_t gfn;
kvm_pfn_t pfn;
bool dirty;
};
#endif /* __KVM_TYPES_H__ */


@@ -105,7 +105,7 @@ extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
-unsigned long size);
+unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);


@@ -235,8 +235,10 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
-int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
-struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+const struct sock *sk,
+struct flowi6 *fl6,
+const struct in6_addr *final_dst);
struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
struct fib6_info *(*fib6_lookup)(struct net *net, int oif,


@@ -959,7 +959,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,


@@ -53,7 +53,7 @@ extern struct inet_hashinfo tcp_hashinfo;
extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);
-#define MAX_TCP_HEADER (128 + MAX_HEADER)
+#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS 48
#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
