This is the 4.19.153 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl+ag5UACgkQONu9yGCS
 aT5O3w//RaOcwQdi47/UJz8zyja1ZG8MSSCGibpwvaDwrsXu9es1QtqLAC38H10o
 ygxNLBZQHxhScsRpicNc+Dy87+lcSj8cF1ed7sd1LU8rvmQ18uIeFUZxfzYth8jW
 i6erzas0Ojw8IMy566GDxkfAC6n5GhJuJTVFQWUQpoEbsb5rXcGCLx3u+S3Ew+5t
 Xb9qE6r5cImYymvMkMy7RQ4Db2qgOwjkaCj+Ol+4BSR0bF4OweMQLPJs9gN8pJpr
 o2nxHg7wdO8SKJZCBVw8ZmfO4zF6czcKy+KzFajn+4LA2oT5mgiV8y21cd9CWYeQ
 JQK1jZGwwl/xljrM1yLd+crG8i11DhCStY90+4bxD68r8H+g1kwZ8jELmCwuuyx6
 dk1s7jOxyKl9qAnMt6r2HqrjgxGD+2hL+2S84jPGRBow5IYjrdD0REXZjyk1R7Rp
 8k00lRk1ATEy7H2lj4JW34tcsTEEDcn8PqUFx7MRKtCUI2uo4Gr5HXqf6wTJDp6S
 BsDe8mm77jd81vtw/AZ8Fv7Fg42QIPt7G1QV9wBbFvDmKmDa7Gj6SuQqTeu75oU9
 M++aWSwyOb08wZEE0y94wsm6r4raN3A8o70Df9FltNFTALowuIcR+CVtOnQfHEuL
 BUBJcWg3SDsIxkXYgvQ9jO5h38i6dhAIVGAcU4VB0rgP/ePKMQs=
 =GiLo
 -----END PGP SIGNATURE-----

Merge 4.19.153 into android-4.19-stable

Changes in 4.19.153
	ibmveth: Switch order of ibmveth_helper calls.
	ibmveth: Identify ingress large send packets.
	ipv4: Restore flowi4_oif update before call to xfrm_lookup_route
	mlx4: handle non-napi callers to napi_poll
	net: fec: Fix phy_device lookup for phy_reset_after_clk_enable()
	net: fec: Fix PHY init after phy_reset_after_clk_enable()
	net: fix pos incrementment in ipv6_route_seq_next
	net/smc: fix valid DMBE buffer sizes
	net: usb: qmi_wwan: add Cellient MPL200 card
	tipc: fix the skb_unshare() in tipc_buf_append()
	net/ipv4: always honour route mtu during forwarding
	r8169: fix data corruption issue on RTL8402
	net/tls: sendfile fails with ktls offload
	binder: fix UAF when releasing todo list
	ALSA: bebob: potential info leak in hwdep_read()
	chelsio/chtls: fix socket lock
	chelsio/chtls: correct netdevice for vlan interface
	chelsio/chtls: correct function return and return type
	net: hdlc: In hdlc_rcv, check to make sure dev is an HDLC device
	net: hdlc_raw_eth: Clear the IFF_TX_SKB_SHARING flag after calling ether_setup
	net/sched: act_tunnel_key: fix OOB write in case of IPv6 ERSPAN tunnels
	nfc: Ensure presence of NFC_ATTR_FIRMWARE_NAME attribute in nfc_genl_fw_download()
	tcp: fix to update snd_wl1 in bulk receiver fast path
	r8169: fix operation under forced interrupt threading
	icmp: randomize the global rate limiter
	ALSA: hda/realtek: Enable audio jacks of ASUS D700SA with ALC887
	cifs: remove bogus debug code
	cifs: Return the error from crypt_message when enc/dec key not found.
	KVM: x86/mmu: Commit zap of remaining invalid pages when recovering lpages
	KVM: SVM: Initialize prev_ga_tag before use
	ima: Don't ignore errors from crypto_shash_update()
	crypto: algif_aead - Do not set MAY_BACKLOG on the async path
	EDAC/i5100: Fix error handling order in i5100_init_one()
	EDAC/ti: Fix handling of platform_get_irq() error
	x86/fpu: Allow multiple bits in clearcpuid= parameter
	drivers/perf: xgene_pmu: Fix uninitialized resource struct
	x86/nmi: Fix nmi_handle() duration miscalculation
	x86/events/amd/iommu: Fix sizeof mismatch
	crypto: algif_skcipher - EBUSY on aio should be an error
	crypto: mediatek - Fix wrong return value in mtk_desc_ring_alloc()
	crypto: ixp4xx - Fix the size used in a 'dma_free_coherent()' call
	crypto: picoxcell - Fix potential race condition bug
	media: tuner-simple: fix regression in simple_set_radio_freq
	media: Revert "media: exynos4-is: Add missed check for pinctrl_lookup_state()"
	media: m5mols: Check function pointer in m5mols_sensor_power
	media: uvcvideo: Set media controller entity functions
	media: uvcvideo: Silence shift-out-of-bounds warning
	media: omap3isp: Fix memleak in isp_probe
	crypto: omap-sham - fix digcnt register handling with export/import
	hwmon: (pmbus/max34440) Fix status register reads for MAX344{51,60,61}
	cypto: mediatek - fix leaks in mtk_desc_ring_alloc
	media: mx2_emmaprp: Fix memleak in emmaprp_probe
	media: tc358743: initialize variable
	media: tc358743: cleanup tc358743_cec_isr
	media: rcar-vin: Fix a reference count leak.
	media: rockchip/rga: Fix a reference count leak.
	media: platform: fcp: Fix a reference count leak.
	media: camss: Fix a reference count leak.
	media: s5p-mfc: Fix a reference count leak
	media: stm32-dcmi: Fix a reference count leak
	media: ti-vpe: Fix a missing check and reference count leak
	regulator: resolve supply after creating regulator
	pinctrl: bcm: fix kconfig dependency warning when !GPIOLIB
	spi: spi-s3c64xx: swap s3c64xx_spi_set_cs() and s3c64xx_enable_datapath()
	spi: spi-s3c64xx: Check return values
	ath10k: provide survey info as accumulated data
	Bluetooth: hci_uart: Cancel init work before unregistering
	ath6kl: prevent potential array overflow in ath6kl_add_new_sta()
	ath9k: Fix potential out of bounds in ath9k_htc_txcompletion_cb()
	ath10k: Fix the size used in a 'dma_free_coherent()' call in an error handling path
	wcn36xx: Fix reported 802.11n rx_highest rate wcn3660/wcn3680
	ASoC: qcom: lpass-platform: fix memory leak
	ASoC: qcom: lpass-cpu: fix concurrency issue
	brcmfmac: check ndev pointer
	mwifiex: Do not use GFP_KERNEL in atomic context
	staging: rtl8192u: Do not use GFP_KERNEL in atomic context
	drm/gma500: fix error check
	scsi: qla4xxx: Fix an error handling path in 'qla4xxx_get_host_stats()'
	scsi: qla2xxx: Fix wrong return value in qla_nvme_register_hba()
	scsi: csiostor: Fix wrong return value in csio_hw_prep_fw()
	backlight: sky81452-backlight: Fix refcount imbalance on error
	VMCI: check return value of get_user_pages_fast() for errors
	tty: serial: earlycon dependency
	tty: hvcs: Don't NULL tty->driver_data until hvcs_cleanup()
	pty: do tty_flip_buffer_push without port->lock in pty_write
	pwm: lpss: Fix off by one error in base_unit math in pwm_lpss_prepare()
	pwm: lpss: Add range limit check for the base_unit register value
	drivers/virt/fsl_hypervisor: Fix error handling path
	video: fbdev: vga16fb: fix setting of pixclock because a pass-by-value error
	video: fbdev: sis: fix null ptr dereference
	video: fbdev: radeon: Fix memleak in radeonfb_pci_register
	HID: roccat: add bounds checking in kone_sysfs_write_settings()
	pinctrl: mcp23s08: Fix mcp23x17_regmap initialiser
	pinctrl: mcp23s08: Fix mcp23x17 precious range
	net/mlx5: Don't call timecounter cyc2time directly from 1PPS flow
	net: stmmac: use netif_tx_start|stop_all_queues() function
	cpufreq: armada-37xx: Add missing MODULE_DEVICE_TABLE
	net: dsa: rtl8366: Check validity of passed VLANs
	net: dsa: rtl8366: Refactor VLAN/PVID init
	net: dsa: rtl8366: Skip PVID setting if not requested
	net: dsa: rtl8366rb: Support all 4096 VLANs
	ath6kl: wmi: prevent a shift wrapping bug in ath6kl_wmi_delete_pstream_cmd()
	misc: mic: scif: Fix error handling path
	ALSA: seq: oss: Avoid mutex lock for a long-time ioctl
	usb: dwc2: Fix parameter type in function pointer prototype
	quota: clear padding in v2r1_mem2diskdqb()
	slimbus: core: check get_addr before removing laddr ida
	slimbus: core: do not enter to clock pause mode in core
	slimbus: qcom-ngd-ctrl: disable ngd in qmi server down callback
	HID: hid-input: fix stylus battery reporting
	qtnfmac: fix resource leaks on unsupported iftype error return path
	net: enic: Cure the enic api locking trainwreck
	mfd: sm501: Fix leaks in probe()
	iwlwifi: mvm: split a print to avoid a WARNING in ROC
	usb: gadget: f_ncm: fix ncm_bitrate for SuperSpeed and above.
	usb: gadget: u_ether: enable qmult on SuperSpeed Plus as well
	nl80211: fix non-split wiphy information
	usb: dwc2: Fix INTR OUT transfers in DDMA mode.
	scsi: target: tcmu: Fix warning: 'page' may be used uninitialized
	scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()
	platform/x86: mlx-platform: Remove PSU EEPROM configuration
	mwifiex: fix double free
	ipvs: clear skb->tstamp in forwarding path
	net: korina: fix kfree of rx/tx descriptor array
	netfilter: nf_log: missing vlan offload tag and proto
	mm/memcg: fix device private memcg accounting
	mm, oom_adj: don't loop through tasks in __set_oom_adj when not necessary
	IB/mlx4: Fix starvation in paravirt mux/demux
	IB/mlx4: Adjust delayed work when a dup is observed
	powerpc/pseries: Fix missing of_node_put() in rng_init()
	powerpc/icp-hv: Fix missing of_node_put() in success path
	RDMA/ucma: Fix locking for ctx->events_reported
	RDMA/ucma: Add missing locking around rdma_leave_multicast()
	mtd: lpddr: fix excessive stack usage with clang
	powerpc/pseries: explicitly reschedule during drmem_lmb list traversal
	mtd: mtdoops: Don't write panic data twice
	ARM: 9007/1: l2c: fix prefetch bits init in L2X0_AUX_CTRL using DT values
	arc: plat-hsdk: fix kconfig dependency warning when !RESET_CONTROLLER
	xfs: limit entries returned when counting fsmap records
	xfs: fix high key handling in the rt allocator's query_range function
	RDMA/qedr: Fix use of uninitialized field
	RDMA/qedr: Fix inline size returned for iWARP
	powerpc/tau: Use appropriate temperature sample interval
	powerpc/tau: Convert from timer to workqueue
	powerpc/tau: Remove duplicated set_thresholds() call
	Linux 4.19.153

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I9e85e8ca67ab8e28d04a77339f80fdbf3c568956
This commit is contained in:
Greg Kroah-Hartman 2020-10-29 11:36:20 +01:00
commit b9a942466b
147 changed files with 962 additions and 543 deletions

View file

@@ -562,7 +562,7 @@
loops can be debugged more effectively on production loops can be debugged more effectively on production
systems. systems.
clearcpuid=BITNUM [X86] clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily numbers. Note the Linux specific bits are not necessarily

View file

@@ -934,12 +934,14 @@ icmp_ratelimit - INTEGER
icmp_msgs_per_sec - INTEGER icmp_msgs_per_sec - INTEGER
Limit maximal number of ICMP packets sent per second from this host. Limit maximal number of ICMP packets sent per second from this host.
Only messages whose type matches icmp_ratemask (see below) are Only messages whose type matches icmp_ratemask (see below) are
controlled by this limit. controlled by this limit. For security reasons, the precise count
of messages per second is randomized.
Default: 1000 Default: 1000
icmp_msgs_burst - INTEGER icmp_msgs_burst - INTEGER
icmp_msgs_per_sec controls number of ICMP packets sent per second, icmp_msgs_per_sec controls number of ICMP packets sent per second,
while icmp_msgs_burst controls the burst size of these packets. while icmp_msgs_burst controls the burst size of these packets.
For security reasons, the precise burst size is randomized.
Default: 50 Default: 50
icmp_ratemask - INTEGER icmp_ratemask - INTEGER

View file

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 4 VERSION = 4
PATCHLEVEL = 19 PATCHLEVEL = 19
SUBLEVEL = 152 SUBLEVEL = 153
EXTRAVERSION = EXTRAVERSION =
NAME = "People's Front" NAME = "People's Front"

View file

@@ -11,5 +11,6 @@ menuconfig ARC_SOC_HSDK
select ARC_HAS_ACCL_REGS select ARC_HAS_ACCL_REGS
select ARC_IRQ_NO_AUTOSAVE select ARC_IRQ_NO_AUTOSAVE
select CLK_HSDK select CLK_HSDK
select RESET_CONTROLLER
select RESET_HSDK select RESET_HSDK
select MIGHT_HAVE_PCI select MIGHT_HAVE_PCI

View file

@@ -1261,20 +1261,28 @@ static void __init l2c310_of_parse(const struct device_node *np,
ret = of_property_read_u32(np, "prefetch-data", &val); ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) { if (ret == 0) {
if (val) if (val) {
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH; prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
else *aux_val |= L310_PREFETCH_CTRL_DATA_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH; prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) { } else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n"); pr_err("L2C-310 OF prefetch-data property value is missing\n");
} }
ret = of_property_read_u32(np, "prefetch-instr", &val); ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) { if (ret == 0) {
if (val) if (val) {
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH; prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
else *aux_val |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else {
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH; prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
*aux_val &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
}
*aux_mask &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) { } else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n"); pr_err("L2C-310 OF prefetch-instr property value is missing\n");
} }

View file

@@ -12,6 +12,8 @@
#ifndef _ASM_POWERPC_LMB_H #ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H #define _ASM_POWERPC_LMB_H
#include <linux/sched.h>
struct drmem_lmb { struct drmem_lmb {
u64 base_addr; u64 base_addr;
u32 drc_index; u32 drc_index;
@@ -27,8 +29,22 @@ struct drmem_lmb_info {
extern struct drmem_lmb_info *drmem_info; extern struct drmem_lmb_info *drmem_info;
static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
const struct drmem_lmb *start)
{
/*
* DLPAR code paths can take several milliseconds per element
* when interacting with firmware. Ensure that we don't
* unfairly monopolize the CPU.
*/
if (((++lmb - start) % 16) == 0)
cond_resched();
return lmb;
}
#define for_each_drmem_lmb_in_range(lmb, start, end) \ #define for_each_drmem_lmb_in_range(lmb, start, end) \
for ((lmb) = (start); (lmb) < (end); (lmb)++) for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
#define for_each_drmem_lmb(lmb) \ #define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \ for_each_drmem_lmb_in_range((lmb), \

View file

@@ -788,7 +788,7 @@
#define THRM1_TIN (1 << 31) #define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30) #define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) ((x&0x7f)<<23) #define THRM1_THRES(x) ((x&0x7f)<<23)
#define THRM3_SITV(x) ((x&0x3fff)<<1) #define THRM3_SITV(x) ((x & 0x1fff) << 1)
#define THRM1_TID (1<<2) #define THRM1_TID (1<<2)
#define THRM1_TIE (1<<1) #define THRM1_TIE (1<<1)
#define THRM1_V (1<<0) #define THRM1_V (1<<0)

View file

@@ -13,13 +13,14 @@
*/ */
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/param.h> #include <linux/param.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/reg.h> #include <asm/reg.h>
@@ -39,8 +40,6 @@ static struct tau_temp
unsigned char grew; unsigned char grew;
} tau[NR_CPUS]; } tau[NR_CPUS];
struct timer_list tau_timer;
#undef DEBUG #undef DEBUG
/* TODO: put these in a /proc interface, with some sanity checks, and maybe /* TODO: put these in a /proc interface, with some sanity checks, and maybe
@@ -50,7 +49,7 @@ struct timer_list tau_timer;
#define step_size 2 /* step size when temp goes out of range */ #define step_size 2 /* step size when temp goes out of range */
#define window_expand 1 /* expand the window by this much */ #define window_expand 1 /* expand the window by this much */
/* configurable values for shrinking the window */ /* configurable values for shrinking the window */
#define shrink_timer 2*HZ /* period between shrinking the window */ #define shrink_timer 2000 /* period between shrinking the window */
#define min_window 2 /* minimum window size, degrees C */ #define min_window 2 /* minimum window size, degrees C */
static void set_thresholds(unsigned long cpu) static void set_thresholds(unsigned long cpu)
@@ -111,11 +110,6 @@ static void TAUupdate(int cpu)
#ifdef DEBUG #ifdef DEBUG
printk("grew = %d\n", tau[cpu].grew); printk("grew = %d\n", tau[cpu].grew);
#endif #endif
#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
set_thresholds(cpu);
#endif
} }
#ifdef CONFIG_TAU_INT #ifdef CONFIG_TAU_INT
@@ -178,27 +172,27 @@ static void tau_timeout(void * info)
* complex sleep code needs to be added. One mtspr every time * complex sleep code needs to be added. One mtspr every time
* tau_timeout is called is probably not a big deal. * tau_timeout is called is probably not a big deal.
* *
* Enable thermal sensor and set up sample interval timer * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
* need 20 us to do the compare.. until a nice 'cpu_speed' function * recommends that "the maximum value be set in THRM3 under all
* call is implemented, just assume a 500 mhz clock. It doesn't really * conditions."
* matter if we take too long for a compare since it's all interrupt
* driven anyway.
*
* use a extra long time.. (60 us @ 500 mhz)
*/ */
mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
local_irq_restore(flags); local_irq_restore(flags);
} }
static void tau_timeout_smp(struct timer_list *unused) static struct workqueue_struct *tau_workq;
{
/* schedule ourselves to be run again */ static void tau_work_func(struct work_struct *work)
mod_timer(&tau_timer, jiffies + shrink_timer) ; {
msleep(shrink_timer);
on_each_cpu(tau_timeout, NULL, 0); on_each_cpu(tau_timeout, NULL, 0);
/* schedule ourselves to be run again */
queue_work(tau_workq, work);
} }
DECLARE_WORK(tau_work, tau_work_func);
/* /*
* setup the TAU * setup the TAU
* *
@@ -231,21 +225,16 @@ static int __init TAU_init(void)
return 1; return 1;
} }
tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
/* first, set up the window shrinking timer */ if (!tau_workq)
timer_setup(&tau_timer, tau_timeout_smp, 0); return -ENOMEM;
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
on_each_cpu(TAU_init_smp, NULL, 0); on_each_cpu(TAU_init_smp, NULL, 0);
printk("Thermal assist unit "); queue_work(tau_workq, &tau_work);
#ifdef CONFIG_TAU_INT
printk("using interrupts, "); pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
#else IS_ENABLED(CONFIG_TAU_INT) ? "interrupts" : "workqueue", shrink_timer);
printk("using timers, ");
#endif
printk("shrink_timer: %d jiffies\n", shrink_timer);
tau_initialized = 1; tau_initialized = 1;
return 0; return 0;

View file

@@ -40,6 +40,7 @@ static __init int rng_init(void)
ppc_md.get_random_seed = pseries_get_random_long; ppc_md.get_random_seed = pseries_get_random_long;
of_node_put(dn);
return 0; return 0;
} }
machine_subsys_initcall(pseries, rng_init); machine_subsys_initcall(pseries, rng_init);

View file

@@ -179,6 +179,7 @@ int icp_hv_init(void)
icp_ops = &icp_hv_ops; icp_ops = &icp_hv_ops;
of_node_put(np);
return 0; return 0;
} }

View file

@@ -387,7 +387,7 @@ static __init int _init_events_attrs(void)
while (amd_iommu_v2_event_descs[i].attr.attr.name) while (amd_iommu_v2_event_descs[i].attr.attr.name)
i++; i++;
attrs = kcalloc(i + 1, sizeof(struct attribute **), GFP_KERNEL); attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs) if (!attrs)
return -ENOMEM; return -ENOMEM;

View file

@@ -249,9 +249,9 @@ static void __init fpu__init_system_ctx_switch(void)
*/ */
static void __init fpu__init_parse_early_param(void) static void __init fpu__init_parse_early_param(void)
{ {
char arg[32]; char arg[128];
char *argptr = arg; char *argptr = arg;
int bit; int arglen, res, bit;
if (cmdline_find_option_bool(boot_command_line, "no387")) if (cmdline_find_option_bool(boot_command_line, "no387"))
setup_clear_cpu_cap(X86_FEATURE_FPU); setup_clear_cpu_cap(X86_FEATURE_FPU);
@@ -271,12 +271,26 @@ static void __init fpu__init_parse_early_param(void)
if (cmdline_find_option_bool(boot_command_line, "noxsaves")) if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
setup_clear_cpu_cap(X86_FEATURE_XSAVES); setup_clear_cpu_cap(X86_FEATURE_XSAVES);
if (cmdline_find_option(boot_command_line, "clearcpuid", arg, arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
sizeof(arg)) && if (arglen <= 0)
get_option(&argptr, &bit) && return;
bit >= 0 &&
bit < NCAPINTS * 32) pr_info("Clearing CPUID bits:");
setup_clear_cpu_cap(bit); do {
res = get_option(&argptr, &bit);
if (res == 0 || res == 3)
break;
/* If the argument was too long, the last bit may be cut off */
if (res == 1 && arglen >= sizeof(arg))
break;
if (bit >= 0 && bit < NCAPINTS * 32) {
pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
setup_clear_cpu_cap(bit);
}
} while (res == 2);
pr_cont("\n");
} }
/* /*

View file

@@ -104,7 +104,6 @@ fs_initcall(nmi_warning_debugfs);
static void nmi_check_duration(struct nmiaction *action, u64 duration) static void nmi_check_duration(struct nmiaction *action, u64 duration)
{ {
u64 whole_msecs = READ_ONCE(action->max_duration);
int remainder_ns, decimal_msecs; int remainder_ns, decimal_msecs;
if (duration < nmi_longest_ns || duration < action->max_duration) if (duration < nmi_longest_ns || duration < action->max_duration)
@@ -112,12 +111,12 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
action->max_duration = duration; action->max_duration = duration;
remainder_ns = do_div(whole_msecs, (1000 * 1000)); remainder_ns = do_div(duration, (1000 * 1000));
decimal_msecs = remainder_ns / 1000; decimal_msecs = remainder_ns / 1000;
printk_ratelimited(KERN_INFO printk_ratelimited(KERN_INFO
"INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n", "INFO: NMI handler (%ps) took too long to run: %lld.%03d msecs\n",
action->handler, whole_msecs, decimal_msecs); action->handler, duration, decimal_msecs);
} }
static int nmi_handle(unsigned int type, struct pt_regs *regs) static int nmi_handle(unsigned int type, struct pt_regs *regs)

View file

@@ -6225,6 +6225,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
cond_resched_lock(&kvm->mmu_lock); cond_resched_lock(&kvm->mmu_lock);
} }
} }
kvm_mmu_commit_zap_page(kvm, &invalid_list);
spin_unlock(&kvm->mmu_lock); spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx); srcu_read_unlock(&kvm->srcu, rcu_idx);

View file

@@ -5380,6 +5380,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
* - Tell IOMMU to use legacy mode for this interrupt. * - Tell IOMMU to use legacy mode for this interrupt.
* - Retrieve ga_tag of prior interrupt remapping data. * - Retrieve ga_tag of prior interrupt remapping data.
*/ */
pi.prev_ga_tag = 0;
pi.is_guest_mode = false; pi.is_guest_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &pi); ret = irq_set_vcpu_affinity(host_irq, &pi);

View file

@@ -82,7 +82,7 @@ static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm); SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_tfm(skreq, null_tfm); skcipher_request_set_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG, skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL); NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL); skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -295,19 +295,20 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
areq->outlen = outlen; areq->outlen = outlen;
aead_request_set_callback(&areq->cra_u.aead_req, aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG, CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq); af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) : err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req); crypto_aead_decrypt(&areq->cra_u.aead_req);
/* AIO operation in progress */ /* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY) if (err == -EINPROGRESS)
return -EIOCBQUEUED; return -EIOCBQUEUED;
sock_put(sk); sock_put(sk);
} else { } else {
/* Synchronous operation */ /* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req, aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait); crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ? err = crypto_wait_req(ctx->enc ?

View file

@@ -127,7 +127,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
/* AIO operation in progress */ /* AIO operation in progress */
if (err == -EINPROGRESS || err == -EBUSY) if (err == -EINPROGRESS)
return -EIOCBQUEUED; return -EIOCBQUEUED;
sock_put(sk); sock_put(sk);

View file

@@ -545,6 +545,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
clear_bit(HCI_UART_PROTO_READY, &hu->flags); clear_bit(HCI_UART_PROTO_READY, &hu->flags);
percpu_up_write(&hu->proto_lock); percpu_up_write(&hu->proto_lock);
cancel_work_sync(&hu->init_ready);
cancel_work_sync(&hu->write_work); cancel_work_sync(&hu->write_work);
if (hdev) { if (hdev) {

View file

@@ -369,6 +369,8 @@ void hci_uart_unregister_device(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev; struct hci_dev *hdev = hu->hdev;
clear_bit(HCI_UART_PROTO_READY, &hu->flags); clear_bit(HCI_UART_PROTO_READY, &hu->flags);
cancel_work_sync(&hu->init_ready);
if (test_bit(HCI_UART_REGISTERED, &hu->flags)) if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev); hci_unregister_dev(hdev);
hci_free_dev(hdev); hci_free_dev(hdev);

View file

@@ -486,6 +486,12 @@ static int __init armada37xx_cpufreq_driver_init(void)
/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */ /* late_initcall, to guarantee the driver is loaded after A37xx clock driver */
late_initcall(armada37xx_cpufreq_driver_init); late_initcall(armada37xx_cpufreq_driver_init);
static const struct of_device_id __maybe_unused armada37xx_cpufreq_of_match[] = {
{ .compatible = "marvell,armada-3700-nb-pm" },
{ },
};
MODULE_DEVICE_TABLE(of, armada37xx_cpufreq_of_match);
MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
MODULE_DESCRIPTION("Armada 37xx cpufreq driver"); MODULE_DESCRIPTION("Armada 37xx cpufreq driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");

View file

@@ -1057,6 +1057,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ndev = n->dev; ndev = n->dev;
if (!ndev) if (!ndev)
goto free_dst; goto free_dst;
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
port_id = cxgb4_port_idx(ndev); port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev); csk = chtls_sock_create(cdev);

View file

@@ -914,9 +914,9 @@ static int tls_header_read(struct tls_hdr *thdr, struct iov_iter *from)
return (__force int)cpu_to_be16(thdr->length); return (__force int)cpu_to_be16(thdr->length);
} }
static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk) static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
{ {
return (cdev->max_host_sndbuf - sk->sk_wmem_queued); return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0);
} }
static int csk_wait_memory(struct chtls_dev *cdev, static int csk_wait_memory(struct chtls_dev *cdev,
@@ -1217,6 +1217,7 @@ int chtls_sendpage(struct sock *sk, struct page *page,
copied = 0; copied = 0;
csk = rcu_dereference_sk_user_data(sk); csk = rcu_dereference_sk_user_data(sk);
cdev = csk->cdev; cdev = csk->cdev;
lock_sock(sk);
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
err = sk_stream_wait_connect(sk, &timeo); err = sk_stream_wait_connect(sk, &timeo);

View file

@@ -530,7 +530,7 @@ static void release_ixp_crypto(struct device *dev)
if (crypt_virt) { if (crypt_virt) {
dma_free_coherent(dev, dma_free_coherent(dev,
NPE_QLEN_TOTAL * sizeof( struct crypt_ctl), NPE_QLEN * sizeof(struct crypt_ctl),
crypt_virt, crypt_phys); crypt_virt, crypt_phys);
} }
} }

View file

@@ -446,7 +446,7 @@ static void mtk_desc_dma_free(struct mtk_cryp *cryp)
static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
{ {
struct mtk_ring **ring = cryp->ring; struct mtk_ring **ring = cryp->ring;
int i, err = ENOMEM; int i;
for (i = 0; i < MTK_RING_MAX; i++) { for (i = 0; i < MTK_RING_MAX; i++) {
ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
@@ -473,14 +473,14 @@ static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
return 0; return 0;
err_cleanup: err_cleanup:
for (; i--; ) { do {
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->res_base, ring[i]->res_dma); ring[i]->res_base, ring[i]->res_dma);
dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
ring[i]->cmd_base, ring[i]->cmd_dma); ring[i]->cmd_base, ring[i]->cmd_dma);
kfree(ring[i]); kfree(ring[i]);
} } while (i--);
return err; return -ENOMEM;
} }
static int mtk_crypto_probe(struct platform_device *pdev) static int mtk_crypto_probe(struct platform_device *pdev)

View file

@@ -456,6 +456,9 @@ static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
u32 val, mask; u32 val, mask;
if (likely(ctx->digcnt))
omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
/* /*
* Setting ALGO_CONST only for the first iteration and * Setting ALGO_CONST only for the first iteration and
* CLOSE_HASH only for the last one. Note that flags mode bits * CLOSE_HASH only for the last one. Note that flags mode bits

View file

@@ -1701,11 +1701,6 @@ static int spacc_probe(struct platform_device *pdev)
goto err_clk_put; goto err_clk_put;
} }
ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
if (ret)
goto err_clk_disable;
/* /*
* Use an IRQ threshold of 50% as a default. This seems to be a * Use an IRQ threshold of 50% as a default. This seems to be a
* reasonable trade off of latency against throughput but can be * reasonable trade off of latency against throughput but can be
@@ -1713,6 +1708,10 @@
*/ */
engine->stat_irq_thresh = (engine->fifo_sz / 2); engine->stat_irq_thresh = (engine->fifo_sz / 2);
ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
if (ret)
goto err_clk_disable;
/* /*
* Configure the interrupts. We only use the STAT_CNT interrupt as we * Configure the interrupts. We only use the STAT_CNT interrupt as we
* only submit a new packet for processing when we complete another in * only submit a new packet for processing when we complete another in

View file

@@ -1072,16 +1072,15 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
PCI_DEVICE_ID_INTEL_5100_19, 0); PCI_DEVICE_ID_INTEL_5100_19, 0);
if (!einj) { if (!einj) {
ret = -ENODEV; ret = -ENODEV;
goto bail_einj; goto bail_mc_free;
} }
rc = pci_enable_device(einj); rc = pci_enable_device(einj);
if (rc < 0) { if (rc < 0) {
ret = rc; ret = rc;
goto bail_disable_einj; goto bail_einj;
} }
mci->pdev = &pdev->dev; mci->pdev = &pdev->dev;
priv = mci->pvt_info; priv = mci->pvt_info;
@@ -1147,14 +1146,14 @@ static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
bail_scrub: bail_scrub:
priv->scrub_enable = 0; priv->scrub_enable = 0;
cancel_delayed_work_sync(&(priv->i5100_scrubbing)); cancel_delayed_work_sync(&(priv->i5100_scrubbing));
edac_mc_free(mci);
bail_disable_einj:
pci_disable_device(einj); pci_disable_device(einj);
bail_einj: bail_einj:
pci_dev_put(einj); pci_dev_put(einj);
bail_mc_free:
edac_mc_free(mci);
bail_disable_ch1: bail_disable_ch1:
pci_disable_device(ch1mm); pci_disable_device(ch1mm);

View file

@ -278,7 +278,8 @@ static int ti_edac_probe(struct platform_device *pdev)
/* add EMIF ECC error handler */ /* add EMIF ECC error handler */
error_irq = platform_get_irq(pdev, 0); error_irq = platform_get_irq(pdev, 0);
if (!error_irq) { if (error_irq < 0) {
ret = error_irq;
edac_printk(KERN_ERR, EDAC_MOD_NAME, edac_printk(KERN_ERR, EDAC_MOD_NAME,
"EMIF irq number not defined.\n"); "EMIF irq number not defined.\n");
goto err; goto err;

View file

@ -2119,7 +2119,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->dpcd, intel_dp->dpcd,
sizeof(intel_dp->dpcd)); sizeof(intel_dp->dpcd));
cdv_intel_edp_panel_vdd_off(gma_encoder); cdv_intel_edp_panel_vdd_off(gma_encoder);
if (ret == 0) { if (ret <= 0) {
/* if this fails, presume the device is a ghost */ /* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n"); DRM_INFO("failed to retrieve link info, disabling eDP\n");
cdv_intel_dp_encoder_destroy(encoder); cdv_intel_dp_encoder_destroy(encoder);

View file

@ -796,7 +796,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x3b: /* Battery Strength */ case 0x3b: /* Battery Strength */
hidinput_setup_battery(device, HID_INPUT_REPORT, field); hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR; usage->type = EV_PWR;
goto ignore; return;
case 0x3c: /* Invert */ case 0x3c: /* Invert */
map_key_clear(BTN_TOOL_RUBBER); map_key_clear(BTN_TOOL_RUBBER);
@ -1052,7 +1052,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case HID_DC_BATTERYSTRENGTH: case HID_DC_BATTERYSTRENGTH:
hidinput_setup_battery(device, HID_INPUT_REPORT, field); hidinput_setup_battery(device, HID_INPUT_REPORT, field);
usage->type = EV_PWR; usage->type = EV_PWR;
goto ignore; return;
} }
goto unknown; goto unknown;

View file

@ -297,31 +297,40 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev)); struct kone_device *kone = hid_get_drvdata(dev_get_drvdata(dev));
struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
int retval = 0, difference, old_profile; int retval = 0, difference, old_profile;
struct kone_settings *settings = (struct kone_settings *)buf;
/* I need to get my data in one piece */ /* I need to get my data in one piece */
if (off != 0 || count != sizeof(struct kone_settings)) if (off != 0 || count != sizeof(struct kone_settings))
return -EINVAL; return -EINVAL;
mutex_lock(&kone->kone_lock); mutex_lock(&kone->kone_lock);
difference = memcmp(buf, &kone->settings, sizeof(struct kone_settings)); difference = memcmp(settings, &kone->settings,
sizeof(struct kone_settings));
if (difference) { if (difference) {
retval = kone_set_settings(usb_dev, if (settings->startup_profile < 1 ||
(struct kone_settings const *)buf); settings->startup_profile > 5) {
if (retval) { retval = -EINVAL;
mutex_unlock(&kone->kone_lock); goto unlock;
return retval;
} }
retval = kone_set_settings(usb_dev, settings);
if (retval)
goto unlock;
old_profile = kone->settings.startup_profile; old_profile = kone->settings.startup_profile;
memcpy(&kone->settings, buf, sizeof(struct kone_settings)); memcpy(&kone->settings, settings, sizeof(struct kone_settings));
kone_profile_activated(kone, kone->settings.startup_profile); kone_profile_activated(kone, kone->settings.startup_profile);
if (kone->settings.startup_profile != old_profile) if (kone->settings.startup_profile != old_profile)
kone_profile_report(kone, kone->settings.startup_profile); kone_profile_report(kone, kone->settings.startup_profile);
} }
unlock:
mutex_unlock(&kone->kone_lock); mutex_unlock(&kone->kone_lock);
if (retval)
return retval;
return sizeof(struct kone_settings); return sizeof(struct kone_settings);
} }
static BIN_ATTR(settings, 0660, kone_sysfs_read_settings, static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,

View file

@ -400,7 +400,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data, .read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data, .write_word_data = max34440_write_word_data,
}, },
@ -431,7 +430,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[15] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[16] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[17] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data, .read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data, .write_word_data = max34440_write_word_data,
}, },
@ -467,7 +465,6 @@ static struct pmbus_driver_info max34440_info[] = {
.func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[19] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[20] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, .func[21] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
.read_byte_data = max34440_read_byte_data,
.read_word_data = max34440_read_word_data, .read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data, .write_word_data = max34440_write_word_data,
}, },

View file

@ -588,6 +588,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
list_move_tail(&uevent->list, &list); list_move_tail(&uevent->list, &list);
} }
list_del(&ctx->list); list_del(&ctx->list);
events_reported = ctx->events_reported;
mutex_unlock(&ctx->file->mut); mutex_unlock(&ctx->file->mut);
list_for_each_entry_safe(uevent, tmp, &list, list) { list_for_each_entry_safe(uevent, tmp, &list, list) {
@ -597,7 +598,6 @@ static int ucma_free_ctx(struct ucma_context *ctx)
kfree(uevent); kfree(uevent);
} }
events_reported = ctx->events_reported;
mutex_destroy(&ctx->mutex); mutex_destroy(&ctx->mutex);
kfree(ctx); kfree(ctx);
return events_reported; return events_reported;
@ -1476,7 +1476,9 @@ static ssize_t ucma_process_join(struct ucma_file *file,
return 0; return 0;
err3: err3:
mutex_lock(&ctx->mutex);
rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc); ucma_cleanup_mc_events(mc);
err2: err2:
mutex_lock(&mut); mutex_lock(&mut);
@ -1644,7 +1646,9 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
cur_file = ctx->file; cur_file = ctx->file;
if (cur_file == new_file) { if (cur_file == new_file) {
mutex_lock(&cur_file->mut);
resp.events_reported = ctx->events_reported; resp.events_reported = ctx->events_reported;
mutex_unlock(&cur_file->mut);
goto response; goto response;
} }

View file

@ -307,6 +307,9 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
if (!sriov->is_going_down) { if (!sriov->is_going_down) {
id->scheduled_delete = 1; id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT); schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
} else if (id->scheduled_delete) {
/* Adjust timeout if already scheduled */
mod_delayed_work(system_wq, &id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
} }
spin_unlock_irqrestore(&sriov->going_down_lock, flags); spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock); spin_unlock(&sriov->id_map_lock);

View file

@ -1305,6 +1305,18 @@ static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags); spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
} }
static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
{
unsigned long flags;
struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
queue_work(ctx->wi_wq, &ctx->work);
spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
struct mlx4_ib_demux_pv_qp *tun_qp, struct mlx4_ib_demux_pv_qp *tun_qp,
int index) int index)
@ -2000,7 +2012,8 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
cq_size *= 2; cq_size *= 2;
cq_attr.cqe = cq_size; cq_attr.cqe = cq_size;
ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, ctx->cq = ib_create_cq(ctx->ib_dev,
create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
NULL, ctx, &cq_attr); NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) { if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq); ret = PTR_ERR(ctx->cq);
@ -2037,6 +2050,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
if (ret) { if (ret) {
@ -2180,7 +2194,7 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_mcg; goto err_mcg;
} }
snprintf(name, sizeof name, "mlx4_ibt%d", port); snprintf(name, sizeof(name), "mlx4_ibt%d", port);
ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wq) { if (!ctx->wq) {
pr_err("Failed to create tunnelling WQ for port %d\n", port); pr_err("Failed to create tunnelling WQ for port %d\n", port);
@ -2188,7 +2202,15 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
goto err_wq; goto err_wq;
} }
snprintf(name, sizeof name, "mlx4_ibud%d", port); snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->wi_wq) {
pr_err("Failed to create wire WQ for port %d\n", port);
ret = -ENOMEM;
goto err_wiwq;
}
snprintf(name, sizeof(name), "mlx4_ibud%d", port);
ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
if (!ctx->ud_wq) { if (!ctx->ud_wq) {
pr_err("Failed to create up/down WQ for port %d\n", port); pr_err("Failed to create up/down WQ for port %d\n", port);
@ -2199,6 +2221,10 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
return 0; return 0;
err_udwq: err_udwq:
destroy_workqueue(ctx->wi_wq);
ctx->wi_wq = NULL;
err_wiwq:
destroy_workqueue(ctx->wq); destroy_workqueue(ctx->wq);
ctx->wq = NULL; ctx->wq = NULL;
@ -2246,12 +2272,14 @@ static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
} }
flush_workqueue(ctx->wq); flush_workqueue(ctx->wq);
flush_workqueue(ctx->wi_wq);
for (i = 0; i < dev->dev->caps.sqp_demux; i++) { for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
free_pv_object(dev, i, ctx->port); free_pv_object(dev, i, ctx->port);
} }
kfree(ctx->tun); kfree(ctx->tun);
destroy_workqueue(ctx->ud_wq); destroy_workqueue(ctx->ud_wq);
destroy_workqueue(ctx->wi_wq);
destroy_workqueue(ctx->wq); destroy_workqueue(ctx->wq);
} }
} }

View file

@ -464,6 +464,7 @@ struct mlx4_ib_demux_pv_ctx {
struct ib_pd *pd; struct ib_pd *pd;
struct work_struct work; struct work_struct work;
struct workqueue_struct *wq; struct workqueue_struct *wq;
struct workqueue_struct *wi_wq;
struct mlx4_ib_demux_pv_qp qp[2]; struct mlx4_ib_demux_pv_qp qp[2];
}; };
@ -471,6 +472,7 @@ struct mlx4_ib_demux_ctx {
struct ib_device *ib_dev; struct ib_device *ib_dev;
int port; int port;
struct workqueue_struct *wq; struct workqueue_struct *wq;
struct workqueue_struct *wi_wq;
struct workqueue_struct *ud_wq; struct workqueue_struct *ud_wq;
spinlock_t ud_lock; spinlock_t ud_lock;
atomic64_t subnet_prefix; atomic64_t subnet_prefix;

View file

@ -604,7 +604,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx); qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);
/* Part 2 - check capabilities */ /* Part 2 - check capabilities */
page_size = ~dev->attr.page_size_caps + 1; page_size = ~qed_attr->page_size_caps + 1;
if (page_size > PAGE_SIZE) { if (page_size > PAGE_SIZE) {
DP_ERR(dev, DP_ERR(dev,
"Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n", "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",

View file

@ -2522,7 +2522,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges;
qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; qp_attr->cap.max_inline_data = dev->attr.max_inline;
qp_init_attr->cap = qp_attr->cap; qp_init_attr->cap = qp_attr->cap;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

View file

@ -768,7 +768,8 @@ static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
if (ret) { if (ret) {
info->set_power(&client->dev, 0); if (info->set_power)
info->set_power(&client->dev, 0);
return ret; return ret;
} }

View file

@ -919,8 +919,8 @@ static const struct cec_adap_ops tc358743_cec_adap_ops = {
.adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable, .adap_monitor_all_enable = tc358743_cec_adap_monitor_all_enable,
}; };
static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus, static void tc358743_cec_handler(struct v4l2_subdev *sd, u16 intstatus,
bool *handled) bool *handled)
{ {
struct tc358743_state *state = to_state(sd); struct tc358743_state *state = to_state(sd);
unsigned int cec_rxint, cec_txint; unsigned int cec_rxint, cec_txint;
@ -953,7 +953,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
cec_transmit_attempt_done(state->cec_adap, cec_transmit_attempt_done(state->cec_adap,
CEC_TX_STATUS_ERROR); CEC_TX_STATUS_ERROR);
} }
*handled = true; if (handled)
*handled = true;
} }
if ((intstatus & MASK_CEC_RINT) && if ((intstatus & MASK_CEC_RINT) &&
(cec_rxint & MASK_CECRIEND)) { (cec_rxint & MASK_CECRIEND)) {
@ -968,7 +969,8 @@ static void tc358743_cec_isr(struct v4l2_subdev *sd, u16 intstatus,
msg.msg[i] = v & 0xff; msg.msg[i] = v & 0xff;
} }
cec_received_msg(state->cec_adap, &msg); cec_received_msg(state->cec_adap, &msg);
*handled = true; if (handled)
*handled = true;
} }
i2c_wr16(sd, INTSTATUS, i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
@ -1432,7 +1434,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
#ifdef CONFIG_VIDEO_TC358743_CEC #ifdef CONFIG_VIDEO_TC358743_CEC
if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) { if (intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)) {
tc358743_cec_isr(sd, intstatus, handled); tc358743_cec_handler(sd, intstatus, handled);
i2c_wr16(sd, INTSTATUS, i2c_wr16(sd, INTSTATUS,
intstatus & (MASK_CEC_RINT | MASK_CEC_TINT)); intstatus & (MASK_CEC_RINT | MASK_CEC_TINT));
intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT); intstatus &= ~(MASK_CEC_RINT | MASK_CEC_TINT);
@ -1461,7 +1463,7 @@ static int tc358743_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
static irqreturn_t tc358743_irq_handler(int irq, void *dev_id) static irqreturn_t tc358743_irq_handler(int irq, void *dev_id)
{ {
struct tc358743_state *state = dev_id; struct tc358743_state *state = dev_id;
bool handled; bool handled = false;
tc358743_isr(&state->sd, 0, &handled); tc358743_isr(&state->sd, 0, &handled);

View file

@ -1257,11 +1257,9 @@ static int fimc_md_get_pinctrl(struct fimc_md *fmd)
if (IS_ERR(pctl->state_default)) if (IS_ERR(pctl->state_default))
return PTR_ERR(pctl->state_default); return PTR_ERR(pctl->state_default);
/* PINCTRL_STATE_IDLE is optional */
pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl, pctl->state_idle = pinctrl_lookup_state(pctl->pinctrl,
PINCTRL_STATE_IDLE); PINCTRL_STATE_IDLE);
if (IS_ERR(pctl->state_idle))
return PTR_ERR(pctl->state_idle);
return 0; return 0;
} }

View file

@ -929,8 +929,11 @@ static int emmaprp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcdev); platform_set_drvdata(pdev, pcdev);
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq < 0) if (irq < 0) {
return irq; ret = irq;
goto rel_vdev;
}
ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0,
dev_name(&pdev->dev), pcdev); dev_name(&pdev->dev), pcdev);
if (ret) if (ret)

View file

@ -2265,8 +2265,10 @@ static int isp_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, i); mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
isp->mmio_base[map_idx] = isp->mmio_base[map_idx] =
devm_ioremap_resource(isp->dev, mem); devm_ioremap_resource(isp->dev, mem);
if (IS_ERR(isp->mmio_base[map_idx])) if (IS_ERR(isp->mmio_base[map_idx])) {
return PTR_ERR(isp->mmio_base[map_idx]); ret = PTR_ERR(isp->mmio_base[map_idx]);
goto error;
}
} }
ret = isp_get_clocks(isp); ret = isp_get_clocks(isp);

View file

@ -176,8 +176,10 @@ static int csiphy_set_power(struct v4l2_subdev *sd, int on)
int ret; int ret;
ret = pm_runtime_get_sync(dev); ret = pm_runtime_get_sync(dev);
if (ret < 0) if (ret < 0) {
pm_runtime_put_sync(dev);
return ret; return ret;
}
ret = csiphy_set_clock_rates(csiphy); ret = csiphy_set_clock_rates(csiphy);
if (ret < 0) { if (ret < 0) {

View file

@ -103,8 +103,10 @@ int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0; return 0;
ret = pm_runtime_get_sync(fcp->dev); ret = pm_runtime_get_sync(fcp->dev);
if (ret < 0) if (ret < 0) {
pm_runtime_put_noidle(fcp->dev);
return ret; return ret;
}
return 0; return 0;
} }

View file

@ -1323,8 +1323,10 @@ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel)
int ret; int ret;
ret = pm_runtime_get_sync(vin->dev); ret = pm_runtime_get_sync(vin->dev);
if (ret < 0) if (ret < 0) {
pm_runtime_put_noidle(vin->dev);
return ret; return ret;
}
/* Make register writes take effect immediately. */ /* Make register writes take effect immediately. */
vnmc = rvin_read(vin, VNMC_REG); vnmc = rvin_read(vin, VNMC_REG);

View file

@ -89,6 +89,7 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
ret = pm_runtime_get_sync(rga->dev); ret = pm_runtime_get_sync(rga->dev);
if (ret < 0) { if (ret < 0) {
pm_runtime_put_noidle(rga->dev);
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED); rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
return ret; return ret;
} }

View file

@ -83,8 +83,10 @@ int s5p_mfc_power_on(void)
int i, ret = 0; int i, ret = 0;
ret = pm_runtime_get_sync(pm->device); ret = pm_runtime_get_sync(pm->device);
if (ret < 0) if (ret < 0) {
pm_runtime_put_noidle(pm->device);
return ret; return ret;
}
/* clock control */ /* clock control */
for (i = 0; i < pm->num_clocks; i++) { for (i = 0; i < pm->num_clocks; i++) {

View file

@ -587,7 +587,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
if (ret < 0) { if (ret < 0) {
dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n", dev_err(dcmi->dev, "%s: Failed to start streaming, cannot get sync (%d)\n",
__func__, ret); __func__, ret);
goto err_release_buffers; goto err_pm_put;
} }
/* Enable stream on the sub device */ /* Enable stream on the sub device */
@ -682,8 +682,6 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
err_pm_put: err_pm_put:
pm_runtime_put(dcmi->dev); pm_runtime_put(dcmi->dev);
err_release_buffers:
spin_lock_irq(&dcmi->irqlock); spin_lock_irq(&dcmi->irqlock);
/* /*
* Return all buffers to vb2 in QUEUED state. * Return all buffers to vb2 in QUEUED state.

View file

@ -2451,6 +2451,8 @@ static int vpe_runtime_get(struct platform_device *pdev)
r = pm_runtime_get_sync(&pdev->dev); r = pm_runtime_get_sync(&pdev->dev);
WARN_ON(r < 0); WARN_ON(r < 0);
if (r)
pm_runtime_put_noidle(&pdev->dev);
return r < 0 ? r : 0; return r < 0 ? r : 0;
} }

View file

@ -499,7 +499,7 @@ static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer)
case TUNER_TENA_9533_DI: case TUNER_TENA_9533_DI:
case TUNER_YMEC_TVF_5533MF: case TUNER_YMEC_TVF_5533MF:
tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n"); tuner_dbg("This tuner doesn't have FM. Most cards have a TEA5767 for FM\n");
return 0; return -EINVAL;
case TUNER_PHILIPS_FM1216ME_MK3: case TUNER_PHILIPS_FM1216ME_MK3:
case TUNER_PHILIPS_FM1236_MK3: case TUNER_PHILIPS_FM1236_MK3:
case TUNER_PHILIPS_FMD1216ME_MK3: case TUNER_PHILIPS_FMD1216ME_MK3:
@ -701,7 +701,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe,
TUNER_RATIO_SELECT_50; /* 50 kHz step */ TUNER_RATIO_SELECT_50; /* 50 kHz step */
/* Bandswitch byte */ /* Bandswitch byte */
simple_radio_bandswitch(fe, &buffer[0]); if (simple_radio_bandswitch(fe, &buffer[0]))
return 0;
/* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps
freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = freq * (1 Mhz / 16000 V4L steps) * (20 PLL steps / 1 MHz) =

View file

@ -778,12 +778,16 @@ static s32 uvc_get_le_value(struct uvc_control_mapping *mapping,
offset &= 7; offset &= 7;
mask = ((1LL << bits) - 1) << offset; mask = ((1LL << bits) - 1) << offset;
for (; bits > 0; data++) { while (1) {
u8 byte = *data & mask; u8 byte = *data & mask;
value |= offset > 0 ? (byte >> offset) : (byte << (-offset)); value |= offset > 0 ? (byte >> offset) : (byte << (-offset));
bits -= 8 - (offset > 0 ? offset : 0); bits -= 8 - (offset > 0 ? offset : 0);
if (bits <= 0)
break;
offset -= 8; offset -= 8;
mask = (1 << bits) - 1; mask = (1 << bits) - 1;
data++;
} }
/* Sign-extend the value if needed. */ /* Sign-extend the value if needed. */

View file

@ -78,10 +78,45 @@ static int uvc_mc_init_entity(struct uvc_video_chain *chain,
int ret; int ret;
if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
u32 function;
v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
strlcpy(entity->subdev.name, entity->name, strlcpy(entity->subdev.name, entity->name,
sizeof(entity->subdev.name)); sizeof(entity->subdev.name));
switch (UVC_ENTITY_TYPE(entity)) {
case UVC_VC_SELECTOR_UNIT:
function = MEDIA_ENT_F_VID_MUX;
break;
case UVC_VC_PROCESSING_UNIT:
case UVC_VC_EXTENSION_UNIT:
/* For lack of a better option. */
function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
break;
case UVC_COMPOSITE_CONNECTOR:
case UVC_COMPONENT_CONNECTOR:
function = MEDIA_ENT_F_CONN_COMPOSITE;
break;
case UVC_SVIDEO_CONNECTOR:
function = MEDIA_ENT_F_CONN_SVIDEO;
break;
case UVC_ITT_CAMERA:
function = MEDIA_ENT_F_CAM_SENSOR;
break;
case UVC_TT_VENDOR_SPECIFIC:
case UVC_ITT_VENDOR_SPECIFIC:
case UVC_ITT_MEDIA_TRANSPORT_INPUT:
case UVC_OTT_VENDOR_SPECIFIC:
case UVC_OTT_DISPLAY:
case UVC_OTT_MEDIA_TRANSPORT_OUTPUT:
case UVC_EXTERNAL_VENDOR_SPECIFIC:
default:
function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
break;
}
entity->subdev.entity.function = function;
ret = media_entity_pads_init(&entity->subdev.entity, ret = media_entity_pads_init(&entity->subdev.entity,
entity->num_pads, entity->pads); entity->num_pads, entity->pads);

View file

@ -1429,8 +1429,14 @@ static int sm501_plat_probe(struct platform_device *dev)
goto err_claim; goto err_claim;
} }
return sm501_init_dev(sm); ret = sm501_init_dev(sm);
if (ret)
goto err_unmap;
return 0;
err_unmap:
iounmap(sm->regs);
err_claim: err_claim:
release_resource(sm->regs_claim); release_resource(sm->regs_claim);
kfree(sm->regs_claim); kfree(sm->regs_claim);

View file

@ -1403,6 +1403,8 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
NULL); NULL);
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
if (nr_pages != pinned_pages->nr_pages) { if (nr_pages != pinned_pages->nr_pages) {
if (pinned_pages->nr_pages < 0)
pinned_pages->nr_pages = 0;
if (try_upgrade) { if (try_upgrade) {
if (ulimit) if (ulimit)
__scif_dec_pinned_vm_lock(mm, __scif_dec_pinned_vm_lock(mm,
@ -1423,7 +1425,6 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
if (pinned_pages->nr_pages < nr_pages) { if (pinned_pages->nr_pages < nr_pages) {
err = -EFAULT; err = -EFAULT;
pinned_pages->nr_pages = nr_pages;
goto dec_pinned; goto dec_pinned;
} }
@ -1436,7 +1437,6 @@ int __scif_pin_pages(void *addr, size_t len, int *out_prot,
__scif_dec_pinned_vm_lock(mm, nr_pages, 0); __scif_dec_pinned_vm_lock(mm, nr_pages, 0);
/* Something went wrong! Rollback */ /* Something went wrong! Rollback */
error_unmap: error_unmap:
pinned_pages->nr_pages = nr_pages;
scif_destroy_pinned_pages(pinned_pages); scif_destroy_pinned_pages(pinned_pages);
*pages = NULL; *pages = NULL;
dev_dbg(scif_info.mdev.this_device, dev_dbg(scif_info.mdev.this_device,

View file

@ -671,8 +671,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)produce_q->kernel_if->num_pages) { if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)", pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval); retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page, if (retval > 0)
retval, false); qp_release_pages(produce_q->kernel_if->u.h.header_page,
retval, false);
err = VMCI_ERROR_NO_MEM; err = VMCI_ERROR_NO_MEM;
goto out; goto out;
} }
@ -683,8 +684,9 @@ static int qp_host_get_user_memory(u64 produce_uva,
if (retval < (int)consume_q->kernel_if->num_pages) { if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)", pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval); retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page, if (retval > 0)
retval, false); qp_release_pages(consume_q->kernel_if->u.h.header_page,
retval, false);
qp_release_pages(produce_q->kernel_if->u.h.header_page, qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false); produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM; err = VMCI_ERROR_NO_MEM;

View file

@ -402,6 +402,17 @@ static int lpddr2_nvm_lock(struct mtd_info *mtd, loff_t start_add,
return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK); return lpddr2_nvm_do_block_op(mtd, start_add, len, LPDDR2_NVM_LOCK);
} }
static const struct mtd_info lpddr2_nvm_mtd_info = {
.type = MTD_RAM,
.writesize = 1,
.flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
._read = lpddr2_nvm_read,
._write = lpddr2_nvm_write,
._erase = lpddr2_nvm_erase,
._unlock = lpddr2_nvm_unlock,
._lock = lpddr2_nvm_lock,
};
/* /*
* lpddr2_nvm driver probe method * lpddr2_nvm driver probe method
*/ */
@ -442,6 +453,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
.pfow_base = OW_BASE_ADDRESS, .pfow_base = OW_BASE_ADDRESS,
.fldrv_priv = pcm_data, .fldrv_priv = pcm_data,
}; };
if (IS_ERR(map->virt)) if (IS_ERR(map->virt))
return PTR_ERR(map->virt); return PTR_ERR(map->virt);
@ -453,22 +465,13 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
return PTR_ERR(pcm_data->ctl_regs); return PTR_ERR(pcm_data->ctl_regs);
/* Populate mtd_info data structure */ /* Populate mtd_info data structure */
*mtd = (struct mtd_info) { *mtd = lpddr2_nvm_mtd_info;
.dev = { .parent = &pdev->dev }, mtd->dev.parent = &pdev->dev;
.name = pdev->dev.init_name, mtd->name = pdev->dev.init_name;
.type = MTD_RAM, mtd->priv = map;
.priv = map, mtd->size = resource_size(add_range);
.size = resource_size(add_range), mtd->erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width;
.erasesize = ERASE_BLOCKSIZE * pcm_data->bus_width, mtd->writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width;
.writesize = 1,
.writebufsize = WRITE_BUFFSIZE * pcm_data->bus_width,
.flags = (MTD_CAP_NVRAM | MTD_POWERUP_LOCK),
._read = lpddr2_nvm_read,
._write = lpddr2_nvm_write,
._erase = lpddr2_nvm_erase,
._unlock = lpddr2_nvm_unlock,
._lock = lpddr2_nvm_lock,
};
/* Verify the presence of the device looking for PFOW string */ /* Verify the presence of the device looking for PFOW string */
if (!lpddr2_nvm_pfow_present(map)) { if (!lpddr2_nvm_pfow_present(map)) {

View file

@ -293,12 +293,13 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE, kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
record_size - MTDOOPS_HEADER_SIZE, NULL); record_size - MTDOOPS_HEADER_SIZE, NULL);
/* Panics must be written immediately */ if (reason != KMSG_DUMP_OOPS) {
if (reason != KMSG_DUMP_OOPS) /* Panics must be written immediately */
mtdoops_write(cxt, 1); mtdoops_write(cxt, 1);
} else {
/* For other cases, schedule work to write it "nicely" */ /* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write); schedule_work(&cxt->work_write);
}
} }
static void mtdoops_notify_add(struct mtd_info *mtd) static void mtdoops_notify_add(struct mtd_info *mtd)

View file

@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
const char *name; const char *name;
}; };
/**
* struct rtl8366_vlan_mc - Virtual LAN member configuration
*/
struct rtl8366_vlan_mc { struct rtl8366_vlan_mc {
u16 vid; u16 vid;
u16 untag; u16 untag;
@ -119,7 +122,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used); int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid); u32 untag, u32 fid);
int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid); unsigned int vid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable); int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);

View file

@ -36,13 +36,114 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
} }
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used); EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, /**
u32 untag, u32 fid) * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
* @smi: the Realtek SMI device instance
* @vid: the VLAN ID to look up or allocate
* @vlanmc: the pointer will be assigned to a pointer to a valid member config
* if successful
* @return: index of a new member config or negative error number
*/
static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
struct rtl8366_vlan_mc *vlanmc)
{ {
struct rtl8366_vlan_4k vlan4k; struct rtl8366_vlan_4k vlan4k;
int ret; int ret;
int i; int i;
/* Try to find an existing member config entry for this VID */
for (i = 0; i < smi->num_vlan_mc; i++) {
ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
if (ret) {
dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vid == vlanmc->vid)
return i;
}
/* We have no MC entry for this VID, try to find an empty one */
for (i = 0; i < smi->num_vlan_mc; i++) {
ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
if (ret) {
dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
if (vlanmc->vid == 0 && vlanmc->member == 0) {
/* Update the entry from the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
if (ret) {
dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
vlanmc->vid = vid;
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
if (ret) {
dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
i, vid);
return i;
}
}
/* MC table is full, try to find an unused entry and replace it */
for (i = 0; i < smi->num_vlan_mc; i++) {
int used;
ret = rtl8366_mc_is_used(smi, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
if (ret)
return ret;
vlanmc->vid = vid;
vlanmc->member = vlan4k.member;
vlanmc->untag = vlan4k.untag;
vlanmc->fid = vlan4k.fid;
ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
if (ret) {
dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
i, vid);
return ret;
}
dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
i, vid);
return i;
}
}
dev_err(smi->dev, "all VLAN member configurations are in use\n");
return -ENOSPC;
}
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid)
{
struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k;
int mc;
int ret;
if (!smi->ops->is_vlan_valid(smi, vid))
return -EINVAL;
dev_dbg(smi->dev, dev_dbg(smi->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, member, untag); vid, member, untag);
@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag); vid, vlan4k.member, vlan4k.untag);
/* Try to find an existing MC entry for this VID */ /* Find or allocate a member config for this VID */
for (i = 0; i < smi->num_vlan_mc; i++) { ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
struct rtl8366_vlan_mc vlanmc; if (ret < 0)
return ret;
mc = ret;
ret = smi->ops->get_vlan_mc(smi, i, &vlanmc); /* Update the MC entry */
if (ret) vlanmc.member |= member;
return ret; vlanmc.untag |= untag;
vlanmc.fid = fid;
if (vid == vlanmc.vid) { /* Commit updates to the MC entry */
/* update the MC entry */ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
vlanmc.member |= member; if (ret)
vlanmc.untag |= untag; dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
vlanmc.fid = fid; mc, vid);
else
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); dev_dbg(smi->dev,
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
dev_dbg(smi->dev, vid, vlanmc.member, vlanmc.untag);
"resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
vid, vlanmc.member, vlanmc.untag);
break;
}
}
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(rtl8366_set_vlan); EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
{
struct rtl8366_vlan_mc vlanmc;
int ret;
int index;
ret = smi->ops->get_mc_index(smi, port, &index);
if (ret)
return ret;
ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
if (ret)
return ret;
*val = vlanmc.vid;
return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port, int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid) unsigned int vid)
{ {
struct rtl8366_vlan_mc vlanmc; struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k; int mc;
int ret; int ret;
int i;
/* Try to find an existing MC entry for this VID */ if (!smi->ops->is_vlan_valid(smi, vid))
for (i = 0; i < smi->num_vlan_mc; i++) { return -EINVAL;
ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
if (ret)
return ret;
if (vid == vlanmc.vid) { /* Find or allocate a member config for this VID */
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
if (ret) if (ret < 0)
return ret; return ret;
mc = ret;
ret = smi->ops->set_mc_index(smi, port, i); ret = smi->ops->set_mc_index(smi, port, mc);
return ret; if (ret) {
} dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
mc, port);
return ret;
} }
/* We have no MC entry for this VID, try to find an empty one */ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
for (i = 0; i < smi->num_vlan_mc; i++) { port, vid, mc);
ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
if (ret)
return ret;
if (vlanmc.vid == 0 && vlanmc.member == 0) { return 0;
/* Update the entry from the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
if (ret)
return ret;
vlanmc.vid = vid;
vlanmc.member = vlan4k.member;
vlanmc.untag = vlan4k.untag;
vlanmc.fid = vlan4k.fid;
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
if (ret)
return ret;
ret = smi->ops->set_mc_index(smi, port, i);
return ret;
}
}
/* MC table is full, try to find an unused entry and replace it */
for (i = 0; i < smi->num_vlan_mc; i++) {
int used;
ret = rtl8366_mc_is_used(smi, i, &used);
if (ret)
return ret;
if (!used) {
/* Update the entry from the 4K table */
ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
if (ret)
return ret;
vlanmc.vid = vid;
vlanmc.member = vlan4k.member;
vlanmc.untag = vlan4k.untag;
vlanmc.fid = vlan4k.fid;
ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
if (ret)
return ret;
ret = smi->ops->set_mc_index(smi, port, i);
return ret;
}
}
dev_err(smi->dev,
"all VLAN member configurations are in use\n");
return -ENOSPC;
} }
EXPORT_SYMBOL_GPL(rtl8366_set_pvid); EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
@ -389,7 +415,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (!smi->ops->is_vlan_valid(smi, vid)) if (!smi->ops->is_vlan_valid(smi, vid))
return; return;
dev_info(smi->dev, "add VLAN on port %d, %s, %s\n", dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
vlan->vid_begin,
port, port,
untagged ? "untagged" : "tagged", untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID"); pvid ? " PVID" : "no PVID");
@ -398,34 +425,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
dev_err(smi->dev, "port is DSA or CPU port\n"); dev_err(smi->dev, "port is DSA or CPU port\n");
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
int pvid_val = 0;
dev_info(smi->dev, "add VLAN %04x\n", vid);
member |= BIT(port); member |= BIT(port);
if (untagged) if (untagged)
untag |= BIT(port); untag |= BIT(port);
/* To ensure that we have a valid MC entry for this VLAN,
* initialize the port VLAN ID here.
*/
ret = rtl8366_get_pvid(smi, port, &pvid_val);
if (ret < 0) {
dev_err(smi->dev, "could not lookup PVID for port %d\n",
port);
return;
}
if (pvid_val == 0) {
ret = rtl8366_set_pvid(smi, port, vid);
if (ret < 0)
return;
}
ret = rtl8366_set_vlan(smi, vid, member, untag, 0); ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
if (ret) if (ret)
dev_err(smi->dev, dev_err(smi->dev,
"failed to set up VLAN %04x", "failed to set up VLAN %04x",
vid); vid);
if (!pvid)
continue;
ret = rtl8366_set_pvid(smi, port, vid);
if (ret)
dev_err(smi->dev,
"failed to set PVID on port %d to VLAN %04x",
port, vid);
if (!ret)
dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
vid, port);
} }
} }
EXPORT_SYMBOL_GPL(rtl8366_vlan_add); EXPORT_SYMBOL_GPL(rtl8366_vlan_add);

View file

@ -1270,7 +1270,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
if (smi->vlan4k_enabled) if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1; max = RTL8366RB_NUM_VIDS - 1;
if (vlan == 0 || vlan >= max) if (vlan == 0 || vlan > max)
return false; return false;
return true; return true;

View file

@ -171,6 +171,7 @@ struct enic {
u16 num_vfs; u16 num_vfs;
#endif #endif
spinlock_t enic_api_lock; spinlock_t enic_api_lock;
bool enic_api_busy;
struct enic_port_profile *pp; struct enic_port_profile *pp;
/* work queue cache line section */ /* work queue cache line section */

View file

@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
struct vnic_dev *vdev = enic->vdev; struct vnic_dev *vdev = enic->vdev;
spin_lock(&enic->enic_api_lock); spin_lock(&enic->enic_api_lock);
while (enic->enic_api_busy) {
spin_unlock(&enic->enic_api_lock);
cpu_relax();
spin_lock(&enic->enic_api_lock);
}
spin_lock_bh(&enic->devcmd_lock); spin_lock_bh(&enic->devcmd_lock);
vnic_dev_cmd_proxy_by_index_start(vdev, vf); vnic_dev_cmd_proxy_by_index_start(vdev, vf);

View file

@ -2142,8 +2142,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
int done; int done;
int err; int err;
BUG_ON(in_interrupt());
err = start(vdev, arg); err = start(vdev, arg);
if (err) if (err)
return err; return err;
@ -2331,6 +2329,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable); rss_hash_bits, rss_base_cpu, rss_enable);
} }
static void enic_set_api_busy(struct enic *enic, bool busy)
{
spin_lock(&enic->enic_api_lock);
enic->enic_api_busy = busy;
spin_unlock(&enic->enic_api_lock);
}
static void enic_reset(struct work_struct *work) static void enic_reset(struct work_struct *work)
{ {
struct enic *enic = container_of(work, struct enic, reset); struct enic *enic = container_of(work, struct enic, reset);
@ -2340,7 +2345,9 @@ static void enic_reset(struct work_struct *work)
rtnl_lock(); rtnl_lock();
spin_lock(&enic->enic_api_lock); /* Stop any activity from infiniband */
enic_set_api_busy(enic, true);
enic_stop(enic->netdev); enic_stop(enic->netdev);
enic_dev_soft_reset(enic); enic_dev_soft_reset(enic);
enic_reset_addr_lists(enic); enic_reset_addr_lists(enic);
@ -2348,7 +2355,10 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic); enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic); enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev); enic_open(enic->netdev);
spin_unlock(&enic->enic_api_lock);
/* Allow infiniband to fiddle with the device again */
enic_set_api_busy(enic, false);
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock(); rtnl_unlock();
@ -2360,7 +2370,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
rtnl_lock(); rtnl_lock();
spin_lock(&enic->enic_api_lock); /* Stop any activity from infiniband */
enic_set_api_busy(enic, true);
enic_dev_hang_notify(enic); enic_dev_hang_notify(enic);
enic_stop(enic->netdev); enic_stop(enic->netdev);
enic_dev_hang_reset(enic); enic_dev_hang_reset(enic);
@ -2369,7 +2381,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic); enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic); enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev); enic_open(enic->netdev);
spin_unlock(&enic->enic_api_lock);
/* Allow infiniband to fiddle with the device again */
enic_set_api_busy(enic, false);
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev); call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock(); rtnl_unlock();

View file

@ -1897,6 +1897,27 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return ret; return ret;
} }
static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct phy_device *phy_dev = ndev->phydev;
if (phy_dev) {
phy_reset_after_clk_enable(phy_dev);
} else if (fep->phy_node) {
/*
* If the PHY still is not bound to the MAC, but there is
* OF PHY node and a matching PHY device instance already,
* use the OF PHY node to obtain the PHY device instance,
* and then use that PHY device instance when triggering
* the PHY reset.
*/
phy_dev = of_phy_find_device(fep->phy_node);
phy_reset_after_clk_enable(phy_dev);
put_device(&phy_dev->mdio.dev);
}
}
static int fec_enet_clk_enable(struct net_device *ndev, bool enable) static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{ {
struct fec_enet_private *fep = netdev_priv(ndev); struct fec_enet_private *fep = netdev_priv(ndev);
@ -1923,7 +1944,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret) if (ret)
goto failed_clk_ref; goto failed_clk_ref;
phy_reset_after_clk_enable(ndev->phydev); fec_enet_phy_reset_after_clk_enable(ndev);
} else { } else {
clk_disable_unprepare(fep->clk_enet_out); clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) { if (fep->clk_ptp) {
@ -2929,16 +2950,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */ /* Init MAC prior to mii bus probe */
fec_restart(ndev); fec_restart(ndev);
/* Probe and connect to PHY when open the interface */
ret = fec_enet_mii_probe(ndev);
if (ret)
goto err_enet_mii_probe;
/* Call phy_reset_after_clk_enable() again if it failed during /* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed. * phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/ */
if (reset_again) if (reset_again)
phy_reset_after_clk_enable(ndev->phydev); fec_enet_phy_reset_after_clk_enable(ndev);
/* Probe and connect to PHY when open the interface */
ret = fec_enet_mii_probe(ndev);
if (ret)
goto err_enet_mii_probe;
if (fep->quirks & FEC_QUIRK_ERR006687) if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used(); imx6q_cpuidle_fec_irqs_used();

View file

@ -1330,6 +1330,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter); int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter); int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter); int lrg_pkt = ibmveth_rxq_large_packet(adapter);
__sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter); skb = ibmveth_rxq_get_buffer(adapter);
@ -1366,16 +1367,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length); skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
/* PHYP without PLSO support places a -1 in the ip
* checksum for large send frames.
*/
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
struct iphdr *iph = (struct iphdr *)skb->data;
iph_check = iph->check;
}
if ((length > netdev->mtu + ETH_HLEN) ||
lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
if (csum_good) { if (csum_good) {
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
ibmveth_rx_csum_helper(skb, adapter); ibmveth_rx_csum_helper(skb, adapter);
} }
if (length > netdev->mtu + ETH_HLEN) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
napi_gro_receive(napi, skb); /* send it up */ napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++; netdev->stats.rx_packets++;

View file

@ -1113,7 +1113,7 @@ static int korina_probe(struct platform_device *pdev)
return rc; return rc;
probe_err_register: probe_err_register:
kfree(lp->td_ring); kfree(KSEG0ADDR(lp->td_ring));
probe_err_td_ring: probe_err_td_ring:
iounmap(lp->tx_dma_regs); iounmap(lp->tx_dma_regs);
probe_err_dma_tx: probe_err_dma_tx:
@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs); iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs); iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs); iounmap(lp->tx_dma_regs);
kfree(KSEG0ADDR(lp->td_ring));
unregister_netdev(bif->dev); unregister_netdev(bif->dev);
free_netdev(bif->dev); free_netdev(bif->dev);

View file

@ -945,6 +945,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true; bool clean_complete = true;
int done; int done;
if (!budget)
return 0;
if (priv->tx_ring_num[TX_XDP]) { if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring]; xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) { if (xdp_tx_cq->xdp_busy) {

View file

@ -343,7 +343,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma, .dma = tx_info->map0_dma,
}; };
if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma, dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir); PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page); put_page(tx_info->page);

View file

@ -475,8 +475,9 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
switch (clock->ptp_info.pin_config[pin].func) { switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS: case PTP_PF_EXTTS:
ptp_event.index = pin; ptp_event.index = pin;
ptp_event.timestamp = timecounter_cyc2time(&clock->tc, ptp_event.timestamp =
be64_to_cpu(eqe->data.pps.time_stamp)); mlx5_timecounter_cyc2time(clock,
be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) { if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR; ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real = ptp_event.pps_times.ts_real =

View file

@ -4111,6 +4111,27 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
rtl_unlock_work(tp); rtl_unlock_work(tp);
} }
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
}
}
static int rtl_set_mac_address(struct net_device *dev, void *p) static int rtl_set_mac_address(struct net_device *dev, void *p)
{ {
struct rtl8169_private *tp = netdev_priv(dev); struct rtl8169_private *tp = netdev_priv(dev);
@ -4128,6 +4149,10 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
pm_runtime_put_noidle(d); pm_runtime_put_noidle(d);
/* Reportedly at least Asus X453MA truncates packets otherwise */
if (tp->mac_version == RTL_GIGA_MAC_VER_37)
rtl_init_rxcfg(tp);
return 0; return 0;
} }
@ -4289,27 +4314,6 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
} }
} }
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
case RTL_GIGA_MAC_VER_10 ... RTL_GIGA_MAC_VER_17:
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
}
}
static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
{ {
tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
@ -6826,7 +6830,7 @@ static int rtl8169_close(struct net_device *dev)
phy_disconnect(dev->phydev); phy_disconnect(dev->phydev);
pci_free_irq(pdev, 0, tp); free_irq(pci_irq_vector(pdev, 0), tp);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr); tp->RxPhyAddr);
@ -6881,8 +6885,8 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp); rtl_request_firmware(tp);
retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, tp, retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
dev->name); IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
if (retval < 0) if (retval < 0)
goto err_release_fw_2; goto err_release_fw_2;
@ -6915,7 +6919,7 @@ static int rtl_open(struct net_device *dev)
return retval; return retval;
err_free_irq: err_free_irq:
pci_free_irq(pdev, 0, tp); free_irq(pci_irq_vector(pdev, 0), tp);
err_release_fw_2: err_release_fw_2:
rtl_release_firmware(tp); rtl_release_firmware(tp);
rtl8169_rx_clear(tp); rtl8169_rx_clear(tp);

View file

@ -177,32 +177,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
} }
} }
/**
* stmmac_stop_all_queues - Stop all queues
* @priv: driver private structure
*/
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
u32 queue;
for (queue = 0; queue < tx_queues_cnt; queue++)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
/**
* stmmac_start_all_queues - Start all queues
* @priv: driver private structure
*/
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
u32 queue;
for (queue = 0; queue < tx_queues_cnt; queue++)
netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}
static void stmmac_service_event_schedule(struct stmmac_priv *priv) static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{ {
if (!test_bit(STMMAC_DOWN, &priv->state) && if (!test_bit(STMMAC_DOWN, &priv->state) &&
@ -2678,7 +2652,7 @@ static int stmmac_open(struct net_device *dev)
} }
stmmac_enable_all_queues(priv); stmmac_enable_all_queues(priv);
stmmac_start_all_queues(priv); netif_tx_start_all_queues(priv->dev);
return 0; return 0;
@ -2724,8 +2698,6 @@ static int stmmac_release(struct net_device *dev)
phy_disconnect(dev->phydev); phy_disconnect(dev->phydev);
} }
stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv); stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@ -4519,7 +4491,6 @@ int stmmac_suspend(struct device *dev)
mutex_lock(&priv->lock); mutex_lock(&priv->lock);
netif_device_detach(ndev); netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv); stmmac_disable_all_queues(priv);
@ -4628,8 +4599,6 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv); stmmac_enable_all_queues(priv);
stmmac_start_all_queues(priv);
mutex_unlock(&priv->lock); mutex_unlock(&priv->lock);
if (ndev->phydev) if (ndev->phydev)

View file

@ -1312,6 +1312,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
/* 4. Gobi 1000 devices */ /* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */

View file

@ -49,7 +49,15 @@ static struct hdlc_proto *first_proto;
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev, static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev) struct packet_type *p, struct net_device *orig_dev)
{ {
struct hdlc_device *hdlc = dev_to_hdlc(dev); struct hdlc_device *hdlc;
/* First make sure "dev" is an HDLC device */
if (!(dev->priv_flags & IFF_WAN_HDLC)) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
hdlc = dev_to_hdlc(dev);
if (!net_eq(dev_net(dev), &init_net)) { if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb); kfree_skb(skb);

View file

@ -102,6 +102,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
old_qlen = dev->tx_queue_len; old_qlen = dev->tx_queue_len;
ether_setup(dev); ether_setup(dev);
dev->tx_queue_len = old_qlen; dev->tx_queue_len = old_qlen;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev); eth_hw_addr_random(dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev); netif_dormant_off(dev);

View file

@ -1453,7 +1453,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries); ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) { if (ret) {
dma_free_coherent(ar->dev, dma_free_coherent(ar->dev,
(nentries * sizeof(struct ce_desc_64) + (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned, src_ring->base_addr_owner_space_unaligned,
base_addr); base_addr);

View file

@ -6862,7 +6862,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
struct ieee80211_channel *channel) struct ieee80211_channel *channel)
{ {
int ret; int ret;
enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex); lockdep_assert_held(&ar->conf_mutex);

View file

@ -429,6 +429,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid); ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
if (aid < 1 || aid > AP_MAX_NUM_STA)
return;
if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) { if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
struct ieee80211_mgmt *mgmt = struct ieee80211_mgmt *mgmt =
(struct ieee80211_mgmt *) assoc_info; (struct ieee80211_mgmt *) assoc_info;

View file

@ -2648,6 +2648,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
return -EINVAL; return -EINVAL;
} }
if (tsid >= 16) {
ath6kl_err("invalid tsid: %d\n", tsid);
return -EINVAL;
}
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd)); skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb) if (!skb)
return -ENOMEM; return -ENOMEM;

View file

@ -342,6 +342,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
if (skb) { if (skb) {
htc_hdr = (struct htc_frame_hdr *) skb->data; htc_hdr = (struct htc_frame_hdr *) skb->data;
if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
goto ret;
endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id]; endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
skb_pull(skb, sizeof(struct htc_frame_hdr)); skb_pull(skb, sizeof(struct htc_frame_hdr));

View file

@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = { .mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(72), .rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED, .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
} }
} }

View file

@ -438,7 +438,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
ret = brcmf_proto_hdrpull(drvr, true, skb, ifp); ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
if (ret || !(*ifp) || !(*ifp)->ndev) { if (ret || !(*ifp) || !(*ifp)->ndev) {
if (ret != -ENODATA && *ifp) if (ret != -ENODATA && *ifp && (*ifp)->ndev)
(*ifp)->ndev->stats.rx_errors++; (*ifp)->ndev->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb); brcmu_pkt_buf_free_skb(skb);
return -ENODATA; return -ENODATA;

View file

@ -3424,9 +3424,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
aux_roc_req.apply_time_max_delay = cpu_to_le32(delay); aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm, IWL_DEBUG_TE(mvm,
"ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", "ROC: Requesting to remain on channel %u for %ums\n",
channel->hw_value, req_dur, duration, delay, channel->hw_value, req_dur);
dtim_interval); IWL_DEBUG_TE(mvm,
"\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
duration, delay, dtim_interval);
/* Set the node address */ /* Set the node address */
memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);

View file

@ -1895,7 +1895,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
chan, CFG80211_BSS_FTYPE_UNKNOWN, chan, CFG80211_BSS_FTYPE_UNKNOWN,
bssid, timestamp, bssid, timestamp,
cap_info_bitmap, beacon_period, cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL); ie_buf, ie_len, rssi, GFP_ATOMIC);
if (bss) { if (bss) {
bss_priv = (struct mwifiex_bss_priv *)bss->priv; bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band; bss_priv->band = band;

View file

@ -1985,6 +1985,8 @@ static int mwifiex_alloc_sdio_mpa_buffers(struct mwifiex_adapter *adapter,
kfree(card->mpa_rx.buf); kfree(card->mpa_rx.buf);
card->mpa_tx.buf_size = 0; card->mpa_tx.buf_size = 0;
card->mpa_rx.buf_size = 0; card->mpa_rx.buf_size = 0;
card->mpa_tx.buf = NULL;
card->mpa_rx.buf = NULL;
} }
return ret; return ret;

View file

@ -894,6 +894,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
default: default:
pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid, pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
vif->vifid, vif->wdev.iftype); vif->vifid, vif->wdev.iftype);
dev_kfree_skb(cmd_skb);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
@ -2212,6 +2213,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
break; break;
default: default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype); pr_err("unsupported iftype %d\n", vif->wdev.iftype);
dev_kfree_skb(cmd_skb);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }

View file

@ -1474,17 +1474,6 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
} }
#if defined(CONFIG_ACPI) #if defined(CONFIG_ACPI)
static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data)
{
struct resource *res = data;
if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32)
acpi_dev_resource_memory(ares, res);
/* Always tell the ACPI core to skip this resource */
return 1;
}
static struct static struct
xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct acpi_device *adev, u32 type) struct acpi_device *adev, u32 type)
@ -1496,6 +1485,7 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
struct hw_pmu_info *inf; struct hw_pmu_info *inf;
void __iomem *dev_csr; void __iomem *dev_csr;
struct resource res; struct resource res;
struct resource_entry *rentry;
int enable_bit; int enable_bit;
int rc; int rc;
@ -1504,11 +1494,23 @@ xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu,
return NULL; return NULL;
INIT_LIST_HEAD(&resource_list); INIT_LIST_HEAD(&resource_list);
rc = acpi_dev_get_resources(adev, &resource_list, rc = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
acpi_pmu_dev_add_resource, &res); if (rc <= 0) {
dev_err(dev, "PMU type %d: No resources found\n", type);
return NULL;
}
list_for_each_entry(rentry, &resource_list, node) {
if (resource_type(rentry->res) == IORESOURCE_MEM) {
res = *rentry->res;
rentry = NULL;
break;
}
}
acpi_dev_free_resource_list(&resource_list); acpi_dev_free_resource_list(&resource_list);
if (rc < 0) {
dev_err(dev, "PMU type %d: No resource address found\n", type); if (rentry) {
dev_err(dev, "PMU type %d: No memory resource found\n", type);
return NULL; return NULL;
} }

View file

@ -21,6 +21,7 @@ config PINCTRL_BCM2835
select PINMUX select PINMUX
select PINCONF select PINCONF
select GENERIC_PINCONF select GENERIC_PINCONF
select GPIOLIB
select GPIOLIB_IRQCHIP select GPIOLIB_IRQCHIP
config PINCTRL_IPROC_GPIO config PINCTRL_IPROC_GPIO

View file

@ -120,7 +120,7 @@ static const struct regmap_config mcp23x08_regmap = {
.max_register = MCP_OLAT, .max_register = MCP_OLAT,
}; };
static const struct reg_default mcp23x16_defaults[] = { static const struct reg_default mcp23x17_defaults[] = {
{.reg = MCP_IODIR << 1, .def = 0xffff}, {.reg = MCP_IODIR << 1, .def = 0xffff},
{.reg = MCP_IPOL << 1, .def = 0x0000}, {.reg = MCP_IPOL << 1, .def = 0x0000},
{.reg = MCP_GPINTEN << 1, .def = 0x0000}, {.reg = MCP_GPINTEN << 1, .def = 0x0000},
@ -131,23 +131,23 @@ static const struct reg_default mcp23x16_defaults[] = {
{.reg = MCP_OLAT << 1, .def = 0x0000}, {.reg = MCP_OLAT << 1, .def = 0x0000},
}; };
static const struct regmap_range mcp23x16_volatile_range = { static const struct regmap_range mcp23x17_volatile_range = {
.range_min = MCP_INTF << 1, .range_min = MCP_INTF << 1,
.range_max = MCP_GPIO << 1, .range_max = MCP_GPIO << 1,
}; };
static const struct regmap_access_table mcp23x16_volatile_table = { static const struct regmap_access_table mcp23x17_volatile_table = {
.yes_ranges = &mcp23x16_volatile_range, .yes_ranges = &mcp23x17_volatile_range,
.n_yes_ranges = 1, .n_yes_ranges = 1,
}; };
static const struct regmap_range mcp23x16_precious_range = { static const struct regmap_range mcp23x17_precious_range = {
.range_min = MCP_GPIO << 1, .range_min = MCP_INTCAP << 1,
.range_max = MCP_GPIO << 1, .range_max = MCP_GPIO << 1,
}; };
static const struct regmap_access_table mcp23x16_precious_table = { static const struct regmap_access_table mcp23x17_precious_table = {
.yes_ranges = &mcp23x16_precious_range, .yes_ranges = &mcp23x17_precious_range,
.n_yes_ranges = 1, .n_yes_ranges = 1,
}; };
@ -157,10 +157,10 @@ static const struct regmap_config mcp23x17_regmap = {
.reg_stride = 2, .reg_stride = 2,
.max_register = MCP_OLAT << 1, .max_register = MCP_OLAT << 1,
.volatile_table = &mcp23x16_volatile_table, .volatile_table = &mcp23x17_volatile_table,
.precious_table = &mcp23x16_precious_table, .precious_table = &mcp23x17_precious_table,
.reg_defaults = mcp23x16_defaults, .reg_defaults = mcp23x17_defaults,
.num_reg_defaults = ARRAY_SIZE(mcp23x16_defaults), .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
.cache_type = REGCACHE_FLAT, .cache_type = REGCACHE_FLAT,
.val_format_endian = REGMAP_ENDIAN_LITTLE, .val_format_endian = REGMAP_ENDIAN_LITTLE,
}; };

View file

@ -221,15 +221,6 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
}, },
}; };
static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
{
I2C_BOARD_INFO("24c32", 0x51),
},
{
I2C_BOARD_INFO("24c32", 0x50),
},
};
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = { static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{ {
I2C_BOARD_INFO("dps460", 0x59), I2C_BOARD_INFO("dps460", 0x59),
@ -589,15 +580,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
.label = "psu1", .label = "psu1",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0), .mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0], .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
}, },
{ {
.label = "psu2", .label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1), .mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1], .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
.hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
}, },
}; };

View file

@ -105,10 +105,12 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
* The equation is: * The equation is:
* base_unit = round(base_unit_range * freq / c) * base_unit = round(base_unit_range * freq / c)
*/ */
base_unit_range = BIT(lpwm->info->base_unit_bits) - 1; base_unit_range = BIT(lpwm->info->base_unit_bits);
freq *= base_unit_range; freq *= base_unit_range;
base_unit = DIV_ROUND_CLOSEST_ULL(freq, c); base_unit = DIV_ROUND_CLOSEST_ULL(freq, c);
/* base_unit must not be 0 and we also want to avoid overflowing it */
base_unit = clamp_val(base_unit, 1, base_unit_range - 1);
on_time_div = 255ULL * duty_ns; on_time_div = 255ULL * duty_ns;
do_div(on_time_div, period_ns); do_div(on_time_div, period_ns);
@ -116,8 +118,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
orig_ctrl = ctrl = pwm_lpss_read(pwm); orig_ctrl = ctrl = pwm_lpss_read(pwm);
ctrl &= ~PWM_ON_TIME_DIV_MASK; ctrl &= ~PWM_ON_TIME_DIV_MASK;
ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT); ctrl &= ~((base_unit_range - 1) << PWM_BASE_UNIT_SHIFT);
base_unit &= base_unit_range;
ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT; ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
ctrl |= on_time_div; ctrl |= on_time_div;

View file

@ -4396,15 +4396,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
else if (regulator_desc->supply_name) else if (regulator_desc->supply_name)
rdev->supply_name = regulator_desc->supply_name; rdev->supply_name = regulator_desc->supply_name;
/*
* Attempt to resolve the regulator supply, if specified,
* but don't return an error if we fail because we will try
* to resolve it again later as more regulators are added.
*/
if (regulator_resolve_supply(rdev))
rdev_dbg(rdev, "unable to resolve supply\n");
ret = set_machine_constraints(rdev, constraints); ret = set_machine_constraints(rdev, constraints);
if (ret == -EPROBE_DEFER) {
/* Regulator might be in bypass mode and so needs its supply
* to set the constraints */
/* FIXME: this currently triggers a chicken-and-egg problem
* when creating -SUPPLY symlink in sysfs to a regulator
* that is just being created */
ret = regulator_resolve_supply(rdev);
if (!ret)
ret = set_machine_constraints(rdev, constraints);
else
rdev_dbg(rdev, "unable to resolve supply early: %pe\n",
ERR_PTR(ret));
}
if (ret < 0) if (ret < 0)
goto wash; goto wash;

View file

@ -3039,6 +3039,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error; goto create_eq_error;
} }
mem->dma = paddr;
mem->va = eq_vaddress; mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries, ret = be_fill_queue(eq, phba->params.num_eq_entries,
sizeof(struct be_eq_entry), eq_vaddress); sizeof(struct be_eq_entry), eq_vaddress);
@ -3048,7 +3049,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error; goto create_eq_error;
} }
mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
BEISCSI_EQ_DELAY_DEF); BEISCSI_EQ_DELAY_DEF);
if (ret) { if (ret) {
@ -3105,6 +3105,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error; goto create_cq_error;
} }
mem->dma = paddr;
ret = be_fill_queue(cq, phba->params.num_cq_entries, ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress); sizeof(struct sol_cqe), cq_vaddress);
if (ret) { if (ret) {
@ -3114,7 +3115,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error; goto create_cq_error;
} }
mem->dma = paddr;
ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
false, 0); false, 0);
if (ret) { if (ret) {

View file

@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k), FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k)); FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
ret = EINVAL; ret = -EINVAL;
goto bye; goto bye;
} }

View file

@ -676,7 +676,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl; struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha; struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo; struct nvme_fc_port_info pinfo;
int ret = EINVAL; int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC)) if (!IS_ENABLED(CONFIG_NVME_FC))
return ret; return ret;

View file

@ -1221,7 +1221,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats: exit_host_stats:
if (ql_iscsi_stats) if (ql_iscsi_stats)
dma_free_coherent(&ha->pdev->dev, host_stats_size, dma_free_coherent(&ha->pdev->dev, stats_size,
ql_iscsi_stats, iscsi_stats_dma); ql_iscsi_stats, iscsi_stats_dma);
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",

View file

@ -255,8 +255,6 @@ int slim_unregister_controller(struct slim_controller *ctrl)
{ {
/* Remove all clients */ /* Remove all clients */
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device); device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
/* Enter Clock Pause */
slim_ctrl_clk_pause(ctrl, false, 0);
ida_simple_remove(&ctrl_ida, ctrl->id); ida_simple_remove(&ctrl_ida, ctrl->id);
return 0; return 0;
@ -297,8 +295,8 @@ void slim_report_absent(struct slim_device *sbdev)
mutex_lock(&ctrl->lock); mutex_lock(&ctrl->lock);
sbdev->is_laddr_valid = false; sbdev->is_laddr_valid = false;
mutex_unlock(&ctrl->lock); mutex_unlock(&ctrl->lock);
if (!ctrl->get_laddr)
ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr); ida_simple_remove(&ctrl->laddr_ida, sbdev->laddr);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN); slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
} }
EXPORT_SYMBOL_GPL(slim_report_absent); EXPORT_SYMBOL_GPL(slim_report_absent);

View file

@ -1272,9 +1272,13 @@ static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
{ {
struct qcom_slim_ngd_qmi *qmi = struct qcom_slim_ngd_qmi *qmi =
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl); container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
struct qcom_slim_ngd_ctrl *ctrl =
container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
qmi->svc_info.sq_node = 0; qmi->svc_info.sq_node = 0;
qmi->svc_info.sq_port = 0; qmi->svc_info.sq_port = 0;
qcom_slim_ngd_enable(ctrl, false);
} }
static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = { static struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {

View file

@ -122,6 +122,7 @@
struct s3c64xx_spi_dma_data { struct s3c64xx_spi_dma_data {
struct dma_chan *ch; struct dma_chan *ch;
dma_cookie_t cookie;
enum dma_transfer_direction direction; enum dma_transfer_direction direction;
}; };
@ -264,12 +265,13 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags); spin_unlock_irqrestore(&sdd->lock, flags);
} }
static void prepare_dma(struct s3c64xx_spi_dma_data *dma, static int prepare_dma(struct s3c64xx_spi_dma_data *dma,
struct sg_table *sgt) struct sg_table *sgt)
{ {
struct s3c64xx_spi_driver_data *sdd; struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config; struct dma_slave_config config;
struct dma_async_tx_descriptor *desc; struct dma_async_tx_descriptor *desc;
int ret;
memset(&config, 0, sizeof(config)); memset(&config, 0, sizeof(config));
@ -293,12 +295,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents, desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
dma->direction, DMA_PREP_INTERRUPT); dma->direction, DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(&sdd->pdev->dev, "unable to prepare %s scatterlist",
dma->direction == DMA_DEV_TO_MEM ? "rx" : "tx");
return -ENOMEM;
}
desc->callback = s3c64xx_spi_dmacb; desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma; desc->callback_param = dma;
dmaengine_submit(desc); dma->cookie = dmaengine_submit(desc);
ret = dma_submit_error(dma->cookie);
if (ret) {
dev_err(&sdd->pdev->dev, "DMA submission failed");
return -EIO;
}
dma_async_issue_pending(dma->ch); dma_async_issue_pending(dma->ch);
return 0;
} }
static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable) static void s3c64xx_spi_set_cs(struct spi_device *spi, bool enable)
@ -348,11 +362,12 @@ static bool s3c64xx_spi_can_dma(struct spi_master *master,
return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1; return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
} }
static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd, static int s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode) struct spi_transfer *xfer, int dma_mode)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
u32 modecfg, chcfg; u32 modecfg, chcfg;
int ret = 0;
modecfg = readl(regs + S3C64XX_SPI_MODE_CFG); modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON); modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
@ -378,7 +393,7 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON; chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) { if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
prepare_dma(&sdd->tx_dma, &xfer->tx_sg); ret = prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else { } else {
switch (sdd->cur_bpw) { switch (sdd->cur_bpw) {
case 32: case 32:
@ -410,12 +425,17 @@ static void s3c64xx_enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN, | S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT); regs + S3C64XX_SPI_PACKET_CNT);
prepare_dma(&sdd->rx_dma, &xfer->rx_sg); ret = prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
} }
} }
if (ret)
return ret;
writel(modecfg, regs + S3C64XX_SPI_MODE_CFG); writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
writel(chcfg, regs + S3C64XX_SPI_CH_CFG); writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
return 0;
} }
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd, static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
@ -548,9 +568,10 @@ static int s3c64xx_wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
return 0; return 0;
} }
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) static int s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{ {
void __iomem *regs = sdd->regs; void __iomem *regs = sdd->regs;
int ret;
u32 val; u32 val;
/* Disable Clock */ /* Disable Clock */
@ -598,7 +619,9 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
if (sdd->port_conf->clk_from_cmu) { if (sdd->port_conf->clk_from_cmu) {
/* The src_clk clock is divided internally by 2 */ /* The src_clk clock is divided internally by 2 */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); ret = clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
if (ret)
return ret;
} else { } else {
/* Configure Clock */ /* Configure Clock */
val = readl(regs + S3C64XX_SPI_CLK_CFG); val = readl(regs + S3C64XX_SPI_CLK_CFG);
@ -612,6 +635,8 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
val |= S3C64XX_SPI_ENCLK_ENABLE; val |= S3C64XX_SPI_ENCLK_ENABLE;
writel(val, regs + S3C64XX_SPI_CLK_CFG); writel(val, regs + S3C64XX_SPI_CLK_CFG);
} }
return 0;
} }
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
@ -654,7 +679,9 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->cur_bpw = bpw; sdd->cur_bpw = bpw;
sdd->cur_speed = speed; sdd->cur_speed = speed;
sdd->cur_mode = spi->mode; sdd->cur_mode = spi->mode;
s3c64xx_spi_config(sdd); status = s3c64xx_spi_config(sdd);
if (status)
return status;
} }
if (!is_polling(sdd) && (xfer->len > fifo_len) && if (!is_polling(sdd) && (xfer->len > fifo_len) &&
@ -678,13 +705,18 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
sdd->state &= ~RXBUSY; sdd->state &= ~RXBUSY;
sdd->state &= ~TXBUSY; sdd->state &= ~TXBUSY;
s3c64xx_enable_datapath(sdd, xfer, use_dma);
/* Start the signals */ /* Start the signals */
s3c64xx_spi_set_cs(spi, true); s3c64xx_spi_set_cs(spi, true);
status = s3c64xx_enable_datapath(sdd, xfer, use_dma);
spin_unlock_irqrestore(&sdd->lock, flags); spin_unlock_irqrestore(&sdd->lock, flags);
if (status) {
dev_err(&spi->dev, "failed to enable data path for transfer: %d\n", status);
break;
}
if (use_dma) if (use_dma)
status = s3c64xx_wait_for_dma(sdd, xfer); status = s3c64xx_wait_for_dma(sdd, xfer);
else else

View file

@ -599,7 +599,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE, prxbIndicateArray = kmalloc_array(REORDER_WIN_SIZE,
sizeof(struct ieee80211_rxb *), sizeof(struct ieee80211_rxb *),
GFP_KERNEL); GFP_ATOMIC);
if (!prxbIndicateArray) if (!prxbIndicateArray)
return; return;

Some files were not shown because too many files have changed in this diff Show more