This is the 4.19.10 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlwXXUsACgkQONu9yGCS
 aT5lHBAAm4DiCe303AjPWGQauwDWZPhXcF2ieF/gx77TSotIonxRa4w4nQdQAxVh
 aIiMzyxihwtgd6bCMNMkCjImWqUw+f189D6RGzKJZYLCB39HCPskJ6oMPvuzNXAL
 yF1+84288ZY+Z9DXxK3T9x8KJUj5qXexjgoMfdS9+lJWku/BsCTPFk8tIjxY5bI9
 hMSIePIfvZqmXWuz7Btw9uykOYwAzk3tqcVv1P1vSeWaUE7dWQts17NUZhnDt5zp
 alSnmUUt7I7w+9CWpORFOHC+ekfltf/7VjIVgzBf9cKTgxGeZ8+htceYGTRIwegg
 kzU4cq8IZGWp+Umfhm9r7vWxf+tjdil42dYkiDWs/XnbKVw5f2UFi8c2rAItmfVw
 vpSZK1hgUFm8dojOFIjbJF2AfhLpDDSqKuZNhw1SIzDmsA6rV8cLNdQx+suL9Xc5
 JoL+b1wH1uvrPnSOloScakF32gjsrU5mReP+yPgl3LNc1Hn/Nu85262i4OEzs+Od
 Kmy/TfaRWYlWWtejH3fydmVGGadJ4owNYqhuB9eYQgBKWbcSShDXZmvJ+VKVdmcs
 k9Nz/Lyt4GxrFYiaWGuQeE0VTG9z87FwQvuikYKJF7FptN4kixBITfzRlKh3JbM4
 sR/nASeAvGiv5WrwszcM6AJ0Ps0yzZJr5JZ1w7wbWX84QH457mo=
 =bF+8
 -----END PGP SIGNATURE-----

Merge 4.19.10 into android-4.19

Changes in 4.19.10
	ipv4: ipv6: netfilter: Adjust the frag mem limit when truesize changes
	ipv6: Check available headroom in ip6_xmit() even without options
	neighbour: Avoid writing before skb->head in neigh_hh_output()
	ipv6: sr: properly initialize flowi6 prior passing to ip6_route_output
	net: 8139cp: fix a BUG triggered by changing mtu with network traffic
	net/mlx4_core: Correctly set PFC param if global pause is turned off.
	net/mlx4_en: Change min MTU size to ETH_MIN_MTU
	net: phy: don't allow __set_phy_supported to add unsupported modes
	net: Prevent invalid access to skb->prev in __qdisc_drop_all
	net: use skb_list_del_init() to remove from RX sublists
	Revert "net/ibm/emac: wrong bit is used for STA control"
	rtnetlink: ndo_dflt_fdb_dump() only work for ARPHRD_ETHER devices
	sctp: kfree_rcu asoc
	tcp: Do not underestimate rwnd_limited
	tcp: fix NULL ref in tail loss probe
	tun: forbid iface creation with rtnl ops
	virtio-net: keep vnet header zeroed after processing XDP
	net: phy: sfp: correct store of detected link modes
	sctp: update frag_point when stream_interleave is set
	net: restore call to netdev_queue_numa_node_write when resetting XPS
	net: fix XPS static_key accounting
	ARM: OMAP2+: prm44xx: Fix section annotation on omap44xx_prm_enable_io_wakeup
	ASoC: rsnd: fixup clock start checker
	ASoC: qdsp6: q6afe: Fix wrong MI2S SD line mask
	ASoC: qdsp6: q6afe-dai: Fix the dai widgets
	staging: rtl8723bs: Fix the return value in case of error in 'rtw_wx_read32()'
	ARM: dts: am3517: Fix pinmuxing for CD on MMC1
	ARM: dts: LogicPD Torpedo: Fix mmc3_dat1 interrupt
	ARM: dts: logicpd-somlv: Fix interrupt on mmc3_dat1
	ARM: dts: am3517-som: Fix WL127x Wifi interrupt
	ARM: OMAP1: ams-delta: Fix possible use of uninitialized field
	tools: bpftool: prevent infinite loop in get_fdinfo()
	ASoC: sun8i-codec: fix crash on module removal
	arm64: dts: sdm845-mtp: Reserve reserved gpios
	sysv: return 'err' instead of 0 in __sysv_write_inode
	netfilter: nf_conncount: use spin_lock_bh instead of spin_lock
	netfilter: nf_conncount: fix list_del corruption in conn_free
	netfilter: nf_conncount: fix unexpected permanent node of list.
	netfilter: nf_tables: don't skip inactive chains during update
	selftests: add script to stress-test nft packet path vs. control plane
	perf tools: Fix crash on synthesizing the unit
	netfilter: xt_RATEEST: remove netns exit routine
	netfilter: nf_tables: fix use-after-free when deleting compat expressions
	s390/cio: Fix cleanup of pfn_array alloc failure
	s390/cio: Fix cleanup when unsupported IDA format is used
	hwmon (ina2xx) Fix NULL id pointer in probe()
	hwmon: (raspberrypi) Fix initial notify
	ASoC: rockchip: add missing slave_config setting for I2S
	ASoC: wm_adsp: Fix dma-unsafe read of scratch registers
	ASoC: Intel: Power down links before turning off display audio power
	ASoC: qcom: Set dai_link id to each dai_link
	s390/cpum_cf: Reject request for sampling in event initialization
	hwmon: (ina2xx) Fix current value calculation
	ASoC: omap-abe-twl6040: Fix missing audio card caused by deferred probing
	ASoC: dapm: Recalculate audio map forcely when card instantiated
	spi: omap2-mcspi: Add missing suspend and resume calls
	hwmon: (mlxreg-fan) Fix macros for tacho fault reading
	bpf: allocate local storage buffers using GFP_ATOMIC
	aio: fix failure to put the file pointer
	netfilter: xt_hashlimit: fix a possible memory leak in htable_create()
	hwmon: (w83795) temp4_type has writable permission
	perf tools: Restore proper cwd on return from mnt namespace
	PCI: imx6: Fix link training status detection in link up check
	ASoC: acpi: fix: continue searching when machine is ignored
	objtool: Fix double-free in .cold detection error path
	objtool: Fix segfault in .cold detection with -ffunction-sections
	phy: qcom-qusb2: Use HSTX_TRIM fused value as is
	phy: qcom-qusb2: Fix HSTX_TRIM tuning with fused value for SDM845
	ARM: dts: at91: sama5d2: use the divided clock for SMC
	Btrfs: send, fix infinite loop due to directory rename dependencies
	RDMA/mlx5: Fix fence type for IB_WR_LOCAL_INV WR
	RDMA/core: Add GIDs while changing MAC addr only for registered ndev
	RDMA/bnxt_re: Fix system hang when registration with L2 driver fails
	RDMA/bnxt_re: Avoid accessing the device structure after it is freed
	RDMA/rdmavt: Fix rvt_create_ah function signature
	tools: bpftool: fix potential NULL pointer dereference in do_load
	ASoC: omap-mcbsp: Fix latency value calculation for pm_qos
	ASoC: omap-mcpdm: Add pm_qos handling to avoid under/overruns with CPU_IDLE
	ASoC: omap-dmic: Add pm_qos handling to avoid overruns with CPU_IDLE
	exportfs: do not read dentry after free
	RDMA/hns: Bugfix pbl configuration for rereg mr
	bpf: fix check of allowed specifiers in bpf_trace_printk
	fsi: master-ast-cf: select GENERIC_ALLOCATOR
	ipvs: call ip_vs_dst_notifier earlier than ipv6_dev_notf
	USB: omap_udc: use devm_request_irq()
	USB: omap_udc: fix crashes on probe error and module removal
	USB: omap_udc: fix omap_udc_start() on 15xx machines
	USB: omap_udc: fix USB gadget functionality on Palm Tungsten E
	USB: omap_udc: fix rejection of out transfers when DMA is used
	thunderbolt: Prevent root port runtime suspend during NVM upgrade
	drm/meson: add support for 1080p25 mode
	netfilter: ipv6: Preserve link scope traffic original oif
	IB/mlx5: Fix page fault handling for MW
	netfilter: add missing error handling code for register functions
	netfilter: nat: fix double register in masquerade modules
	netfilter: nf_conncount: remove wrong condition check routine
	KVM: VMX: Update shared MSRs to be saved/restored on MSR_EFER.LMA changes
	KVM: x86: fix empty-body warnings
	x86/kvm/vmx: fix old-style function declaration
	net: thunderx: fix NULL pointer dereference in nic_remove
	usb: gadget: u_ether: fix unsafe list iteration
	netfilter: nf_tables: deactivate expressions in rule replecement routine
	ALSA: usb-audio: Add vendor and product name for Dell WD19 Dock
	cachefiles: Fix an assertion failure when trying to update a failed object
	fscache: Fix race in fscache_op_complete() due to split atomic_sub & read
	cachefiles: Fix page leak in cachefiles_read_backing_file while vmscan is active
	igb: fix uninitialized variables
	ixgbe: recognize 1000BaseLX SFP modules as 1Gbps
	net: hisilicon: remove unexpected free_netdev
	drm/amdgpu: Add delay after enable RLC ucode
	drm/ast: fixed reading monitor EDID not stable issue
	xen: xlate_mmu: add missing header to fix 'W=1' warning
	Revert "xen/balloon: Mark unallocated host memory as UNUSABLE"
	pvcalls-front: fixes incorrect error handling
	pstore/ram: Correctly calculate usable PRZ bytes
	afs: Fix validation/callback interaction
	fscache: fix race between enablement and dropping of object
	cachefiles: Explicitly cast enumerated type in put_object
	fscache, cachefiles: remove redundant variable 'cache'
	nvme: warn when finding multi-port subsystems without multipathing enabled
	nvme: flush namespace scanning work just before removing namespaces
	nvme-rdma: fix double freeing of async event data
	ACPI/IORT: Fix iort_get_platform_device_domain() uninitialized pointer value
	ocfs2: fix deadlock caused by ocfs2_defrag_extent()
	mm/page_alloc.c: fix calculation of pgdat->nr_zones
	hfs: do not free node before using
	hfsplus: do not free node before using
	debugobjects: avoid recursive calls with kmemleak
	proc: fixup map_files test on arm
	kernel/kcov.c: mark funcs in __sanitizer_cov_trace_pc() as notrace
	initramfs: clean old path before creating a hardlink
	ocfs2: fix potential use after free
	flexfiles: enforce per-mirror stateid only for v4 DSes
	dax: Check page->mapping isn't NULL
	ALSA: fireface: fix reference to wrong register for clock configuration
	ALSA: hda/realtek - Fixed headphone issue for ALC700
	ALSA: hda/realtek: ALC294 mic and headset-mode fixups for ASUS X542UN
	ALSA: hda/realtek: Enable audio jacks of ASUS UX533FD with ALC294
	ALSA: hda/realtek: Enable audio jacks of ASUS UX433FN/UX333FA with ALC294
	ALSA: hda/realtek - Fix the mute LED regresion on Lenovo X1 Carbon
	IB/hfi1: Fix an out-of-bounds access in get_hw_stats
	bpf: fix off-by-one error in adjust_subprog_starts
	tcp: lack of available data can also cause TSO defer
	Linux 4.19.10

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2018-12-17 09:39:43 +01:00
commit 67319b77a0
149 changed files with 1244 additions and 840 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 9
+SUBLEVEL = 10
 EXTRAVERSION =
 NAME = "People's Front"


@@ -227,7 +227,7 @@
 	vmmc-supply = <&vmmc_fixed>;
 	bus-width = <4>;
 	wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
-	cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
+	cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
 };
 
 &mmc3 {


@@ -163,7 +163,7 @@
 	compatible = "ti,wl1271";
 	reg = <2>;
 	interrupt-parent = <&gpio6>;
-	interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
+	interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
 	ref-clock-frequency = <26000000>;
 	tcxo-clock-frequency = <26000000>;
 };


@@ -129,7 +129,7 @@
 };
 
 &mmc3 {
-	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+	interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
 	pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;


@@ -35,7 +35,7 @@
  * jumpering combinations for the long run.
  */
 &mmc3 {
-	interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+	interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
 	pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
 	pinctrl-names = "default";
 	vmmc-supply = <&wl12xx_vmmc>;


@@ -308,7 +308,7 @@
 					  0x1 0x0 0x60000000 0x10000000
 					  0x2 0x0 0x70000000 0x10000000
 					  0x3 0x0 0x80000000 0x10000000>;
-				clocks = <&mck>;
+				clocks = <&h32ck>;
 				status = "disabled";
 
 				nand_controller: nand-controller {


@@ -726,6 +726,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
 	struct modem_private_data *priv = port->private_data;
 	int ret;
 
+	if (!priv)
+		return;
+
 	if (IS_ERR(priv->regulator))
 		return;


@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
  * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
  * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
  */
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
 {
 	s32 inst = omap4_prmst_get_prm_dev_inst();


@@ -31,6 +31,10 @@
 	status = "okay";
 };
 
+&tlmm {
+	gpio-reserved-ranges = <0 4>, <81 4>;
+};
+
 &uart9 {
 	status = "okay";
 };


@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 		break;
 
 	case PERF_TYPE_HARDWARE:
+		if (is_sampling_event(event))	/* No sampling support */
+			return -ENOENT;
 		ev = attr->config;
 		/* Count user space (problem-state) only */
 		if (!attr->exclude_user && attr->exclude_kernel) {


@@ -55,7 +55,7 @@
 #define PRIo64 "o"
 
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
 
 /* 14 is the version for Xeon and Pentium 8.4.8*/
 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
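
The apic_debug() change above is the classic "do {} while (0)" idiom. A
minimal sketch of why an empty expansion is not enough (log_dbg() and
do_other_thing() here are hypothetical, not from this patch):

	#define log_dbg(fmt, ...) do {} while (0)	/* expands to one real statement */

	static void example(int cond)
	{
		if (cond)
			log_dbg("reached");	/* a complete (empty) statement */
		else
			do_other_thing();	/* else still binds to the if above */
	}

With a truly empty macro body, "log_dbg(...);" leaves a bare semicolon
behind, which trips compiler empty-body warnings (see "KVM: x86: fix
empty-body warnings" in this batch) and invites dangling-else mistakes.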


@@ -962,6 +962,7 @@ struct vcpu_vmx {
 	struct shared_msr_entry *guest_msrs;
 	int                   nmsrs;
 	int                   save_nmsrs;
+	bool                  guest_msrs_dirty;
 	unsigned long	      host_idt_base;
 #ifdef CONFIG_X86_64
 	u64		      msr_host_kernel_gs_base;
@@ -1284,7 +1285,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
 					    u16 error_code);
 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							  u32 msr, int type);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -2874,6 +2875,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
 	vmx->req_immediate_exit = false;
 
+	/*
+	 * Note that guest MSRs to be saved/restored can also be changed
+	 * when guest state is loaded. This happens when guest transitions
+	 * to/from long-mode by setting MSR_EFER.LMA.
+	 */
+	if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
+		vmx->guest_msrs_dirty = false;
+		for (i = 0; i < vmx->save_nmsrs; ++i)
+			kvm_set_shared_msr(vmx->guest_msrs[i].index,
+					   vmx->guest_msrs[i].data,
+					   vmx->guest_msrs[i].mask);
+	}
+
 	if (vmx->loaded_cpu_state)
 		return;
@@ -2934,11 +2949,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 		vmcs_writel(HOST_GS_BASE, gs_base);
 		host_state->gs_base = gs_base;
 	}
-
-	for (i = 0; i < vmx->save_nmsrs; ++i)
-		kvm_set_shared_msr(vmx->guest_msrs[i].index,
-				   vmx->guest_msrs[i].data,
-				   vmx->guest_msrs[i].mask);
 }
 
 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
@@ -3418,6 +3428,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 		move_msr_up(vmx, index, save_nmsrs++);
 
 	vmx->save_nmsrs = save_nmsrs;
+	vmx->guest_msrs_dirty = true;
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);
@@ -5924,7 +5935,7 @@ static void free_vpid(int vpid)
 	spin_unlock(&vmx_vpid_lock);
 }
 
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
 							  u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
@@ -5962,7 +5973,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
 	}
 }
 
-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
 							 u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
@@ -6000,7 +6011,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
 	}
 }
 
-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
 						      u32 msr, int type, bool value)
 {
 	if (value)
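
The new guest_msrs_dirty flag above is a deferred-synchronization pattern:
setup_msrs() only marks the cached MSR list dirty, and the expensive
kvm_set_shared_msr() writes happen at the next switch-in, which also covers
the case where the guest toggles MSR_EFER.LMA while its state is loaded. A
reduced sketch with hypothetical names (vcpu_state, sync_to_hw, recompute):

	struct vcpu_state {
		bool loaded;	/* hardware currently holds our values */
		bool dirty;	/* cached values changed since the last sync */
	};

	static void sync_to_hw(struct vcpu_state *s);	/* the expensive writes */

	static void recompute(struct vcpu_state *s)
	{
		/* ... update only the cached values ... */
		s->dirty = true;	/* defer the hardware writes */
	}

	static void switch_in(struct vcpu_state *s)
	{
		if (!s->loaded || s->dirty) {
			s->dirty = false;
			sync_to_hw(s);	/* one write path covers both cases */
		}
		if (s->loaded)
			return;
		/* ... load the rest of the state ... */
		s->loaded = true;
	}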


@@ -7,7 +7,6 @@
#include <xen/features.h>
#include <xen/page.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -343,80 +342,3 @@ void xen_arch_unregister_cpu(int num)
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
void __init arch_xen_balloon_init(struct resource *hostmem_resource)
{
struct xen_memory_map memmap;
int rc;
unsigned int i, last_guest_ram;
phys_addr_t max_addr = PFN_PHYS(max_pfn);
struct e820_table *xen_e820_table;
const struct e820_entry *entry;
struct resource *res;
if (!xen_initial_domain())
return;
xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
if (!xen_e820_table)
return;
memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
if (rc) {
pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
goto out;
}
last_guest_ram = 0;
for (i = 0; i < memmap.nr_entries; i++) {
if (xen_e820_table->entries[i].addr >= max_addr)
break;
if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
last_guest_ram = i;
}
entry = &xen_e820_table->entries[last_guest_ram];
if (max_addr >= entry->addr + entry->size)
goto out; /* No unallocated host RAM. */
hostmem_resource->start = max_addr;
hostmem_resource->end = entry->addr + entry->size;
/*
* Mark non-RAM regions between the end of dom0 RAM and end of host RAM
* as unavailable. The rest of that region can be used for hotplug-based
* ballooning.
*/
for (; i < memmap.nr_entries; i++) {
entry = &xen_e820_table->entries[i];
if (entry->type == E820_TYPE_RAM)
continue;
if (entry->addr >= hostmem_resource->end)
break;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
goto out;
res->name = "Unavailable host RAM";
res->start = entry->addr;
res->end = (entry->addr + entry->size < hostmem_resource->end) ?
entry->addr + entry->size : hostmem_resource->end;
rc = insert_resource(hostmem_resource, res);
if (rc) {
pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
__func__, res->start, res->end, rc);
kfree(res);
goto out;
}
}
out:
kfree(xen_e820_table);
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */


@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
 	addr = xen_e820_table.entries[0].addr;
 	size = xen_e820_table.entries[0].size;
 	while (i < xen_e820_table.nr_entries) {
+		bool discard = false;
 
 		chunk_size = size;
 		type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
 				xen_add_extra_mem(pfn_s, n_pfns);
 				xen_max_p2m_pfn = pfn_s + n_pfns;
 			} else
-				type = E820_TYPE_UNUSABLE;
+				discard = true;
 		}
 
-		xen_align_and_add_e820_region(addr, chunk_size, type);
+		if (!discard)
+			xen_align_and_add_e820_region(addr, chunk_size, type);
 
 		addr += chunk_size;
 		size -= chunk_size;


@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
  */
 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
 {
-	struct acpi_iort_node *node, *msi_parent;
+	struct acpi_iort_node *node, *msi_parent = NULL;
 	struct fwnode_handle *iort_fwnode;
 	struct acpi_iort_its_group *its;
 	int i;


@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
 	tristate "FSI master based on Aspeed ColdFire coprocessor"
 	depends on GPIOLIB
 	depends on GPIO_ASPEED
+	select GENERIC_ALLOCATOR
 	---help---
 	This option enables a FSI master using the AST2400 and AST2500 GPIO
 	lines driven by the internal ColdFire coprocessor. This requires


@@ -2243,12 +2243,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
 #endif
 
 	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+	udelay(50);
 
 	/* carrizo do enable cp interrupt after cp inited */
-	if (!(adev->flags & AMD_IS_APU))
+	if (!(adev->flags & AMD_IS_APU)) {
 		gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
-	udelay(50);
+		udelay(50);
+	}
 
 #ifdef AMDGPU_RLC_DEBUG_RETRY
 	/* RLC_GPM_GENERAL_6 : RLC Ucode version */


@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
 {
 	struct ast_i2c_chan *i2c = i2c_priv;
 	struct ast_private *ast = i2c->dev->dev_private;
-	uint32_t val;
+	uint32_t val, val2, count, pass;
+
+	count = 0;
+	pass = 0;
+	val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+	do {
+		val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+		if (val == val2) {
+			pass++;
+		} else {
+			pass = 0;
+			val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+		}
+	} while ((pass < 5) && (count++ < 0x10000));
 
-	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
 	return val & 1 ? 1 : 0;
 }
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
 {
 	struct ast_i2c_chan *i2c = i2c_priv;
 	struct ast_private *ast = i2c->dev->dev_private;
-	uint32_t val;
+	uint32_t val, val2, count, pass;
+
+	count = 0;
+	pass = 0;
+	val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+	do {
+		val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+		if (val == val2) {
+			pass++;
+		} else {
+			pass = 0;
+			val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+		}
+	} while ((pass < 5) && (count++ < 0x10000));
 
-	val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
 	return val & 1 ? 1 : 0;
 }
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
 
 	for (i = 0; i < 0x10000; i++) {
 		ujcrb7 = ((clock & 0x01) ? 0 : 1);
-		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
 		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
 		if (ujcrb7 == jtemp)
 			break;
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
 
 	for (i = 0; i < 0x10000; i++) {
 		ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
-		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+		ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
 		jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
 		if (ujcrb7 == jtemp)
 			break;
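
The two get_clock()/get_data() hunks replace a single racy register read with
a majority-vote debounce: a bit is only trusted after five consecutive
identical samples, bounded by a retry budget. The shape of the idiom, with
read_bit() standing in for the real ast_get_index_reg_mask() accessor:

	static int read_bit_debounced(int (*read_bit)(void))
	{
		unsigned int count = 0, pass = 0;
		int val = read_bit();

		do {
			int val2 = read_bit();

			if (val == val2) {
				pass++;		/* consecutive agreement */
			} else {
				pass = 0;	/* disagreement: restart the vote */
				val = read_bit();
			}
		} while (pass < 5 && count++ < 0x10000);

		return val & 1;
	}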


@@ -715,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
 	{ 5, &meson_hdmi_encp_mode_1080i60 },
 	{ 20, &meson_hdmi_encp_mode_1080i50 },
 	{ 32, &meson_hdmi_encp_mode_1080p24 },
+	{ 33, &meson_hdmi_encp_mode_1080p50 },
 	{ 34, &meson_hdmi_encp_mode_1080p30 },
 	{ 31, &meson_hdmi_encp_mode_1080p50 },
 	{ 16, &meson_hdmi_encp_mode_1080p60 },


@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
 		break;
 	case INA2XX_CURRENT:
 		/* signed register, result in mA */
-		val = regval * data->current_lsb_uA;
+		val = (s16)regval * data->current_lsb_uA;
 		val = DIV_ROUND_CLOSEST(val, 1000);
 		break;
 	case INA2XX_CALIBRATION:
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
 	}
 
 	data->groups[group++] = &ina2xx_group;
-	if (id->driver_data == ina226)
+	if (chip == ina226)
 		data->groups[group++] = &ina226_group;
 
 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
 		return PTR_ERR(hwmon_dev);
 
 	dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-		 id->name, data->rshunt);
+		 client->name, data->rshunt);
 
 	return 0;
 }
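
The (s16) cast in the CURRENT case is a sign-extension fix: the bus helper
returns the register as an unsigned 16-bit quantity, but the hardware field
is two's complement. A standalone illustration (the LSB value of 100 is an
arbitrary example, not the driver's calibration):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t regval = 0xFFF6;		/* raw reading: -10 in two's complement */
		long wrong = regval * 100;		/* 6552600: sign lost */
		long right = (int16_t)regval * 100;	/* -1000: sign-extended first */

		printf("wrong=%ld right=%ld\n", wrong, right);
		return 0;
	}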


@@ -51,7 +51,7 @@
  */
 #define MLXREG_FAN_GET_RPM(rval, d, s)	(DIV_ROUND_CLOSEST(15000000 * 100, \
 					 ((rval) + (s)) * (d)))
-#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
 #define MLXREG_FAN_PWM_DUTY2STATE(duty)	(DIV_ROUND_CLOSEST((duty) *	\
 					 MLXREG_FAN_MAX_STATE,		\
 					 MLXREG_FAN_MAX_DUTY))
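
The macro fix flips the sense of the fault test: (val ^ mask) is zero exactly
when every bit of val matches mask, so the new !((val) ^ (mask)) reports a
fault only when the tacho register reads the full fault pattern, i.e. it is
equivalent to (val == mask). The old !!((val) ^ (mask)) form was the
opposite, raising a fault whenever any bit differed:

	static int tacho_fault(unsigned int val, unsigned int mask)
	{
		return !(val ^ mask);	/* same as (val == mask) */
	}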


@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct rpi_hwmon_data *data;
-	int ret;
 
 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 	/* Parent driver assure that firmware is correct */
 	data->fw = dev_get_drvdata(dev->parent);
 
-	/* Init throttled */
-	ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
-				    &data->last_throttled,
-				    sizeof(data->last_throttled));
-
 	data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
 							       data,
 							       &rpi_chip_info,


@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
  * somewhere else in the code
  */
 #define SENSOR_ATTR_TEMP(index) { \
-	SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
+	SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
 		show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
 	SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
 		NULL, TEMP_READ, index - 1), \


@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
 
 	case NETDEV_CHANGEADDR:
 		cmds[0] = netdev_del_cmd;
-		cmds[1] = add_default_gid_cmd;
-		cmds[2] = add_cmd;
+		if (ndev->reg_state == NETREG_REGISTERED) {
+			cmds[1] = add_default_gid_cmd;
+			cmds[2] = add_cmd;
+		}
 		break;
 
 	case NETDEV_CHANGEUPPER:


@@ -1252,6 +1252,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 	/* Registered a new RoCE device instance to netdev */
 	rc = bnxt_re_register_netdev(rdev);
 	if (rc) {
+		rtnl_unlock();
 		pr_err("Failed to register with netedev: %#x\n", rc);
 		return -EINVAL;
 	}
@@ -1461,6 +1462,7 @@ static void bnxt_re_task(struct work_struct *work)
 				"Failed to register with IB: %#x", rc);
 			bnxt_re_remove_one(rdev);
 			bnxt_re_dev_unreg(rdev);
+			goto exit;
 		}
 		break;
 	case NETDEV_UP:
@@ -1484,6 +1486,7 @@ static void bnxt_re_task(struct work_struct *work)
 	}
 	smp_mb__before_atomic();
 	atomic_dec(&rdev->sched_count);
+exit:
 	kfree(re_work);
 }


@@ -12485,7 +12485,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
 	}
 
 	/* allocate space for the counter values */
-	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+			    GFP_KERNEL);
 	if (!dd->cntrs)
 		goto bail;


@@ -154,6 +154,8 @@ struct hfi1_ib_stats {
 extern struct hfi1_ib_stats hfi1_stats;
 extern const struct pci_error_handlers hfi1_pci_err_handler;
 
+extern int num_driver_cntrs;
+
 /*
  * First-cut criterion for "device is active" is
  * two thousand dwords combined Tx, Rx traffic per


@@ -1701,7 +1701,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;


@@ -1661,10 +1661,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
u64 page_addr;
u64 *pages;
@@ -1672,6 +1671,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
int len;
int entry;
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba,
V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
upper_32_bits(mr->pbl_ba >> 3));
pages = (u64 *)__get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (j = 0; j < len; ++j) {
page_addr = sg_dma_address(sg) +
(j << mr->umem->page_shift);
pages[i] = page_addr >> 6;
/* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
goto found;
i++;
}
}
found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
free_page((unsigned long)pages);
return 0;
}
static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
int ret;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1686,7 +1732,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1700,13 +1745,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
(mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
(mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
mr->type == MR_TYPE_MR ? 0 : 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1);
mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1717,53 +1760,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
ret = set_mtpt_pbl(mpt_entry, mr);
mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
upper_32_bits(mr->pbl_ba >> 3));
mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
pages = (u64 *)__get_free_page(GFP_KERNEL);
if (!pages)
return -ENOMEM;
i = 0;
for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (j = 0; j < len; ++j) {
page_addr = sg_dma_address(sg) +
(j << mr->umem->page_shift);
pages[i] = page_addr >> 6;
/* Record the first 2 entry directly to MTPT table */
if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
goto found;
i++;
}
}
found:
mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
V2_MPT_BYTE_56_PA0_H_S,
upper_32_bits(pages[0]));
mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
free_page((unsigned long)pages);
roce_set_field(mpt_entry->byte_64_buf_pa1,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
return 0;
return ret;
}
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1772,6 +1771,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
u64 size, void *mb_buf)
{
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
int ret = 0;
if (flags & IB_MR_REREG_PD) {
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1784,14 +1784,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
V2_MPT_BYTE_8_BIND_EN_S,
(mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
V2_MPT_BYTE_8_ATOMIC_EN_S,
(mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
V2_MPT_BYTE_8_ATOMIC_EN_S,
mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
(mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
(mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
}
if (flags & IB_MR_REREG_TRANS) {
@@ -1800,21 +1800,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
mpt_entry->pbl_ba_l =
cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
roce_set_field(mpt_entry->byte_48_mode_ba,
V2_MPT_BYTE_48_PBL_BA_H_M,
V2_MPT_BYTE_48_PBL_BA_H_S,
upper_32_bits(mr->pbl_ba >> 3));
mpt_entry->byte_48_mode_ba =
cpu_to_le32(mpt_entry->byte_48_mode_ba);
mr->iova = iova;
mr->size = size;
ret = set_mtpt_pbl(mpt_entry, mr);
}
return 0;
return ret;
}
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)


@@ -724,6 +724,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
 			head = frame;
 
 			bcnt -= frame->bcnt;
+			offset = 0;
 		}
 		break;


@@ -4413,17 +4413,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (wr->opcode == IB_WR_LOCAL_INV ||
-		    wr->opcode == IB_WR_REG_MR) {
+		if (wr->opcode == IB_WR_REG_MR) {
 			fence = dev->umr_fence;
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-		} else if (wr->send_flags & IB_SEND_FENCE) {
-			if (qp->next_fence)
-				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
-			else
-				fence = MLX5_FENCE_MODE_FENCE;
-		} else {
-			fence = qp->next_fence;
+		} else {
+			if (wr->send_flags & IB_SEND_FENCE) {
+				if (qp->next_fence)
+					fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+				else
+					fence = MLX5_FENCE_MODE_FENCE;
+			} else {
+				fence = qp->next_fence;
+			}
 		}
 
 		switch (ibqp->qp_type) {


@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
  * rvt_create_ah - create an address handle
  * @pd: the protection domain
  * @ah_attr: the attributes of the AH
+ * @udata: pointer to user's input output buffer information.
  *
  * This may be called from interrupt context.
  *
  * Return: newly allocated ah
  */
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr)
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata)
 {
 	struct rvt_ah *ah;
 	struct rvt_dev_info *dev = ib_to_rvt(pd->device);


@@ -51,7 +51,8 @@
 #include <rdma/rdma_vt.h>
 
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-			    struct rdma_ah_attr *ah_attr);
+			    struct rdma_ah_attr *ah_attr,
+			    struct ib_udata *udata);
 int rvt_destroy_ah(struct ib_ah *ibah);
 int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);


@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
 {
 	struct nicpf *nic = pci_get_drvdata(pdev);
 
+	if (!nic)
+		return;
+
 	if (nic->flags & NIC_SRIOV_ENABLED)
 		pci_disable_sriov(pdev);


@@ -914,10 +914,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
 	}
 
 	ret = register_netdev(ndev);
-	if (ret) {
-		free_netdev(ndev);
+	if (ret)
 		goto alloc_fail;
-	}
 
 	return 0;


@@ -231,7 +231,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE			0x00004000
 #define EMAC_STACR_STAC_MASK		0x00003000
 #define EMAC_STACR_STAC_READ		0x00001000
-#define EMAC_STACR_STAC_WRITE		0x00000800
+#define EMAC_STACR_STAC_WRITE		0x00002000
 #define EMAC_STACR_OPBC_MASK		0x00000C00
 #define EMAC_STACR_OPBC_50		0x00000000
 #define EMAC_STACR_OPBC_66		0x00000400


@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
 		nvm_word = E1000_INVM_DEFAULT_AL;
 	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
 	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
+	phy_word = E1000_PHY_PLL_UNCONF;
 	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
 		/* check current state directly from internal PHY */
 		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);


@@ -2248,7 +2248,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
 	*autoneg = false;
 
 	if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+	    hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
 		return 0;
 	}


@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 	tx_pause = !!(pause->tx_pause);
 	rx_pause = !!(pause->rx_pause);
-	rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
-	tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+	rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+	tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
 
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
 				    priv->rx_skb_size + ETH_FCS_LEN,


@@ -3494,8 +3494,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
-	/* MTU range: 46 - hw-specific max */
-	dev->min_mtu = MLX4_EN_MIN_MTU;
+	/* MTU range: 68 - hw-specific max */
+	dev->min_mtu = ETH_MIN_MTU;
 	dev->max_mtu = priv->max_mtu;
 
 	mdev->pndev[port] = dev;


@@ -161,7 +161,6 @@
 #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
 				  ETH_HLEN + PREAMBLE_LEN)
 
-#define MLX4_EN_MIN_MTU		46
 /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
  */


@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 	struct cp_private *cp;
 	int handled = 0;
 	u16 status;
+	u16 mask;
 
 	if (unlikely(dev == NULL))
 		return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
 	spin_lock(&cp->lock);
 
+	mask = cpr16(IntrMask);
+	if (!mask)
+		goto out_unlock;
+
 	status = cpr16(IntrStatus);
 	if (!status || (status == 0xFFFF))
 		goto out_unlock;


@@ -1738,20 +1738,17 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-	phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-			       PHY_10BT_FEATURES);
-
 	switch (max_speed) {
-	default:
-		return -ENOTSUPP;
-	case SPEED_1000:
-		phydev->supported |= PHY_1000BT_FEATURES;
+	case SPEED_10:
+		phydev->supported &= ~PHY_100BT_FEATURES;
 		/* fall through */
 	case SPEED_100:
-		phydev->supported |= PHY_100BT_FEATURES;
-		/* fall through */
-	case SPEED_10:
-		phydev->supported |= PHY_10BT_FEATURES;
+		phydev->supported &= ~PHY_1000BT_FEATURES;
+		break;
+	case SPEED_1000:
+		break;
+	default:
+		return -ENOTSUPP;
 	}
 
 	return 0;
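
The rewritten switch relies on deliberate fall-through so the helper only
ever strips feature bits, never adds modes the PHY cannot do. A reduced
sketch (FEAT_* stands in for the PHY_*BT_FEATURES bitmasks; SPEED_* and
ENOTSUPP are the usual kernel constants):

	static int clamp_supported(unsigned int *supported, int max_speed)
	{
		switch (max_speed) {
		case SPEED_10:
			*supported &= ~FEAT_100;	/* drop 100Mbit modes */
			/* fall through */
		case SPEED_100:
			*supported &= ~FEAT_1000;	/* drop gigabit modes */
			break;
		case SPEED_1000:
			break;				/* nothing faster to drop */
		default:
			return -ENOTSUPP;
		}
		return 0;
	}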


@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
 	/* 1000Base-PX or 1000Base-BX10 */
 	if ((id->base.e_base_px || id->base.e_base_bx10) &&
 	    br_min <= 1300 && br_max >= 1200)
-		phylink_set(support, 1000baseX_Full);
+		phylink_set(modes, 1000baseX_Full);
 
 	/* For active or passive cables, select the link modes
 	 * based on the bit rates and the cable compliance bytes.


@@ -2268,9 +2268,9 @@ static void tun_setup(struct net_device *dev)
 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
 			struct netlink_ext_ack *extack)
 {
-	if (!data)
-		return 0;
-	return -EINVAL;
+	NL_SET_ERR_MSG(extack,
+		       "tun/tap creation via rtnetlink is not supported.");
+	return -EOPNOTSUPP;
 }
 
 static size_t tun_get_size(const struct net_device *dev)


@@ -365,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 				   struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
-				   unsigned int len, unsigned int truesize)
+				   unsigned int len, unsigned int truesize,
+				   bool hdr_valid)
 {
 	struct sk_buff *skb;
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -387,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	else
 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
-	memcpy(hdr, p, hdr_len);
+	if (hdr_valid)
+		memcpy(hdr, p, hdr_len);
 
 	len -= hdr_len;
 	offset += hdr_padded_len;
@@ -739,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
 				   struct virtnet_rq_stats *stats)
 {
 	struct page *page = buf;
-	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
+					  PAGE_SIZE, true);
 
 	stats->bytes += len - vi->hdr_len;
 	if (unlikely(!skb))
@@ -842,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				rcu_read_unlock();
 				put_page(page);
 				head_skb = page_to_skb(vi, rq, xdp_page,
-						       offset, len, PAGE_SIZE);
+						       offset, len,
+						       PAGE_SIZE, false);
 				return head_skb;
 			}
 			break;
@@ -898,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		goto err_skb;
 	}
 
-	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+	head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
 	curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))


@@ -3308,6 +3308,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(ns_list);
 
+	/* prevent racing with ns scanning */
+	flush_work(&ctrl->scan_work);
+
 	/*
 	 * The dead states indicates the controller was not gracefully
 	 * disconnected. In that case, we won't be able to flush any data while
@@ -3463,7 +3466,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	nvme_mpath_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
 	flush_work(&ctrl->async_event_work);
-	flush_work(&ctrl->scan_work);
 	cancel_work_sync(&ctrl->fw_act_work);
 	if (ctrl->ops->stop_ctrl)
 		ctrl->ops->stop_ctrl(ctrl);


@@ -537,6 +537,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
 		struct nvme_id_ctrl *id)
 {
+	if (ctrl->subsys->cmic & (1 << 3))
+		dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
 	return 0;
 }
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)


@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
 	qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
 	if (ib_dma_mapping_error(ibdev, qe->dma)) {
 		kfree(qe->data);
+		qe->data = NULL;
 		return -ENOMEM;
 	}
 
@@ -816,6 +817,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 out_free_async_qe:
 	nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
 		sizeof(struct nvme_command), DMA_TO_DEVICE);
+	ctrl->async_event_sqe.data = NULL;
 out_free_queue:
 	nvme_rdma_free_queue(&ctrl->queues[0]);
 	return error;


@@ -80,8 +80,6 @@ struct imx6_pcie {
 #define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
 #define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP		(1 << 4)
 
 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
 #define PCIE_PHY_CTRL_DATA_LOC 0
@@ -641,12 +639,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
 	return 0;
 }
 
-static int imx6_pcie_link_up(struct dw_pcie *pci)
-{
-	return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
-			PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
-}
-
 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
 	.host_init = imx6_pcie_host_init,
 };
@@ -679,7 +671,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-	.link_up = imx6_pcie_link_up,
+	/* No special ops needed, but pcie-designware still expects this struct */
 };
 
 static int imx6_pcie_probe(struct platform_device *pdev)


@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
 	.mask_core_ready = CORE_READY_STATUS,
 	.has_pll_override = true,
 	.autoresume_en	  = BIT(0),
+	.update_tune1_with_efuse = true,
 };
 
 static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
 	/*
 	 * Read efuse register having TUNE2/1 parameter's high nibble.
-	 * If efuse register shows value as 0x0, or if we fail to find
-	 * a valid efuse register settings, then use default value
-	 * as 0xB for high nibble that we have already set while
-	 * configuring phy.
+	 * If efuse register shows value as 0x0 (indicating value is not
+	 * fused), or if we fail to find a valid efuse register setting,
+	 * then use default value for high nibble that we have already
+	 * set while configuring the phy.
 	 */
 	val = nvmem_cell_read(qphy->cell, NULL);
 	if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
 	/* Fused TUNE1/2 value is the higher nibble only */
 	if (cfg->update_tune1_with_efuse)
-		qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
-			      val[0] << 0x4);
+		qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
+				 val[0] << HSTX_TRIM_SHIFT,
+				 HSTX_TRIM_MASK);
 	else
-		qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
-			      val[0] << 0x4);
+		qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
+				 val[0] << HSTX_TRIM_SHIFT,
+				 HSTX_TRIM_MASK);
 }
 
 static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)


@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
 		 * orb specified one of the unsupported formats, we defer
 		 * checking for IDAWs in unsupported formats to here.
 		 */
-		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
+			kfree(p);
 			return -EOPNOTSUPP;
+		}
 
 		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
 			break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 
 	ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
 	if (ret < 0)
-		goto out_init;
+		goto out_unpin;
 
 	/* Translate this direct ccw to a idal ccw. */
 	idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
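
Both s390/cio fixes are error-path unwind bugs: one leaked an allocation on
the unsupported-IDA-format exit, the other jumped to the wrong cleanup label
after a failed pin. The canonical goto-unwind shape they restore, sketched
with hypothetical helpers (pin_resources(), unpin_resources(), translate(),
BUF_SIZE):

	static int setup(void)
	{
		void *buf;
		int ret;

		buf = kmalloc(BUF_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		ret = pin_resources(buf);
		if (ret < 0)
			goto out_free;		/* undo only what succeeded */

		ret = translate(buf);
		if (ret < 0)
			goto out_unpin;

		return 0;

	out_unpin:
		unpin_resources(buf);
	out_free:
		kfree(buf);
		return ret;
	}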


@@ -1455,13 +1455,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");
#ifdef CONFIG_SUSPEND
static int omap2_mcspi_suspend_noirq(struct device *dev)
static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
{
return pinctrl_pm_select_sleep_state(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
int error;
error = pinctrl_pm_select_sleep_state(dev);
if (error)
dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
__func__, error);
error = spi_master_suspend(master);
if (error)
dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
__func__, error);
return pm_runtime_force_suspend(dev);
}
static int omap2_mcspi_resume_noirq(struct device *dev)
static int __maybe_unused omap2_mcspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1472,17 +1485,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
__func__, error);
return 0;
error = spi_master_resume(master);
if (error)
dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
__func__, error);
return pm_runtime_force_resume(dev);
}
#else
#define omap2_mcspi_suspend_noirq NULL
#define omap2_mcspi_resume_noirq NULL
#endif
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
.suspend_noirq = omap2_mcspi_suspend_noirq,
.resume_noirq = omap2_mcspi_resume_noirq,
SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
omap2_mcspi_resume)
.runtime_resume = omap_mcspi_runtime_resume,
};


@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
 exit:
 	kfree(ptmp);
 
-	return 0;
+	return ret;
 }
 
 static int rtw_wx_write32(struct net_device *dev,


@@ -864,6 +864,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
 
+static void nvm_authenticate_start(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	/*
+	 * During host router NVM upgrade we should not allow root port to
+	 * go into D3cold because some root ports cannot trigger PME
+	 * itself. To be on the safe side keep the root port in D0 during
+	 * the whole upgrade process.
+	 */
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete(struct tb_switch *sw)
+{
+	struct pci_dev *root_port;
+
+	root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+	if (root_port)
+		pm_runtime_put(&root_port->dev);
+}
+
 static ssize_t nvm_authenticate_show(struct device *dev,
 	struct device_attribute *attr, char *buf)
 {
@@ -913,10 +937,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 
 		sw->nvm->authenticating = true;
 
-		if (!tb_route(sw))
+		if (!tb_route(sw)) {
+			/*
+			 * Keep root port from suspending as long as the
+			 * NVM upgrade process is running.
+			 */
+			nvm_authenticate_start(sw);
 			ret = nvm_authenticate_host(sw);
-		else
+			if (ret)
+				nvm_authenticate_complete(sw);
+		} else {
 			ret = nvm_authenticate_device(sw);
+		}
 		pm_runtime_mark_last_busy(&sw->dev);
 		pm_runtime_put_autosuspend(&sw->dev);
 	}
@@ -1336,6 +1368,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
 	if (ret <= 0)
 		return ret;
 
+	/* Now we can allow root port to suspend again */
+	if (!tb_route(sw))
+		nvm_authenticate_complete(sw);
+
 	if (status) {
 		tb_sw_info(sw, "switch flash authentication failed\n");
 		tb_switch_set_uuid(sw);
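
The pairing above is the usual runtime-PM reference pattern: take a
no-resume reference before a long-running operation so the root port cannot
drop into D3cold, then release it on every outcome, including a failure to
start. A sketch under that assumption (start_upgrade() is hypothetical; in
the real driver the matching put happens later, in tb_switch_add_dma_port()):

	static int do_upgrade(struct device *dev)
	{
		int ret;

		pm_runtime_get_noresume(dev);	/* block suspend without forcing a resume */

		ret = start_upgrade();
		if (ret)
			pm_runtime_put(dev);	/* failed to start: drop the reference now */

		/* on success, the completion path drops it instead */
		return ret;
	}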


@@ -401,12 +401,12 @@ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 	unsigned long		flags;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
-	struct usb_request	*tmp;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
 	 */
 	usb_ep_disable(link->in_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
+	while (!list_empty(&dev->tx_reqs)) {
+		req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
 	usb_ep_disable(link->out_ep);
 	spin_lock(&dev->req_lock);
-	list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+	while (!list_empty(&dev->rx_reqs)) {
+		req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
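
The loop rewrite matters because list_for_each_entry_safe() only protects
against the iterator deleting the current entry; its cached next pointer
goes stale as soon as the lock is dropped inside the body. Re-reading the
head on each pass stays safe even if other contexts queue or dequeue entries
while the lock is released. The generic shape (struct item, q, and
process() are illustrative):

	spin_lock_irqsave(&q->lock, flags);
	while (!list_empty(&q->items)) {
		struct item *it = list_first_entry(&q->items, struct item, node);

		list_del_init(&it->node);
		spin_unlock_irqrestore(&q->lock, flags);

		process(it);		/* may sleep or requeue entries */

		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);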


@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
{
return machine_is_omap_innovator()
|| machine_is_omap_osk()
|| machine_is_omap_palmte()
|| machine_is_sx1()
/* No known omap7xx boards with vbus sense */
|| cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
static int omap_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
int status = -ENODEV;
int status;
struct omap_ep *ep;
unsigned long flags;
@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
goto done;
}
} else {
status = 0;
if (can_pullup(udc))
pullup_enable(udc);
else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
static void omap_udc_release(struct device *dev)
{
complete(udc->done);
pullup_disable(udc);
if (!IS_ERR_OR_NULL(udc->transceiver)) {
usb_put_phy(udc->transceiver);
udc->transceiver = NULL;
}
omap_writew(0, UDC_SYSCON1);
remove_proc_file();
if (udc->dc_clk) {
if (udc->clk_requested)
omap_udc_enable_clock(0);
clk_put(udc->hhc_clk);
clk_put(udc->dc_clk);
}
if (udc->done)
complete(udc->done);
kfree(udc);
udc = NULL;
}
static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.name = driver_name;
udc->gadget.quirk_ep_out_aligned_size = 1;
udc->transceiver = xceiv;
/* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ static int omap_udc_probe(struct platform_device *pdev)
udc->clr_halt = UDC_RESET_EP;
/* USB general purpose IRQ: ep0, state changes, dma, etc */
status = request_irq(pdev->resource[1].start, omap_udc_irq,
0, driver_name, udc);
status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
omap_udc_irq, 0, driver_name, udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ static int omap_udc_probe(struct platform_device *pdev)
}
/* USB "non-iso" IRQ (PIO for all but ep0) */
status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
0, "omap_udc pio", udc);
status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
omap_udc_pio_irq, 0, "omap_udc pio", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[2].start, status);
goto cleanup2;
goto cleanup1;
}
#ifdef USE_ISO
status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
0, "omap_udc iso", udc);
status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
omap_udc_iso_irq, 0, "omap_udc iso", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[3].start, status);
goto cleanup3;
goto cleanup1;
}
#endif
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ static int omap_udc_probe(struct platform_device *pdev)
}
create_proc_file();
status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
omap_udc_release);
if (status)
goto cleanup4;
return 0;
cleanup4:
remove_proc_file();
#ifdef USE_ISO
cleanup3:
free_irq(pdev->resource[2].start, udc);
#endif
cleanup2:
free_irq(pdev->resource[1].start, udc);
return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
omap_udc_release);
cleanup1:
kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
{
DECLARE_COMPLETION_ONSTACK(done);
if (!udc)
return -ENODEV;
usb_del_gadget_udc(&udc->gadget);
if (udc->driver)
return -EBUSY;
udc->done = &done;
pullup_disable(udc);
if (!IS_ERR_OR_NULL(udc->transceiver)) {
usb_put_phy(udc->transceiver);
udc->transceiver = NULL;
}
omap_writew(0, UDC_SYSCON1);
usb_del_gadget_udc(&udc->gadget);
remove_proc_file();
#ifdef USE_ISO
free_irq(pdev->resource[3].start, udc);
#endif
free_irq(pdev->resource[2].start, udc);
free_irq(pdev->resource[1].start, udc);
if (udc->dc_clk) {
if (udc->clk_requested)
omap_udc_enable_clock(0);
clk_put(udc->hhc_clk);
clk_put(udc->dc_clk);
}
wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
wait_for_completion(&done);
return 0;
}


@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
kfree(resource);
}
/*
* Host memory not allocated to dom0. We can use this range for hotplug-based
* ballooning.
*
* It's a type-less resource. Setting IORESOURCE_MEM will make resource
* management algorithms (arch_remove_reservations()) look into guest e820,
* which we don't want.
*/
static struct resource hostmem_resource = {
.name = "Host RAM",
};
void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
{}
static struct resource *additional_memory_resource(phys_addr_t size)
{
struct resource *res, *res_hostmem;
int ret = -ENOMEM;
struct resource *res;
int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
if (res_hostmem) {
/* Try to grab a range from hostmem */
res_hostmem->name = "Host memory";
ret = allocate_resource(&hostmem_resource, res_hostmem,
size, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
}
if (!ret) {
/*
* Insert this resource into iomem. Because hostmem_resource
* tracks portion of guest e820 marked as UNUSABLE noone else
* should try to use it.
*/
res->start = res_hostmem->start;
res->end = res_hostmem->end;
ret = insert_resource(&iomem_resource, res);
if (ret < 0) {
pr_err("Can't insert iomem_resource [%llx - %llx]\n",
res->start, res->end);
release_memory_resource(res_hostmem);
res_hostmem = NULL;
res->start = res->end = 0;
}
}
if (ret) {
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new System RAM resource\n");
kfree(res);
return NULL;
}
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new System RAM resource\n");
kfree(res);
return NULL;
}
#ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
release_memory_resource(res_hostmem);
return NULL;
}
}
@@ -747,8 +702,6 @@ static int __init balloon_init(void)
set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);
arch_xen_balloon_init(&hostmem_resource);
#endif
#ifdef CONFIG_XEN_PV


@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	kfree(map->active.data.in);
-	kfree(map->active.ring);
+	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+	free_page((unsigned long)map->active.ring);
 	return ret;
 }
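
The fix pairs each allocator with its matching liberator: memory obtained
from the page allocator must go back through free_page()/free_pages(), never
kfree(), and the order passed to free_pages() must match the allocation. A
sketch of the corrected error path (create_rings() is a simplified stand-in
for the driver's create_active(); both free calls are no-ops on a zero
address, so the unwind is safe even when only one allocation succeeded):

	static int create_rings(void)
	{
		void *ring = (void *)__get_free_page(GFP_KERNEL);
		void *data = (void *)__get_free_pages(GFP_KERNEL, PVCALLS_RING_ORDER);

		if (!ring || !data)
			goto out_error;
		return 0;

	out_error:
		free_pages((unsigned long)data, PVCALLS_RING_ORDER);	/* not kfree(data) */
		free_page((unsigned long)ring);				/* not kfree(ring) */
		return -ENOMEM;
	}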


@@ -36,6 +36,7 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>


@@ -379,7 +379,7 @@ void afs_zap_data(struct afs_vnode *vnode)
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
time64_t now = ktime_get_real_seconds();
bool valid = false;
bool valid;
int ret;
_enter("{v={%x:%u} fl=%lx},%x",
@@ -399,15 +399,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
} else if (vnode->status.type == AFS_FTYPE_DIR &&
test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
vnode->cb_expires_at - 10 > now) {
valid = true;
} else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
vnode->cb_expires_at - 10 > now) {
(!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
vnode->cb_expires_at - 10 <= now)) {
valid = false;
} else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
vnode->cb_expires_at - 10 <= now) {
valid = false;
} else {
valid = true;
}
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
} else {
vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
}
read_sequnlock_excl(&vnode->cb_lock);


@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
 		ret = ioprio_check_cap(iocb->aio_reqprio);
 		if (ret) {
 			pr_debug("aio ioprio check cap error: %d\n", ret);
+			fput(req->ki_filp);
 			return ret;
 		}

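The leak fixed here is an early return taken after the request already holds a file reference. A minimal sketch of the ownership rule (illustrative; the surrounding aio_prep_rw() context is not shown in this hunk):

	/* Illustrative: once fget() succeeds, every exit path owns a put. */
	static int example_prep(struct kiocb *req, const struct iocb *iocb)
	{
		int ret;

		req->ki_filp = fget(iocb->aio_fildes);
		if (unlikely(!req->ki_filp))
			return -EBADF;

		ret = ioprio_check_cap(iocb->aio_reqprio);
		if (ret) {
			fput(req->ki_filp);	/* drop the reference on failure too */
			return ret;
		}
		return 0;
	}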

@@ -3344,7 +3344,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
 	kfree(m);
 }
 
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+				      struct pending_dir_move *moves,
 				      struct list_head *stack)
 {
 	if (list_empty(&moves->list)) {
@@ -3355,6 +3356,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
 		list_add_tail(&moves->list, stack);
 		list_splice_tail(&list, stack);
 	}
+	if (!RB_EMPTY_NODE(&moves->node)) {
+		rb_erase(&moves->node, &sctx->pending_dir_moves);
+		RB_CLEAR_NODE(&moves->node);
+	}
 }
 
 static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3369,7 +3374,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 		return 0;
 
 	INIT_LIST_HEAD(&stack);
-	tail_append_pending_moves(pm, &stack);
+	tail_append_pending_moves(sctx, pm, &stack);
 
 	while (!list_empty(&stack)) {
 		pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3380,7 +3385,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
 			goto out;
 		pm = get_pending_dir_moves(sctx, parent_ino);
 		if (pm)
-			tail_append_pending_moves(pm, &stack);
+			tail_append_pending_moves(sctx, pm, &stack);
 	}
 	return 0;


@@ -244,11 +244,13 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
 	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
 
-	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
+	cache->cache.ops->put_object(&xobject->fscache,
+		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
 	goto try_again;
 
 requeue:
-	cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
+	cache->cache.ops->put_object(&xobject->fscache,
+		(enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
 	_leave(" = -ETIMEDOUT");
 	return -ETIMEDOUT;
 }


@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
+				put_page(backpage);
+				backpage = NULL;
 				put_page(netpage);
+				netpage = NULL;
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
 					    netpage->index, cachefiles_gfp);
 		if (ret < 0) {
 			if (ret == -EEXIST) {
+				put_page(backpage);
+				backpage = NULL;
 				put_page(netpage);
+				netpage = NULL;
 				fscache_retrieval_complete(op, 1);
 				continue;
 			}
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
 	__releases(&object->fscache.cookie->lock)
 {
 	struct cachefiles_object *object;
-	struct cachefiles_cache *cache;
 
 	object = container_of(_object, struct cachefiles_object, fscache);
-	cache = container_of(object->fscache.cache,
-			     struct cachefiles_cache, cache);
 
 	_enter("%p,{%lu}", object, page->index);


@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
 	struct dentry *dentry = object->dentry;
 	int ret;
 
-	ASSERT(dentry);
+	if (!dentry)
+		return -ESTALE;
 
 	_enter("%p,#%d", object, auxdata->len);


@@ -423,7 +423,7 @@ bool dax_lock_mapping_entry(struct page *page)
 	for (;;) {
 		mapping = READ_ONCE(page->mapping);
 
-		if (!dax_mapping(mapping))
+		if (!mapping || !dax_mapping(mapping))
 			break;
 
 		/*


@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
 		struct dentry *parent = dget_parent(dentry);
 
 		dput(dentry);
-		if (IS_ROOT(dentry)) {
+		if (dentry == parent) {
 			dput(parent);
 			return false;
 		}


@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
 	if (awaken)
 		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
 
+	if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+		wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+
 	/* Prevent a race with our last child, which has to signal EV_CLEARED
 	 * before dropping our spinlock.


@@ -329,13 +329,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
 		nidx -= len * 8;
 		i = node->next;
-		hfs_bnode_put(node);
 		if (!i) {
 			/* panic */;
 			pr_crit("unable to free bnode %u. bmap not found!\n",
 				node->this);
+			hfs_bnode_put(node);
 			return;
 		}
+		hfs_bnode_put(node);
 		node = hfs_bnode_find(tree, i);
 		if (IS_ERR(node))
 			return;


@@ -454,14 +454,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
 		nidx -= len * 8;
 		i = node->next;
-		hfs_bnode_put(node);
 		if (!i) {
 			/* panic */;
 			pr_crit("unable to free bnode %u. "
 					"bmap not found!\n",
 				node->this);
+			hfs_bnode_put(node);
 			return;
 		}
+		hfs_bnode_put(node);
 		node = hfs_bnode_find(tree, i);
 		if (IS_ERR(node))
 			return;


@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
 	if (fh)
 		hdr->args.fh = fh;
 
-	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+	if (vers == 4 &&
+	    !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
 		goto out_failed;
 
 	/*
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
 	if (fh)
 		hdr->args.fh = fh;
 
-	if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+	if (vers == 4 &&
+	    !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
 		goto out_failed;
 
 	/*


@@ -125,10 +125,10 @@ static struct dentry *ocfs2_get_dentry(struct super_block *sb,
 
 check_gen:
 	if (handle->ih_generation != inode->i_generation) {
-		iput(inode);
 		trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
 						  handle->ih_generation,
 						  inode->i_generation);
+		iput(inode);
 		result = ERR_PTR(-ESTALE);
 		goto bail;
 	}


@@ -156,18 +156,14 @@ static int __ocfs2_move_extent(handle_t *handle,
 }
 
 /*
- * lock allocators, and reserving appropriate number of bits for
- * meta blocks and data clusters.
- *
- * in some cases, we don't need to reserve clusters, just let data_ac
- * be NULL.
+ * lock allocator, and reserve appropriate number of bits for
+ * meta blocks.
  */
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
 					struct ocfs2_extent_tree *et,
 					u32 clusters_to_move,
 					u32 extents_to_split,
 					struct ocfs2_alloc_context **meta_ac,
-					struct ocfs2_alloc_context **data_ac,
 					int extra_blocks,
 					int *credits)
 {
@@ -192,13 +188,6 @@ static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
 		goto out;
 	}
 
-	if (data_ac) {
-		ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
-		if (ret) {
-			mlog_errno(ret);
-			goto out;
-		}
-	}
-
 	*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 
@@ -257,10 +246,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
-	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
-						 &context->meta_ac,
-						 &context->data_ac,
-						 extra_blocks, &credits);
+	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+						*len, 1,
+						&context->meta_ac,
+						extra_blocks, &credits);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;
@@ -283,6 +272,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
+	/*
+	 * Make sure ocfs2_reserve_cluster is called after
+	 * __ocfs2_flush_truncate_log, otherwise, dead lock may happen.
+	 *
+	 * If ocfs2_reserve_cluster is called
+	 * before __ocfs2_flush_truncate_log, dead lock on global bitmap
+	 * may happen.
+	 *
+	 */
+	ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
+	if (ret) {
+		mlog_errno(ret);
+		goto out_unlock_mutex;
+	}
+
 	handle = ocfs2_start_trans(osb, credits);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
@@ -600,9 +604,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 		}
 	}
 
-	ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
-						 &context->meta_ac,
-						 NULL, extra_blocks, &credits);
+	ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+						len, 1,
+						&context->meta_ac,
+						extra_blocks, &credits);
 	if (ret) {
 		mlog_errno(ret);
 		goto out;


@@ -806,17 +806,14 @@ static int ramoops_probe(struct platform_device *pdev)
 	cxt->pstore.data = cxt;
 	/*
-	 * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
-	 * have to handle dumps, we must have at least record_size buffer. And
-	 * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
-	 * ZERO_SIZE_PTR).
+	 * Since bufsize is only used for dmesg crash dumps, it
+	 * must match the size of the dprz record (after PRZ header
+	 * and ECC bytes have been accounted for).
 	 */
-	if (cxt->console_size)
-		cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
-	cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
-	cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
+	cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+	cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
 	if (!cxt->pstore.buf) {
-		pr_err("cannot allocate pstore buffer\n");
+		pr_err("cannot allocate pstore crash dump buffer\n");
 		err = -ENOMEM;
 		goto fail_clear;
 	}


@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
 		}
 	}
 	brelse(bh);
-	return 0;
+	return err;
 }
 
 int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)


@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
 					      int n_pages)
 {
-	atomic_sub(n_pages, &op->n_pages);
-	if (atomic_read(&op->n_pages) <= 0)
+	if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
 		fscache_op_complete(&op->op, false);
 }

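The old pair of atomic_sub() plus atomic_read() let two callers both observe a result of zero and complete the operation twice; atomic_sub_return_relaxed() gives each caller its own unique return value. The same idea in a compilable userspace sketch with C11 atomics (illustrative only):

	#include <stdatomic.h>
	#include <stdio.h>

	int main(void)
	{
		atomic_int n_pages = 2;

		/* two completions of one page each; atomic_fetch_sub() returns the
		 * value *before* the subtraction, so exactly one caller sees the
		 * counter reach zero */
		for (int i = 0; i < 2; i++) {
			int before = atomic_fetch_sub(&n_pages, 1);

			if (before - 1 <= 0)	/* same test as atomic_sub_return() <= 0 */
				printf("op completed once, by caller %d\n", i);
		}
		return 0;
	}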

@@ -90,7 +90,10 @@ struct pstore_record {
  *
  * @buf_lock:	spinlock to serialize access to @buf
  * @buf:	preallocated crash dump buffer
- * @bufsize:	size of @buf available for crash dump writes
+ * @bufsize:	size of @buf available for crash dump bytes (must match
+ *		smallest number of bytes available for writing to a
+ *		backend entry, since compressed bytes don't take kindly
+ *		to being truncated)
  *
  * @read_mutex:	serializes @open, @read, @close, and @erase callbacks
  * @flags:	bitfield of frontends the backend can accept writes for


@@ -1355,6 +1355,17 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
 	}
 }
 
+static inline void skb_mark_not_on_list(struct sk_buff *skb)
+{
+	skb->next = NULL;
+}
+
+static inline void skb_list_del_init(struct sk_buff *skb)
+{
+	__list_del_entry(&skb->list);
+	skb_mark_not_on_list(skb);
+}
+
 /**
  *	skb_queue_empty - check if a queue is empty
  *	@list: queue head

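These helpers exist because a plain list_del() leaves skb->next pointing at list poison, while older code paths still test skb->next to decide whether more packets follow. The hazard in a compilable userspace sketch (illustrative; plain pointers instead of struct sk_buff):

	/* Illustrative doubly linked node, standing in for skb->list. */
	struct node { struct node *next, *prev; };

	static void list_del_entry(struct node *n)
	{
		n->next->prev = n->prev;
		n->prev->next = n->next;
		/* n->next and n->prev still point into the old list here,
		 * so "n->next != NULL" no longer means "more items follow" */
	}

	static void node_del_init(struct node *n)
	{
		list_del_entry(n);
		n->next = NULL;	/* same idea as skb_mark_not_on_list() */
	}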

@@ -453,6 +453,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
+	unsigned int hh_alen = 0;
 	unsigned int seq;
 	unsigned int hh_len;
 
@@ -460,16 +461,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
 		seq = read_seqbegin(&hh->hh_lock);
 		hh_len = hh->hh_len;
 		if (likely(hh_len <= HH_DATA_MOD)) {
-			/* this is inlined by gcc */
-			memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+			hh_alen = HH_DATA_MOD;
+
+			/* skb_push() would proceed silently if we have room for
+			 * the unaligned size but not for the aligned size:
+			 * check headroom explicitly.
+			 */
+			if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+				/* this is inlined by gcc */
+				memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+				       HH_DATA_MOD);
+			}
 		} else {
-			unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+			hh_alen = HH_DATA_ALIGN(hh_len);
 
-			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+			if (likely(skb_headroom(skb) >= hh_alen)) {
+				memcpy(skb->data - hh_alen, hh->hh_data,
+				       hh_alen);
+			}
 		}
 	} while (read_seqretry(&hh->hh_lock, seq));
 
-	skb_push(skb, hh_len);
+	if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+		kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	__skb_push(skb, hh_len);
 	return dev_queue_xmit(skb);
 }

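The cached header is always copied at the HH_DATA_MOD-aligned size, so an skb with room for hh_len bytes but not for HH_DATA_ALIGN(hh_len) bytes used to be written before skb->head. The arithmetic in a small userspace sketch (HH_DATA_MOD really is 16 in the kernel):

	#include <stdio.h>

	#define HH_DATA_MOD		16
	#define HH_DATA_ALIGN(len)	(((len) + HH_DATA_MOD - 1) & ~(HH_DATA_MOD - 1))

	int main(void)
	{
		unsigned int hh_len = 18;	/* e.g. VLAN-tagged Ethernet header */
		unsigned int headroom = 20;	/* fits 18, not the aligned 32 */

		printf("copy size = %u, headroom = %u -> %s\n",
		       HH_DATA_ALIGN(hh_len), headroom,
		       headroom >= HH_DATA_ALIGN(hh_len) ? "ok" : "would underflow");
		return 0;
	}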

@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
 		       const struct nf_nat_range2 *range,
 		       const struct net_device *out);
 
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
 void nf_nat_masquerade_ipv4_unregister_notifier(void);
 
 #endif /*_NF_NAT_MASQUERADE_IPV4_H_ */


@@ -5,7 +5,7 @@
 unsigned int
 nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
 		       const struct net_device *out);
 
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
 void nf_nat_masquerade_ipv6_unregister_notifier(void);
 
 #endif /* _NF_NAT_MASQUERADE_IPV6_H_ */


@@ -2075,6 +2075,8 @@ struct sctp_association {
 
 	__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
 	__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+	struct rcu_head rcu;
 };


@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif


@@ -291,16 +291,6 @@ static int __init do_reset(void)
 	return 1;
 }
 
-static int __init maybe_link(void)
-{
-	if (nlink >= 2) {
-		char *old = find_link(major, minor, ino, mode, collected);
-		if (old)
-			return (ksys_link(old, collected) < 0) ? -1 : 1;
-	}
-	return 0;
-}
-
 static void __init clean_path(char *path, umode_t fmode)
 {
 	struct kstat st;
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
 	}
 }
 
+static int __init maybe_link(void)
+{
+	if (nlink >= 2) {
+		char *old = find_link(major, minor, ino, mode, collected);
+		if (old) {
+			clean_path(collected, 0);
+			return (ksys_link(old, collected) < 0) ? -1 : 1;
+		}
+	}
+	return 0;
+}
+
 static __initdata int wfd;
 
 static int __init do_name(void)


@@ -138,7 +138,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
 		return -ENOENT;
 
 	new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
-			   map->value_size, __GFP_ZERO | GFP_USER,
+			   map->value_size,
+			   __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
 			   map->numa_node);
 	if (!new)
 		return -ENOMEM;


@@ -5283,7 +5283,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
 		return;
 	/* NOTE: fake 'exit' subprog should be updated as well. */
 	for (i = 0; i <= env->subprog_cnt; i++) {
-		if (env->subprog_info[i].start < off)
+		if (env->subprog_info[i].start <= off)
 			continue;
 		env->subprog_info[i].start += len - 1;
 	}

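When one instruction at 'off' is patched into 'len' instructions, the instruction at 'off' becomes the first of the new run, so a subprogram starting exactly at 'off' keeps its start and only starts beyond 'off' shift by len - 1. The corrected boundary in a userspace sketch (illustrative only):

	/* Illustrative: shift subprog starts after an insn patch at 'off'. */
	static void adjust_starts(unsigned int *start, int cnt,
				  unsigned int off, unsigned int len)
	{
		for (int i = 0; i < cnt; i++) {
			if (start[i] <= off)	/* '<' here wrongly moved a start at 'off' */
				continue;
			start[i] += len - 1;
		}
	}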

@@ -56,7 +56,7 @@ struct kcov {
 	struct task_struct	*t;
 };
 
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
 	unsigned int mode;
 
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 	return mode == needed_mode;
 }
 
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
 {
 #ifdef CONFIG_RANDOMIZE_BASE
 	ip -= kaslr_offset();


@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
 			i++;
 		} else if (fmt[i] == 'p' || fmt[i] == 's') {
 			mod[fmt_cnt]++;
-			i++;
-			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+			/* disallow any further format extensions */
+			if (fmt[i + 1] != 0 &&
+			    !isspace(fmt[i + 1]) &&
+			    !ispunct(fmt[i + 1]))
 				return -EINVAL;
 			fmt_cnt++;
-			if (fmt[i - 1] == 's') {
+			if (fmt[i] == 's') {
 				if (str_seen)
 					/* allow only one '%s' per fmt string */
 					return -EINVAL;

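After a %p or %s specifier the next byte must end the string, be whitespace, or be punctuation, which rejects unsupported extensions such as "%pK". Just that check in a userspace sketch (illustrative; the real function handles more cases around it):

	#include <ctype.h>
	#include <stdbool.h>

	/* fmt[i] is the 'p' or 's' of the specifier being examined */
	static bool spec_ok(const char *fmt, int i)
	{
		char next = fmt[i + 1];

		return next == 0 || isspace((unsigned char)next) ||
		       ispunct((unsigned char)next);
	}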

@@ -135,7 +135,6 @@ static void fill_pool(void)
 		if (!new)
 			return;
 
-		kmemleak_ignore(new);
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		hlist_add_head(&new->node, &obj_pool);
 		debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
 		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
 		if (!obj)
 			goto free;
-		kmemleak_ignore(obj);
 		hlist_add_head(&obj->node, &objects);
 	}
 
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
 
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
-				      SLAB_DEBUG_OBJECTS, NULL);
+				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+				      NULL);
 
 	if (!obj_cache || debug_objects_replace_static_objects()) {
 		debug_objects_enabled = 0;


@@ -5745,8 +5745,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
 					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
+	int zone_idx = zone_idx(zone) + 1;
 
-	pgdat->nr_zones = zone_idx(zone) + 1;
+	if (zone_idx > pgdat->nr_zones)
+		pgdat->nr_zones = zone_idx;
 
 	zone->zone_start_pfn = zone_start_pfn;

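pgdat->nr_zones is a high-water mark over the node's initialized zones; assigning zone_idx + 1 unconditionally let a later initialization of a lower zone shrink it and hide higher zones from the zone iterators. The guarded update in miniature (illustrative userspace sketch):

	/* Illustrative: nr_zones may only grow, never shrink. */
	static void init_zone(int *nr_zones, int zone_idx)
	{
		if (zone_idx + 1 > *nr_zones)
			*nr_zones = zone_idx + 1;
	}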

@@ -2161,6 +2161,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
 	return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+			   struct xps_dev_maps *dev_maps,
+			   bool is_rxqs_map)
+{
+	if (is_rxqs_map) {
+		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+		RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+	} else {
+		RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	}
+	static_key_slow_dec_cpuslocked(&xps_needed);
+	kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 			   struct xps_dev_maps *dev_maps, unsigned int nr_ids,
 			   u16 offset, u16 count, bool is_rxqs_map)
@@ -2172,18 +2186,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
 	     j < nr_ids;)
 		active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
 					       count);
-	if (!active) {
-		if (is_rxqs_map) {
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		} else {
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
-			for (i = offset + (count - 1); count--; i--)
-				netdev_queue_numa_node_write(
-					netdev_get_tx_queue(dev, i),
-					NUMA_NO_NODE);
+	if (!is_rxqs_map) {
+		for (i = offset + (count - 1); count--; i--) {
+			netdev_queue_numa_node_write(
+				netdev_get_tx_queue(dev, i),
+				NUMA_NO_NODE);
 		}
-		kfree_rcu(dev_maps, rcu);
 	}
 }
 
@@ -2220,10 +2231,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
 		       false);
 
 out_no_maps:
-	if (static_key_enabled(&xps_rxqs_needed))
-		static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-	static_key_slow_dec_cpuslocked(&xps_needed);
 	mutex_unlock(&xps_map_mutex);
 	cpus_read_unlock();
 }
@@ -2341,9 +2348,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	if (!new_dev_maps)
 		goto out_no_new_maps;
 
-	static_key_slow_inc_cpuslocked(&xps_needed);
-	if (is_rxqs_map)
-		static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	if (!dev_maps) {
+		/* Increment static keys at most once per type */
+		static_key_slow_inc_cpuslocked(&xps_needed);
+		if (is_rxqs_map)
+			static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+	}
 
 	for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
 	     j < nr_ids;) {
@@ -2441,13 +2451,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	}
 
 	/* free map if not active */
-	if (!active) {
-		if (is_rxqs_map)
-			RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-		else
-			RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-
-		kfree_rcu(dev_maps, rcu);
-	}
+	if (!active)
+		reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
 	mutex_unlock(&xps_map_mutex);
@@ -4981,7 +4986,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
 		struct net_device *orig_dev = skb->dev;
 		struct packet_type *pt_prev = NULL;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
 		if (!pt_prev)
 			continue;
@@ -5137,7 +5142,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 	INIT_LIST_HEAD(&sublist);
 	list_for_each_entry_safe(skb, next, head, list) {
 		net_timestamp_check(netdev_tstamp_prequeue, skb);
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		if (!skb_defer_rx_timestamp(skb))
 			list_add_tail(&skb->list, &sublist);
 	}
@@ -5148,7 +5153,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 	rcu_read_lock();
 	list_for_each_entry_safe(skb, next, head, list) {
 		xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
 			list_add_tail(&skb->list, &sublist);
 	}
@@ -5167,7 +5172,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 		if (cpu >= 0) {
 			/* Will be handled, remove from list */
-			list_del(&skb->list);
+			skb_list_del_init(skb);
 			enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 		}
 	}


@@ -3730,6 +3730,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
 {
 	int err;
 
+	if (dev->type != ARPHRD_ETHER)
+		return -EINVAL;
+
 	netif_addr_lock_bh(dev);
 	err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
 	if (err)


@@ -513,6 +513,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	struct rb_node *rbn;
 	int len;
 	int ihlen;
+	int delta;
 	int err;
 	u8 ecn;
 
@@ -554,10 +555,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	if (len > 65535)
 		goto out_oversize;
 
+	delta = - head->truesize;
+
 	/* Head of list must not be cloned. */
 	if (skb_unclone(head, GFP_ATOMIC))
 		goto out_nomem;
 
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(qp->q.net, delta);
+
 	/* If the first fragment is fragmented itself, we split
 	 * it to two chunks: the first with data and paged part
 	 * and the second, holding only fragments. */

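skb_unclone() can reallocate head's data and change head->truesize, so the fragment memory accounting has to absorb the difference. The before/after delta pattern in miniature (illustrative userspace sketch):

	/* Illustrative: charge only the size change, not the whole buffer. */
	static void uncloned(int *accounted, int *truesize)
	{
		int delta = -*truesize;		/* old size, negated */

		*truesize += 512;		/* stand-in for skb_unclone() growing the skb */

		delta += *truesize;		/* delta = new - old */
		if (delta)
			*accounted += delta;	/* add_frag_mem_limit() in the kernel */
	}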

@@ -551,7 +551,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -598,7 +598,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip_rcv_core(skb, net);
 		if (skb == NULL)
 			continue;


@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
 	int ret;
 
 	ret = xt_register_target(&masquerade_tg_reg);
+	if (ret)
+		return ret;
 
-	if (ret == 0)
-		nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		xt_unregister_target(&masquerade_tg_reg);
 
 	return ret;
 }


@@ -131,28 +131,50 @@ static struct notifier_block masq_inet_notifier = {
 	.notifier_call	= masq_inet_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv4_register_notifier(void)
+int nf_nat_masquerade_ipv4_register_notifier(void)
 {
+	int ret = 0;
+
+	mutex_lock(&masq_mutex);
 	/* check if the notifier was already set */
-	if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-		return;
+	if (++masq_refcnt > 1)
+		goto out_unlock;
 
 	/* Register for device down reports */
-	register_netdevice_notifier(&masq_dev_notifier);
+	ret = register_netdevice_notifier(&masq_dev_notifier);
+	if (ret)
+		goto err_dec;
 	/* Register IP address change reports */
-	register_inetaddr_notifier(&masq_inet_notifier);
+	ret = register_inetaddr_notifier(&masq_inet_notifier);
+	if (ret)
+		goto err_unregister;
+
+	mutex_unlock(&masq_mutex);
+	return ret;
+
+err_unregister:
+	unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+	masq_refcnt--;
+out_unlock:
+	mutex_unlock(&masq_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
 
 void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
+	mutex_lock(&masq_mutex);
 	/* check if the notifier still has clients */
-	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-		return;
+	if (--masq_refcnt > 0)
+		goto out_unlock;
 
 	unregister_netdevice_notifier(&masq_dev_notifier);
 	unregister_inetaddr_notifier(&masq_inet_notifier);
+out_unlock:
+	mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);

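The conversion trades the atomic counter for a plain integer under a mutex, so the new error unwinding (unregister, then undo the increment) cannot interleave with a concurrent register or unregister. The shape of the pattern in a compilable userspace sketch (illustrative; register_stub() stands in for the notifier calls):

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int refcnt;

	static int register_stub(void) { return 0; }	/* stand-in */

	static int do_register(void)
	{
		int ret = 0;

		pthread_mutex_lock(&lock);
		if (++refcnt > 1)
			goto out;		/* already registered by someone else */

		ret = register_stub();
		if (ret)
			refcnt--;		/* undo on failure, still under the lock */
	out:
		pthread_mutex_unlock(&lock);
		return ret;
	}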

@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
 	if (ret < 0)
 		return ret;
 
-	nf_nat_masquerade_ipv4_register_notifier();
+	ret = nf_nat_masquerade_ipv4_register_notifier();
+	if (ret)
+		nft_unregister_expr(&nft_masq_ipv4_type);
 
 	return ret;
 }


@@ -1885,7 +1885,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-				 bool *is_cwnd_limited, u32 max_segs)
+				 bool *is_cwnd_limited,
+				 bool *is_rwnd_limited,
+				 u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
@@ -1893,9 +1895,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	struct sk_buff *head;
 	int win_divisor;
 
-	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-		goto send_now;
-
 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
@@ -1954,10 +1953,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
 
-	/* Ok, it looks like it is advisable to defer. */
+	/* Ok, it looks like it is advisable to defer.
+	 * Three cases are tracked :
+	 * 1) We are cwnd-limited
+	 * 2) We are rwnd-limited
+	 * 3) We are application limited.
+	 */
+	if (cong_win < send_win) {
+		if (cong_win <= skb->len) {
+			*is_cwnd_limited = true;
+			return true;
+		}
+	} else {
+		if (send_win <= skb->len) {
+			*is_rwnd_limited = true;
+			return true;
+		}
+	}
 
-	if (cong_win < send_win && cong_win <= skb->len)
-		*is_cwnd_limited = true;
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		goto send_now;
 
 	return true;
@@ -2321,7 +2337,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		} else {
 			if (!push_one &&
 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-						 max_segs))
+						 &is_rwnd_limited, max_segs))
 				break;
 		}
@@ -2459,15 +2475,18 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 	}
 	skb = skb_rb_last(&sk->tcp_rtx_queue);
+	if (unlikely(!skb)) {
+		WARN_ONCE(tp->packets_out,
+			  "invalid inflight: %u state %u cwnd %u mss %d\n",
+			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+		inet_csk(sk)->icsk_pending = 0;
+		return;
+	}
 
 	/* At most one outstanding TLP retransmission. */
 	if (tp->tlp_high_seq)
 		goto rearm_timer;
 
-	/* Retransmit last segment. */
-	if (WARN_ON(!skb))
-		goto rearm_timer;
-
 	if (skb_still_in_host_queue(sk, skb))
 		goto rearm_timer;

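The new classification charges a deferral to whichever window is actually the binding constraint: if the congestion window is the smaller one and cannot fit the skb we are cwnd-limited, otherwise if the send window cannot fit it we are rwnd-limited. The decision in isolation (illustrative userspace sketch):

	#include <stdbool.h>

	static bool defer_blocked(unsigned int cong_win, unsigned int send_win,
				  unsigned int skb_len,
				  bool *cwnd_limited, bool *rwnd_limited)
	{
		if (cong_win < send_win) {
			if (cong_win <= skb_len) {
				*cwnd_limited = true;	/* congestion window is the cap */
				return true;
			}
		} else {
			if (send_win <= skb_len) {
				*rwnd_limited = true;	/* receiver window is the cap */
				return true;
			}
		}
		return false;	/* application limited; neither counter charged */
	}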

@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
 	list_for_each_entry_safe(skb, next, head, list) {
 		struct dst_entry *dst;
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		/* if ingress device is enslaved to an L3 master device pass the
 		 * skb to its handler for processing
 		 */
@@ -295,7 +295,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
 		struct net_device *dev = skb->dev;
 		struct net *net = dev_net(dev);
 
-		list_del(&skb->list);
+		skb_list_del_init(skb);
 		skb = ip6_rcv_core(skb, dev, net);
 		if (skb == NULL)
 			continue;

Some files were not shown because too many files have changed in this diff Show more