Merge android-4.19.54 (237d383) into msm-4.19
* refs/heads/tmp-237d383:
  Linux 4.19.54
  Abort file_remove_privs() for non-reg. files
  coredump: fix race condition between collapse_huge_page() and core dumping
  ocfs2: fix error path kobject memory leak
  mlxsw: spectrum: Prevent force of 56G
  scsi: libsas: delete sas port if expander discover failed
  scsi: scsi_dh_alua: Fix possible null-ptr-deref
  scsi: smartpqi: properly set both the DMA mask and the coherent DMA mask
  scsi: libcxgbi: add a check for NULL pointer in cxgbi_check_route()
  net: phy: dp83867: Set up RGMII TX delay
  net: phylink: ensure consistent phy interface mode
  net: sh_eth: fix mdio access in sh_eth_close() for R-Car Gen2 and RZ/A1 SoCs
  arm64: use the correct function type for __arm64_sys_ni_syscall
  arm64: use the correct function type in SYSCALL_DEFINE0
  arm64: fix syscall_fn_t type
  KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
  KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
  xenbus: Avoid deadlock during suspend due to open transactions
  xen/pvcalls: Remove set but not used variable
  ia64: fix build errors by exporting paddr_to_nid()
  perf record: Fix s390 missing module symbol and warning for non-root users
  perf namespace: Protect reading thread's namespace
  perf data: Fix 'strncat may truncate' build failure with recent gcc
  configfs: Fix use-after-free when accessing sd->s_dentry
  ALSA: hda - Force polling mode on CNL for fixing codec communication
  i2c: dev: fix potential memory leak in i2cdev_ioctl_rdwr
  net: aquantia: fix LRO with FCS error
  net: aquantia: tx clean budget logic error
  drm/etnaviv: lock MMU while dumping core
  ACPI/PCI: PM: Add missing wakeup.flags.valid checks
  net: tulip: de4x5: Drop redundant MODULE_DEVICE_TABLE()
  net: stmmac: update rx tail pointer register to fix rx dma hang issue.
  gpio: fix gpio-adp5588 build errors
  perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
  perf/ring_buffer: Add ordering to rb->nest increment
  perf/ring_buffer: Fix exposing a temporarily decreased data_head
  x86/CPU/AMD: Don't force the CPB cap when running under a hypervisor
  mISDN: make sure device name is NUL terminated
  usb: xhci: Fix a potential null pointer dereference in xhci_debugfs_create_endpoint()
  powerpc/powernv: Return for invalid IMC domain
  clk: ti: clkctrl: Fix clkdm_clk handling
  selftests: netfilter: missing error check when setting up veth interface
  ipvs: Fix use-after-free in ip_vs_in
  netfilter: nf_queue: fix reinject verdict handling
  perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
  Staging: vc04_services: Fix a couple error codes
  net: mvpp2: prs: Use the correct helpers when removing all VID filters
  net: mvpp2: prs: Fix parser range for VID filtering
  net/mlx5: Avoid reloading already removed devices
  vsock/virtio: set SOCK_DONE on peer shutdown
  tipc: purge deferredq list for each grp member in tipc_group_delete
  sunhv: Fix device naming inconsistency between sunhv_console and sunhv_reg
  sctp: Free cookie before we memdup a new one
  nfc: Ensure presence of required attributes in the deactivate_target handler
  net: openvswitch: do not free vport if register_netdevice() is failed.
  net: dsa: rtl8366: Fix up VLAN filtering
  neigh: fix use-after-free read in pneigh_get_next
  lapb: fixed leak of control-blocks.
  ipv6: flowlabel: fl6_sock_lookup() must use atomic_inc_not_zero
  hv_netvsc: Set probe mode to sync
  be2net: Fix number of Rx queues used for flow hashing
  ax25: fix inconsistent lock state in ax25_destroy_timer
  ANDROID: kernel: cgroup: cpuset: Clear cpus_requested for empty buf
  ANDROID: kernel: cgroup: cpuset: Add missing allocation of cpus_requested in alloc_trial_cpuset

Conflicts:
	fs/configfs/dir.c

Change-Id: Ib11d19d5a6926562356aa66315b03a7a0f7b9640
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
commit 5c7f2dd744

65 changed files with 321 additions and 162 deletions
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 53
+SUBLEVEL = 54
 EXTRAVERSION =
 NAME = "People's Front"

@@ -49,7 +49,7 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
 
 asmlinkage long sys_ni_syscall(void);
 
-SYSCALL_DEFINE0(ni_syscall)
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
 {
 	return sys_ni_syscall();
 }

@@ -133,13 +133,6 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
 	return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
 }
 
-asmlinkage long sys_ni_syscall(void);
-
-COMPAT_SYSCALL_DEFINE0(ni_syscall)
-{
-	return sys_ni_syscall();
-}
-
 #undef __SYSCALL
 #define __SYSCALL(nr, sym)	asmlinkage long __arm64_##sym(const struct pt_regs *);
 #include <asm/unistd32.h>

@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
 
 	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
 }
+EXPORT_SYMBOL(paddr_to_nid);
 
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
 /*

@@ -299,6 +299,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
 	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC

@@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
 	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
 #endif
 
 	return kvm->arch.kvm_ops->init_vm(kvm);

@@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
@@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
-	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
@@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,

@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		if (rtas_name_matches(d->handler->name, name)) {
@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
 	bool found;
 	int i;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
 		if (d->token == token)
@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&args, argp, sizeof(args)))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);
 
 	if (args.token)
 		rc = rtas_token_define(kvm, args.name, args.token);
 	else
 		rc = rtas_token_undefine(kvm, args.name);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);
 
 	return rc;
 }
@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	orig_rets = args.rets;
 	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
 
 	if (rc == 0) {
 		args.rets = orig_rets;
@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
-
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		list_del(&d->list);
 		kfree(d);

@@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 	struct imc_pmu *pmu_ptr;
 	u32 offset;
 
+	/* Return for unknown domain */
+	if (domain < 0)
+		return -EINVAL;
+
 	/* memory for pmu */
 	pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
 	if (!pmu_ptr)

@@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	EVENT_CONSTRAINT_END
 };
 
@@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END
@@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 
 struct event_constraint intel_slm_pebs_event_constraints[] = {
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END
@@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };
 
@@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };
 
@@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),    /* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
@@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 struct event_constraint intel_skl_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),		      /* MEM_TRANS_RETIRED.* */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */

@@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
 
-	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
-	if (!cpu_has(c, X86_FEATURE_CPB))
+	/*
+	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
+	 * Always set it, except when running under a hypervisor.
+	 */
+	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
 
@@ -948,8 +948,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
 	u32 sys_target = acpi_target_system_state();
 	int ret, state;
 
-	if (!pm_runtime_suspended(dev) || !adev ||
-	    device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
+	if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
+	    device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
 		return true;
 
 	if (sys_target == ACPI_STATE_S0)

@@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
 	int ret;
 	union omap4_timeout timeout = { 0 };
 
-	if (!clk->enable_bit)
-		return 0;
-
 	if (clk->clkdm) {
 		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
 		if (ret) {
@@ -151,6 +148,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
 		}
 	}
 
+	if (!clk->enable_bit)
+		return 0;
+
 	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
 
 	val &= ~OMAP4_MODULEMODE_MASK;
@@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
 	union omap4_timeout timeout = { 0 };
 
 	if (!clk->enable_bit)
-		return;
+		goto exit;
 
 	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
 
@@ -784,6 +784,7 @@ config GPIO_ADP5588
 config GPIO_ADP5588_IRQ
 	bool "Interrupt controller support for ADP5588"
 	depends on GPIO_ADP5588=y
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to enable the adp5588 to be used as an interrupt
 	  controller. It requires the driver to be built in the kernel.

@@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 		return;
 	etnaviv_dump_core = false;
 
+	mutex_lock(&gpu->mmu->lock);
+
 	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
 
 	/* We always dump registers, mmu, ring and end marker */
@@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
 			       PAGE_KERNEL);
 	if (!iter.start) {
+		mutex_unlock(&gpu->mmu->lock);
 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
 		return;
 	}
@@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 			 obj->base.size);
 	}
 
+	mutex_unlock(&gpu->mmu->lock);
+
 	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
 
 	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);

@@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
 			    msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
 			    msgs[i].len < msgs[i].buf[0] +
 					     I2C_SMBUS_BLOCK_MAX) {
+				i++;
 				res = -EINVAL;
 				break;
 			}

@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 			memcpy(di.channelmap, dev->channelmap,
 			       sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev_name(&dev->dev));
+			strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else
@@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 			memcpy(di.channelmap, dev->channelmap,
 			       sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev_name(&dev->dev));
+			strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else
@@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 				err = -EFAULT;
 				break;
 			}
+			dn.name[sizeof(dn.name) - 1] = '\0';
 			dev = get_mdevice(dn.id);
 			if (dev)
 				err = device_rename(&dev->dev, dn.name);

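A note on the strscpy() conversions above: strcpy() never guarantees NUL termination if the source fills the destination, which is exactly the bug the patch closes. The sketch below models the contract in plain C11 — bounded_copy() is a hypothetical userspace stand-in for the kernel's strscpy(), not the kernel implementation:

#include <stdio.h>
#include <string.h>

/* Hypothetical analogue of strscpy(): copy at most size - 1 bytes,
 * always NUL-terminate, report truncation with -1 (kernel: -E2BIG). */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);

	if (size == 0)
		return -1;
	if (len >= size) {			/* source does not fit */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;
	}
	memcpy(dst, src, len + 1);		/* includes the NUL */
	return (long)len;
}

int main(void)
{
	char name[8];

	bounded_copy(name, "mISDN-very-long-device-name", sizeof(name));
	printf("%s\n", name);	/* prints "mISDN-v", always terminated */
	return 0;
}

The companion hunk that NUL-terminates dn.name covers the opposite direction: a name copied in from user space.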
@@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 	struct rtl8366_vlan_4k vlan4k;
 	int ret;
 
-	if (!smi->ops->is_vlan_valid(smi, port))
+	/* Use VLAN nr port + 1 since VLAN0 is not valid */
+	if (!smi->ops->is_vlan_valid(smi, port + 1))
 		return -EINVAL;
 
 	dev_info(smi->dev, "%s filtering on port %d\n",
@@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 	 * The hardware support filter ID (FID) 0..7, I have no clue how to
 	 * support this in the driver when the callback only says on/off.
 	 */
-	ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
+	ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
 	if (ret)
 		return ret;
 
 	/* Just set the filter to FID 1 for now then */
-	ret = rtl8366_set_vlan(smi, port,
+	ret = rtl8366_set_vlan(smi, port + 1,
 			       vlan4k.member,
 			       vlan4k.untag,
 			       1);

@@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
 bool aq_ring_tx_clean(struct aq_ring_s *self)
 {
 	struct device *dev = aq_nic_get_dev(self->aq_nic);
-	unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
+	unsigned int budget;
 
-	for (; self->sw_head != self->hw_head && budget--;
-	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
+	     budget && self->sw_head != self->hw_head; budget--) {
 		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
 
 		if (likely(buff->is_mapped)) {
@@ -167,6 +167,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
 
 		buff->pa = 0U;
 		buff->eop_index = 0xffffU;
+		self->sw_head = aq_ring_next_dx(self, self->sw_head);
 	}
 
 	return !!budget;

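The rewritten loop above decrements the budget once per processed descriptor and only advances sw_head after the buffer is released, so `!!budget` reliably reports whether the ring was fully cleaned. A minimal userspace model of that control flow (ring size, budget value, and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	8
#define CLEAN_BUDGET	4

static unsigned int ring_next(unsigned int i)
{
	return (i + 1) % RING_SIZE;
}

/* Returns true when the budget was not exhausted, i.e. nothing is left. */
static bool ring_clean(unsigned int *sw_head, unsigned int hw_head)
{
	unsigned int budget;

	for (budget = CLEAN_BUDGET; budget && *sw_head != hw_head; budget--) {
		/* ... release the buffer at *sw_head ... */
		*sw_head = ring_next(*sw_head);	/* advance only after processing */
	}
	return !!budget;
}

int main(void)
{
	unsigned int sw_head = 0;

	/* six pending entries, budget of four: returns 0, sw_head ends at 4 */
	printf("done=%d sw_head=%u\n", ring_clean(&sw_head, 6), sw_head);
	return 0;
}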
@@ -695,38 +695,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 		if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
 			/* MAC error or DMA error */
 			buff->is_error = 1U;
-		} else {
-			if (self->aq_nic_cfg->is_rss) {
-				/* last 4 byte */
-				u16 rss_type = rxd_wb->type & 0xFU;
+		}
+		if (self->aq_nic_cfg->is_rss) {
+			/* last 4 byte */
+			u16 rss_type = rxd_wb->type & 0xFU;
 
-				if (rss_type && rss_type < 0x8U) {
-					buff->is_hash_l4 = (rss_type == 0x4 ||
-					rss_type == 0x5);
-					buff->rss_hash = rxd_wb->rss_hash;
-				}
+			if (rss_type && rss_type < 0x8U) {
+				buff->is_hash_l4 = (rss_type == 0x4 ||
+						    rss_type == 0x5);
+				buff->rss_hash = rxd_wb->rss_hash;
 			}
+		}
 
-			if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-				buff->len = rxd_wb->pkt_len %
-					AQ_CFG_RX_FRAME_MAX;
-				buff->len = buff->len ?
-					buff->len : AQ_CFG_RX_FRAME_MAX;
-				buff->next = 0U;
-				buff->is_eop = 1U;
+		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+			buff->len = rxd_wb->pkt_len %
+				    AQ_CFG_RX_FRAME_MAX;
+			buff->len = buff->len ?
+				    buff->len : AQ_CFG_RX_FRAME_MAX;
+			buff->next = 0U;
+			buff->is_eop = 1U;
+		} else {
+			buff->len =
+				rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
+				AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
+
+			if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+			    rxd_wb->status) {
+				/* LRO */
+				buff->next = rxd_wb->next_desc_ptr;
+				++ring->stats.rx.lro_packets;
 			} else {
-				if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
-					rxd_wb->status) {
-					/* LRO */
-					buff->next = rxd_wb->next_desc_ptr;
-					++ring->stats.rx.lro_packets;
-				} else {
-					/* jumbo */
-					buff->next =
-						aq_ring_next_dx(ring,
-								ring->hw_head);
-					++ring->stats.rx.jumbo_packets;
-				}
+				/* jumbo */
+				buff->next =
+					aq_ring_next_dx(ring,
+							ring->hw_head);
+				++ring->stats.rx.jumbo_packets;
 			}
 		}
 	}

@@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = {
 	.remove  = de4x5_eisa_remove,
   }
 };
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
 #endif
 
 #ifdef CONFIG_PCI

@@ -1105,7 +1105,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
 		break;
 	case ETHTOOL_GRXRINGS:
-		cmd->data = adapter->num_rx_qs - 1;
+		cmd->data = adapter->num_rx_qs;
 		break;
 	default:
 		return -EINVAL;

@@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
 }
 
 /* Find tcam entry with matched pair <vid,port> */
-static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
-				    u16 mask)
+static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
 {
 	unsigned char byte[2], enable[2];
 	struct mvpp2_prs_entry pe;
@@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
 	int tid;
 
 	/* Go through the all entries with MVPP2_PRS_LU_VID */
-	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
-	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
-		if (!priv->prs_shadow[tid].valid ||
-		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+		if (!port->priv->prs_shadow[tid].valid ||
+		    port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
 			continue;
 
-		mvpp2_prs_init_from_hw(priv, &pe, tid);
+		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
 
 		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
 		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
@@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
 	memset(&pe, 0, sizeof(pe));
 
 	/* Scan TCAM and see if entry with this <vid,port> already exist */
-	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
+	tid = mvpp2_prs_vid_range_find(port, vid, mask);
 
 	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
 	if (reg_val & MVPP2_DSA_EXTENDED)
@@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
 	int tid;
 
 	/* Scan TCAM and see if entry with this <vid,port> already exist */
-	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
+	tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
 
 	/* No such entry */
 	if (tid < 0)
@@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
 
 	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
 	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
-		if (priv->prs_shadow[tid].valid)
-			mvpp2_prs_vid_entry_remove(port, tid);
+		if (priv->prs_shadow[tid].valid) {
+			mvpp2_prs_hw_inv(priv, tid);
+			priv->prs_shadow[tid].valid = false;
+		}
 	}
 }

@@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
 }
 EXPORT_SYMBOL(mlx5_unregister_interface);
 
+/* Must be called with intf_mutex held */
+static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_interface *intf;
+	bool found = false;
+
+	list_for_each_entry(intf, &intf_list, list) {
+		if (intf->protocol == protocol) {
+			dev_ctx = mlx5_get_device(intf, &mdev->priv);
+			if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+				found = true;
+			break;
+		}
+	}
+
+	return found;
+}
+
 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
 {
 	mutex_lock(&mlx5_intf_mutex);
-	mlx5_remove_dev_by_protocol(mdev, protocol);
-	mlx5_add_dev_by_protocol(mdev, protocol);
+	if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
+		mlx5_remove_dev_by_protocol(mdev, protocol);
+		mlx5_add_dev_by_protocol(mdev, protocol);
+	}
 	mutex_unlock(&mlx5_intf_mutex);
 }

@@ -2488,6 +2488,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
 
 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+	if (!autoneg && cmd->base.speed == SPEED_56000) {
+		netdev_err(dev, "56G not supported with autoneg off\n");
+		return -EINVAL;
+	}
 	eth_proto_new = autoneg ?
 		mlxsw_sp_to_ptys_advert_link(cmd) :
 		mlxsw_sp_to_ptys_speed(cmd->base.speed);

@@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	sh_eth_get_stats(ndev);
 	mdp->cd->soft_reset(ndev);
 
+	/* Set the RMII mode again if required */
+	if (mdp->cd->rmiimode)
+		sh_eth_write(ndev, 0x1, RMIIMODE);
+
 	/* Set MAC address again */
 	update_mac_address(ndev);
 }

@@ -3319,6 +3319,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
 	}
 	rx_q->dirty_rx = entry;
+	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
 }
 
 /**

@@ -2405,7 +2405,7 @@ static struct hv_driver netvsc_drv = {
 	.probe = netvsc_probe,
 	.remove = netvsc_remove,
 	.driver = {
-		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+		.probe_type = PROBE_FORCE_SYNCHRONOUS,
 	},
 };

@@ -260,10 +260,8 @@ static int dp83867_config_init(struct phy_device *phydev)
 		ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
 		if (ret)
 			return ret;
-	}
 
-	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
-	    (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
+		/* Set up RGMII delays */
 		val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
 
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)

@@ -54,6 +54,10 @@ struct phylink {
 
 	/* The link configuration settings */
 	struct phylink_link_state link_config;
+
+	/* The current settings */
+	phy_interface_t cur_interface;
+
 	struct gpio_desc *link_gpio;
 	struct timer_list link_poll;
 	void (*get_fixed_state)(struct net_device *dev,
@@ -477,12 +481,12 @@ static void phylink_resolve(struct work_struct *w)
 		if (!link_state.link) {
 			netif_carrier_off(ndev);
 			pl->ops->mac_link_down(ndev, pl->link_an_mode,
-					       pl->phy_state.interface);
+					       pl->cur_interface);
 			netdev_info(ndev, "Link is Down\n");
 		} else {
+			pl->cur_interface = link_state.interface;
 			pl->ops->mac_link_up(ndev, pl->link_an_mode,
-					     pl->phy_state.interface,
-					     pl->phydev);
+					     pl->cur_interface, pl->phydev);
 
 			netif_carrier_on(ndev);
 

@@ -625,7 +625,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 	if (!adev || !acpi_device_power_manageable(adev))
 		return false;
 
-	if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
+	if (adev->wakeup.flags.valid &&
+	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
 		return true;
 
 	if (acpi_target_system_state() == ACPI_STATE_S0)

@@ -641,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
 
 	if (ndev->flags & IFF_LOOPBACK) {
 		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+		if (!ndev) {
+			err = -ENETUNREACH;
+			goto rel_neigh;
+		}
 		mtu = ndev->mtu;
 		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
 			n->dev->name, ndev->name, mtu);

@@ -1173,10 +1173,8 @@ static int __init alua_init(void)
 	int r;
 
 	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
-	if (!kaluad_wq) {
-		/* Temporary failure, bypass */
-		return SCSI_DH_DEV_TEMP_BUSY;
-	}
+	if (!kaluad_wq)
+		return -ENOMEM;
 
 	r = scsi_register_device_handler(&alua_dh);
 	if (r != 0) {

@@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander(
 		list_del(&child->dev_list_node);
 		spin_unlock_irq(&parent->port->dev_list_lock);
 		sas_put_device(child);
+		sas_port_delete(phy->port);
+		phy->port = NULL;
 		return NULL;
 	}
 	list_add_tail(&child->siblings, &parent->ex_dev.children);

@@ -6378,7 +6378,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
 	else
 		mask = DMA_BIT_MASK(32);
 
-	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
 		goto disable_device;

@@ -576,7 +576,7 @@ static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
 		 dev->colourfx.enable ? "true" : "false",
 		 dev->colourfx.u, dev->colourfx.v,
 		 ret, (ret == 0 ? 0 : -EINVAL));
-	return (ret == 0 ? 0 : EINVAL);
+	return (ret == 0 ? 0 : -EINVAL);
 }
 
 static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
@@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
 		 "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
 		 __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
 		 (ret == 0 ? 0 : -EINVAL));
-	return (ret == 0 ? 0 : EINVAL);
+	return (ret == 0 ? 0 : -EINVAL);
 }
 
 static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,

@@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = {
 static struct uart_driver sunhv_reg = {
 	.owner			= THIS_MODULE,
 	.driver_name		= "sunhv",
-	.dev_name		= "ttyS",
+	.dev_name		= "ttyHV",
 	.major			= TTY_MAJOR,
 };

@@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
 	struct xhci_ep_priv	*epriv;
 	struct xhci_slot_priv	*spriv = dev->debugfs_private;
 
+	if (!spriv)
+		return;
+
 	if (spriv->eps[ep_index])
 		return;
 

@@ -538,7 +538,6 @@ static int __write_ring(struct pvcalls_data_intf *intf,
 int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
 			  size_t len)
 {
-	struct pvcalls_bedata *bedata;
 	struct sock_mapping *map;
 	int sent, tot_sent = 0;
 	int count = 0, flags;
@@ -550,7 +549,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
 	map = pvcalls_enter_sock(sock);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
-	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
 	mutex_lock(&map->active.out_mutex);
 	if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
@@ -633,7 +631,6 @@ static int __read_ring(struct pvcalls_data_intf *intf,
 int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 			  int flags)
 {
-	struct pvcalls_bedata *bedata;
 	int ret;
 	struct sock_mapping *map;
 
@@ -643,7 +640,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 	map = pvcalls_enter_sock(sock);
 	if (IS_ERR(map))
 		return PTR_ERR(map);
-	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
 	mutex_lock(&map->active.in_mutex);
 	if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))

@@ -83,6 +83,7 @@ struct xb_req_data {
 	int num_vecs;
 	int err;
 	enum xb_req_state state;
+	bool user_req;
 	void (*cb)(struct xb_req_data *);
 	void *par;
 };
@@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void);
 int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
 void xenbus_dev_queue_reply(struct xb_req_data *req);
 
+extern unsigned int xb_dev_generation_id;
+
 #endif

@@ -62,6 +62,8 @@
 
 #include "xenbus.h"
 
+unsigned int xb_dev_generation_id;
+
 /*
  * An element of a list of outstanding transactions, for which we're
  * still waiting a reply.
@@ -69,6 +71,7 @@
 struct xenbus_transaction_holder {
 	struct list_head list;
 	struct xenbus_transaction handle;
+	unsigned int generation_id;
 };
 
 /*
@@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type,
 			rc = -ENOMEM;
 			goto out;
 		}
+		trans->generation_id = xb_dev_generation_id;
 		list_add(&trans->list, &u->transactions);
 	} else if (msg->hdr.tx_id != 0 &&
 		   !xenbus_get_transaction(u, msg->hdr.tx_id))
@@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type,
 		 !(msg->hdr.len == 2 &&
 		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
 		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
+	else if (msg_type == XS_TRANSACTION_END) {
+		trans = xenbus_get_transaction(u, msg->hdr.tx_id);
+		if (trans && trans->generation_id != xb_dev_generation_id) {
+			list_del(&trans->list);
+			kfree(trans);
+			if (!strcmp(msg->body, "T"))
+				return xenbus_command_reply(u, XS_ERROR,
+							    "EAGAIN");
+			else
+				return xenbus_command_reply(u,
+							    XS_TRANSACTION_END,
+							    "OK");
+		}
+	}
 
 	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
 	if (rc && trans) {

@@ -105,6 +105,7 @@ static void xs_suspend_enter(void)
 
 static void xs_suspend_exit(void)
 {
+	xb_dev_generation_id++;
 	spin_lock(&xs_state_lock);
 	xs_suspend_active--;
 	spin_unlock(&xs_state_lock);
@@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req)
 		spin_lock(&xs_state_lock);
 	}
 
-	if (req->type == XS_TRANSACTION_START)
+	if (req->type == XS_TRANSACTION_START && !req->user_req)
 		xs_state_users++;
 	rq_id = xs_request_id++;
@@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req)
 	spin_lock(&xs_state_lock);
 	xs_state_users--;
 	if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
-	    (req->type == XS_TRANSACTION_END &&
+	    (req->type == XS_TRANSACTION_END && !req->user_req &&
 	     !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
 			   !strcmp(req->body, "ENOENT"))))
 		xs_state_users--;
@@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
 	req->num_vecs = 1;
 	req->cb = xenbus_dev_queue_reply;
 	req->par = par;
+	req->user_req = true;
 
 	xs_send(req, msg);
 
@@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t,
 	req->vec = iovec;
 	req->num_vecs = num_vecs;
 	req->cb = xs_wake_up;
+	req->user_req = false;
 
 	msg.req_id = 0;
 	msg.tx_id = t.id;

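The xenbus fix works by stamping each user-space transaction with a global generation counter that is bumped on resume; ending a transaction whose stamp is stale yields EAGAIN instead of blocking suspend on it. A compact model of the idea (names are illustrative, not the xenbus API):

#include <stdbool.h>
#include <stdio.h>

static unsigned int generation_id;

struct transaction {
	unsigned int generation_id;
};

static void transaction_start(struct transaction *t)
{
	t->generation_id = generation_id;	/* stamp at creation */
}

static void suspend_exit(void)
{
	generation_id++;	/* everything stamped earlier is now stale */
}

static bool transaction_end(const struct transaction *t)
{
	if (t->generation_id != generation_id)
		return false;	/* stale: caller must retry (EAGAIN) */
	return true;
}

int main(void)
{
	struct transaction t;

	transaction_start(&t);
	suspend_exit();
	printf("commit ok: %d\n", transaction_end(&t));	/* prints 0: stale */
	return 0;
}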
@@ -59,11 +59,10 @@ static void configfs_d_iput(struct dentry * dentry,
 		/* Coordinate with configfs_readdir */
 		spin_lock(&configfs_dirent_lock);
 		/*
-		 * Set sd->s_dentry to null only when this dentry is the
-		 * one that is going to be killed.
-		 * If not do so, configfs_d_iput may run just after
-		 * configfs_attach_attr and set sd->s_dentry to null
-		 * even it's still in use.
+		 * Set sd->s_dentry to null only when this dentry is the one
+		 * that is going to be killed. Otherwise configfs_d_iput may
+		 * run just after configfs_attach_attr and set sd->s_dentry to
+		 * NULL even it's still in use.
 		 */
 		if (sd->s_dentry == dentry)
 			sd->s_dentry = NULL;

@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
 	int kill;
 	int error = 0;
 
-	/* Fast path for nothing security related */
-	if (IS_NOSEC(inode))
+	/*
+	 * Fast path for nothing security related.
+	 * As well for non-regular files, e.g. blkdev inodes.
+	 * For example, blkdev_write_iter() might get here
+	 * trying to remove privs which it is not allowed to.
+	 */
+	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
 		return 0;
 
 	kill = dentry_needs_remove_privs(dentry);

@@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
 	ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
 					NULL, "filecheck");
 	if (ret) {
+		kobject_put(&entry->fs_kobj);
 		kfree(fcheck);
 		return ret;
 	}

@@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm)
 * followed by taking the mmap_sem for writing before modifying the
 * vmas or anything the coredump pretends not to change from under it.
 *
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
 * NOTE: find_extend_vma() called from GUP context is the only place
 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
 * for reading and outside the context of the process, so it is also

@@ -433,14 +433,19 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 
 	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
 		goto free_cs;
+	if (!alloc_cpumask_var(&trial->cpus_requested, GFP_KERNEL))
+		goto free_allowed;
 	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
 		goto free_cpus;
 
 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
 
 free_cpus:
+	free_cpumask_var(trial->cpus_requested);
+free_allowed:
 	free_cpumask_var(trial->cpus_allowed);
 free_cs:
 	kfree(trial);
@@ -454,6 +459,7 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 static void free_trial_cpuset(struct cpuset *trial)
 {
 	free_cpumask_var(trial->effective_cpus);
+	free_cpumask_var(trial->cpus_requested);
 	free_cpumask_var(trial->cpus_allowed);
 	kfree(trial);
 }
@@ -979,24 +985,24 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		return -EACCES;
 
 	/*
-	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
+	 * An empty cpus_requested is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing.  The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpumask_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_requested);
 	} else {
 		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
-
-		if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
-			return -EINVAL;
-
-		cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
 	}
 
+	if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
+		return -EINVAL;
+
+	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
+
 	/* Nothing to do if the cpus didn't change */
 	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;

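The cpuset allocation fix extends the usual goto-unwind idiom: each allocation that succeeds gains a matching label so a later failure falls through exactly the cleanups it needs. A minimal userspace rendering of the shape (sizes and names are placeholders, malloc standing in for alloc_cpumask_var()):

#include <stdlib.h>

struct trial {
	void *cpus_allowed;
	void *cpus_requested;
	void *effective_cpus;
};

static struct trial *alloc_trial(void)
{
	struct trial *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	if (!(t->cpus_allowed = malloc(128)))
		goto free_t;
	if (!(t->cpus_requested = malloc(128)))
		goto free_allowed;
	if (!(t->effective_cpus = malloc(128)))
		goto free_requested;
	return t;

free_requested:			/* unwind in reverse allocation order */
	free(t->cpus_requested);
free_allowed:
	free(t->cpus_allowed);
free_t:
	free(t);
	return NULL;
}

int main(void)
{
	struct trial *t = alloc_trial();

	if (!t)
		return 1;
	free(t->effective_cpus);
	free(t->cpus_requested);
	free(t->cpus_allowed);
	free(t);
	return 0;
}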
@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	unsigned long head;
 
 again:
+	/*
+	 * In order to avoid publishing a head value that goes backwards,
+	 * we must ensure the load of @rb->head happens after we've
+	 * incremented @rb->nest.
+	 *
+	 * Otherwise we can observe a @rb->head value before one published
+	 * by an IRQ/NMI happening between the load and the increment.
+	 */
+	barrier();
 	head = local_read(&rb->head);
 
 	/*
-	 * IRQ/NMI can happen here, which means we can miss a head update.
+	 * IRQ/NMI can happen here and advance @rb->head, causing our
+	 * load above to be stale.
 	 */
 
-	if (!local_dec_and_test(&rb->nest))
+	/*
+	 * If this isn't the outermost nesting, we don't have to update
+	 * @rb->user_page->data_head.
+	 */
+	if (local_read(&rb->nest) > 1) {
+		local_dec(&rb->nest);
 		goto out;
+	}
 
 	/*
 	 * Since the mmap() consumer (userspace) can run on a different CPU:
@@ -85,12 +101,21 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	 * See perf_output_begin().
 	 */
 	smp_wmb(); /* B, matches C */
-	rb->user_page->data_head = head;
+	WRITE_ONCE(rb->user_page->data_head, head);
 
 	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * We must publish the head before decrementing the nest count,
+	 * otherwise an IRQ/NMI can publish a more recent head value and our
+	 * write will (temporarily) publish a stale value.
 	 */
 	barrier();
+	local_set(&rb->nest, 0);
+
+	/*
+	 * Ensure we decrement @rb->nest before we validate the @rb->head.
+	 * Otherwise we cannot be sure we caught the 'last' nested update.
+	 */
+	barrier();
 	if (unlikely(head != local_read(&rb->head))) {
 		local_inc(&rb->nest);
 		goto again;
@@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 					     handle->aux_flags);
 	}
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb))
 		wakeup = true;
 
@@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 
 	rb->aux_head += size;
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb)) {
 		perf_output_wakeup(handle);
 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;

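Taken together, the three perf/ring_buffer patches pin down the order in which the outermost writer publishes data_head relative to the rb->nest counter: load head after bumping nest, publish head before dropping nest, then re-check head. The sketch below is a userspace model of that ordering with C11 atomics standing in for local_t and barrier(); it assumes the caller already incremented nest (as perf_output_begin() does) and is not the kernel code itself:

#include <stdatomic.h>

struct ring {
	_Atomic unsigned long head;		/* writer-side head */
	_Atomic int nest;			/* per-CPU nesting depth */
	_Atomic unsigned long data_head;	/* value the reader sees */
};

void ring_put(struct ring *rb)		/* hypothetical name */
{
	unsigned long head;

again:
	/* (1) Load head only after our nest increment is in place,
	 * so we cannot publish a value that goes backwards. */
	head = atomic_load_explicit(&rb->head, memory_order_acquire);

	if (atomic_load_explicit(&rb->nest, memory_order_relaxed) > 1) {
		/* Not the outermost writer: leave publishing to it. */
		atomic_fetch_sub(&rb->nest, 1);
		return;
	}

	/* (2) Publish before dropping nest; doing it the other way
	 * round lets a nested writer's newer value be overwritten by
	 * our stale one. */
	atomic_store_explicit(&rb->data_head, head, memory_order_release);
	atomic_store_explicit(&rb->nest, 0, memory_order_release);

	/* (3) A nested writer may have advanced head meanwhile: re-check. */
	if (head != atomic_load_explicit(&rb->head, memory_order_acquire)) {
		atomic_fetch_add(&rb->nest, 1);
		goto again;
	}
}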
@@ -1007,6 +1007,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
+	result = SCAN_ANY_PROCESS;
+	if (!mmget_still_valid(mm))
+		goto out;
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;

@@ -429,9 +429,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 	}
 
 	if (ax25->sk != NULL) {
+		local_bh_disable();
 		bh_lock_sock(ax25->sk);
 		sock_reset_flag(ax25->sk, SOCK_ZAPPED);
 		bh_unlock_sock(ax25->sk);
+		local_bh_enable();
 	}
 
 put:

@@ -2751,6 +2751,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+	__acquires(tbl->lock)
 	__acquires(rcu_bh)
 {
 	struct neigh_seq_state *state = seq->private;
@@ -2761,6 +2762,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	rcu_read_lock_bh();
 	state->nht = rcu_dereference_bh(tbl->nht);
+	read_lock(&tbl->lock);
 
 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
@@ -2794,8 +2796,13 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
+	__releases(tbl->lock)
 	__releases(rcu_bh)
 {
+	struct neigh_seq_state *state = seq->private;
+	struct neigh_table *tbl = state->tbl;
+
+	read_unlock(&tbl->lock);
 	rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);

@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
 	rcu_read_lock_bh();
 	for_each_sk_fl_rcu(np, sfl) {
 		struct ip6_flowlabel *fl = sfl->fl;
-		if (fl->label == label) {
+
+		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
 			fl->lastuse = jiffies;
-			atomic_inc(&fl->users);
 			rcu_read_unlock_bh();
 			return fl;
 		}
@@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 				goto done;
 			}
 			fl1 = sfl->fl;
-			atomic_inc(&fl1->users);
+			if (!atomic_inc_not_zero(&fl1->users))
+				fl1 = NULL;
 			break;
 		}
 	}

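The flowlabel fix is an instance of a common RCU pattern: an object found under rcu_read_lock() may already have dropped its last reference, so a reader must take its reference conditionally. A C11 sketch of what atomic_inc_not_zero() does (hypothetical helper name, not the kernel implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* Bump the refcount only if it is still non-zero; a zero count means
 * the object is already being torn down and must be skipped. */
static bool refcount_inc_not_zero(_Atomic int *users)
{
	int old = atomic_load_explicit(users, memory_order_relaxed);

	while (old != 0) {
		if (atomic_compare_exchange_weak_explicit(users, &old, old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;	/* got a reference */
	}
	return false;			/* object is dying: do not touch it */
}

int main(void)
{
	_Atomic int users = 1;

	return refcount_inc_not_zero(&users) ? 0 : 1;	/* takes the ref */
}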
@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
 	lapb = __lapb_devtostruct(dev);
 	if (!lapb)
 		goto out;
+	lapb_put(lapb);
 
 	lapb_stop_t1timer(lapb);
 	lapb_stop_t2timer(lapb);

@@ -2280,7 +2280,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	ip_vs_service_net_cleanup(ipvs);	/* ip_vs_flush() with locks */
 	ip_vs_conn_net_cleanup(ipvs);
 	ip_vs_app_net_cleanup(ipvs);
@@ -2295,6 +2294,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	EnterFunction(2);
+	nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	ipvs->enable = 0;	/* Disable packet reception */
 	smp_wmb();
 	ip_vs_sync_net_cleanup(ipvs);

@@ -238,6 +238,7 @@ static unsigned int nf_iterate(struct sk_buff *skb,
 repeat:
 		verdict = nf_hook_entry_hookfn(hook, skb, state);
 		if (verdict != NF_ACCEPT) {
+			*index = i;
 			if (verdict != NF_REPEAT)
 				return verdict;
 			goto repeat;

@@ -922,7 +922,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb,
 	u32 device_idx, target_idx;
 	int rc;
 
-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_TARGET_INDEX])
 		return -EINVAL;
 
 	device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);

@@ -169,7 +169,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
 	struct vport *vport;
 	struct internal_dev *internal_dev;
+	struct net_device *dev;
 	int err;
+	bool free_vport = true;
 
 	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {
@@ -177,8 +179,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		goto error;
 	}
 
-	vport->dev = alloc_netdev(sizeof(struct internal_dev),
-				  parms->name, NET_NAME_USER, do_setup);
+	dev = alloc_netdev(sizeof(struct internal_dev),
+			   parms->name, NET_NAME_USER, do_setup);
+	vport->dev = dev;
 	if (!vport->dev) {
 		err = -ENOMEM;
 		goto error_free_vport;
@@ -199,8 +202,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
-	if (err)
+	if (err) {
+		free_vport = false;
 		goto error_unlock;
+	}
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();
@@ -210,11 +215,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 error_unlock:
 	rtnl_unlock();
-	free_percpu(vport->dev->tstats);
+	free_percpu(dev->tstats);
 error_free_netdev:
-	free_netdev(vport->dev);
+	free_netdev(dev);
 error_free_vport:
-	ovs_vport_free(vport);
+	if (free_vport)
+		ovs_vport_free(vport);
 error:
 	return ERR_PTR(err);
 }

@@ -2600,6 +2600,8 @@ static int sctp_process_param(struct sctp_association *asoc,
 	case SCTP_PARAM_STATE_COOKIE:
 		asoc->peer.cookie_len =
 			ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+		if (asoc->peer.cookie)
+			kfree(asoc->peer.cookie);
 		asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
 		if (!asoc->peer.cookie)
 			retval = 0;
@@ -2664,6 +2666,8 @@ static int sctp_process_param(struct sctp_association *asoc,
 			goto fall_through;
 
 		/* Save peer's random parameter */
+		if (asoc->peer.peer_random)
+			kfree(asoc->peer.peer_random);
 		asoc->peer.peer_random = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_random) {
@@ -2677,6 +2681,8 @@ static int sctp_process_param(struct sctp_association *asoc,
 			goto fall_through;
 
 		/* Save peer's HMAC list */
+		if (asoc->peer.peer_hmacs)
+			kfree(asoc->peer.peer_hmacs);
 		asoc->peer.peer_hmacs = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_hmacs) {
@@ -2692,6 +2698,8 @@ static int sctp_process_param(struct sctp_association *asoc,
 		if (!ep->auth_enable)
 			goto fall_through;
 
+		if (asoc->peer.peer_chunks)
+			kfree(asoc->peer.peer_chunks);
 		asoc->peer.peer_chunks = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_chunks)

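The sctp change guards against re-processing parameters from a retransmitted chunk: each kmemdup() target is freed first so the earlier copy cannot leak. The same free-before-duplicate shape in plain C (malloc/memcpy standing in for kmemdup, field names illustrative):

#include <stdlib.h>
#include <string.h>

struct peer {
	void *cookie;
	size_t cookie_len;
};

static int save_cookie(struct peer *p, const void *data, size_t len)
{
	free(p->cookie);	/* drop any previous copy: no leak on re-processing */
	p->cookie = malloc(len);
	if (!p->cookie) {
		p->cookie_len = 0;
		return -1;
	}
	memcpy(p->cookie, data, len);
	p->cookie_len = len;
	return 0;
}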
@@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
 
 	rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
 		tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
+		__skb_queue_purge(&m->deferredq);
 		list_del(&m->list);
 		kfree(m);
 	}

@@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk,
 		if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
 			vsk->peer_shutdown |= SEND_SHUTDOWN;
 		if (vsk->peer_shutdown == SHUTDOWN_MASK &&
-		    vsock_stream_has_data(vsk) <= 0)
+		    vsock_stream_has_data(vsk) <= 0) {
+			sock_set_flag(sk, SOCK_DONE);
 			sk->sk_state = TCP_CLOSING;
+		}
 		if (le32_to_cpu(pkt->hdr.flags))
 			sk->sk_state_change(sk);
 		break;

@@ -378,6 +378,7 @@ enum {
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",
@@ -1795,8 +1796,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 	else
 		chip->bdl_pos_adj = bdl_pos_adj[dev];
 
-	/* Workaround for a communication error on CFL (bko#199007) */
-	if (IS_CFL(pci))
+	/* Workaround for a communication error on CFL (bko#199007) and CNL */
+	if (IS_CFL(pci) || IS_CNL(pci))
 		chip->polling_mode = 1;
 
 	err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);

@@ -5,16 +5,19 @@
 #include "util.h"
 #include "machine.h"
 #include "api/fs/fs.h"
+#include "debug.h"
 
 int arch__fix_module_text_start(u64 *start, const char *name)
 {
+	u64 m_start = *start;
 	char path[PATH_MAX];
 
 	snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
 				(int)strlen(name) - 2, name + 1);
-
-	if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
-		return -1;
+	if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+		pr_debug2("Using module %s start:%#lx\n", path, m_start);
+		*start = m_start;
+	}
 
 	return 0;
 }

|
|||
if (i > 0)
|
||||
strncpy(buffer, string, i);
|
||||
}
|
||||
strncat(buffer + p, numstr, 4);
|
||||
memcpy(buffer + p, numstr, 4);
|
||||
p += 3;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -128,7 +128,7 @@ void thread__put(struct thread *thread)
 	}
 }
 
-struct namespaces *thread__namespaces(const struct thread *thread)
+static struct namespaces *__thread__namespaces(const struct thread *thread)
 {
 	if (list_empty(&thread->namespaces_list))
 		return NULL;
@@ -136,10 +136,21 @@ struct namespaces *thread__namespaces(const struct thread *thread)
 	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
 }
 
+struct namespaces *thread__namespaces(const struct thread *thread)
+{
+	struct namespaces *ns;
+
+	down_read((struct rw_semaphore *)&thread->namespaces_lock);
+	ns = __thread__namespaces(thread);
+	up_read((struct rw_semaphore *)&thread->namespaces_lock);
+
+	return ns;
+}
+
 static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
 				    struct namespaces_event *event)
 {
-	struct namespaces *new, *curr = thread__namespaces(thread);
+	struct namespaces *new, *curr = __thread__namespaces(thread);
 
 	new = namespaces__new(event);
 	if (!new)

@@ -23,7 +23,11 @@ ip netns add ns0
 ip netns add ns1
 ip netns add ns2
 
-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
 ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
 
 ip -net ns0 link set lo up