This is the 4.19.38 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlzKo0YACgkQONu9yGCS
aT4dbQ//U1bo/8bdBJec+a0aNMy3cxzPF1Ozbrb/vEaHofj1BR87hgo4BODBO7pu
6ppwloPle9VFrsfT1FYOjsicUBhT4NmieHlsC3msAR4xlBEbHEOBTEbUdu3HinGV
Jn/uL/NDTrq+wA5rROGOh9sTlQ5w6dqItjHAWvnGkXlerbUJwIgnzbgH5qGBFZhQ
6SbPmqJv5V+C+qYy3yXNs2CnbtS7+cfulLy26MNnkFMEZGbHTWeNbeu9H41AK6T4
xtO8INse28RD6lbAPvW/xb//iAXsOHv+7KF1TgtZq89Z1RmlaqLSdPdgTYvCxm+Y
RhWa8KyIdhADJ8z8sRcPviFI5bR65cfCMUAEgBcFNYYByDv36KCBLsXajn4JbBsF
OOOtqnGaZyAJBZgMXySfVJIXLAx7cUlt07YD9cIdsOzjl1DCMP76XvypeGXLw5Mk
ZBXBJ+By+8jwnE7PAtecij/VH6qCDsfn4HqoRELsRLVahFsnFFid5lutVIjsO21j
QHrwi4hChuYGa89MhD48KyC2ZuaQmbs3rm6F3O0iQ0aipknvlsDoB4jYYp9qRI04
0FYMlZLlVyg+sNYOM2XvTtpOBFa1PFwFwscqXoyt0CGtig0D+pD3gDYExRONj6Fp
8h+OUBWbVHWscceMc6G1p/Qu+YcgmQTu8CFAUO8l/X8xq655c1A=
=isRm
-----END PGP SIGNATURE-----

Merge 4.19.38 into android-4.19

Changes in 4.19.38
	netfilter: nft_compat: use refcnt_t type for nft_xt reference count
	netfilter: nft_compat: make lists per netns
	netfilter: nf_tables: split set destruction in deactivate and destroy phase
	netfilter: nft_compat: destroy function must not have side effects
	netfilter: nf_tables: warn when expr implements only one of activate/deactivate
	netfilter: nf_tables: unbind set in rule from commit path
	netfilter: nft_compat: don't use refcount_inc on newly allocated entry
	netfilter: nft_compat: use .release_ops and remove list of extension
	netfilter: nf_tables: fix set double-free in abort path
	netfilter: nf_tables: bogus EBUSY when deleting set after flush
	netfilter: nf_tables: bogus EBUSY in helper removal from transaction
	net/ibmvnic: Fix RTNL deadlock during device reset
	net: mvpp2: fix validate for PPv2.1
	ext4: fix some error pointer dereferences
	tipc: handle the err returned from cmd header function
	loop: do not print warn message if partition scan is successful
	drm/rockchip: fix for mailbox read validation.
	vsock/virtio: fix kernel panic from virtio_transport_reset_no_sock
	ipvs: fix warning on unused variable
	powerpc/vdso32: fix CLOCK_MONOTONIC on PPC64
	ALSA: hda/ca0132 - Fix build error without CONFIG_PCI
	net: dsa: mv88e6xxx: add call to mv88e6xxx_ports_cmode_init to probe for new DSA framework
	cifs: fix memory leak in SMB2_read
	cifs: do not attempt cifs operation on smb2+ rename error
	tracing: Fix a memory leak by early error exit in trace_pid_write()
	tracing: Fix buffer_ref pipe ops
	gpio: eic: sprd: Fix incorrect irq type setting for the sync EIC
	zram: pass down the bvec we need to read into in the work struct
	lib/Kconfig.debug: fix build error without CONFIG_BLOCK
	MIPS: scall64-o32: Fix indirect syscall number load
	trace: Fix preempt_enable_no_resched() abuse
	IB/rdmavt: Fix frwr memory registration
	RDMA/mlx5: Do not allow the user to write to the clock page
	sched/numa: Fix a possible divide-by-zero
	ceph: only use d_name directly when parent is locked
	ceph: ensure d_name stability in ceph_dentry_hash()
	ceph: fix ci->i_head_snapc leak
	nfsd: Don't release the callback slot unless it was actually held
	sunrpc: don't mark uninitialised items as VALID.
	perf/x86/intel: Update KBL Package C-state events to also include PC8/PC9/PC10 counters
	Input: synaptics-rmi4 - write config register values to the right offset
	vfio/type1: Limit DMA mappings per container
	dmaengine: sh: rcar-dmac: With cyclic DMA residue 0 is valid
	dmaengine: sh: rcar-dmac: Fix glitch in dmaengine_tx_status
	ARM: 8857/1: efi: enable CP15 DMB instructions before cleaning the cache
	powerpc/mm/radix: Make Radix require HUGETLB_PAGE
	drm/vc4: Fix memory leak during gpu reset.
	Revert "drm/i915/fbdev: Actually configure untiled displays"
	drm/vc4: Fix compilation error reported by kbuild test bot
	USB: Add new USB LPM helpers
	USB: Consolidate LPM checks to avoid enabling LPM twice
	slip: make slhc_free() silently accept an error pointer
	intel_th: gth: Fix an off-by-one in output unassigning
	fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
	workqueue: Try to catch flush_work() without INIT_WORK().
	binder: fix handling of misaligned binder object
	sched/deadline: Correctly handle active 0-lag timers
	NFS: Forbid setting AF_INET6 to "struct sockaddr_in"->sin_family.
	netfilter: ebtables: CONFIG_COMPAT: drop a bogus WARN_ON
	fm10k: Fix a potential NULL pointer dereference
	tipc: check bearer name with right length in tipc_nl_compat_bearer_enable
	tipc: check link name with right length in tipc_nl_compat_link_set
	net: netrom: Fix error cleanup path of nr_proto_init
	net/rds: Check address length before reading address family
	rxrpc: fix race condition in rxrpc_input_packet()
	aio: clear IOCB_HIPRI
	aio: use assigned completion handler
	aio: separate out ring reservation from req allocation
	aio: don't zero entire aio_kiocb aio_get_req()
	aio: use iocb_put() instead of open coding it
	aio: split out iocb copy from io_submit_one()
	aio: abstract out io_event filler helper
	aio: initialize kiocb private in case any filesystems expect it.
	aio: simplify - and fix - fget/fput for io_submit()
	pin iocb through aio.
	aio: fold lookup_kiocb() into its sole caller
	aio: keep io_event in aio_kiocb
	aio: store event at final iocb_put()
	Fix aio_poll() races
	x86, retpolines: Raise limit for generating indirect calls from switch-case
	x86/retpolines: Disable switch jump tables when retpolines are enabled
	mm: Fix warning in insert_pfn()
	x86/fpu: Don't export __kernel_fpu_{begin,end}()
	ipv4: add sanity checks in ipv4_link_failure()
	ipv4: set the tcp_min_rtt_wlen range from 0 to one day
	mlxsw: spectrum: Fix autoneg status in ethtool
	net/mlx5e: ethtool, Remove unsupported SFP EEPROM high pages query
	net: rds: exchange of 8K and 1M pool
	net/rose: fix unbound loop in rose_loopback_timer()
	net: stmmac: move stmmac_check_ether_addr() to driver probe
	net/tls: fix refcount adjustment in fallback
	stmmac: pci: Adjust IOT2000 matching
	team: fix possible recursive locking when add slaves
	net: hns: Fix WARNING when hns modules installed
	mlxsw: pci: Reincrease PCI reset timeout
	mlxsw: spectrum: Put MC TCs into DWRR mode
	net/mlx5e: Fix the max MTU check in case of XDP
	net/mlx5e: Fix use-after-free after xdp_return_frame
	net/tls: avoid potential deadlock in tls_set_device_offload_rx()
	net/tls: don't leak IV and record seq when offload fails
	powerpc/fsl: Add FSL_PPC_BOOK3E as supported arch for nospectre_v2 boot arg
	Linux 4.19.38

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 5e7b4fbe36
99 changed files with 971 additions and 595 deletions
Changed paths:
  Documentation, Makefile,
  arch/{arm/boot/compressed, mips/kernel, powerpc, x86},
  drivers/{android, block, dma/sh, gpio, gpu/drm, hwtracing/intel_th, infiniband, input/rmi4,
           net/{dsa/mv88e6xxx, ethernet/{hisilicon/hns, ibm, intel/fm10k, marvell/mvpp2, mellanox, stmicro/stmmac}, slip, team},
           usb/core, vfio},
  fs, include, kernel, lib, mm,
  net/{bridge/netfilter, ipv4, netfilter, netrom, rds, rose, rxrpc, sunrpc, tipc, tls, vmw_vsock},
  sound/pci/hda
@@ -2805,7 +2805,7 @@
 			check bypass). With this option data leaks are possible
 			in the system.
 
-	nospectre_v2	[X86] Disable all mitigations for the Spectre variant 2
+	nospectre_v2	[X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
 			(indirect branch prediction) vulnerability. System may
 			allow data leaks with this option, which is equivalent
 			to spectre_v2=off.
 

@@ -410,6 +410,7 @@ tcp_min_rtt_wlen - INTEGER
 	minimum RTT when it is moved to a longer path (e.g., due to traffic
 	engineering). A longer window makes the filter more resistant to RTT
 	inflations such as transient congestion. The unit is seconds.
+	Possible values: 0 - 86400 (1 day)
 	Default: 300
 
 tcp_moderate_rcvbuf - BOOLEAN
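For illustration only (not part of the patch): a minimal userspace C sketch that reads the sysctl documented in the hunk above through the standard proc interface. After this change the value must fall in the documented 0 - 86400 range.

/* Illustrative only: read net.ipv4.tcp_min_rtt_wlen via /proc. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_rtt_wlen", "r");
	int wlen;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &wlen) == 1)
		printf("tcp_min_rtt_wlen = %d seconds\n", wlen);
	fclose(f);
	return 0;
}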
Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 37
+SUBLEVEL = 38
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -1395,7 +1395,21 @@ ENTRY(efi_stub_entry)
 
 		@ Preserve return value of efi_entry() in r4
 		mov	r4, r0
-		bl	cache_clean_flush
+
+		@ our cache maintenance code relies on CP15 barrier instructions
+		@ but since we arrived here with the MMU and caches configured
+		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
+		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
+		@ the enable path will be executed on v7+ only.
+		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
+		tst	r1, #(1 << 5)		@ CP15BEN bit set?
+		bne	0f
+		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb						)
+
+0:		bl	cache_clean_flush
 		bl	cache_off
 
 		@ Set parameters for booting zImage according to boot protocol
@@ -125,7 +125,7 @@ trace_a_syscall:
 	subu	t1, v0, __NR_O32_Linux
 	move	a1, v0
 	bnez	t1, 1f			/* __NR_syscall at offset 0 */
-	lw	a1, PT_R4(sp)		/* Arg1 for __NR_syscall case */
+	ld	a1, PT_R4(sp)		/* Arg1 for __NR_syscall case */
 	.set	pop
 
 1:	jal	syscall_trace_enter
@@ -195,6 +195,7 @@ CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
 CONFIG_PROC_KCORE=y
+CONFIG_HUGETLBFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_MISC_FILESYSTEMS is not set
@@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	 * can be used, r7 contains NSEC_PER_SEC.
 	 */
 
-	lwz	r5,WTOM_CLOCK_SEC(r9)
+	lwz	r5,(WTOM_CLOCK_SEC+LOPART)(r9)
 	lwz	r6,WTOM_CLOCK_NSEC(r9)
 
 	/* We now have our offset in r5,r6. We create a fake dependency
|
@ -330,7 +330,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
|
|||
|
||||
config PPC_RADIX_MMU
|
||||
bool "Radix MMU Support"
|
||||
depends on PPC_BOOK3S_64
|
||||
depends on PPC_BOOK3S_64 && HUGETLB_PAGE
|
||||
select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
|
||||
default y
|
||||
help
|
||||
|
|
|
@@ -224,6 +224,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
+  # Additionally, avoid generating expensive indirect jumps which
+  # are subject to retpolines for small number of switch cases.
+  # clang turns off jump table generation by default when under
+  # retpoline builds, however, gcc does not for x86. This has
+  # only been fixed starting from gcc stable version 8.4.0 and
+  # onwards, but not for older ones. See gcc bug #86952.
+  ifndef CONFIG_CC_IS_CLANG
+    KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
+  endif
 endif
 
 archscripts: scripts_basic
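Background for the hunk above, as a hypothetical illustration (not kernel code): a dense switch like the one below is exactly what GCC would normally lower to one indirect jump through a jump table at -O2; with CONFIG_RETPOLINE, the added -fno-jump-tables forces a chain of conditional branches instead, so no retpoline-vulnerable indirect branch is emitted.

/* A dense switch that a compiler typically lowers to a jump table
 * (a single indirect "jmp *table(,%reg,8)"), which retpoline builds
 * suppress via -fno-jump-tables. */
int classify(int op)
{
	switch (op) {
	case 0: return 10;
	case 1: return 11;
	case 2: return 12;
	case 3: return 13;
	case 4: return 14;
	case 5: return 15;
	case 6: return 16;
	default: return -1;
	}
}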
@@ -76,15 +76,15 @@
 *			       Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
 *			       perf code: 0x04
-*			       Available model: HSW ULT,CNL
+*			       Available model: HSW ULT,KBL,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
 *			       perf code: 0x05
-*			       Available model: HSW ULT,CNL
+*			       Available model: HSW ULT,KBL,CNL
 *			       Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
-*			       Available model: HSW ULT,GLM,CNL
+*			       Available model: HSW ULT,KBL,GLM,CNL
 *			       Scope: Package (physical package)
 *
 */
@@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
 	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE,  hswult_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
 
 	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
@@ -82,8 +82,7 @@ struct efi_scratch {
 #define arch_efi_call_virt_setup()					\
 ({									\
 	efi_sync_low_kernel_mappings();					\
-	preempt_disable();						\
-	__kernel_fpu_begin();						\
+	kernel_fpu_begin();						\
 	firmware_restrict_branch_speculation_start();			\
 									\
 	if (!efi_enabled(EFI_OLD_MEMMAP))				\
@@ -99,8 +98,7 @@ struct efi_scratch {
 		efi_switch_mm(efi_scratch.prev_mm);			\
 									\
 	firmware_restrict_branch_speculation_end();			\
-	__kernel_fpu_end();						\
-	preempt_enable();						\
+	kernel_fpu_end();						\
 })
 
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
@@ -12,17 +12,12 @@
 #define _ASM_X86_FPU_API_H
 
 /*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
+ * disables preemption so be careful if you intend to use it for long periods
+ * of time.
+ * If you intend to use the FPU in softirq you need to check first with
+ * irq_fpu_usable() if it is possible.
 */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
 extern void kernel_fpu_begin(void);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
 		__cpu_invalidate_fpregs_state();
 	}
 }
-EXPORT_SYMBOL(__kernel_fpu_begin);
 
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
 {
 	struct fpu *fpu = &current->thread.fpu;
 
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
 
 	kernel_fpu_enable();
 }
-EXPORT_SYMBOL(__kernel_fpu_end);
 
 void kernel_fpu_begin(void)
 {
@@ -926,14 +926,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_write_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -944,10 +943,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_write(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
@@ -1112,8 +1112,9 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 			err = __blkdev_reread_part(bdev);
 		else
 			err = blkdev_reread_part(bdev);
-		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
-			__func__, lo_number, err);
+		if (err)
+			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+				__func__, lo_number, err);
 		/* Device is gone, no point in returning error */
 		err = 0;
 	}
@@ -794,18 +794,18 @@ struct zram_work {
 	struct zram *zram;
 	unsigned long entry;
 	struct bio *bio;
+	struct bio_vec bvec;
 };
 
 #if PAGE_SIZE != 4096
 static void zram_sync_read(struct work_struct *work)
 {
-	struct bio_vec bvec;
 	struct zram_work *zw = container_of(work, struct zram_work, work);
 	struct zram *zram = zw->zram;
 	unsigned long entry = zw->entry;
 	struct bio *bio = zw->bio;
 
-	read_from_bdev_async(zram, &bvec, entry, bio);
+	read_from_bdev_async(zram, &zw->bvec, entry, bio);
 }
 
 /*
@@ -818,6 +818,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
 {
 	struct zram_work work;
 
+	work.bvec = *bvec;
 	work.zram = zram;
 	work.entry = entry;
 	work.bio = bio;
@@ -1281,6 +1281,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	enum dma_status status;
 	unsigned int residue = 0;
 	unsigned int dptr = 0;
+	unsigned int chcrb;
+	unsigned int tcrb;
+	unsigned int i;
 
 	if (!desc)
 		return 0;
@@ -1328,6 +1331,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 		return 0;
 	}
 
+	/*
+	 * We need to read two registers.
+	 * Make sure the control register does not skip to next chunk
+	 * while reading the counter.
+	 * Trying it 3 times should be enough: Initial read, retry, retry
+	 * for the paranoid.
+	 */
+	for (i = 0; i < 3; i++) {
+		chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			RCAR_DMACHCRB_DPTR_MASK;
+		tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
+		/* Still the same? */
+		if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
+			      RCAR_DMACHCRB_DPTR_MASK))
+			break;
+	}
+	WARN_ONCE(i >= 3, "residue might be not continuous!");
+
 	/*
 	 * In descriptor mode the descriptor running pointer is not maintained
 	 * by the interrupt handler, find the running descriptor from the
@@ -1335,8 +1356,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	 * mode just use the running descriptor pointer.
 	 */
 	if (desc->hwdescs.use) {
-		dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
-			RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
+		dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
 		if (dptr == 0)
 			dptr = desc->nchunks;
 		dptr--;
@@ -1354,7 +1374,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
 	}
 
 	/* Add the residue for the current chunk. */
-	residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
+	residue += tcrb << desc->xfer_shift;
 
 	return residue;
 }
@@ -1367,6 +1387,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 	enum dma_status status;
 	unsigned long flags;
 	unsigned int residue;
+	bool cyclic;
 
 	status = dma_cookie_status(chan, cookie, txstate);
 	if (status == DMA_COMPLETE || !txstate)
@@ -1374,10 +1395,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
 
 	spin_lock_irqsave(&rchan->lock, flags);
 	residue = rcar_dmac_chan_get_residue(rchan, cookie);
+	cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
 	spin_unlock_irqrestore(&rchan->lock, flags);
 
 	/* if there's no residue, the cookie is complete */
-	if (!residue)
+	if (!residue && !cyclic)
 		return DMA_COMPLETE;
 
 	dma_set_residue(txstate, residue);
@@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
 		case IRQ_TYPE_EDGE_BOTH:
+			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
 			sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
 			irq_set_handler_locked(data, handle_edge_irq);
 			break;
@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
			      bool *enabled, int width, int height)
 {
	struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
	unsigned long conn_configured, conn_seq, mask;
	unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
	unsigned long conn_configured, conn_seq;
	int i, j;
	bool *save_enabled;
	bool fallback = true, ret = true;
@@ -353,9 +353,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
	drm_modeset_backoff(&ctx);
 
	memcpy(save_enabled, enabled, count);
	conn_seq = GENMASK(count - 1, 0);
	mask = GENMASK(count - 1, 0);
	conn_configured = 0;
 retry:
	conn_seq = conn_configured;
	for (i = 0; i < count; i++) {
		struct drm_fb_helper_connector *fb_conn;
		struct drm_connector *connector;
@@ -368,8 +369,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
		if (conn_configured & BIT(i))
			continue;
 
		/* First pass, only consider tiled connectors */
		if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
		if (conn_seq == 0 && !connector->has_tile)
			continue;
 
		if (connector->status == connector_status_connected)
@@ -473,10 +473,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
		conn_configured |= BIT(i);
	}
 
	if (conn_configured != conn_seq) { /* repeat until no more are found */
		conn_seq = conn_configured;
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;
	}
 
	/*
	 * If the BIOS didn't enable everything it could, fall back to have the
@@ -113,7 +113,7 @@ static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val)
 
 static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
 					   u8 module_id, u8 opcode,
-					   u8 req_size)
+					   u16 req_size)
 {
 	u32 mbox_size, i;
 	u8 header[4];
@@ -998,7 +998,7 @@ static void
 vc4_crtc_reset(struct drm_crtc *crtc)
 {
 	if (crtc->state)
-		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+		vc4_crtc_destroy_state(crtc, crtc->state);
 
 	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
 	if (crtc->state)
@@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
 	othdev->output.port = -1;
 	othdev->output.active = false;
 	gth->output[port].output = NULL;
-	for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
+	for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
 		if (gth->master[master] == port)
 			gth->master[master] = -1;
 	spin_unlock(&gth->gth_lock);
@@ -2014,6 +2014,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
 
 	if (vma->vm_flags & VM_WRITE)
 		return -EPERM;
+	vma->vm_flags &= ~VM_MAYWRITE;
 
 	if (!dev->mdev->clock_info_page)
 		return -EOPNOTSUPP;
@@ -2197,6 +2198,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
 
 		if (vma->vm_flags & VM_WRITE)
 			return -EPERM;
+		vma->vm_flags &= ~VM_MAYWRITE;
 
 		/* Don't expose to user-space information it shouldn't have */
 		if (PAGE_SIZE > 4096)
@@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
 	if (unlikely(mapped_segs == mr->mr.max_segs))
 		return -ENOMEM;
 
-	if (mr->mr.length == 0) {
-		mr->mr.user_base = addr;
-		mr->mr.iova = addr;
-	}
-
 	m = mapped_segs / RVT_SEGSZ;
 	n = mapped_segs % RVT_SEGSZ;
 	mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
 * @sg_nents: number of entries in sg
 * @sg_offset: offset in bytes into sg
 *
+ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
+ *
 * Return: number of sg elements mapped to the memory region
 */
 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 		  int sg_nents, unsigned int *sg_offset)
 {
 	struct rvt_mr *mr = to_imr(ibmr);
+	int ret;
 
 	mr->mr.length = 0;
 	mr->mr.page_shift = PAGE_SHIFT;
-	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
-			      rvt_set_page);
+	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
+	mr->mr.user_base = ibmr->iova;
+	mr->mr.iova = ibmr->iova;
+	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
+	mr->mr.length = (size_t)ibmr->length;
+	return ret;
 }
 
 /**
@@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
 	ibmr->rkey = key;
 	mr->mr.lkey = key;
 	mr->mr.access_flags = access;
+	mr->mr.iova = ibmr->iova;
 	atomic_set(&mr->mr.lkey_invalid, 0);
 
 	return 0;
@@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
 	}
 
 	rc = f11_write_control_regs(fn, &f11->sens_query,
-				    &f11->dev_controls, fn->fd.query_base_addr);
+				    &f11->dev_controls, fn->fd.control_base_addr);
 	if (rc)
 		dev_warn(&fn->dev, "Failed to write control registers\n");
 
@@ -4821,6 +4821,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
 	if (err)
 		goto out;
 
+	mv88e6xxx_ports_cmode_init(chip);
 	mv88e6xxx_phy_init(chip);
 
 	if (chip->info->ops->get_eeprom) {
@@ -1169,6 +1169,12 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (!h->phy_dev)
 		return 0;
 
+	phy_dev->supported &= h->if_support;
+	phy_dev->advertising = phy_dev->supported;
+
+	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+		phy_dev->autoneg = false;
+
 	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
 		phy_dev->dev_flags = 0;
 
@@ -1180,15 +1186,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
 	if (unlikely(ret))
 		return -ENODEV;
 
-	phy_dev->supported &= h->if_support;
-	phy_dev->advertising = phy_dev->supported;
-
-	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
-		phy_dev->autoneg = false;
-
 	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
 		phy_stop(phy_dev);
 
 	return 0;
 }
 
@@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
 	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
 	    adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
-		netdev_notify_peers(netdev);
+		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
 	netif_carrier_on(netdev);
 
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
 	/* create driver workqueue */
 	fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
 					  fm10k_driver_name);
+	if (!fm10k_workqueue)
+		return -ENOMEM;
 
 	fm10k_dbg_init();
 
@@ -4272,7 +4272,7 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 	case PHY_INTERFACE_MODE_RGMII_ID:
 	case PHY_INTERFACE_MODE_RGMII_RXID:
 	case PHY_INTERFACE_MODE_RGMII_TXID:
-		if (port->gop_id == 0)
+		if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
 			goto empty_set;
 		break;
 	default:
@@ -33,6 +33,26 @@
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
+{
+	int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+
+	/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
+	 * The condition checked in mlx5e_rx_is_linear_skb is:
+	 *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE         (1)
+	 * (Note that hw_mtu == sw_mtu + hard_mtu.)
+	 * What is returned from this function is:
+	 *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                         (2)
+	 * After assigning sw_mtu := max_mtu, the left side of (1) turns to
+	 * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
+	 * because both PAGE_SIZE and S are already aligned. Any number greater
+	 * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
+	 * so max_mtu is the maximum MTU allowed.
+	 */
+
+	return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
+}
+
 static inline bool
 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
 		    struct xdp_buff *xdp)
@@ -207,9 +227,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 		sqcc++;
 
 		if (is_redirect) {
-			xdp_return_frame(xdpi->xdpf);
 			dma_unmap_single(sq->pdev, xdpi->dma_addr,
 					 xdpi->xdpf->len, DMA_TO_DEVICE);
+			xdp_return_frame(xdpi->xdpf);
 		} else {
 			/* Recycle RX page */
 			mlx5e_page_release(rq, &xdpi->di, true);
@@ -243,9 +263,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 		sq->cc++;
 
 		if (is_redirect) {
-			xdp_return_frame(xdpi->xdpf);
 			dma_unmap_single(sq->pdev, xdpi->dma_addr,
 					 xdpi->xdpf->len, DMA_TO_DEVICE);
+			xdp_return_frame(xdpi->xdpf);
 		} else {
 			/* Recycle RX page */
 			mlx5e_page_release(rq, &xdpi->di, false);
@@ -34,12 +34,11 @@
 
 #include "en.h"
 
-#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
-				 MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 #define MLX5E_XDP_TX_DS_COUNT \
 	((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
 
+int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		      void *va, u16 *rx_headroom, u32 *len);
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
@@ -1317,7 +1317,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
 		break;
 	case MLX5_MODULE_ID_SFP:
 		modinfo->type       = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
 		break;
 	default:
 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
@@ -3761,7 +3761,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	if (params->xdp_prog &&
 	    !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
 		netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
-			   new_mtu, MLX5E_XDP_MAX_MTU);
+			   new_mtu, mlx5e_xdp_max_mtu(params));
 		err = -EINVAL;
 		goto out;
 	}
@@ -4227,7 +4227,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
 
 	if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
 		netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
-			    new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
+			    new_channels.params.sw_mtu,
+			    mlx5e_xdp_max_mtu(&new_channels.params));
 		return -EINVAL;
 	}
 
@@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
 	i2c_addr = MLX5_I2C_ADDR_LOW;
-	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-		i2c_addr = MLX5_I2C_ADDR_HIGH;
-		offset -= MLX5_EEPROM_PAGE_LENGTH;
-	}
 
 	MLX5_SET(mcia_reg, in, l, 0);
 	MLX5_SET(mcia_reg, in, module, module_num);
@@ -27,7 +27,7 @@
 
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
-#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	13000
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	20000
 #define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
|
|||
if (err)
|
||||
return err;
|
||||
|
||||
mlxsw_sp_port->link.autoneg = autoneg;
|
||||
|
||||
if (!netif_running(dev))
|
||||
return 0;
|
||||
|
||||
mlxsw_sp_port->link.autoneg = autoneg;
|
||||
|
||||
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
|
||||
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
|
||||
|
||||
|
@ -2783,7 +2783,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
|
|||
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
|
||||
MLXSW_REG_QEEC_HIERARCY_TC,
|
||||
i + 8, i,
|
||||
false, 0);
|
||||
true, 100);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
|
|
@@ -2595,8 +2595,6 @@ static int stmmac_open(struct net_device *dev)
 	u32 chan;
 	int ret;
 
-	stmmac_check_ether_addr(priv);
-
 	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
 	    priv->hw->pcs != STMMAC_PCS_TBI &&
 	    priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -4296,6 +4294,8 @@ int stmmac_dvr_probe(struct device *device,
 	if (ret)
 		goto error_hw_init;
 
+	stmmac_check_ether_addr(priv);
+
 	/* Configure real RX and TX queues */
 	netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
 	netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
@@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 		},
 		.driver_data = (void *)&galileo_stmmac_dmi_data,
 	},
+	/*
+	 * There are 2 types of SIMATIC IOT2000: IOT20202 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
+	 * has only one pci network device while other asset tags are
+	 * for IOT2040 which has two.
+	 */
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
 	{
 		.matches = {
 			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
-			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
-					"6ES7647-0AA00-1YA2"),
 		},
 		.driver_data = (void *)&iot2040_stmmac_dmi_data,
 	},
@@ -153,7 +153,7 @@ slhc_init(int rslots, int tslots)
 void
 slhc_free(struct slcompress *comp)
 {
-	if ( comp == NULLSLCOMPR )
+	if ( IS_ERR_OR_NULL(comp) )
 		return;
 
 	if ( comp->tstate != NULLSLSTATE )
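Context for the slhc_free() change above: kernel callers encode errno values inside pointers, so a failed slhc_init() can hand back an ERR_PTR rather than NULL, and slhc_free() must tolerate both. A simplified userspace re-implementation of the convention, for illustration only (the real helpers live in include/linux/err.h):

/* Simplified mirror of the kernel's ERR_PTR/IS_ERR_OR_NULL helpers,
 * showing why slhc_free() must accept error pointers, not just NULL:
 * the top 4095 addresses are reserved for encoded errno values. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;		/* e.g. ERR_PTR(-ENOMEM) */
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}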
@@ -1160,6 +1160,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
 		return -EINVAL;
 	}
 
+	if (netdev_has_upper_dev(dev, port_dev)) {
+		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
+		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
+			   portname);
+		return -EBUSY;
+	}
+
 	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
 	    vlan_uses_dev(dev)) {
 		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
@@ -1899,14 +1899,11 @@ int usb_runtime_idle(struct device *dev)
 	return -EBUSY;
 }
 
-int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 {
 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
 	int ret = -EPERM;
 
-	if (enable && !udev->usb2_hw_lpm_allowed)
-		return 0;
-
 	if (hcd->driver->set_usb2_hw_lpm) {
 		ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
 		if (!ret)
@@ -1916,6 +1913,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
 	return ret;
 }
 
+int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_capable ||
+	    !udev->usb2_hw_lpm_allowed ||
+	    udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 1);
+}
+
+int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	if (!udev->usb2_hw_lpm_enabled)
+		return 0;
+
+	return usb_set_usb2_hardware_lpm(udev, 0);
+}
+
 #endif /* CONFIG_PM */
 
 struct bus_type usb_bus_type = {
@@ -3217,8 +3217,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	}
 
 	/* disable USB2 hardware LPM */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	if (usb_disable_ltm(udev)) {
 		dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
@@ -3256,8 +3255,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
 	usb_enable_ltm(udev);
  err_ltm:
 	/* Try to enable USB2 hardware LPM again */
-	if (udev->usb2_hw_lpm_capable == 1)
-		usb_set_usb2_hardware_lpm(udev, 1);
+	usb_enable_usb2_hardware_lpm(udev);
 
 	if (udev->do_remote_wakeup)
 		(void) usb_disable_remote_wakeup(udev);
@@ -3540,8 +3538,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 		hub_port_logical_disconnect(hub, port1);
 	} else {
 		/* Try to enable USB2 hardware LPM */
-		if (udev->usb2_hw_lpm_capable == 1)
-			usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 
 		/* Try to enable USB3 LTM */
 		usb_enable_ltm(udev);
@@ -4432,7 +4429,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
 	if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
 			connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
 		udev->usb2_hw_lpm_allowed = 1;
-		usb_set_usb2_hardware_lpm(udev, 1);
+		usb_enable_usb2_hardware_lpm(udev);
 	}
 }
 
@@ -5608,8 +5605,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
 	/* Disable USB2 hardware LPM.
 	 * It will be re-enabled by the enumeration process.
 	 */
-	if (udev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(udev, 0);
+	usb_disable_usb2_hardware_lpm(udev);
 
 	/* Disable LPM while we reset the device and reinstall the alt settings.
 	 * Device-initiated LPM, and system exit latency settings are cleared
@@ -5712,7 +5708,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
 
 done:
 	/* Now that the alt settings are re-installed, enable LTM and LPM. */
-	usb_set_usb2_hardware_lpm(udev, 1);
+	usb_enable_usb2_hardware_lpm(udev);
 	usb_unlocked_enable_lpm(udev);
 	usb_enable_ltm(udev);
 	usb_release_bos_descriptor(udev);
@@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
 		dev->actconfig->interface[i] = NULL;
 	}
 
-	if (dev->usb2_hw_lpm_enabled == 1)
-		usb_set_usb2_hardware_lpm(dev, 0);
+	usb_disable_usb2_hardware_lpm(dev);
 	usb_unlocked_disable_lpm(dev);
 	usb_disable_ltm(dev);
 
@@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
 
 	if (!ret) {
 		udev->usb2_hw_lpm_allowed = value;
-		ret = usb_set_usb2_hardware_lpm(udev, value);
+		if (value)
+			ret = usb_enable_usb2_hardware_lpm(udev);
+		else
+			ret = usb_disable_usb2_hardware_lpm(udev);
 	}
 
 	usb_unlock_device(udev);
@@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
 extern int usb_runtime_suspend(struct device *dev);
 extern int usb_runtime_resume(struct device *dev);
 extern int usb_runtime_idle(struct device *dev);
-extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
+extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
+extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
 
 #else
 
@@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
 	return 0;
 }
 
-static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
+static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
+{
+	return 0;
+}
+
+static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
 {
 	return 0;
 }
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1110,12 +1117,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		goto out_unlock;
 	}
 
+	if (!iommu->dma_avail) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -1612,6 +1625,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
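A hedged userspace sketch of how the new limit surfaces: each VFIO_IOMMU_MAP_DMA ioctl consumes one dma_avail slot, and once dma_entry_limit mappings (default 65535) exist in a container, further maps fail with ENOSPC. The container/group setup is assumed to have been done already; the buffer and iova values are illustrative, not from the patch.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Assumes 'container' is an open, configured VFIO container fd and
 * 'buf' is a page-aligned, page-sized buffer. Returns the ioctl
 * result; -1 with errno == ENOSPC once the per-container limit
 * added by this patch is exhausted. */
int map_one_page(int container, void *buf, unsigned long long iova)
{
	struct vfio_iommu_type1_dma_map map;

	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	map.vaddr = (unsigned long long)buf;
	map.iova  = iova;
	map.size  = 4096;

	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}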
fs/aio.c (368 changed lines)
@@ -161,9 +161,13 @@ struct kioctx {
 	unsigned		id;
 };
 
+/*
+ * First field must be the file pointer in all the
+ * iocb unions! See also 'struct kiocb' in <linux/fs.h>
+ */
 struct fsync_iocb {
-	struct work_struct	work;
 	struct file		*file;
+	struct work_struct	work;
 	bool			datasync;
 };
 
@@ -171,14 +175,21 @@ struct poll_iocb {
 	struct file		*file;
 	struct wait_queue_head	*head;
 	__poll_t		events;
-	bool			woken;
+	bool			done;
 	bool			cancelled;
 	struct wait_queue_entry	wait;
 	struct work_struct	work;
 };
 
+/*
+ * NOTE! Each of the iocb union members has the file pointer
+ * as the first entry in their struct definition. So you can
+ * access the file pointer through any of the sub-structs,
+ * or directly as just 'ki_filp' in this struct.
+ */
 struct aio_kiocb {
 	union {
+		struct file		*ki_filp;
 		struct kiocb		rw;
 		struct fsync_iocb	fsync;
 		struct poll_iocb	poll;
@@ -187,8 +198,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -902,7 +912,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 	local_irq_restore(flags);
 }
 
-static bool get_reqs_available(struct kioctx *ctx)
+static bool __get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
@@ -994,32 +1004,35 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static bool get_reqs_available(struct kioctx *ctx)
+{
+	if (__get_reqs_available(ctx))
+		return true;
+	user_refill_reqs_available(ctx);
+	return __get_reqs_available(ctx);
+}
+
 /* aio_get_req
 *	Allocate a slot for an aio request.
 * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
 */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct aio_kiocb *req;
 
-	if (!get_reqs_available(ctx)) {
-		user_refill_reqs_available(ctx);
-		if (!get_reqs_available(ctx))
-			return NULL;
-	}
-
-	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 	if (unlikely(!req))
-		goto out_put;
+		return NULL;
 
 	percpu_ref_get(&ctx->reqs);
-	INIT_LIST_HEAD(&req->ki_list);
-	refcount_set(&req->ki_refcnt, 0);
 	req->ki_ctx = ctx;
+	INIT_LIST_HEAD(&req->ki_list);
+	refcount_set(&req->ki_refcnt, 2);
+	req->ki_eventfd = NULL;
 	return req;
-out_put:
-	put_reqs_available(ctx, 1);
-	return NULL;
 }
 
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -1050,19 +1063,18 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	return ret;
 }
 
-static inline void iocb_put(struct aio_kiocb *iocb)
+static inline void iocb_destroy(struct aio_kiocb *iocb)
 {
-	if (refcount_read(&iocb->ki_refcnt) == 0 ||
-	    refcount_dec_and_test(&iocb->ki_refcnt)) {
-		percpu_ref_put(&iocb->ki_ctx->reqs);
-		kmem_cache_free(kiocb_cachep, iocb);
-	}
+	if (iocb->ki_filp)
+		fput(iocb->ki_filp);
+	percpu_ref_put(&iocb->ki_ctx->reqs);
+	kmem_cache_free(kiocb_cachep, iocb);
}
 
 /* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static void aio_complete(struct aio_kiocb *iocb)
 {
 	struct kioctx	*ctx = iocb->ki_ctx;
 	struct aio_ring	*ring;
@@ -1086,17 +1098,14 @@ static void aio_complete(struct aio_kiocb *iocb)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	event->data = iocb->ki_user_data;
-	event->res = res;
-	event->res2 = res2;
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1138,7 +1147,14 @@ static void aio_complete(struct aio_kiocb *iocb)
 
 	if (waitqueue_active(&ctx->wait))
 		wake_up(&ctx->wait);
-	iocb_put(iocb);
+}
+
+static inline void iocb_put(struct aio_kiocb *iocb)
+{
+	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
+		aio_complete(iocb);
+		iocb_destroy(iocb);
+	}
 }
 
 /* aio_read_events_ring
@@ -1412,18 +1428,17 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 		file_end_write(kiocb->ki_filp);
 	}
 
-	fput(kiocb->ki_filp);
-	aio_complete(iocb, res, res2);
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
+	iocb_put(iocb);
 }
 
-static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 {
 	int ret;
 
-	req->ki_filp = fget(iocb->aio_fildes);
-	if (unlikely(!req->ki_filp))
-		return -EBADF;
 	req->ki_complete = aio_complete_rw;
+	req->private = NULL;
 	req->ki_pos = iocb->aio_offset;
 	req->ki_flags = iocb_flags(req->ki_filp);
 	if (iocb->aio_flags & IOCB_FLAG_RESFD)
@@ -1438,7 +1453,6 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 		ret = ioprio_check_cap(iocb->aio_reqprio);
 		if (ret) {
 			pr_debug("aio ioprio check cap error: %d\n", ret);
-			fput(req->ki_filp);
 			return ret;
 		}
 
@@ -1448,11 +1462,13 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
 
 	ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
 	if (unlikely(ret))
-		fput(req->ki_filp);
-	return ret;
+		return ret;
+
+	req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
+	return 0;
 }
 
-static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
 		bool vectored, bool compat, struct iov_iter *iter)
 {
 	void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
@@ -1487,12 +1503,12 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
 		ret = -EINTR;
 		/*FALLTHRU*/
 	default:
-		aio_complete_rw(req, ret, 0);
+		req->ki_complete(req, ret, 0);
 	}
 }
 
-static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+			bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1503,29 +1519,24 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
 	if (ret)
 		return ret;
 	file = req->ki_filp;
-
-	ret = -EBADF;
 	if (unlikely(!(file->f_mode & FMODE_READ)))
-		goto out_fput;
+		return -EBADF;
 	ret = -EINVAL;
 	if (unlikely(!file->f_op->read_iter))
-		goto out_fput;
+		return -EINVAL;
 
 	ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
 	if (ret)
-		goto out_fput;
+		return ret;
 	ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret)
 		aio_rw_done(req, call_read_iter(file, req, &iter));
 	kfree(iovec);
-out_fput:
-	if (unlikely(ret))
-		fput(file);
 	return ret;
 }
 
-static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
-		bool compat)
+static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+			 bool vectored, bool compat)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct iov_iter iter;
@@ -1537,16 +1548,14 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 		return ret;
 	file = req->ki_filp;
 
-	ret = -EBADF;
 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
-		goto out_fput;
-	ret = -EINVAL;
+		return -EBADF;
 	if (unlikely(!file->f_op->write_iter))
-		goto out_fput;
+		return -EINVAL;
 
 	ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
 	if (ret)
-		goto out_fput;
+		return ret;
 	ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
 	if (!ret) {
 		/*
@@ -1564,35 +1573,26 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
 		aio_rw_done(req, call_write_iter(file, req, &iter));
 	}
 	kfree(iovec);
-out_fput:
-	if (unlikely(ret))
-		fput(file);
 	return ret;
 }
 
 static void aio_fsync_work(struct work_struct *work)
 {
-	struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
-	int ret;
+	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
 
-	ret = vfs_fsync(req->file, req->datasync);
-	fput(req->file);
-	aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+	iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
+	iocb_put(iocb);
 }
 
-static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
+static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
+		     bool datasync)
 {
 	if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
 			iocb->aio_rw_flags))
 		return -EINVAL;
 
 	req->file = fget(iocb->aio_fildes);
 	if (unlikely(!req->file))
 		return -EBADF;
-	if (unlikely(!req->file->f_op->fsync)) {
-		fput(req->file);
+	if (unlikely(!req->file->f_op->fsync))
 		return -EINVAL;
-	}
 
 	req->datasync = datasync;
 	INIT_WORK(&req->work, aio_fsync_work);
@@ -1600,14 +1600,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
 	return 0;
 }
 
-static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-	struct file *file = iocb->poll.file;
-
-	aio_complete(iocb, mangle_poll(mask), 0);
-	fput(file);
-}
-
 static void aio_poll_complete_work(struct work_struct *work)
 {
 	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1633,9 +1625,11 @@ static void aio_poll_complete_work(struct work_struct *work)
 		return;
 	}
 	list_del_init(&iocb->ki_list);
+	iocb->ki_res.res = mangle_poll(mask);
+	req->done = true;
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	aio_poll_complete(iocb, mask);
+	iocb_put(iocb);
 }
 
 /* assumes we are called with irqs disabled */
@@ -1663,31 +1657,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 	__poll_t mask = key_to_poll(key);
 	unsigned long flags;
 
-	req->woken = true;
-
 	/* for instances that support it check for an event match first: */
-	if (mask) {
-		if (!(mask & req->events))
-			return 0;
+	if (mask && !(mask & req->events))
+		return 0;
 
+	list_del_init(&req->wait.entry);
+
+	if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
 		/*
 		 * Try to complete the iocb inline if we can. Use
 		 * irqsave/irqrestore because not all filesystems (e.g. fuse)
 		 * call this function with IRQs disabled and because IRQs
 		 * have to be disabled before ctx_lock is obtained.
 		 */
-		if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
-			list_del(&iocb->ki_list);
-			spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
-
-			list_del_init(&req->wait.entry);
-			aio_poll_complete(iocb, mask);
-			return 1;
-		}
+		list_del(&iocb->ki_list);
+		iocb->ki_res.res = mangle_poll(mask);
+		req->done = true;
+		spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
+		iocb_put(iocb);
+	} else {
+		schedule_work(&req->work);
 	}
-
-	list_del_init(&req->wait.entry);
-	schedule_work(&req->work);
 	return 1;
 }
 
@@ -1714,11 +1704,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
 	add_wait_queue(head, &pt->iocb->poll.wait);
 }
 
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 {
 	struct kioctx *ctx = aiocb->ki_ctx;
 	struct poll_iocb *req = &aiocb->poll;
 	struct aio_poll_table apt;
+	bool cancel = false;
 	__poll_t mask;
 
 	/* reject any unknown events outside the normal event mask. */
@@ -1730,9 +1721,10 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 
 	INIT_WORK(&req->work, aio_poll_complete_work);
 	req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-	req->file = fget(iocb->aio_fildes);
-	if (unlikely(!req->file))
-		return -EBADF;
 
 	req->head = NULL;
-	req->woken = false;
+	req->done = false;
 	req->cancelled = false;
 
 	apt.pt._qproc = aio_poll_queue_proc;
 	apt.pt._key = req->events;
@@ -1743,83 +1735,79 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
 	INIT_LIST_HEAD(&req->wait.entry);
 	init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-	/* one for removal from waitqueue, one for this function */
-	refcount_set(&aiocb->ki_refcnt, 2);
-
 	mask = vfs_poll(req->file, &apt.pt) & req->events;
-	if (unlikely(!req->head)) {
-		/* we did not manage to set up a waitqueue, done */
-		goto out;
-	}
-
 	spin_lock_irq(&ctx->ctx_lock);
-	spin_lock(&req->head->lock);
-	if (req->woken) {
-		/* wake_up context handles the rest */
-		mask = 0;
+	if (likely(req->head)) {
+		spin_lock(&req->head->lock);
+		if (unlikely(list_empty(&req->wait.entry))) {
+			if (apt.error)
+				cancel = true;
+			apt.error = 0;
+			mask = 0;
+		}
+		if (mask || apt.error) {
+			list_del_init(&req->wait.entry);
+		} else if (cancel) {
+			WRITE_ONCE(req->cancelled, true);
+		} else if (!req->done) { /* actually waiting for an event */
+			list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
+			aiocb->ki_cancel = aio_poll_cancel;
+		}
+		spin_unlock(&req->head->lock);
+	}
+	if (mask) { /* no async, we'd stolen it */
+		aiocb->ki_res.res = mangle_poll(mask);
 		apt.error = 0;
-	} else if (mask || apt.error) {
-		/* if we get an error or a mask we are done */
-		WARN_ON_ONCE(list_empty(&req->wait.entry));
-		list_del_init(&req->wait.entry);
-	} else {
-		/* actually waiting for an event */
-		list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-		aiocb->ki_cancel = aio_poll_cancel;
 	}
-	spin_unlock(&req->head->lock);
 	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	if (unlikely(apt.error)) {
-		fput(req->file);
-		return apt.error;
-	}
-
 	if (mask)
-		aio_poll_complete(aiocb, mask);
-	iocb_put(aiocb);
-	return 0;
+		iocb_put(aiocb);
+	return apt.error;
 }
 
-static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 bool compat)
+static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
+			   struct iocb __user *user_iocb, bool compat)
 {
 	struct aio_kiocb *req;
-	struct iocb iocb;
 	ssize_t ret;
 
-	if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
-		return -EFAULT;
-
 	/* enforce forwards compatibility on users */
-	if (unlikely(iocb.aio_reserved2)) {
+	if (unlikely(iocb->aio_reserved2)) {
 		pr_debug("EINVAL: reserve field set\n");
 		return -EINVAL;
 	}
 
 	/* prevent overflows */
 	if (unlikely(
-	    (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
|
||||
(iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
|
||||
((ssize_t)iocb.aio_nbytes < 0)
|
||||
(iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
|
||||
(iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
|
||||
((ssize_t)iocb->aio_nbytes < 0)
|
||||
)) {
|
||||
pr_debug("EINVAL: overflow check\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
req = aio_get_req(ctx);
|
||||
if (unlikely(!req))
|
||||
if (!get_reqs_available(ctx))
|
||||
return -EAGAIN;
|
||||
|
||||
if (iocb.aio_flags & IOCB_FLAG_RESFD) {
|
||||
ret = -EAGAIN;
|
||||
req = aio_get_req(ctx);
|
||||
if (unlikely(!req))
|
||||
goto out_put_reqs_available;
|
||||
|
||||
req->ki_filp = fget(iocb->aio_fildes);
|
||||
ret = -EBADF;
|
||||
if (unlikely(!req->ki_filp))
|
||||
goto out_put_req;
|
||||
|
||||
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
|
||||
/*
|
||||
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
|
||||
* instance of the file* now. The file descriptor must be
|
||||
* an eventfd() fd, and will be signaled for each completed
|
||||
* event using the eventfd_signal() function.
|
||||
*/
|
||||
req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
|
||||
req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
|
||||
if (IS_ERR(req->ki_eventfd)) {
|
||||
ret = PTR_ERR(req->ki_eventfd);
|
||||
req->ki_eventfd = NULL;
|
||||
|
@ -1833,54 +1821,70 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
|
|||
goto out_put_req;
|
||||
}
|
||||
|
||||
req->ki_user_iocb = user_iocb;
|
||||
req->ki_user_data = iocb.aio_data;
|
||||
req->ki_res.obj = (u64)(unsigned long)user_iocb;
|
||||
req->ki_res.data = iocb->aio_data;
|
||||
req->ki_res.res = 0;
|
||||
req->ki_res.res2 = 0;
|
||||
|
||||
switch (iocb.aio_lio_opcode) {
|
||||
switch (iocb->aio_lio_opcode) {
|
||||
case IOCB_CMD_PREAD:
|
||||
ret = aio_read(&req->rw, &iocb, false, compat);
|
||||
ret = aio_read(&req->rw, iocb, false, compat);
|
||||
break;
|
||||
case IOCB_CMD_PWRITE:
|
||||
ret = aio_write(&req->rw, &iocb, false, compat);
|
||||
ret = aio_write(&req->rw, iocb, false, compat);
|
||||
break;
|
||||
case IOCB_CMD_PREADV:
|
||||
ret = aio_read(&req->rw, &iocb, true, compat);
|
||||
ret = aio_read(&req->rw, iocb, true, compat);
|
||||
break;
|
||||
case IOCB_CMD_PWRITEV:
|
||||
ret = aio_write(&req->rw, &iocb, true, compat);
|
||||
ret = aio_write(&req->rw, iocb, true, compat);
|
||||
break;
|
||||
case IOCB_CMD_FSYNC:
|
||||
ret = aio_fsync(&req->fsync, &iocb, false);
|
||||
ret = aio_fsync(&req->fsync, iocb, false);
|
||||
break;
|
||||
case IOCB_CMD_FDSYNC:
|
||||
ret = aio_fsync(&req->fsync, &iocb, true);
|
||||
ret = aio_fsync(&req->fsync, iocb, true);
|
||||
break;
|
||||
case IOCB_CMD_POLL:
|
||||
ret = aio_poll(req, &iocb);
|
||||
ret = aio_poll(req, iocb);
|
||||
break;
|
||||
default:
|
||||
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
|
||||
pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
/* Done with the synchronous reference */
|
||||
iocb_put(req);
|
||||
|
||||
/*
|
||||
* If ret is 0, we'd either done aio_complete() ourselves or have
|
||||
* arranged for that to be done asynchronously. Anything non-zero
|
||||
* means that we need to destroy req ourselves.
|
||||
*/
|
||||
if (ret)
|
||||
goto out_put_req;
|
||||
return 0;
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
out_put_req:
|
||||
put_reqs_available(ctx, 1);
|
||||
percpu_ref_put(&ctx->reqs);
|
||||
if (req->ki_eventfd)
|
||||
eventfd_ctx_put(req->ki_eventfd);
|
||||
kmem_cache_free(kiocb_cachep, req);
|
||||
iocb_destroy(req);
|
||||
out_put_reqs_available:
|
||||
put_reqs_available(ctx, 1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
|
||||
bool compat)
|
||||
{
|
||||
struct iocb iocb;
|
||||
|
||||
if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
|
||||
return -EFAULT;
|
||||
|
||||
return __io_submit_one(ctx, &iocb, user_iocb, compat);
|
||||
}
|
||||
|
||||
/* sys_io_submit:
|
||||
* Queue the nr iocbs pointed to by iocbpp for processing. Returns
|
||||
* the number of iocbs queued. May return -EINVAL if the aio_context
|
||||
|
@ -1973,24 +1977,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
|
|||
}
|
||||
#endif
|
||||
|
||||
/* lookup_kiocb
|
||||
* Finds a given iocb for cancellation.
|
||||
*/
|
||||
static struct aio_kiocb *
|
||||
lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
|
||||
{
|
||||
struct aio_kiocb *kiocb;
|
||||
|
||||
assert_spin_locked(&ctx->ctx_lock);
|
||||
|
||||
/* TODO: use a hash or array, this sucks. */
|
||||
list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
|
||||
if (kiocb->ki_user_iocb == iocb)
|
||||
return kiocb;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* sys_io_cancel:
|
||||
* Attempts to cancel an iocb previously passed to io_submit. If
|
||||
* the operation is successfully cancelled, the resulting event is
|
||||
|
@ -2008,6 +1994,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
|
|||
struct aio_kiocb *kiocb;
|
||||
int ret = -EINVAL;
|
||||
u32 key;
|
||||
u64 obj = (u64)(unsigned long)iocb;
|
||||
|
||||
if (unlikely(get_user(key, &iocb->aio_key)))
|
||||
return -EFAULT;
|
||||
|
@ -2019,10 +2006,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
|
|||
return -EINVAL;
|
||||
|
||||
spin_lock_irq(&ctx->ctx_lock);
|
||||
kiocb = lookup_kiocb(ctx, iocb);
|
||||
if (kiocb) {
|
||||
ret = kiocb->ki_cancel(&kiocb->rw);
|
||||
list_del_init(&kiocb->ki_list);
|
||||
/* TODO: use a hash or array, this sucks. */
|
||||
list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
|
||||
if (kiocb->ki_res.obj == obj) {
|
||||
ret = kiocb->ki_cancel(&kiocb->rw);
|
||||
list_del_init(&kiocb->ki_list);
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(&ctx->ctx_lock);
|
||||
|
||||
|
|
|
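The aio hunks above converge on one idea: the iocb is pinned by a reference count, the completion record lives in the aio_kiocb itself (ki_res), and whichever side drops the last reference delivers the event, so neither the submitter nor the completer can free the object under the other. A minimal userspace sketch of that pattern follows; the names and C11 atomics are illustrative, not the kernel's refcount_t API.

	/* Sketch: "store the event at the final put". Illustrative only. */
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct op {
		atomic_int refcnt;	/* one ref for the submitter, one for the completer */
		long res;		/* result, only consumed at the final put */
	};

	static struct op *op_new(void)
	{
		struct op *op = malloc(sizeof(*op));

		if (op) {
			atomic_init(&op->refcnt, 2);
			op->res = 0;
		}
		return op;
	}

	static void op_put(struct op *op)
	{
		/* Whoever drops the last reference delivers the result and
		 * frees; a racing put cannot use-after-free the object. */
		if (atomic_fetch_sub(&op->refcnt, 1) == 1) {
			printf("completed, res=%ld\n", op->res);
			free(op);
		}
	}

	int main(void)
	{
		struct op *op = op_new();

		if (!op)
			return 1;
		op->res = 42;	/* completion side fills the result... */
		op_put(op);	/* ...and drops its reference */
		op_put(op);	/* submission path drops its reference */
		return 0;
	}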
@@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 {
 	struct ceph_inode_info *dci = ceph_inode(dir);
+	unsigned hash;
 
 	switch (dci->i_dir_layout.dl_dir_hash) {
 	case 0:	/* for backward compat */
@@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
 		return dn->d_name.hash;
 
 	default:
-		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+		spin_lock(&dn->d_lock);
+		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
 				     dn->d_name.name, dn->d_name.len);
+		spin_unlock(&dn->d_lock);
+		return hash;
 	}
 }
@@ -1290,6 +1290,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 			list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
 			ci->i_prealloc_cap_flush = NULL;
 		}
+
+		if (drop &&
+		    ci->i_wrbuffer_ref_head == 0 &&
+		    ci->i_wr_ref == 0 &&
+		    ci->i_dirty_caps == 0 &&
+		    ci->i_flushing_caps == 0) {
+			ceph_put_snap_context(ci->i_head_snapc);
+			ci->i_head_snapc = NULL;
+		}
 	}
 	spin_unlock(&ci->i_ceph_lock);
 	while (!list_empty(&to_remove)) {
@@ -1945,10 +1954,39 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
 	return path;
 }
 
+/* Duplicate the dentry->d_name.name safely */
+static int clone_dentry_name(struct dentry *dentry, const char **ppath,
+			     int *ppathlen)
+{
+	u32 len;
+	char *name;
+
+retry:
+	len = READ_ONCE(dentry->d_name.len);
+	name = kmalloc(len + 1, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
+	spin_lock(&dentry->d_lock);
+	if (dentry->d_name.len != len) {
+		spin_unlock(&dentry->d_lock);
+		kfree(name);
+		goto retry;
+	}
+	memcpy(name, dentry->d_name.name, len);
+	spin_unlock(&dentry->d_lock);
+
+	name[len] = '\0';
+	*ppath = name;
+	*ppathlen = len;
+	return 0;
+}
+
 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 			     const char **ppath, int *ppathlen, u64 *pino,
-			     int *pfreepath)
+			     bool *pfreepath, bool parent_locked)
 {
+	int ret;
 	char *path;
 
 	rcu_read_lock();
@@ -1957,8 +1995,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
 		*pino = ceph_ino(dir);
 		rcu_read_unlock();
-		*ppath = dentry->d_name.name;
-		*ppathlen = dentry->d_name.len;
+		if (parent_locked) {
+			*ppath = dentry->d_name.name;
+			*ppathlen = dentry->d_name.len;
+		} else {
+			ret = clone_dentry_name(dentry, ppath, ppathlen);
+			if (ret)
+				return ret;
+			*pfreepath = true;
+		}
 		return 0;
 	}
 	rcu_read_unlock();
@@ -1966,13 +2011,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
 static int build_inode_path(struct inode *inode,
 			    const char **ppath, int *ppathlen, u64 *pino,
-			    int *pfreepath)
+			    bool *pfreepath)
 {
 	struct dentry *dentry;
 	char *path;
@@ -1988,7 +2033,7 @@ static int build_inode_path(struct inode *inode,
 	if (IS_ERR(path))
 		return PTR_ERR(path);
 	*ppath = path;
-	*pfreepath = 1;
+	*pfreepath = true;
 	return 0;
 }
 
@@ -1999,7 +2044,7 @@ static int build_inode_path(struct inode *inode,
 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 				 struct inode *rdiri, const char *rpath,
 				 u64 rino, const char **ppath, int *pathlen,
-				 u64 *ino, int *freepath)
+				 u64 *ino, bool *freepath, bool parent_locked)
 {
 	int r = 0;
 
@@ -2009,7 +2054,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
 			ceph_snap(rinode));
 	} else if (rdentry) {
 		r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
-					freepath);
+					freepath, parent_locked);
 		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
 		     *ppath);
 	} else if (rpath || rino) {
@@ -2035,7 +2080,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 	const char *path2 = NULL;
 	u64 ino1 = 0, ino2 = 0;
 	int pathlen1 = 0, pathlen2 = 0;
-	int freepath1 = 0, freepath2 = 0;
+	bool freepath1 = false, freepath2 = false;
 	int len;
 	u16 releases;
 	void *p, *end;
@@ -2043,16 +2088,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
 
 	ret = set_request_path_attr(req->r_inode, req->r_dentry,
 			      req->r_parent, req->r_path1, req->r_ino1.ino,
-			      &path1, &pathlen1, &ino1, &freepath1);
+			      &path1, &pathlen1, &ino1, &freepath1,
+			      test_bit(CEPH_MDS_R_PARENT_LOCKED,
+					&req->r_req_flags));
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out;
 	}
 
+	/* If r_old_dentry is set, then assume that its parent is locked */
 	ret = set_request_path_attr(NULL, req->r_old_dentry,
 			      req->r_old_dentry_dir,
 			      req->r_path2, req->r_ino2.ino,
-			      &path2, &pathlen2, &ino2, &freepath2);
+			      &path2, &pathlen2, &ino2, &freepath2, true);
 	if (ret < 0) {
 		msg = ERR_PTR(ret);
 		goto out_free1;
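clone_dentry_name() above is an instance of optimistic allocate-then-revalidate: read the name length without the lock, allocate, then re-check the length under d_lock and retry if a rename changed it in between, so the allocation never happens with the spinlock held. A userspace sketch of the same loop, with hypothetical types and a pthread mutex standing in for d_lock:

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	struct name {
		pthread_mutex_t lock;
		size_t len;
		char buf[256];
	};

	static char *clone_name(struct name *n)
	{
		for (;;) {
			size_t len = __atomic_load_n(&n->len, __ATOMIC_RELAXED);
			char *copy = malloc(len + 1);	/* allocate without the lock */

			if (!copy)
				return NULL;
			pthread_mutex_lock(&n->lock);
			if (n->len != len) {	/* renamed underneath us: retry */
				pthread_mutex_unlock(&n->lock);
				free(copy);
				continue;
			}
			memcpy(copy, n->buf, len);
			pthread_mutex_unlock(&n->lock);
			copy[len] = '\0';
			return copy;
		}
	}

	int main(void)
	{
		struct name n = { PTHREAD_MUTEX_INITIALIZER, 5, "hello" };
		char *c = clone_name(&n);

		free(c);
		return 0;
	}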
@@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 	old_snapc = NULL;
 
 update_snapc:
-	if (ci->i_head_snapc) {
+	if (ci->i_wrbuffer_ref_head == 0 &&
+	    ci->i_wr_ref == 0 &&
+	    ci->i_dirty_caps == 0 &&
+	    ci->i_flushing_caps == 0) {
+		ci->i_head_snapc = NULL;
+	} else {
 		ci->i_head_snapc = ceph_get_snap_context(new_snapc);
 		dout(" new snapc is %p\n", new_snapc);
 	}
@@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
 	if (rc == 0 || rc != -EBUSY)
 		goto do_rename_exit;
 
+	/* Don't fall back to using SMB on SMB 2+ mount */
+	if (server->vals->protocol_id != 0)
+		goto do_rename_exit;
+
 	/* open-file renames don't work across directories */
 	if (to_dentry->d_parent != from_dentry->d_parent)
 		goto do_rename_exit;
@@ -3285,6 +3285,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 					    rc);
 		}
 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+		cifs_small_buf_release(req);
 		return rc == -ENODATA ? 0 : rc;
 	} else
 		trace_smb3_read_done(xid, req->PersistentFileId,
@@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
 	bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
 	if (IS_ERR(bh)) {
 		ret = PTR_ERR(bh);
+		bh = NULL;
 		goto out;
 	}
 
@@ -2907,6 +2908,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
 			if (error == -EIO)
 				EXT4_ERROR_INODE(inode, "block %llu read error",
 						 EXT4_I(inode)->i_file_acl);
+			bh = NULL;
 			goto cleanup;
 		}
 		error = ext4_xattr_check_block(inode, bh);
@@ -3063,6 +3065,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
 		if (IS_ERR(bh)) {
 			if (PTR_ERR(bh) == -ENOMEM)
 				return NULL;
+			bh = NULL;
 			EXT4_ERROR_INODE(inode, "block %lu read error",
 					 (unsigned long)ce->e_value);
 		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
@@ -2052,7 +2052,8 @@ static int nfs23_validate_mount_data(void *options,
 		memcpy(sap, &data->addr, sizeof(data->addr));
 		args->nfs_server.addrlen = sizeof(data->addr);
 		args->nfs_server.port = ntohs(data->addr.sin_port);
-		if (!nfs_verify_server_address(sap))
+		if (sap->sa_family != AF_INET ||
+		    !nfs_verify_server_address(sap))
 			goto out_no_address;
 
 		if (!(data->flags & NFS_MOUNT_TCP))
@@ -926,8 +926,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	if (minorversion) {
-		if (!nfsd41_cb_get_slot(clp, task))
+		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
 			return;
+		cb->cb_holds_slot = true;
 	}
 	rpc_call_start(task);
 }
@@ -954,6 +955,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 		return true;
 	}
 
+	if (!cb->cb_holds_slot)
+		goto need_restart;
+
 	switch (cb->cb_seq_status) {
 	case 0:
 		/*
@@ -992,6 +996,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 			cb->cb_seq_status);
 	}
 
+	cb->cb_holds_slot = false;
 	clear_bit(0, &clp->cl_cb_slot_busy);
 	rpc_wake_up_next(&clp->cl_cb_waitq);
 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
@@ -1199,6 +1204,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
 	cb->cb_need_restart = false;
+	cb->cb_holds_slot = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
@@ -70,6 +70,7 @@ struct nfsd4_callback {
 	int cb_seq_status;
 	int cb_status;
 	bool cb_need_restart;
+	bool cb_holds_slot;
 };
 
 struct nfsd4_callback_ops {
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
 	if (--header->nreg)
 		return;
 
-	if (parent)
+	if (parent) {
 		put_links(header);
-	start_unregistering(header);
+		start_unregistering(header);
+	}
+
 	if (!--header->count)
 		kfree_rcu(header, rcu);
@@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
 	.get = generic_pipe_buf_get,
 };
 
-static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
-				    struct pipe_buffer *buf)
+int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+			     struct pipe_buffer *buf)
 {
 	return 1;
 }
@@ -304,13 +304,19 @@ enum rw_hint {
 
 struct kiocb {
 	struct file		*ki_filp;
+
+	/* The 'ki_filp' pointer is shared in a union for aio */
+	randomized_struct_fields_start
+
 	loff_t			ki_pos;
 	void (*ki_complete)(struct kiocb *iocb, long ret, long ret2);
 	void			*private;
 	int			ki_flags;
 	u16			ki_hint;
 	u16			ki_ioprio; /* See linux/ioprio.h */
-} __randomize_layout;
+
+	randomized_struct_fields_end
+};
 
 static inline bool is_sync_kiocb(struct kiocb *kiocb)
 {
@@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_info *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
@@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
 * @dtype: data type (verdict or numeric type defined by userspace)
 * @objtype: object type (see NFT_OBJECT_* definitions)
 * @size: maximum set size
+ * @use: number of rules references to this set
 * @nelems: number of elements
 * @ndeact: number of deactivated elements queued for removal
 * @timeout: default timeout value in jiffies
@@ -407,6 +408,7 @@ struct nft_set {
 	u32			dtype;
 	u32			objtype;
 	u32			size;
+	u32			use;
 	atomic_t		nelems;
 	u32			ndeact;
 	u64			timeout;
@@ -416,7 +418,8 @@ struct nft_set {
 	unsigned char		*udata;
 	/* runtime data below here */
 	const struct nft_set_ops *ops ____cacheline_aligned;
-	u16			flags:14,
+	u16			flags:13,
+				bound:1,
 				genmask:2;
 	u8			klen;
 	u8			dlen;
@@ -466,10 +469,15 @@ struct nft_set_binding {
 	u32			flags;
 };
 
+enum nft_trans_phase;
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase);
 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 		       struct nft_set_binding *binding);
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding);
+			  struct nft_set_binding *binding, bool commit);
 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
 
 /**
 * enum nft_set_extensions - set extension type IDs
@@ -689,10 +697,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
 	gcb->elems[gcb->head.cnt++] = elem;
 }
 
+struct nft_expr_ops;
 /**
 * struct nft_expr_type - nf_tables expression type
 *
 * @select_ops: function to select nft_expr_ops
+ * @release_ops: release nft_expr_ops
 * @ops: default ops, used when no select_ops functions is present
 * @list: used internally
 * @name: Identifier
@@ -705,6 +715,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb,
 struct nft_expr_type {
 	const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
 						 const struct nlattr * const tb[]);
+	void			(*release_ops)(const struct nft_expr_ops *ops);
 	const struct nft_expr_ops *ops;
 	struct list_head	list;
 	const char		*name;
@@ -718,13 +729,22 @@ struct nft_expr_type {
 #define NFT_EXPR_STATEFUL	0x1
 #define NFT_EXPR_GC		0x2
 
+enum nft_trans_phase {
+	NFT_TRANS_PREPARE,
+	NFT_TRANS_ABORT,
+	NFT_TRANS_COMMIT,
+	NFT_TRANS_RELEASE
+};
+
 /**
 * struct nft_expr_ops - nf_tables expression operations
 *
 * @eval: Expression evaluation function
 * @size: full expression size, including private data size
 * @init: initialization function
- * @destroy: destruction function
 * @activate: activate expression in the next generation
 * @deactivate: deactivate expression in next generation
+ * @destroy: destruction function, called after synchronize_rcu
 * @dump: function to dump parameters
 * @type: expression type
 * @validate: validate expression, called during loop detection
@@ -745,7 +765,8 @@ struct nft_expr_ops {
 	void			(*activate)(const struct nft_ctx *ctx,
 					    const struct nft_expr *expr);
 	void			(*deactivate)(const struct nft_ctx *ctx,
-					      const struct nft_expr *expr);
+					      const struct nft_expr *expr,
+					      enum nft_trans_phase phase);
 	void			(*destroy)(const struct nft_ctx *ctx,
 					   const struct nft_expr *expr);
 	void			(*destroy_clone)(const struct nft_ctx *ctx,
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
 int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
 void nr_unregister_sysctl(void);
 
 #endif
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
 	if (dl_entity_is_special(dl_se))
 		return;
 
-	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
 	WARN_ON(dl_se->dl_non_contending);
 
 	zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
 	 * If the "0-lag time" already passed, decrease the active
 	 * utilization now, instead of starting a timer
 	 */
-	if (zerolag_time < 0) {
+	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 		if (dl_task(p))
 			sub_running_bw(dl_se, dl_rq);
 		if (!dl_task(p) || p->state == TASK_DEAD) {
@@ -2013,6 +2013,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
 	if (p->last_task_numa_placement) {
 		delta = runtime - p->last_sum_exec_runtime;
 		*period = now - p->last_task_numa_placement;
+
+		/* Avoid time going backwards, prevent potential divide error: */
+		if (unlikely((s64)*period < 0))
+			*period = 0;
 	} else {
 		delta = p->se.avg.load_sum;
 		*period = LOAD_AVG_MAX;
@@ -730,7 +730,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 
 	preempt_disable_notrace();
 	time = rb_time_stamp(buffer);
-	preempt_enable_no_resched_notrace();
+	preempt_enable_notrace();
 
 	return time;
 }
@@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 	 * not modified.
 	 */
 	pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
-	if (!pid_list)
+	if (!pid_list) {
+		trace_parser_put(&parser);
 		return -ENOMEM;
+	}
 
 	pid_list->pid_max = READ_ONCE(pid_max);
 
@@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
 
 	pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
 	if (!pid_list->pids) {
+		trace_parser_put(&parser);
 		kfree(pid_list);
 		return -ENOMEM;
 	}
@@ -6800,19 +6803,23 @@ struct buffer_ref {
 	struct ring_buffer	*buffer;
 	void			*page;
 	int			cpu;
-	int			ref;
+	refcount_t		refcount;
 };
 
+static void buffer_ref_release(struct buffer_ref *ref)
+{
+	if (!refcount_dec_and_test(&ref->refcount))
+		return;
+	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
+	kfree(ref);
+}
+
 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
 				    struct pipe_buffer *buf)
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	buf->private = 0;
 }
 
@@ -6821,7 +6828,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 {
 	struct buffer_ref *ref = (struct buffer_ref *)buf->private;
 
-	ref->ref++;
+	refcount_inc(&ref->refcount);
 }
 
 /* Pipe buffer operations for a buffer. */
@@ -6829,7 +6836,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
 	.can_merge		= 0,
 	.confirm		= generic_pipe_buf_confirm,
 	.release		= buffer_pipe_buf_release,
-	.steal			= generic_pipe_buf_steal,
+	.steal			= generic_pipe_buf_nosteal,
 	.get			= buffer_pipe_buf_get,
 };
 
@@ -6842,11 +6849,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 	struct buffer_ref *ref =
 		(struct buffer_ref *)spd->partial[i].private;
 
-	if (--ref->ref)
-		return;
-
-	ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
-	kfree(ref);
+	buffer_ref_release(ref);
 	spd->partial[i].private = 0;
 }
 
@@ -6901,7 +6904,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			break;
 		}
 
-		ref->ref = 1;
+		refcount_set(&ref->refcount, 1);
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
 		if (IS_ERR(ref->page)) {
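Two related things change in the trace.c hunks above: the open-coded `int ref` becomes a refcount_t, so concurrent get/put on spliced buffers are atomic, and the pipe buffer stops advertising generic_pipe_buf_steal, because a read page can now be referenced by several pipe buffers and handing exclusive ownership to a caller is only safe for a sole owner. A small sketch of that ownership rule, with hypothetical types rather than the kernel's pipe API:

	#include <stdatomic.h>

	struct buf {
		atomic_int refcount;
		void *page;
	};

	/* return 0 on success (caller takes exclusive ownership of page) */
	static int buf_steal(struct buf *b)
	{
		return atomic_load(&b->refcount) == 1 ? 0 : 1;
	}

	/* never yield ownership, mirroring what generic_pipe_buf_nosteal()
	 * does for shared trace pages */
	static int buf_nosteal(struct buf *b)
	{
		(void)b;
		return 1;
	}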
@@ -2931,6 +2931,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
 	if (WARN_ON(!wq_online))
 		return false;
 
+	if (WARN_ON(!work->func))
+		return false;
+
 	if (!from_cancel) {
 		lock_map_acquire(&work->lockdep_map);
 		lock_map_release(&work->lockdep_map);
@@ -1934,6 +1934,7 @@ config TEST_KMOD
 	depends on m
 	depends on BLOCK && (64BIT || LBDAF)	# for XFS, BTRFS
 	depends on NETDEVICES && NET_CORE && INET # for TUN
+	depends on BLOCK
 	select TEST_LKM
 	select XFS_FS
 	select TUN
@@ -1787,10 +1787,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			 * in may not match the PFN we have mapped if the
 			 * mapped PFN is a writeable COW page.  In the mkwrite
 			 * case we are creating a writable PTE for a shared
-			 * mapping and we expect the PFNs to match.
+			 * mapping and we expect the PFNs to match. If they
+			 * don't match, we are likely racing with block
+			 * allocation and mapping invalidation so just skip the
+			 * update.
 			 */
-			if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
+			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
+				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
 				goto out_unlock;
+			}
 			entry = *pte;
 			goto out_mkwrite;
 		} else
@@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
 		if (match_kern)
 			match_kern->match_size = ret;
 
-		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
+		/* rule should have no remaining data after target */
+		if (type == EBT_COMPAT_TARGET && size_left)
 			return -EINVAL;
 
 		match32 = (struct compat_ebt_entry_mwt *) buf;
@@ -1185,25 +1185,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 	return dst;
 }
 
-static void ipv4_link_failure(struct sk_buff *skb)
+static void ipv4_send_dest_unreach(struct sk_buff *skb)
 {
 	struct ip_options opt;
-	struct rtable *rt;
 	int res;
 
 	/* Recompile ip options since IPCB may not be valid anymore.
+	 * Also check we have a reasonable ipv4 header.
 	 */
-	memset(&opt, 0, sizeof(opt));
-	opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
-
-	rcu_read_lock();
-	res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
-	rcu_read_unlock();
-
-	if (res)
+	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
+	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
 		return;
 
-	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+	memset(&opt, 0, sizeof(opt));
+	if (ip_hdr(skb)->ihl > 5) {
+		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
+			return;
+		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+
+		rcu_read_lock();
+		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+		rcu_read_unlock();
+
+		if (res)
+			return;
+	}
+	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
+}
+
+static void ipv4_link_failure(struct sk_buff *skb)
+{
+	struct rtable *rt;
+
+	ipv4_send_dest_unreach(skb);
+
 	rt = skb_rtable(skb);
 	if (rt)
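The sanity checks above validate in two steps: the fixed 20-byte header must be present before ihl is trusted, and the full options area must be present before it is parsed. A userspace sketch of the same ordering, with illustrative helpers in place of pskb_network_may_pull():

	#include <stddef.h>
	#include <stdint.h>

	static int may_pull(const uint8_t *pkt, size_t len, size_t need)
	{
		(void)pkt;
		return len >= need;
	}

	static int check_ipv4_header(const uint8_t *pkt, size_t len)
	{
		unsigned int ihl;

		if (!may_pull(pkt, len, 20))		/* fixed IPv4 header first */
			return -1;
		if ((pkt[0] >> 4) != 4 || (pkt[0] & 0x0f) < 5)
			return -1;			/* bogus version or header length */
		ihl = pkt[0] & 0x0f;
		if (ihl > 5 && !may_pull(pkt, len, ihl * 4))
			return -1;			/* options claimed but not present */
		return 0;
	}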
@@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
+static int one_day_secs = 24 * 3600;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -1140,7 +1141,9 @@ static struct ctl_table ipv4_net_table[] = {
 		.data		= &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one_day_secs
 	},
 	{
 		.procname	= "tcp_autocorking",
@@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 {
 	struct ip_vs_dest *dest;
 	unsigned int atype, i;
-	int ret = 0;
 
 	EnterFunction(2);
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (udest->af == AF_INET6) {
+		int ret;
+
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&
@@ -112,6 +112,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
 	kfree(trans);
 }
 
+static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	struct net *net = ctx->net;
+	struct nft_trans *trans;
+
+	if (!nft_set_is_anonymous(set))
+		return;
+
+	list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
+		if (trans->msg_type == NFT_MSG_NEWSET &&
+		    nft_trans_set(trans) == set) {
+			set->bound = true;
+			break;
+		}
+	}
+}
+
 static int nf_tables_register_hook(struct net *net,
 				   const struct nft_table *table,
 				   struct nft_chain *chain)
@@ -222,14 +239,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
-				     struct nft_rule *rule)
+				     struct nft_rule *rule,
+				     enum nft_trans_phase phase)
 {
 	struct nft_expr *expr;
 
 	expr = nft_expr_first(rule);
 	while (expr != nft_expr_last(rule) && expr->ops) {
 		if (expr->ops->deactivate)
-			expr->ops->deactivate(ctx, expr);
+			expr->ops->deactivate(ctx, expr, phase);
 
 		expr = nft_expr_next(expr);
 	}
@@ -280,7 +298,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
 		nft_trans_destroy(trans);
 		return err;
 	}
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);
 
 	return 0;
 }
@@ -301,7 +319,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
 	return 0;
 }
 
-static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
 			     struct nft_set *set)
 {
 	struct nft_trans *trans;
@@ -321,7 +339,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
 	return 0;
 }
 
-static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
+static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
 {
 	int err;
 
@@ -2105,6 +2123,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 {
 	struct nft_expr_info info;
 	struct nft_expr *expr;
+	struct module *owner;
 	int err;
 
 	err = nf_tables_expr_parse(ctx, nla, &info);
@@ -2124,7 +2143,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 err3:
 	kfree(expr);
 err2:
-	module_put(info.ops->type->owner);
+	owner = info.ops->type->owner;
+	if (info.ops->type->release_ops)
+		info.ops->type->release_ops(info.ops);
+
+	module_put(owner);
 err1:
 	return ERR_PTR(err);
 }
@@ -2458,7 +2481,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 static void nf_tables_rule_release(const struct nft_ctx *ctx,
 				   struct nft_rule *rule)
 {
-	nft_rule_expr_deactivate(ctx, rule);
+	nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
 	nf_tables_rule_destroy(ctx, rule);
 }
 
@@ -3562,19 +3585,15 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
 static void nft_set_destroy(struct nft_set *set)
 {
+	if (WARN_ON(set->use > 0))
+		return;
+
 	set->ops->destroy(set);
 	module_put(to_set_type(set->ops)->owner);
 	kfree(set->name);
 	kvfree(set);
 }
 
-static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
-{
-	list_del_rcu(&set->list);
-	nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
-	nft_set_destroy(set);
-}
-
 static int nf_tables_delset(struct net *net, struct sock *nlsk,
 			    struct sk_buff *skb, const struct nlmsghdr *nlh,
 			    const struct nlattr * const nla[],
@@ -3609,7 +3628,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
 		NL_SET_BAD_ATTR(extack, attr);
 		return PTR_ERR(set);
 	}
-	if (!list_empty(&set->bindings) ||
+	if (set->use ||
 	    (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
 		NL_SET_BAD_ATTR(extack, attr);
 		return -EBUSY;
@@ -3639,6 +3658,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 	struct nft_set_binding *i;
 	struct nft_set_iter iter;
 
+	if (set->use == UINT_MAX)
+		return -EOVERFLOW;
+
 	if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
 		return -EBUSY;
 
@@ -3665,21 +3687,53 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
 bind:
 	binding->chain = ctx->chain;
 	list_add_tail_rcu(&binding->list, &set->bindings);
+	nft_set_trans_bind(ctx, set);
+	set->use++;
 
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_tables_bind_set);
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
-			  struct nft_set_binding *binding)
+			  struct nft_set_binding *binding, bool event)
 {
 	list_del_rcu(&binding->list);
 
-	if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
-	    nft_is_active(ctx->net, set))
-		nf_tables_set_destroy(ctx, set);
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
+		list_del_rcu(&set->list);
+		if (event)
+			nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
+					     GFP_KERNEL);
+	}
 }
 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
 
+void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
+			      struct nft_set_binding *binding,
+			      enum nft_trans_phase phase)
+{
+	switch (phase) {
+	case NFT_TRANS_PREPARE:
+		set->use--;
+		return;
+	case NFT_TRANS_ABORT:
+	case NFT_TRANS_RELEASE:
+		set->use--;
+		/* fall through */
+	default:
+		nf_tables_unbind_set(ctx, set, binding,
+				     phase == NFT_TRANS_COMMIT);
+	}
+}
+EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
+
+void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+	if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
+		nft_set_destroy(set);
+}
+EXPORT_SYMBOL_GPL(nf_tables_destroy_set);
+
 const struct nft_set_ext_type nft_set_ext_types[] = {
 	[NFT_SET_EXT_KEY]	= {
 		.align	= __alignof__(u32),
@@ -6429,6 +6483,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 			nf_tables_rule_notify(&trans->ctx,
 					      nft_trans_rule(trans),
 					      NFT_MSG_DELRULE);
+			nft_rule_expr_deactivate(&trans->ctx,
+						 nft_trans_rule(trans),
+						 NFT_TRANS_COMMIT);
 			break;
 		case NFT_MSG_NEWSET:
 			nft_clear(net, nft_trans_set(trans));
@@ -6577,7 +6634,9 @@ static int __nf_tables_abort(struct net *net)
 		case NFT_MSG_NEWRULE:
 			trans->ctx.chain->use--;
 			list_del_rcu(&nft_trans_rule(trans)->list);
-			nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
+			nft_rule_expr_deactivate(&trans->ctx,
						 nft_trans_rule(trans),
						 NFT_TRANS_ABORT);
 			break;
 		case NFT_MSG_DELRULE:
 			trans->ctx.chain->use++;
@@ -6587,6 +6646,10 @@ static int __nf_tables_abort(struct net *net)
 			break;
 		case NFT_MSG_NEWSET:
 			trans->ctx.table->use--;
+			if (nft_trans_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
 			list_del_rcu(&nft_trans_set(trans)->list);
 			break;
 		case NFT_MSG_DELSET:
@@ -6595,8 +6658,11 @@ static int __nf_tables_abort(struct net *net)
 			nft_trans_destroy(trans);
 			break;
 		case NFT_MSG_NEWSETELEM:
+			if (nft_trans_elem_set(trans)->bound) {
+				nft_trans_destroy(trans);
+				break;
+			}
 			te = (struct nft_trans_elem *)trans->data;
-
 			te->set->ops->remove(net, te->set, &te->elem);
 			atomic_dec(&te->set->nelems);
 			break;
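The nf_tables hunks above introduce a single deactivate callback that is told which transaction phase it runs in (prepare, abort, commit, release), so reference drops and unbinding happen exactly once per object no matter how the transaction ends. A compact sketch of that state machine, with hypothetical types mirroring nf_tables_deactivate_set():

	enum trans_phase { TRANS_PREPARE, TRANS_ABORT, TRANS_COMMIT, TRANS_RELEASE };

	struct set { unsigned int use; int bound; };

	static void deactivate_set(struct set *s, enum trans_phase phase)
	{
		switch (phase) {
		case TRANS_PREPARE:
			s->use--;	/* tentative drop; an abort re-activates */
			return;
		case TRANS_ABORT:
		case TRANS_RELEASE:
			s->use--;
			/* fall through */
		default:
			s->bound = 0;	/* unbind in every non-prepare phase */
		}
	}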
@@ -23,19 +23,6 @@
 #include <linux/netfilter_arp/arp_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-struct nft_xt {
-	struct list_head	head;
-	struct nft_expr_ops	ops;
-	unsigned int		refcnt;
-
-	/* Unlike other expressions, ops doesn't have static storage duration.
-	 * nft core assumes they do.  We use kfree_rcu so that nft core can
-	 * can check expr->ops->size even after nft_compat->destroy() frees
-	 * the nft_xt struct that holds the ops structure.
-	 */
-	struct rcu_head		rcu_head;
-};
-
 /* Used for matches where *info is larger than X byte */
 #define NFT_MATCH_LARGE_THRESH	192
 
@@ -43,17 +30,6 @@ struct nft_xt_match_priv {
 	void *info;
 };
 
-static bool nft_xt_put(struct nft_xt *xt)
-{
-	if (--xt->refcnt == 0) {
-		list_del(&xt->head);
-		kfree_rcu(xt, rcu_head);
-		return true;
-	}
-
-	return false;
-}
-
 static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
 						const char *tablename)
 {
@@ -248,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_target *target = expr->ops->data;
 	struct xt_tgchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
-	struct nft_xt *nft_xt;
 	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
@@ -272,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	if (!target->target)
 		return -EINVAL;
 
-	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	nft_xt->refcnt++;
 	return 0;
 }
 
@@ -292,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 	if (par.target->destroy != NULL)
 		par.target->destroy(&par);
 
-	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(me);
+	module_put(me);
+	kfree(expr->ops);
 }
 
 static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -447,7 +420,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	struct xt_match *match = expr->ops->data;
 	struct xt_mtchk_param par;
 	size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
-	struct nft_xt *nft_xt;
 	u16 proto = 0;
 	bool inv = false;
 	union nft_entry e = {};
@@ -463,13 +435,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 
 	nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
 
-	ret = xt_check_match(&par, size, proto, inv);
-	if (ret < 0)
-		return ret;
-
-	nft_xt = container_of(expr->ops, struct nft_xt, ops);
-	nft_xt->refcnt++;
-	return 0;
+	return xt_check_match(&par, size, proto, inv);
 }
 
 static int
@@ -512,8 +478,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	if (par.match->destroy != NULL)
 		par.match->destroy(&par);
 
-	if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-		module_put(me);
+	module_put(me);
+	kfree(expr->ops);
 }
 
 static void
@@ -715,22 +681,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
 	.cb		= nfnl_nft_compat_cb,
 };
 
-static LIST_HEAD(nft_match_list);
-
 static struct nft_expr_type nft_match_type;
 
-static bool nft_match_cmp(const struct xt_match *match,
-			  const char *name, u32 rev, u32 family)
-{
-	return strcmp(match->name, name) == 0 && match->revision == rev &&
-	       (match->family == NFPROTO_UNSPEC || match->family == family);
-}
-
 static const struct nft_expr_ops *
 nft_match_select_ops(const struct nft_ctx *ctx,
 		     const struct nlattr * const tb[])
 {
-	struct nft_xt *nft_match;
+	struct nft_expr_ops *ops;
 	struct xt_match *match;
 	unsigned int matchsize;
 	char *mt_name;
@@ -746,14 +703,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 	rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
 	family = ctx->family;
 
-	/* Re-use the existing match if it's already loaded. */
-	list_for_each_entry(nft_match, &nft_match_list, head) {
-		struct xt_match *match = nft_match->ops.data;
-
-		if (nft_match_cmp(match, mt_name, rev, family))
-			return &nft_match->ops;
-	}
-
 	match = xt_request_find_match(family, mt_name, rev);
 	if (IS_ERR(match))
 		return ERR_PTR(-ENOENT);
@@ -763,66 +712,62 @@ nft_match_select_ops(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	/* This is the first time we use this match, allocate operations */
-	nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
-	if (nft_match == NULL) {
+	ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+	if (!ops) {
 		err = -ENOMEM;
 		goto err;
 	}
 
-	nft_match->refcnt = 0;
-	nft_match->ops.type = &nft_match_type;
-	nft_match->ops.eval = nft_match_eval;
-	nft_match->ops.init = nft_match_init;
-	nft_match->ops.destroy = nft_match_destroy;
-	nft_match->ops.dump = nft_match_dump;
-	nft_match->ops.validate = nft_match_validate;
-	nft_match->ops.data = match;
+	ops->type = &nft_match_type;
+	ops->eval = nft_match_eval;
+	ops->init = nft_match_init;
+	ops->destroy = nft_match_destroy;
+	ops->dump = nft_match_dump;
+	ops->validate = nft_match_validate;
+	ops->data = match;
 
 	matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
 	if (matchsize > NFT_MATCH_LARGE_THRESH) {
 		matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
 
-		nft_match->ops.eval = nft_match_large_eval;
-		nft_match->ops.init = nft_match_large_init;
-		nft_match->ops.destroy = nft_match_large_destroy;
-		nft_match->ops.dump = nft_match_large_dump;
+		ops->eval = nft_match_large_eval;
+		ops->init = nft_match_large_init;
+		ops->destroy = nft_match_large_destroy;
+		ops->dump = nft_match_large_dump;
 	}
 
-	nft_match->ops.size = matchsize;
+	ops->size = matchsize;
 
-	list_add(&nft_match->head, &nft_match_list);
-
-	return &nft_match->ops;
+	return ops;
 err:
 	module_put(match->me);
 	return ERR_PTR(err);
 }
 
+static void nft_match_release_ops(const struct nft_expr_ops *ops)
+{
+	struct xt_match *match = ops->data;
+
+	module_put(match->me);
+	kfree(ops);
+}
+
 static struct nft_expr_type nft_match_type __read_mostly = {
 	.name		= "match",
 	.select_ops	= nft_match_select_ops,
+	.release_ops	= nft_match_release_ops,
 	.policy		= nft_match_policy,
 	.maxattr	= NFTA_MATCH_MAX,
 	.owner		= THIS_MODULE,
 };
 
-static LIST_HEAD(nft_target_list);
-
 static struct nft_expr_type nft_target_type;
 
-static bool nft_target_cmp(const struct xt_target *tg,
-			   const char *name, u32 rev, u32 family)
-{
-	return strcmp(tg->name, name) == 0 && tg->revision == rev &&
-	       (tg->family == NFPROTO_UNSPEC || tg->family == family);
-}
-
 static const struct nft_expr_ops *
 nft_target_select_ops(const struct nft_ctx *ctx,
 		      const struct nlattr * const tb[])
 {
-	struct nft_xt *nft_target;
+	struct nft_expr_ops *ops;
 	struct xt_target *target;
 	char *tg_name;
 	u32 rev, family;
@@ -842,17 +787,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 	    strcmp(tg_name, "standard") == 0)
 		return ERR_PTR(-EINVAL);
 
-	/* Re-use the existing target if it's already loaded. */
-	list_for_each_entry(nft_target, &nft_target_list, head) {
-		struct xt_target *target = nft_target->ops.data;
-
-		if (!target->target)
-			continue;
-
-		if (nft_target_cmp(target, tg_name, rev, family))
-			return &nft_target->ops;
-	}
-
 	target = xt_request_find_target(family, tg_name, rev);
 	if (IS_ERR(target))
 		return ERR_PTR(-ENOENT);
@@ -867,38 +801,43 @@ nft_target_select_ops(const struct nft_ctx *ctx,
 		goto err;
 	}
 
-	/* This is the first time we use this target, allocate operations */
-	nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
-	if (nft_target == NULL) {
+	ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL);
+	if (!ops) {
 		err = -ENOMEM;
 		goto err;
 	}
 
-	nft_target->refcnt = 0;
-	nft_target->ops.type = &nft_target_type;
-	nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
-	nft_target->ops.init = nft_target_init;
-	nft_target->ops.destroy = nft_target_destroy;
-	nft_target->ops.dump = nft_target_dump;
-	nft_target->ops.validate = nft_target_validate;
-	nft_target->ops.data = target;
+	ops->type = &nft_target_type;
+	ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
+	ops->init = nft_target_init;
+	ops->destroy = nft_target_destroy;
+	ops->dump = nft_target_dump;
+	ops->validate = nft_target_validate;
+	ops->data = target;
 
 	if (family == NFPROTO_BRIDGE)
-		nft_target->ops.eval = nft_target_eval_bridge;
+		ops->eval = nft_target_eval_bridge;
 	else
-		nft_target->ops.eval = nft_target_eval_xt;
+		ops->eval = nft_target_eval_xt;
 
-	list_add(&nft_target->head, &nft_target_list);
-
-	return &nft_target->ops;
+	return ops;
 err:
 	module_put(target->me);
 	return ERR_PTR(err);
 }
 
+static void nft_target_release_ops(const struct nft_expr_ops *ops)
+{
+	struct xt_target *target = ops->data;
+
+	module_put(target->me);
+	kfree(ops);
+}
+
 static struct nft_expr_type nft_target_type __read_mostly = {
 	.name		= "target",
 	.select_ops	= nft_target_select_ops,
+	.release_ops	= nft_target_release_ops,
 	.policy		= nft_target_policy,
 	.maxattr	= NFTA_TARGET_MAX,
 	.owner		= THIS_MODULE,
@@ -923,7 +862,6 @@ static int __init nft_compat_module_init(void)
 	}
 
 	return ret;
-
 err_target:
 	nft_unregister_expr(&nft_target_type);
 err_match:
@@ -933,32 +871,6 @@ static int __init nft_compat_module_init(void)
 
 static void __exit nft_compat_module_exit(void)
 {
-	struct nft_xt *xt, *next;
-
-	/* list should be empty here, it can be non-empty only in case there
-	 * was an error that caused nft_xt expr to not be initialized fully
-	 * and noone else requested the same expression later.
-	 *
-	 * In this case, the lists contain 0-refcount entries that still
-	 * hold module reference.
-	 */
-	list_for_each_entry_safe(xt, next, &nft_target_list, head) {
-		struct xt_target *target = xt->ops.data;
-
-		if (WARN_ON_ONCE(xt->refcnt))
-			continue;
-		module_put(target->me);
-		kfree(xt);
-	}
-
-	list_for_each_entry_safe(xt, next, &nft_match_list, head) {
-		struct xt_match *match = xt->ops.data;
-
-		if (WARN_ON_ONCE(xt->refcnt))
-			continue;
-		module_put(match->me);
-		kfree(xt);
-	}
 	nfnetlink_subsys_unregister(&nfnl_compat_subsys);
 	nft_unregister_expr(&nft_target_type);
 	nft_unregister_expr(&nft_match_type);
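The nft_compat rework above replaces a global cache of refcounted ops structures with one heap allocation per expression plus a release_ops hook the core calls on the error path, which removes the racy shared refcount entirely. A sketch of that lifetime contract, with illustrative names rather than the nf_tables API:

	#include <stdlib.h>

	struct expr_ops { void *data; };

	struct expr_type {
		const struct expr_ops *(*select_ops)(void);
		void (*release_ops)(const struct expr_ops *ops);
	};

	static const struct expr_ops *my_select_ops(void)
	{
		/* one allocation per expression, no shared list to race on */
		return calloc(1, sizeof(struct expr_ops));
	}

	static void my_release_ops(const struct expr_ops *ops)
	{
		/* undoes select_ops; the kernel version also drops module refs */
		free((void *)ops);
	}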
@@ -235,14 +235,32 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 	return err;
 }
 
+static void nft_dynset_deactivate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
+{
+	struct nft_dynset *priv = nft_expr_priv(expr);
+
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_dynset_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_dynset *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
+}
+
 static void nft_dynset_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
 	struct nft_dynset *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
 	if (priv->expr != NULL)
 		nft_expr_destroy(ctx, priv->expr);
+
+	nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -279,6 +297,8 @@ static const struct nft_expr_ops nft_dynset_ops = {
 	.eval		= nft_dynset_eval,
 	.init		= nft_dynset_init,
 	.destroy	= nft_dynset_destroy,
+	.activate	= nft_dynset_activate,
+	.deactivate	= nft_dynset_deactivate,
 	.dump		= nft_dynset_dump,
 };
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
 }
 
 static void nft_immediate_deactivate(const struct nft_ctx *ctx,
-				     const struct nft_expr *expr)
+				     const struct nft_expr *expr,
+				     enum nft_trans_phase phase)
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 
+	if (phase == NFT_TRANS_COMMIT)
+		return;
+
 	return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
@@ -121,12 +121,29 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
 	return 0;
 }
 
+static void nft_lookup_deactivate(const struct nft_ctx *ctx,
+				  const struct nft_expr *expr,
+				  enum nft_trans_phase phase)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
+}
+
+static void nft_lookup_activate(const struct nft_ctx *ctx,
+				const struct nft_expr *expr)
+{
+	struct nft_lookup *priv = nft_expr_priv(expr);
+
+	priv->set->use++;
+}
+
 static void nft_lookup_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
 	struct nft_lookup *priv = nft_expr_priv(expr);
 
-	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
+	nf_tables_destroy_set(ctx, priv->set);
 }
 
 static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -209,6 +226,8 @@ static const struct nft_expr_ops nft_lookup_ops = {
 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
 	.eval		= nft_lookup_eval,
 	.init		= nft_lookup_init,
+	.activate	= nft_lookup_activate,
+	.deactivate	= nft_lookup_deactivate,
 	.destroy	= nft_lookup_destroy,
 	.dump		= nft_lookup_dump,
 	.validate	= nft_lookup_validate,
@@ -64,21 +64,34 @@ static int nft_objref_dump(struct sk_buff *skb, const struct nft_expr *expr)
	return -1;
}

static void nft_objref_destroy(const struct nft_ctx *ctx,
			       const struct nft_expr *expr)
static void nft_objref_deactivate(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  enum nft_trans_phase phase)
{
	struct nft_object *obj = nft_objref_priv(expr);

	if (phase == NFT_TRANS_COMMIT)
		return;

	obj->use--;
}

static void nft_objref_activate(const struct nft_ctx *ctx,
				const struct nft_expr *expr)
{
	struct nft_object *obj = nft_objref_priv(expr);

	obj->use++;
}

static struct nft_expr_type nft_objref_type;
static const struct nft_expr_ops nft_objref_ops = {
	.type = &nft_objref_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
	.eval = nft_objref_eval,
	.init = nft_objref_init,
	.destroy = nft_objref_destroy,
	.activate = nft_objref_activate,
	.deactivate = nft_objref_deactivate,
	.dump = nft_objref_dump,
};

@@ -155,12 +168,29 @@ static int nft_objref_map_dump(struct sk_buff *skb, const struct nft_expr *expr)
	return -1;
}

static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr,
				      enum nft_trans_phase phase)
{
	struct nft_objref_map *priv = nft_expr_priv(expr);

	nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
}

static void nft_objref_map_activate(const struct nft_ctx *ctx,
				    const struct nft_expr *expr)
{
	struct nft_objref_map *priv = nft_expr_priv(expr);

	priv->set->use++;
}

static void nft_objref_map_destroy(const struct nft_ctx *ctx,
				   const struct nft_expr *expr)
{
	struct nft_objref_map *priv = nft_expr_priv(expr);

	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
	nf_tables_destroy_set(ctx, priv->set);
}

static struct nft_expr_type nft_objref_type;

@@ -169,6 +199,8 @@ static const struct nft_expr_ops nft_objref_map_ops = {
	.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
	.eval = nft_objref_map_eval,
	.init = nft_objref_map_init,
	.activate = nft_objref_map_activate,
	.deactivate = nft_objref_map_deactivate,
	.destroy = nft_objref_map_destroy,
	.dump = nft_objref_map_dump,
};

@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
	int i;
	int rc = proto_register(&nr_proto, 0);

	if (rc != 0)
		goto out;
	if (rc)
		return rc;

	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
		return -1;
		pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
		       __func__);
		rc = -EINVAL;
		goto unregister_proto;
	}

	dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
	if (dev_nr == NULL) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
		return -1;
	if (!dev_nr) {
		pr_err("NET/ROM: %s - unable to allocate device array\n",
		       __func__);
		rc = -ENOMEM;
		goto unregister_proto;
	}

	for (i = 0; i < nr_ndevs; i++) {

@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
		sprintf(name, "nr%d", i);
		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
		if (!dev) {
			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
			rc = -ENOMEM;
			goto fail;
		}

		dev->base_addr = i;
		if (register_netdev(dev)) {
			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
		rc = register_netdev(dev);
		if (rc) {
			free_netdev(dev);
			goto fail;
		}

@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
		dev_nr[i] = dev;
	}

	if (sock_register(&nr_family_ops)) {
		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
	rc = sock_register(&nr_family_ops);
	if (rc)
		goto fail;
	}

	register_netdevice_notifier(&nr_dev_notifier);
	rc = register_netdevice_notifier(&nr_dev_notifier);
	if (rc)
		goto out_sock;

	ax25_register_pid(&nr_pid);
	ax25_linkfail_register(&nr_linkfail_notifier);

#ifdef CONFIG_SYSCTL
	nr_register_sysctl();
	rc = nr_register_sysctl();
	if (rc)
		goto out_sysctl;
#endif

	nr_loopback_init();

	proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
	proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
	proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
out:
	return rc;
	rc = -ENOMEM;
	if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
		goto proc_remove1;
	if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
			     &nr_neigh_seqops))
		goto proc_remove2;
	if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
			     &nr_node_seqops))
		goto proc_remove3;

	return 0;

proc_remove3:
	remove_proc_entry("nr_neigh", init_net.proc_net);
proc_remove2:
	remove_proc_entry("nr", init_net.proc_net);
proc_remove1:

	nr_loopback_clear();
	nr_rt_free();

#ifdef CONFIG_SYSCTL
	nr_unregister_sysctl();
out_sysctl:
#endif
	ax25_linkfail_release(&nr_linkfail_notifier);
	ax25_protocol_release(AX25_P_NETROM);
	unregister_netdevice_notifier(&nr_dev_notifier);
out_sock:
	sock_unregister(PF_NETROM);
fail:
	while (--i >= 0) {
		unregister_netdev(dev_nr[i]);
		free_netdev(dev_nr[i]);
	}
	kfree(dev_nr);
unregister_proto:
	proto_unregister(&nr_proto);
	rc = -1;
	goto out;
	return rc;
}

module_init(nr_proto_init);

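The reworked nr_proto_init propagates real error codes and unwinds with a goto ladder: each label releases exactly the resources acquired before the failing step, in reverse order of acquisition. A minimal sketch of the idiom under assumed names:

    #include <stdio.h>
    #include <stdlib.h>

    static int step(const char *name, int ok)
    {
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
    }

    static int init_all(int fail_at)
    {
        int rc;

        rc = step("register proto", fail_at != 1);
        if (rc)
            return rc;            /* nothing to undo yet */

        rc = step("register socket family", fail_at != 2);
        if (rc)
            goto unregister_proto;

        rc = step("register notifier", fail_at != 3);
        if (rc)
            goto unregister_family;

        return 0;                 /* every step succeeded */

        /* Unwind in exact reverse order of acquisition. */
    unregister_family:
        printf("undo: socket family\n");
    unregister_proto:
        printf("undo: proto\n");
        return rc;
    }

    int main(void)
    {
        return init_all(3) ? EXIT_FAILURE : EXIT_SUCCESS;
    }
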
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
	}
}

void __exit nr_loopback_clear(void)
void nr_loopback_clear(void)
{
	del_timer_sync(&loopback_timer);
	skb_queue_purge(&loopback_queue);

@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
/*
 *	Free all memory associated with the nodes and routes lists.
 */
void __exit nr_rt_free(void)
void nr_rt_free(void)
{
	struct nr_neigh *s = NULL;
	struct nr_node *t = NULL;

@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
	{ }
};

void __init nr_register_sysctl(void)
int __init nr_register_sysctl(void)
{
	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
	if (!nr_table_header)
		return -ENOMEM;
	return 0;
}

void nr_unregister_sysctl(void)

@@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
	struct rds_sock *rs = rds_sk_to_rs(sk);
	int ret = 0;

	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;

	lock_sock(sk);

	switch (uaddr->sa_family) {

@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	/* We allow an RDS socket to be bound to either IPv4 or IPv6
	 * address.
	 */
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	if (uaddr->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;

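Both RDS entry points now check that addr_len covers at least the sa_family field before touching it. A userspace sketch of the same guard; offsetofend() is kernel-only, so the sketch spells it out with offsetof() plus sizeof():

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Reject any address too short to even carry the family field. */
    static int check_addr(const struct sockaddr *uaddr, socklen_t addr_len)
    {
        size_t need = offsetof(struct sockaddr, sa_family) +
                      sizeof(uaddr->sa_family);

        if (addr_len < need)
            return -1;            /* the kernel returns -EINVAL here */
        return 0;
    }

    int main(void)
    {
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        printf("full struct: %d\n", check_addr((struct sockaddr *)&sin,
                                               sizeof(sin)));
        printf("one byte:    %d\n", check_addr((struct sockaddr *)&sin, 1));
        return 0;
    }
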
@@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
	else
		pool = rds_ibdev->mr_1m_pool;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of the pool is reaching upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

@@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
	struct rds_ib_mr *ibmr = NULL;
	int iter = 0;

	if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	while (1) {
		ibmr = rds_ib_reuse_mr(pool);
		if (ibmr)

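Taken together, the two RDS/IB hunks move the flush trigger so it fires on the pool the caller is about to overload, before the allocation falls over to the sibling pool. A toy model of that decision, with invented pool names and thresholds matching the 1/10 and 9/10 ratios above:

    #include <stdio.h>

    struct pool {
        const char *name;
        int dirty;
        int max_items;
    };

    /* Kick a flush on the pool the caller actually used, then fall over
     * to the sibling pool when this one is close to its limit. */
    static struct pool *pick_pool(struct pool *preferred, struct pool *other)
    {
        if (preferred->dirty >= preferred->max_items / 10)
            printf("schedule flush of %s pool\n", preferred->name);

        if (preferred->dirty >= preferred->max_items * 9 / 10)
            return other;
        return preferred;
    }

    int main(void)
    {
        struct pool p8k = { "8k", 95, 100 };   /* nearly full */
        struct pool p1m = { "1m", 0, 100 };

        printf("allocating from: %s\n", pick_pool(&p8k, &p1m)->name);
        return 0;
    }
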
@@ -16,6 +16,7 @@
#include <linux/init.h>

static struct sk_buff_head loopback_queue;
#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;

static void rose_set_loopback_timer(void);

@@ -35,29 +36,27 @@ static int rose_loopback_running(void)

int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
	struct sk_buff *skbn;
	struct sk_buff *skbn = NULL;

	skbn = skb_clone(skb, GFP_ATOMIC);
	if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
		skbn = skb_clone(skb, GFP_ATOMIC);

	kfree_skb(skb);

	if (skbn != NULL) {
	if (skbn) {
		consume_skb(skb);
		skb_queue_tail(&loopback_queue, skbn);

		if (!rose_loopback_running())
			rose_set_loopback_timer();
	} else {
		kfree_skb(skb);
	}

	return 1;
}

static void rose_set_loopback_timer(void)
{
	del_timer(&loopback_timer);

	loopback_timer.expires = jiffies + 10;
	add_timer(&loopback_timer);
	mod_timer(&loopback_timer, jiffies + 10);
}

static void rose_loopback_timer(struct timer_list *unused)

|
|||
struct sock *sk;
|
||||
unsigned short frametype;
|
||||
unsigned int lci_i, lci_o;
|
||||
int count;
|
||||
|
||||
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
|
||||
for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
|
||||
skb = skb_dequeue(&loopback_queue);
|
||||
if (!skb)
|
||||
return;
|
||||
if (skb->len < ROSE_MIN_LEN) {
|
||||
kfree_skb(skb);
|
||||
continue;
|
||||
|
@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
|
|||
kfree_skb(skb);
|
||||
}
|
||||
}
|
||||
if (!skb_queue_empty(&loopback_queue))
|
||||
mod_timer(&loopback_timer, jiffies + 1);
|
||||
}
|
||||
|
||||
void __exit rose_loopback_clear(void)
|
||||
|
|
|
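The timer handler now processes at most ROSE_LOOPBACK_LIMIT frames per run and re-arms itself when the queue is still non-empty, bounding the time spent in a single run. A compact userspace sketch of this budget-and-rearm pattern, with made-up names and a tiny budget:

    #include <stdio.h>

    #define BUDGET 4

    static int queue_len = 10;    /* pretend 10 frames are pending */

    static int dequeue(void)
    {
        return queue_len > 0 ? queue_len-- : 0;
    }

    /* Process at most BUDGET items per invocation; reschedule if the
     * queue is still non-empty, instead of looping unboundedly. */
    static void timer_fn(void)
    {
        int count;

        for (count = 0; count < BUDGET; count++) {
            if (!dequeue())
                return;
            printf("processed one frame\n");
        }
        if (queue_len > 0)
            printf("re-arm timer, %d frames left\n", queue_len);
    }

    int main(void)
    {
        timer_fn();   /* handles 4, re-arms */
        timer_fn();   /* handles 4, re-arms */
        timer_fn();   /* handles remaining 2, returns early */
        return 0;
    }
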
@@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * The socket is locked by the caller and this prevents the socket from being
 * shut down and the local endpoint from going away, thus sk_user_data will not
 * be cleared until this function returns.
 * [!] Note that as this is called from the encap_rcv hook, the socket is not
 * held locked by the caller and nothing prevents sk_user_data on the UDP from
 * being cleared in the middle of processing this function.
 *
 * Called with the RCU read lock held from the IP layer via UDP.
 */
int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call = NULL;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_local *local = udp_sk->sk_user_data;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_sock *rx = NULL;
	unsigned int channel;

@@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)

	_enter("%p", udp_sk);

	if (unlikely(!local)) {
		kfree_skb(skb);
		return 0;
	}
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();

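The rxrpc fix stops trusting sk_user_data to stay non-NULL for the duration of the handler: it is read once (under RCU in the kernel) and a NULL result means the local endpoint is already gone, so the packet is dropped. The sketch below shows only the load-once-then-NULL-check discipline with a plain atomic pointer; it does not model RCU grace periods:

    #include <stdatomic.h>
    #include <stdio.h>

    struct endpoint { const char *name; };

    /* The handler may race with teardown clearing the pointer, so load
     * it once, then treat NULL as "endpoint gone" and drop the packet. */
    static _Atomic(struct endpoint *) user_data;

    static void input_packet(void)
    {
        struct endpoint *ep = atomic_load(&user_data);

        if (!ep) {
            printf("endpoint gone, drop packet\n");
            return;
        }
        printf("deliver to %s\n", ep->name);
    }

    int main(void)
    {
        static struct endpoint ep = { "local0" };

        atomic_store(&user_data, &ep);
        input_packet();                 /* delivered */
        atomic_store(&user_data, NULL); /* teardown clears the pointer */
        input_packet();                 /* dropped, no crash */
        return 0;
    }
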
@@ -304,7 +304,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	kfree(local);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

@@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
	h->last_refresh = now;
}

static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
			       struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,

@@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
	if (cache_is_expired(detail, tmp)) {
		hlist_del_init(&tmp->cache_list);
		detail->entries --;
		if (cache_is_valid(tmp) == -EAGAIN)
			set_bit(CACHE_NEGATIVE, &tmp->flags);
		cache_fresh_locked(tmp, 0, detail);
		freeme = tmp;
		break;

@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
	if (msg->rep_type)
		tipc_tlv_init(msg->rep, msg->rep_type);

	if (cmd->header)
		(*cmd->header)(msg);
	if (cmd->header) {
		err = (*cmd->header)(msg);
		if (err) {
			kfree_skb(msg->rep);
			msg->rep = NULL;
			return err;
		}
	}

	arg = nlmsg_new(0, GFP_KERNEL);
	if (!arg) {

@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
	if (!bearer)
		return -EMSGSIZE;

	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
	len = TLV_GET_DATA_LEN(msg->req);
	len -= offsetof(struct tipc_bearer_config, name);
	if (len <= 0)
		return -EINVAL;

	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
	if (!string_is_valid(b->name, len))
		return -EINVAL;

@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,

	lc = (struct tipc_link_config *)TLV_DATA(msg->req);

	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
	len = TLV_GET_DATA_LEN(msg->req);
	len -= offsetof(struct tipc_link_config, name);
	if (len <= 0)
		return -EINVAL;

	len = min_t(int, len, TIPC_MAX_LINK_NAME);
	if (!string_is_valid(lc->name, len))
		return -EINVAL;

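Both TIPC checks now subtract the offset of the name field from the TLV payload length before clamping it to the maximum name size, so a short TLV is rejected instead of the string check reading past the end of the buffer. A sketch of the corrected bounds arithmetic with an invented config struct:

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_NAME 16

    struct config {
        unsigned int priority;
        char name[MAX_NAME];
    };

    /* Return how many bytes of 'name' are actually present in a payload
     * of tlv_len bytes, or -1 if the payload ends before 'name' starts. */
    static int name_bytes(int tlv_len)
    {
        int len = tlv_len - (int)offsetof(struct config, name);

        if (len <= 0)
            return -1;            /* the kernel returns -EINVAL */
        return len < MAX_NAME ? len : MAX_NAME;
    }

    int main(void)
    {
        printf("payload 2 bytes  -> %d\n", name_bytes(2));   /* -1 */
        printf("payload 8 bytes  -> %d\n", name_bytes(8));   /*  4 */
        printf("payload 64 bytes -> %d\n", name_bytes(64));  /* 16 */
        return 0;
    }
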
@@ -874,7 +874,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
	goto release_netdev;

free_sw_resources:
	up_read(&device_offload_lock);
	tls_sw_free_resources_rx(sk);
	down_read(&device_offload_lock);
release_ctx:
	ctx->priv_ctx_rx = NULL;
release_netdev:

@@ -909,8 +911,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
	}
out:
	up_read(&device_offload_lock);
	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);
	tls_sw_release_resources_rx(sk);
}

@@ -193,6 +193,9 @@ static void update_chksum(struct sk_buff *skb, int headln)

static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
{
	struct sock *sk = skb->sk;
	int delta;

	skb_copy_header(nskb, skb);

	skb_put(nskb, skb->len);

@@ -200,11 +203,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
	update_chksum(nskb, headln);

	nskb->destructor = skb->destructor;
	nskb->sk = skb->sk;
	nskb->sk = sk;
	skb->destructor = NULL;
	skb->sk = NULL;
	refcount_add(nskb->truesize - skb->truesize,
		     &nskb->sk->sk_wmem_alloc);

	delta = nskb->truesize - skb->truesize;
	if (likely(delta < 0))
		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	else if (delta)
		refcount_add(delta, &sk->sk_wmem_alloc);
}

/* This function may be called after the user socket is already

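The fallback fix splits the truesize adjustment by sign, because the delta can be negative and refcount_add() must never be called with a negative value. A sketch of the signed-delta update against a stand-in counter; real refcount_t saturates and warns rather than wrapping:

    #include <assert.h>
    #include <stdio.h>

    /* Stand-in for the kernel's sk_wmem_alloc refcount. */
    static unsigned int wmem = 4096;

    static void adjust_wmem(int delta)
    {
        if (delta < 0) {
            /* roughly what WARN_ON_ONCE(refcount_sub_and_test())
             * guards against: dropping below zero */
            assert(wmem >= (unsigned int)-delta);
            wmem -= (unsigned int)-delta;
        } else if (delta) {
            wmem += (unsigned int)delta;
        }
    }

    int main(void)
    {
        adjust_wmem(512);            /* new skb is bigger  */
        adjust_wmem(-1024);          /* new skb is smaller */
        printf("wmem = %u\n", wmem); /* 3584 */
        return 0;
    }
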
@@ -290,11 +290,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
		tls_sw_free_resources_tx(sk);
	}

	if (ctx->rx_conf == TLS_SW) {
		kfree(ctx->rx.rec_seq);
		kfree(ctx->rx.iv);
	if (ctx->rx_conf == TLS_SW)
		tls_sw_free_resources_rx(sk);
	}

#ifdef CONFIG_TLS_DEVICE
	if (ctx->rx_conf == TLS_HW)

@@ -1118,6 +1118,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);

	kfree(tls_ctx->rx.rec_seq);
	kfree(tls_ctx->rx.iv);

	if (ctx->aead_recv) {
		kfree_skb(ctx->recv_pkt);
		ctx->recv_pkt = NULL;

@@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
 */
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
	const struct virtio_transport *t;
	struct virtio_vsock_pkt *reply;
	struct virtio_vsock_pkt_info info = {
		.op = VIRTIO_VSOCK_OP_RST,
		.type = le16_to_cpu(pkt->hdr.type),

@@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
	if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
		return 0;

	pkt = virtio_transport_alloc_pkt(&info, 0,
					 le64_to_cpu(pkt->hdr.dst_cid),
					 le32_to_cpu(pkt->hdr.dst_port),
					 le64_to_cpu(pkt->hdr.src_cid),
					 le32_to_cpu(pkt->hdr.src_port));
	if (!pkt)
	reply = virtio_transport_alloc_pkt(&info, 0,
					   le64_to_cpu(pkt->hdr.dst_cid),
					   le32_to_cpu(pkt->hdr.dst_port),
					   le64_to_cpu(pkt->hdr.src_cid),
					   le32_to_cpu(pkt->hdr.src_port));
	if (!reply)
		return -ENOMEM;

	return virtio_transport_get_ops()->send_pkt(pkt);
	t = virtio_transport_get_ops();
	if (!t) {
		virtio_transport_free_pkt(reply);
		return -ENOTCONN;
	}

	return t->send_pkt(reply);
}

static void virtio_transport_wait_close(struct sock *sk, long timeout)

@@ -7394,8 +7394,10 @@ static void ca0132_free(struct hda_codec *codec)
	ca0132_exit_chip(codec);

	snd_hda_power_down(codec);
	if (IS_ENABLED(CONFIG_PCI) && spec->mem_base)
#ifdef CONFIG_PCI
	if (spec->mem_base)
		pci_iounmap(codec->bus->pci, spec->mem_base);
#endif
	kfree(spec->spec_init_verbs);
	kfree(codec->spec);
}