This is the 4.19.17 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlxHf8cACgkQONu9yGCS
 aT71Mg/9FnDYja+AD9hj01kFsh6+C4K/QLZY69kLgzmNvr1htsWLRvxSta0dIKc0
 In4rianKMhOHekGub6ufO0Ne1jPV9ZCF61cZ/oENISB5D/oVZJL+baR92zeodSg9
 XFBPRu9eKPQV+UFPliyyKEJtyWEmLHvJMOQkKft0reduZgPy0xonkQ97K48QmF9G
 b/Ly6E8c/qfQThIqn0wfPQ2DUYET9cCE667iw8+Mwzr2HYuLoltyp9ODyMW2fuNT
 vyKve8s+IQ8wCKy1fkwyIJD7CjV0mJMJfUYx1Ax+ewU6MtBDrhEyfcfA9sJfsyRH
 k/BydK4aQJqcejp8ajOVQjZFZtGMnuTM38n3SpJnyNLWz6JvCTQr8dl2A5Y5/iph
 Q1FQH9BHKWCCJO8JVjfMYhCewvdo47mjE1gUfs9HyyW4SjJxhJCn07u2LU1YCRHW
 G9NqRb208UZw7O6prCsdZRlZPJjon1Fln7ym/esKjuMRyNNycV093ysPaqzhKrJq
 2Dxgt+fYBaP63BawAZUC+kQ0iX4OcSja78F4txbVBeksqskNAPHreMbcd5PDid/h
 bN89kPVCIV0eFJa0AMuKHdrbljRH/I6wbKmz3KvyjoRgq8KGc2PvrSe4DTJfax3W
 gOEnESLn7r58oUQ0OmfSv7U4zU700tuH9wOpFZyb5vqVvdXcQzA=
 =NSqX
 -----END PGP SIGNATURE-----

Merge 4.19.17 into android-4.19

Changes in 4.19.17
	tty/ldsem: Wake up readers after timed out down_write()
	tty: Hold tty_ldisc_lock() during tty_reopen()
	tty: Simplify tty->count math in tty_reopen()
	tty: Don't hold ldisc lock in tty_reopen() if ldisc present
	can: gw: ensure DLC boundaries after CAN frame modification
	netfilter: nf_conncount: replace CONNCOUNT_LOCK_SLOTS with CONNCOUNT_SLOTS
	netfilter: nf_conncount: don't skip eviction when age is negative
	netfilter: nf_conncount: split gc in two phases
	netfilter: nf_conncount: restart search when nodes have been erased
	netfilter: nf_conncount: merge lookup and add functions
	netfilter: nf_conncount: move all list iterations under spinlock
	netfilter: nf_conncount: speculative garbage collection on empty lists
	netfilter: nf_conncount: fix argument order to find_next_bit
	mmc: sdhci-msm: Disable CDR function on TX
	Revert "scsi: target: iscsi: cxgbit: fix csk leak"
	scsi: target: iscsi: cxgbit: fix csk leak
	scsi: target: iscsi: cxgbit: fix csk leak
	arm64/kvm: consistently handle host HCR_EL2 flags
	arm64: Don't trap host pointer auth use to EL2
	ipv6: fix kernel-infoleak in ipv6_local_error()
	net: bridge: fix a bug on using a neighbour cache entry without checking its state
	packet: Do not leak dev refcounts on error exit
	tcp: change txhash on SYN-data timeout
	tun: publish tfile after it's fully initialized
	lan743x: Remove phy_read from link status change function
	smc: move unhash as early as possible in smc_release()
	r8169: don't try to read counters if chip is in a PCI power-save state
	bonding: update nest level on unlink
	ip: on queued skb use skb_header_pointer instead of pskb_may_pull
	r8169: load Realtek PHY driver module before r8169
	crypto: sm3 - fix undefined shift by >= width of value
	crypto: caam - fix zero-length buffer DMA mapping
	crypto: authencesn - Avoid twice completion call in decrypt path
	crypto: ccree - convert to use crypto_authenc_extractkeys()
	crypto: bcm - convert to use crypto_authenc_extractkeys()
	crypto: authenc - fix parsing key with misaligned rta_len
	crypto: talitos - reorder code in talitos_edesc_alloc()
	crypto: talitos - fix ablkcipher for CONFIG_VMAP_STACK
	xen: Fix x86 sched_clock() interface for xen
	Revert "btrfs: balance dirty metadata pages in btrfs_finish_ordered_io"
	btrfs: wait on ordered extents on abort cleanup
	Yama: Check for pid death before checking ancestry
	scsi: core: Synchronize request queue PM status only on successful resume
	scsi: sd: Fix cache_type_store()
	mips: fix n32 compat_ipc_parse_version
	MIPS: BCM47XX: Setup struct device for the SoC
	MIPS: lantiq: Fix IPI interrupt handling
	drm/i915/gvt: Fix mmap range check
	OF: properties: add missing of_node_put
	mfd: tps6586x: Handle interrupts on suspend
	media: v4l: ioctl: Validate num_planes for debug messages
	RDMA/nldev: Don't expose unsafe global rkey to regular user
	RDMA/vmw_pvrdma: Return the correct opcode when creating WR
	kbuild: Disable LD_DEAD_CODE_DATA_ELIMINATION with ftrace & GCC <= 4.7
	net: dsa: realtek-smi: fix OF child-node lookup
	pstore/ram: Avoid allocation and leak of platform data
	arm64: kaslr: ensure randomized quantities are clean to the PoC
	arm64: dts: marvell: armada-ap806: reserve PSCI area
	Disable MSI also when pcie-octeon.pcie_disable on
	fix int_sqrt64() for very large numbers
	omap2fb: Fix stack memory disclosure
	media: vivid: fix error handling of kthread_run
	media: vivid: set min width/height to a value > 0
	bpf: in __bpf_redirect_no_mac pull mac only if present
	ipv6: make icmp6_send() robust against null skb->dev
	LSM: Check for NULL cred-security on free
	media: vb2: vb2_mmap: move lock up
	sunrpc: handle ENOMEM in rpcb_getport_async
	netfilter: ebtables: account ebt_table_info to kmemcg
	block: use rcu_work instead of call_rcu to avoid sleep in softirq
	selinux: fix GPF on invalid policy
	blockdev: Fix livelocks on loop device
	sctp: allocate sctp_sockaddr_entry with kzalloc
	tipc: fix uninit-value in in tipc_conn_rcv_sub
	tipc: fix uninit-value in tipc_nl_compat_link_reset_stats
	tipc: fix uninit-value in tipc_nl_compat_bearer_enable
	tipc: fix uninit-value in tipc_nl_compat_link_set
	tipc: fix uninit-value in tipc_nl_compat_name_table_dump
	tipc: fix uninit-value in tipc_nl_compat_doit
	block/loop: Don't grab "struct file" for vfs_getattr() operation.
	block/loop: Use global lock for ioctl() operation.
	loop: Fold __loop_release into loop_release
	loop: Get rid of loop_index_mutex
	loop: Push lo_ctl_mutex down into individual ioctls
	loop: Split setting of lo_state from loop_clr_fd
	loop: Push loop_ctl_mutex down into loop_clr_fd()
	loop: Push loop_ctl_mutex down to loop_get_status()
	loop: Push loop_ctl_mutex down to loop_set_status()
	loop: Push loop_ctl_mutex down to loop_set_fd()
	loop: Push loop_ctl_mutex down to loop_change_fd()
	loop: Move special partition reread handling in loop_clr_fd()
	loop: Move loop_reread_partitions() out of loop_ctl_mutex
	loop: Fix deadlock when calling blkdev_reread_part()
	loop: Avoid circular locking dependency between loop_ctl_mutex and bd_mutex
	loop: Get rid of 'nested' acquisition of loop_ctl_mutex
	loop: Fix double mutex_unlock(&loop_ctl_mutex) in loop_control_ioctl()
	loop: drop caches if offset or block_size are changed
	drm/fb-helper: Ignore the value of fb_var_screeninfo.pixclock
	selftests: Fix test errors related to lib.mk khdr target
	media: vb2: be sure to unlock mutex on errors
	nbd: Use set_blocksize() to set device blocksize
	Linux 4.19.17

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman, 2019-01-23 08:46:58 +01:00
commit 73dc755ee0
86 changed files with 950 additions and 656 deletions

diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = "People's Front"

diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -27,6 +27,23 @@
 		method = "smc";
 	};

+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		/*
+		 * This area matches the mapping done with a
+		 * mainline U-Boot, and should be updated by the
+		 * bootloader.
+		 */
+
+		psci-area@4000000 {
+			reg = <0x0 0x4000000 0x0 0x200000>;
+			no-map;
+		};
+	};
+
 	ap806 {
 		#address-cells = <2>;
 		#size-cells = <2>;

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
@@ -24,6 +24,8 @@

 /* Hyp Configuration Register (HCR) bits */
 #define HCR_FWB		(UL(1) << 46)
+#define HCR_API		(UL(1) << 41)
+#define HCR_APK		(UL(1) << 40)
 #define HCR_TEA		(UL(1) << 37)
 #define HCR_TERR	(UL(1) << 36)
 #define HCR_TLOR	(UL(1) << 35)
@@ -87,6 +89,7 @@
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
 #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)

 /* TCR_EL2 Registers bits */

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
@@ -494,10 +494,9 @@ ENTRY(el2_setup)
 #endif

 	/* Hyp configuration. */
-	mov	x0, #HCR_RW			// 64-bit EL1
+	mov_q	x0, HCR_HOST_NVHE_FLAGS
 	cbz	x2, set_hcr
-	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
-	orr	x0, x0, #HCR_E2H
+	mov_q	x0, HCR_HOST_VHE_FLAGS
 set_hcr:
 	msr	hcr_el2, x0
 	isb

diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>

+#include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -43,7 +44,7 @@ static __init u64 get_kaslr_seed(void *fdt)
 	return ret;
 }

-static __init const u8 *get_cmdline(void *fdt)
+static __init const u8 *kaslr_get_cmdline(void *fdt)
 {
 	static __initconst const u8 default_cmdline[] = CONFIG_CMDLINE;

@@ -109,7 +110,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	 * Check if 'nokaslr' appears on the command line, and
 	 * return 0 if that is the case.
 	 */
-	cmdline = get_cmdline(fdt);
+	cmdline = kaslr_get_cmdline(fdt);
 	str = strstr(cmdline, "nokaslr");
 	if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
 		return 0;
@@ -169,5 +170,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
 	module_alloc_base &= PAGE_MASK;

+	__flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+	__flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+
 	return offset;
 }

diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
@@ -157,7 +157,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
 	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

 	write_sysreg(mdcr_el2, mdcr_el2);
-	write_sysreg(HCR_RW, hcr_el2);
+	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
 	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
@@ -3149,6 +3149,7 @@ config MIPS32_O32
 config MIPS32_N32
 	bool "Kernel support for n32 binaries"
 	depends on 64BIT
+	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select COMPAT
 	select MIPS32_COMPAT
 	select SYSVIPC_COMPAT if SYSVIPC

diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
@@ -173,6 +173,31 @@ void __init plat_mem_setup(void)
 	pm_power_off = bcm47xx_machine_halt;
 }

+#ifdef CONFIG_BCM47XX_BCMA
+static struct device * __init bcm47xx_setup_device(void)
+{
+	struct device *dev;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	err = dev_set_name(dev, "bcm47xx_soc");
+	if (err) {
+		pr_err("Failed to set SoC device name: %d\n", err);
+		kfree(dev);
+		return NULL;
+	}
+
+	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	if (err)
+		pr_err("Failed to set SoC DMA mask: %d\n", err);
+
+	return dev;
+}
+#endif
+
 /*
  * This finishes bus initialization doing things that were not possible without
  * kmalloc. Make sure to call it late enough (after mm_init).
@@ -183,6 +208,10 @@ void __init bcm47xx_bus_setup(void)
 	if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
 		int err;

+		bcm47xx_bus.bcma.dev = bcm47xx_setup_device();
+		if (!bcm47xx_bus.bcma.dev)
+			panic("Failed to setup SoC device\n");
+
 		err = bcma_host_soc_init(&bcm47xx_bus.bcma);
 		if (err)
 			panic("Failed to initialize BCMA bus (err %d)", err);
@@ -235,6 +264,8 @@ static int __init bcm47xx_register_bus_complete(void)
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
 	case BCM47XX_BUS_TYPE_BCMA:
+		if (device_register(bcm47xx_bus.bcma.dev))
+			pr_err("Failed to register SoC device\n");
 		bcma_bus_register(&bcm47xx_bus.bcma.bus);
 		break;
 #endif

diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
@@ -224,9 +224,11 @@ static struct irq_chip ltq_eiu_type = {
 	.irq_set_type = ltq_eiu_settype,
 };

-static void ltq_hw_irqdispatch(int module)
+static void ltq_hw_irq_handler(struct irq_desc *desc)
 {
+	int module = irq_desc_get_irq(desc) - 2;
 	u32 irq;
+	int hwirq;

 	irq = ltq_icu_r32(module, LTQ_ICU_IM0_IOSR);
 	if (irq == 0)
@@ -237,7 +239,8 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
 	 * other bits might be bogus
 	 */
 	irq = __fls(irq);
-	do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
+	hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+	generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));

 	/* if this is a EBU irq, we need to ack it or get a deadlock */
 	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
@@ -245,49 +248,6 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
 		     LTQ_EBU_PCC_ISTAT);
 }

-#define DEFINE_HWx_IRQDISPATCH(x)					\
-static void ltq_hw ## x ## _irqdispatch(void)				\
-{									\
-	ltq_hw_irqdispatch(x);						\
-}
-DEFINE_HWx_IRQDISPATCH(0)
-DEFINE_HWx_IRQDISPATCH(1)
-DEFINE_HWx_IRQDISPATCH(2)
-DEFINE_HWx_IRQDISPATCH(3)
-DEFINE_HWx_IRQDISPATCH(4)
-
-#if MIPS_CPU_TIMER_IRQ == 7
-static void ltq_hw5_irqdispatch(void)
-{
-	do_IRQ(MIPS_CPU_TIMER_IRQ);
-}
-#else
-DEFINE_HWx_IRQDISPATCH(5)
-#endif
-
-static void ltq_hw_irq_handler(struct irq_desc *desc)
-{
-	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	int irq;
-
-	if (!pending) {
-		spurious_interrupt();
-		return;
-	}
-
-	pending >>= CAUSEB_IP;
-	while (pending) {
-		irq = fls(pending) - 1;
-		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
-		pending &= ~BIT(irq);
-	}
-}
-
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
 	struct irq_chip *chip = &ltq_irq_type;
@@ -343,28 +303,10 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 	for (i = 0; i < MAX_IM; i++)
 		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);

-	if (cpu_has_vint) {
-		pr_info("Setting up vectored interrupts\n");
-		set_vi_handler(2, ltq_hw0_irqdispatch);
-		set_vi_handler(3, ltq_hw1_irqdispatch);
-		set_vi_handler(4, ltq_hw2_irqdispatch);
-		set_vi_handler(5, ltq_hw3_irqdispatch);
-		set_vi_handler(6, ltq_hw4_irqdispatch);
-		set_vi_handler(7, ltq_hw5_irqdispatch);
-	}
-
 	ltq_domain = irq_domain_add_linear(node,
 		(MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
 		&irq_domain_ops, 0);

-#ifndef CONFIG_MIPS_MT_SMP
-	set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
-		IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#else
-	set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
-		IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
-#endif
-
 	/* tell oprofile which irq to use */
 	ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);

diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
 	int irq;
 	struct irq_chip *msi;

-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+		return 0;
+	} else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
 		msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
 		msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
 		msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;

diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
@@ -361,8 +361,6 @@ void xen_timer_resume(void)
 {
 	int cpu;

-	pvclock_resume();
-
 	if (xen_clockevent != &xen_vcpuop_clockevent)
 		return;

@@ -379,12 +377,15 @@ static const struct pv_time_ops xen_time_ops __initconst = {
 };

 static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
+static u64 xen_clock_value_saved;

 void xen_save_time_memory_area(void)
 {
 	struct vcpu_register_time_memory_area t;
 	int ret;

+	xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset;
+
 	if (!xen_clock)
 		return;

@@ -404,7 +405,7 @@ void xen_restore_time_memory_area(void)
 	int ret;

 	if (!xen_clock)
-		return;
+		goto out;

 	t.addr.v = &xen_clock->pvti;

@@ -421,6 +422,11 @@ void xen_restore_time_memory_area(void)
 	if (ret != 0)
 		pr_notice("Cannot restore secondary vcpu_time_info (err %d)",
 			  ret);
+
+out:
+	/* Need pvclock_resume() before using xen_clocksource_read(). */
+	pvclock_resume();
+	xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
 }

 static void xen_setup_vsyscall_time_info(void)
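
Background note (illustrative, not part of the 4.19.17 diff): on Xen, sched_clock is derived from the clocksource minus a boot-time offset, and the clocksource value may jump across save/restore. The fix records the sched_clock value at save time and rebases the offset at restore time, so sched_clock stays continuous. Sketch of the invariant, using the symbol names from the patch:

/*
 * sched_clock = xen_clocksource_read() - xen_sched_clock_offset
 *
 * save:    xen_clock_value_saved  = xen_clocksource_read() - xen_sched_clock_offset;
 * restore: xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved;
 *
 * Immediately after restore, sched_clock therefore evaluates to
 * xen_clock_value_saved, i.e. it resumes exactly where it stopped.
 */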

diff --git a/block/partition-generic.c b/block/partition-generic.c
@@ -249,9 +249,10 @@ struct device_type part_type = {
 	.uevent		= part_uevent,
 };

-static void delete_partition_rcu_cb(struct rcu_head *head)
+static void delete_partition_work_fn(struct work_struct *work)
 {
-	struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+	struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
+					rcu_work);

 	part->start_sect = 0;
 	part->nr_sects = 0;
@@ -262,7 +263,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
 void __delete_partition(struct percpu_ref *ref)
 {
 	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
-	call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+	INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
+	queue_rcu_work(system_wq, &part->rcu_work);
 }

 /*
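
Background note (illustrative, not part of the 4.19.17 diff): call_rcu() callbacks execute in softirq context, where sleeping is forbidden, which is what made the old callback unsafe; queue_rcu_work() waits out the RCU grace period and then runs the handler from a workqueue in process context. A minimal standalone sketch of the pattern, with hypothetical struct and function names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct rcu_work rwork;
};

static void obj_free_fn(struct work_struct *work)
{
	struct obj *o = container_of(to_rcu_work(work), struct obj, rwork);

	kfree(o);	/* process context: sleeping/locking is allowed here */
}

static void obj_schedule_free(struct obj *o)
{
	INIT_RCU_WORK(&o->rwork, obj_free_fn);
	queue_rcu_work(system_wq, &o->rwork);	/* grace period, then work */
}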

diff --git a/crypto/authenc.c b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
 		return -EINVAL;
 	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 		return -EINVAL;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+	/*
+	 * RTA_OK() didn't align the rtattr's payload when validating that it
+	 * fits in the buffer.  Yet, the keys should start on the next 4-byte
+	 * aligned boundary.  To avoid confusion, require that the rtattr
+	 * payload be exactly the param struct, which has a 4-byte aligned size.
+	 */
+	if (RTA_PAYLOAD(rta) != sizeof(*param))
 		return -EINVAL;
+	BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);

 	param = RTA_DATA(rta);
 	keys->enckeylen = be32_to_cpu(param->enckeylen);

-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
+	key += rta->rta_len;
+	keylen -= rta->rta_len;

 	if (keylen < keys->enckeylen)
 		return -EINVAL;
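
Background note (illustrative, not part of the 4.19.17 diff): crypto_authenc_extractkeys() parses a key blob framed as a struct rtattr header (type CRYPTO_AUTHENC_KEYA_PARAM) whose payload is a struct crypto_authenc_key_param carrying the big-endian encryption-key length, followed by the authentication key and then the encryption key. A sketch of building such a blob — hypothetical helper, minimal error handling:

#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <crypto/authenc.h>

static int fill_authenc_key(u8 *buf, unsigned int buflen,
			    const u8 *authkey, unsigned int authkeylen,
			    const u8 *enckey, unsigned int enckeylen)
{
	struct rtattr *rta = (struct rtattr *)buf;
	struct crypto_authenc_key_param *param;

	if (buflen < RTA_LENGTH(sizeof(*param)) + authkeylen + enckeylen)
		return -EINVAL;

	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	/* payload is exactly the param struct, as the fixed parser requires */
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	/* keys start right at rta->rta_len: auth key first, enc key after */
	memcpy(buf + rta->rta_len, authkey, authkeylen);
	memcpy(buf + rta->rta_len + authkeylen, enckey, enckeylen);
	return 0;
}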

diff --git a/crypto/authencesn.c b/crypto/authencesn.c
@@ -279,7 +279,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
 	struct aead_request *req = areq->data;

 	err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
-	aead_request_complete(req, err);
+	authenc_esn_request_complete(req, err);
 }

 static int crypto_authenc_esn_decrypt(struct aead_request *req)

diff --git a/crypto/sm3_generic.c b/crypto/sm3_generic.c
@@ -100,7 +100,7 @@ static void sm3_compress(u32 *w, u32 *wt, u32 *m)

 	for (i = 0; i <= 63; i++) {

-		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i)), 7);
+		ss1 = rol32((rol32(a, 12) + e + rol32(t(i), i & 31)), 7);

 		ss2 = ss1 ^ rol32(a, 12);
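
Background note (illustrative, not part of the 4.19.17 diff): shifting a 32-bit value by 32 or more is undefined behaviour in C, and for i in 32..63 the old rol32(t(i), i) did exactly that. A rotate count is periodic in the word size, so masking with 31 yields the rotation that was intended. A self-contained sketch of a safe 32-bit rotate (hypothetical helper, not from the patch):

static inline u32 rol32_safe(u32 word, unsigned int shift)
{
	/* reduce the count mod 32 so neither shift can reach 32 */
	shift &= 31;
	return (word << shift) | (word >> ((32 - shift) & 31));
}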

diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -83,7 +83,7 @@
 #include <linux/uaccess.h>

 static DEFINE_IDR(loop_index_idr);
-static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
 static int max_part;
 static int part_shift;
@@ -631,18 +631,7 @@ static void loop_reread_partitions(struct loop_device *lo,
 {
 	int rc;

-	/*
-	 * bd_mutex has been held already in release path, so don't
-	 * acquire it if this function is called in such case.
-	 *
-	 * If the reread partition isn't from release path, lo_refcnt
-	 * must be at least one and it can only become zero when the
-	 * current holder is released.
-	 */
-	if (!atomic_read(&lo->lo_refcnt))
-		rc = __blkdev_reread_part(bdev);
-	else
-		rc = blkdev_reread_part(bdev);
+	rc = blkdev_reread_part(bdev);
 	if (rc)
 		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
 			__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -689,26 +678,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 			  unsigned int arg)
 {
-	struct file	*file, *old_file;
+	struct file	*file = NULL, *old_file;
 	int		error;
+	bool		partscan;

+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		return error;
 	error = -ENXIO;
 	if (lo->lo_state != Lo_bound)
-		goto out;
+		goto out_err;

 	/* the loop device has to be read-only */
 	error = -EINVAL;
 	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
-		goto out;
+		goto out_err;

 	error = -EBADF;
 	file = fget(arg);
 	if (!file)
-		goto out;
+		goto out_err;

 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_err;

 	old_file = lo->lo_backing_file;
@@ -716,7 +709,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 	/* size of the new backing store needs to be the same */
 	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
-		goto out_putf;
+		goto out_err;

 	/* and ... switch */
 	blk_mq_freeze_queue(lo->lo_queue);
@@ -727,15 +720,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
 			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
 	loop_update_dio(lo);
 	blk_mq_unfreeze_queue(lo->lo_queue);
-
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+	mutex_unlock(&loop_ctl_mutex);
+	/*
+	 * We must drop file reference outside of loop_ctl_mutex as dropping
+	 * the file ref can take bd_mutex which creates circular locking
+	 * dependency.
+	 */
 	fput(old_file);
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+	if (partscan)
 		loop_reread_partitions(lo, bdev);
 	return 0;

-out_putf:
-	fput(file);
-out:
+out_err:
+	mutex_unlock(&loop_ctl_mutex);
+	if (file)
+		fput(file);
 	return error;
 }
@@ -910,6 +910,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	int		lo_flags = 0;
 	int		error;
 	loff_t		size;
+	bool		partscan;

 	/* This is safe, since we have a reference from open(). */
 	__module_get(THIS_MODULE);
@@ -919,13 +920,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	if (!file)
 		goto out;

+	error = mutex_lock_killable(&loop_ctl_mutex);
+	if (error)
+		goto out_putf;
+
 	error = -EBUSY;
 	if (lo->lo_state != Lo_unbound)
-		goto out_putf;
+		goto out_unlock;

 	error = loop_validate_file(file, bdev);
 	if (error)
-		goto out_putf;
+		goto out_unlock;

 	mapping = file->f_mapping;
 	inode = mapping->host;
@@ -937,10 +942,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	error = -EFBIG;
 	size = get_loop_size(lo, file);
 	if ((loff_t)(sector_t)size != size)
-		goto out_putf;
+		goto out_unlock;
 	error = loop_prepare_queue(lo);
 	if (error)
-		goto out_putf;
+		goto out_unlock;

 	error = 0;
@@ -972,18 +977,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 	lo->lo_state = Lo_bound;
 	if (part_shift)
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
-	if (lo->lo_flags & LO_FLAGS_PARTSCAN)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;

 	/* Grab the block_device to prevent its destruction after we
-	 * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
 	 */
 	bdgrab(bdev);
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);
 	return 0;

-out_putf:
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+out_putf:
 	fput(file);
out:
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
 	return error;
@@ -1026,39 +1035,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
 	return err;
 }

-static int loop_clr_fd(struct loop_device *lo)
+static int __loop_clr_fd(struct loop_device *lo, bool release)
 {
-	struct file *filp = lo->lo_backing_file;
+	struct file *filp = NULL;
 	gfp_t gfp = lo->old_gfp_mask;
 	struct block_device *bdev = lo->lo_device;
+	int err = 0;
+	bool partscan = false;
+	int lo_number;

-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-
-	/*
-	 * If we've explicitly asked to tear down the loop device,
-	 * and it has an elevated reference count, set it for auto-teardown when
-	 * the last reference goes away. This stops $!~#$@ udev from
-	 * preventing teardown because it decided that it needs to run blkid on
-	 * the loopback device whenever they appear. xfstests is notorious for
-	 * failing tests because blkid via udev races with a losetup
-	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
-	 * command to fail with EBUSY.
-	 */
-	if (atomic_read(&lo->lo_refcnt) > 1) {
-		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
-		mutex_unlock(&lo->lo_ctl_mutex);
-		return 0;
+	mutex_lock(&loop_ctl_mutex);
+	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
+		err = -ENXIO;
+		goto out_unlock;
 	}

-	if (filp == NULL)
-		return -EINVAL;
+	filp = lo->lo_backing_file;
+	if (filp == NULL) {
+		err = -EINVAL;
+		goto out_unlock;
+	}

 	/* freeze request queue during the transition */
 	blk_mq_freeze_queue(lo->lo_queue);

 	spin_lock_irq(&lo->lo_lock);
-	lo->lo_state = Lo_rundown;
 	lo->lo_backing_file = NULL;
 	spin_unlock_irq(&lo->lo_lock);
@@ -1094,21 +1095,73 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);

-	if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
-		loop_reread_partitions(lo, bdev);
+	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+	lo_number = lo->lo_number;
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
 	loop_unprepare_queue(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan) {
+		/*
+		 * bd_mutex has been held already in release path, so don't
+		 * acquire it if this function is called in such case.
+		 *
+		 * If the reread partition isn't from release path, lo_refcnt
+		 * must be at least one and it can only become zero when the
+		 * current holder is released.
+		 */
+		if (release)
+			err = __blkdev_reread_part(bdev);
+		else
+			err = blkdev_reread_part(bdev);
+		pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+			__func__, lo_number, err);
+		/* Device is gone, no point in returning error */
+		err = 0;
+	}
 	/*
-	 * Need not hold lo_ctl_mutex to fput backing file.
-	 * Calling fput holding lo_ctl_mutex triggers a circular
+	 * Need not hold loop_ctl_mutex to fput backing file.
+	 * Calling fput holding loop_ctl_mutex triggers a circular
 	 * lock dependency possibility warning as fput can take
-	 * bd_mutex which is usually taken before lo_ctl_mutex.
+	 * bd_mutex which is usually taken before loop_ctl_mutex.
 	 */
-	fput(filp);
-	return 0;
+	if (filp)
+		fput(filp);
+	return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	if (lo->lo_state != Lo_bound) {
+		mutex_unlock(&loop_ctl_mutex);
+		return -ENXIO;
+	}
+	/*
+	 * If we've explicitly asked to tear down the loop device,
+	 * and it has an elevated reference count, set it for auto-teardown when
+	 * the last reference goes away. This stops $!~#$@ udev from
+	 * preventing teardown because it decided that it needs to run blkid on
+	 * the loopback device whenever they appear. xfstests is notorious for
+	 * failing tests because blkid via udev races with a losetup
+	 * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+	 * command to fail with EBUSY.
+	 */
+	if (atomic_read(&lo->lo_refcnt) > 1) {
+		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+		mutex_unlock(&loop_ctl_mutex);
+		return 0;
+	}
+	lo->lo_state = Lo_rundown;
+	mutex_unlock(&loop_ctl_mutex);
+
+	return __loop_clr_fd(lo, false);
 }

 static int
@@ -1117,15 +1170,26 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
+	struct block_device *bdev;
+	bool partscan = false;

+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
-	    !capable(CAP_SYS_ADMIN))
-		return -EPERM;
-	if (lo->lo_state != Lo_bound)
-		return -ENXIO;
-	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
-		return -EINVAL;
+	    !capable(CAP_SYS_ADMIN)) {
+		err = -EPERM;
+		goto out_unlock;
+	}
+	if (lo->lo_state != Lo_bound) {
+		err = -ENXIO;
+		goto out_unlock;
+	}
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
+		err = -EINVAL;
+		goto out_unlock;
+	}

 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
@@ -1138,26 +1202,26 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)

 	err = loop_release_xfer(lo);
 	if (err)
-		goto exit;
+		goto out_unfreeze;

 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;

 		if (type >= MAX_LO_CRYPT) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 		xfer = xfer_funcs[type];
 		if (xfer == NULL) {
 			err = -EINVAL;
-			goto exit;
+			goto out_unfreeze;
 		}
 	} else
 		xfer = NULL;

 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		goto exit;
+		goto out_unfreeze;

 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit) {
@@ -1167,11 +1231,11 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 			pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
 				__func__, lo->lo_number, lo->lo_file_name,
 				lo->lo_device->bd_inode->i_mapping->nrpages);
-			goto exit;
+			goto out_unfreeze;
 		}
 		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
-			goto exit;
+			goto out_unfreeze;
 		}
 	}
@@ -1203,15 +1267,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);

- exit:
+out_unfreeze:
 	blk_mq_unfreeze_queue(lo->lo_queue);

 	if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
 	     !(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
 		lo->lo_flags |= LO_FLAGS_PARTSCAN;
 		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
-		loop_reread_partitions(lo, lo->lo_device);
+		bdev = lo->lo_device;
+		partscan = true;
 	}
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
+	if (partscan)
+		loop_reread_partitions(lo, bdev);

 	return err;
 }
@@ -1219,12 +1288,15 @@
 static int
 loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 {
-	struct file *file;
+	struct path path;
 	struct kstat stat;
 	int ret;

+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
 	if (lo->lo_state != Lo_bound) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+		mutex_unlock(&loop_ctl_mutex);
 		return -ENXIO;
 	}
@@ -1243,17 +1315,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
 		       lo->lo_encrypt_key_size);
 	}

-	/* Drop lo_ctl_mutex while we call into the filesystem. */
-	file = get_file(lo->lo_backing_file);
-	mutex_unlock(&lo->lo_ctl_mutex);
-	ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
-			  AT_STATX_SYNC_AS_STAT);
+	/* Drop loop_ctl_mutex while we call into the filesystem. */
+	path = lo->lo_backing_file->f_path;
+	path_get(&path);
+	mutex_unlock(&loop_ctl_mutex);
+	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
 	if (!ret) {
 		info->lo_device = huge_encode_dev(stat.dev);
 		info->lo_inode = stat.ino;
 		info->lo_rdevice = huge_encode_dev(stat.rdev);
 	}
-	fput(file);
+	path_put(&path);
 	return ret;
 }
@@ -1337,10 +1409,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
 	struct loop_info64 info64;
 	int err;

-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_old(&info64, &info);
@@ -1355,10 +1425,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
 	struct loop_info64 info64;
 	int err;

-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
 		err = -EFAULT;
@@ -1425,70 +1493,73 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
 	return err;
 }

+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+			   unsigned long arg)
+{
+	int err;
+
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
+	switch (cmd) {
+	case LOOP_SET_CAPACITY:
+		err = loop_set_capacity(lo);
+		break;
+	case LOOP_SET_DIRECT_IO:
+		err = loop_set_dio(lo, arg);
+		break;
+	case LOOP_SET_BLOCK_SIZE:
+		err = loop_set_block_size(lo, arg);
+		break;
+	default:
+		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+	}
+	mutex_unlock(&loop_ctl_mutex);
+	return err;
+}
+
 static int lo_ioctl(struct block_device *bdev, fmode_t mode,
 	unsigned int cmd, unsigned long arg)
 {
 	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;

-	err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
-	if (err)
-		goto out_unlocked;
-
 	switch (cmd) {
 	case LOOP_SET_FD:
-		err = loop_set_fd(lo, mode, bdev, arg);
-		break;
+		return loop_set_fd(lo, mode, bdev, arg);
 	case LOOP_CHANGE_FD:
-		err = loop_change_fd(lo, bdev, arg);
-		break;
+		return loop_change_fd(lo, bdev, arg);
 	case LOOP_CLR_FD:
-		/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
-		err = loop_clr_fd(lo);
-		if (!err)
-			goto out_unlocked;
-		break;
 	case LOOP_SET_STATUS:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status_old(lo,
 					(struct loop_info __user *)arg);
+		}
 		break;
 	case LOOP_GET_STATUS:
-		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status_old(lo, (struct loop_info __user *) arg);
 	case LOOP_SET_STATUS64:
 		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
 			err = loop_set_status64(lo,
 					(struct loop_info64 __user *) arg);
+		}
 		break;
 	case LOOP_GET_STATUS64:
-		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
-		/* loop_get_status() unlocks lo_ctl_mutex */
-		goto out_unlocked;
+		return loop_get_status64(lo, (struct loop_info64 __user *) arg);
 	case LOOP_SET_CAPACITY:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_capacity(lo);
-		break;
 	case LOOP_SET_DIRECT_IO:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_dio(lo, arg);
-		break;
 	case LOOP_SET_BLOCK_SIZE:
-		err = -EPERM;
-		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
-			err = loop_set_block_size(lo, arg);
-		break;
+		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		/* Fall through */
 	default:
-		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+		err = lo_simple_ioctl(lo, cmd, arg);
 		break;
 	}
-	mutex_unlock(&lo->lo_ctl_mutex);

-out_unlocked:
 	return err;
 }
@@ -1602,10 +1673,8 @@ loop_get_status_compat(struct loop_device *lo,
 	struct loop_info64 info64;
 	int err;

-	if (!arg) {
-		mutex_unlock(&lo->lo_ctl_mutex);
+	if (!arg)
 		return -EINVAL;
-	}
 	err = loop_get_status(lo, &info64);
 	if (!err)
 		err = loop_info64_to_compat(&info64, arg);
@@ -1620,20 +1689,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 	switch(cmd) {
 	case LOOP_SET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_set_status_compat(lo,
-						     (const struct compat_loop_info __user *)arg);
-			mutex_unlock(&lo->lo_ctl_mutex);
-		}
+		err = loop_set_status_compat(lo,
+			     (const struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_GET_STATUS:
-		err = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (!err) {
-			err = loop_get_status_compat(lo,
-						     (struct compat_loop_info __user *)arg);
-			/* loop_get_status() unlocks lo_ctl_mutex */
-		}
+		err = loop_get_status_compat(lo,
+			     (struct compat_loop_info __user *)arg);
 		break;
 	case LOOP_SET_CAPACITY:
 	case LOOP_CLR_FD:
@@ -1657,9 +1718,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
 	struct loop_device *lo;
-	int err = 0;
+	int err;

-	mutex_lock(&loop_index_mutex);
+	err = mutex_lock_killable(&loop_ctl_mutex);
+	if (err)
+		return err;
 	lo = bdev->bd_disk->private_data;
 	if (!lo) {
 		err = -ENXIO;
@@ -1668,26 +1731,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&lo->lo_refcnt);
 out:
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return err;
 }

-static void __lo_release(struct loop_device *lo)
+static void lo_release(struct gendisk *disk, fmode_t mode)
 {
-	int err;
+	struct loop_device *lo;

+	mutex_lock(&loop_ctl_mutex);
+	lo = disk->private_data;
 	if (atomic_dec_return(&lo->lo_refcnt))
-		return;
+		goto out_unlock;

-	mutex_lock(&lo->lo_ctl_mutex);
 	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+		if (lo->lo_state != Lo_bound)
+			goto out_unlock;
+		lo->lo_state = Lo_rundown;
+		mutex_unlock(&loop_ctl_mutex);
 		/*
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		err = loop_clr_fd(lo);
-		if (!err)
-			return;
+		__loop_clr_fd(lo, true);
+		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
 		 * Otherwise keep thread (if running) and config,
@@ -1697,14 +1764,8 @@ static void __lo_release(struct loop_device *lo)
 		blk_mq_unfreeze_queue(lo->lo_queue);
 	}

-	mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
-	mutex_lock(&loop_index_mutex);
-	__lo_release(disk->private_data);
-	mutex_unlock(&loop_index_mutex);
+out_unlock:
+	mutex_unlock(&loop_ctl_mutex);
 }

 static const struct block_device_operations lo_fops = {
@@ -1743,10 +1804,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
 	struct loop_device *lo = ptr;
 	struct loop_func_table *xfer = data;

-	mutex_lock(&lo->lo_ctl_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	if (lo->lo_encryption == xfer)
 		loop_release_xfer(lo);
-	mutex_unlock(&lo->lo_ctl_mutex);
+	mutex_unlock(&loop_ctl_mutex);
 	return 0;
 }
@@ -1927,7 +1988,6 @@ static int loop_add(struct loop_device **l, int i)
 	if (!part_shift)
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	disk->flags |= GENHD_FL_EXT_DEVT;
-	mutex_init(&lo->lo_ctl_mutex);
 	atomic_set(&lo->lo_refcnt, 0);
 	lo->lo_number		= i;
 	spin_lock_init(&lo->lo_lock);
@@ -2006,7 +2066,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
 	struct kobject *kobj;
 	int err;

-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
 	if (err < 0)
 		err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -2014,7 +2074,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
 		kobj = NULL;
 	else
 		kobj = get_disk_and_module(lo->lo_disk);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);

 	*part = 0;
 	return kobj;
@@ -2024,9 +2084,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 			       unsigned long parm)
 {
 	struct loop_device *lo;
-	int ret = -ENOSYS;
+	int ret;

-	mutex_lock(&loop_index_mutex);
+	ret = mutex_lock_killable(&loop_ctl_mutex);
+	if (ret)
+		return ret;
+
+	ret = -ENOSYS;
 	switch (cmd) {
 	case LOOP_CTL_ADD:
 		ret = loop_lookup(&lo, parm);
@@ -2040,21 +2104,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 		ret = loop_lookup(&lo, parm);
 		if (ret < 0)
 			break;
-		ret = mutex_lock_killable(&lo->lo_ctl_mutex);
-		if (ret)
-			break;
 		if (lo->lo_state != Lo_unbound) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		if (atomic_read(&lo->lo_refcnt) > 0) {
 			ret = -EBUSY;
-			mutex_unlock(&lo->lo_ctl_mutex);
 			break;
 		}
 		lo->lo_disk->private_data = NULL;
-		mutex_unlock(&lo->lo_ctl_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
 		break;
@@ -2064,7 +2122,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 			break;
 		ret = loop_add(&lo, -1);
 	}
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);

 	return ret;
 }
@@ -2148,10 +2206,10 @@ static int __init loop_init(void)
 					  THIS_MODULE, loop_probe, NULL, NULL);

 	/* pre-create number of devices given by config or max_loop */
-	mutex_lock(&loop_index_mutex);
+	mutex_lock(&loop_ctl_mutex);
 	for (i = 0; i < nr; i++)
 		loop_add(&lo, i);
-	mutex_unlock(&loop_index_mutex);
+	mutex_unlock(&loop_ctl_mutex);

 	printk(KERN_INFO "loop: module loaded\n");
 	return 0;

diff --git a/drivers/block/loop.h b/drivers/block/loop.h
@@ -54,7 +54,6 @@ struct loop_device {

 	spinlock_t		lo_lock;
 	int			lo_state;
-	struct mutex		lo_ctl_mutex;
 	struct kthread_worker	worker;
 	struct task_struct	*worker_task;
 	bool			use_dio;

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
 	set_capacity(nbd->disk, config->bytesize >> 9);
 	if (bdev) {
-		if (bdev->bd_disk)
+		if (bdev->bd_disk) {
 			bd_set_size(bdev, config->bytesize);
-		else
+			set_blocksize(bdev, config->blksize);
+		} else
 			bdev->bd_invalidated = 1;
 		bdput(bdev);
 	}

diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
@@ -681,6 +681,7 @@ config CRYPTO_DEV_BCM_SPU
 	depends on ARCH_BCM_IPROC
 	depends on MAILBOX
 	default m
+	select CRYPTO_AUTHENC
 	select CRYPTO_DES
 	select CRYPTO_MD5
 	select CRYPTO_SHA1

diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
@@ -2845,44 +2845,28 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher);
 	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
-	struct rtattr *rta = (void *)key;
-	struct crypto_authenc_key_param *param;
-	const u8 *origkey = key;
-	const unsigned int origkeylen = keylen;
-
-	int ret = 0;
+	struct crypto_authenc_keys keys;
+	int ret;

 	flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key,
 		 keylen);
 	flow_dump("  key: ", key, keylen);

-	if (!RTA_OK(rta, keylen))
-		goto badkey;
-	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-		goto badkey;
-	if (RTA_PAYLOAD(rta) < sizeof(*param))
+	ret = crypto_authenc_extractkeys(&keys, key, keylen);
+	if (ret)
 		goto badkey;

-	param = RTA_DATA(rta);
-	ctx->enckeylen = be32_to_cpu(param->enckeylen);
-
-	key += RTA_ALIGN(rta->rta_len);
-	keylen -= RTA_ALIGN(rta->rta_len);
-
-	if (keylen < ctx->enckeylen)
-		goto badkey;
-	if (ctx->enckeylen > MAX_KEY_SIZE)
+	if (keys.enckeylen > MAX_KEY_SIZE ||
+	    keys.authkeylen > MAX_KEY_SIZE)
 		goto badkey;

-	ctx->authkeylen = keylen - ctx->enckeylen;
-
-	if (ctx->authkeylen > MAX_KEY_SIZE)
-		goto badkey;
+	ctx->enckeylen = keys.enckeylen;
+	ctx->authkeylen = keys.authkeylen;

-	memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen);
+	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
 	/* May end up padding auth key. So make sure it's zeroed. */
 	memset(ctx->authkey, 0, sizeof(ctx->authkey));
-	memcpy(ctx->authkey, key, ctx->authkeylen);
+	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);

 	switch (ctx->alg->cipher_info.alg) {
 	case CIPHER_ALG_DES:
@@ -2890,7 +2874,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 			u32 tmp[DES_EXPKEY_WORDS];
 			u32 flags = CRYPTO_TFM_RES_WEAK_KEY;

-			if (des_ekey(tmp, key) == 0) {
+			if (des_ekey(tmp, keys.enckey) == 0) {
 				if (crypto_aead_get_flags(cipher) &
 				    CRYPTO_TFM_REQ_WEAK_KEY) {
 					crypto_aead_set_flags(cipher, flags);
@@ -2905,7 +2889,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		break;
 	case CIPHER_ALG_3DES:
 		if (ctx->enckeylen == (DES_KEY_SIZE * 3)) {
-			const u32 *K = (const u32 *)key;
+			const u32 *K = (const u32 *)keys.enckey;
 			u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED;

 			if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
@@ -2956,9 +2940,7 @@ static int aead_authenc_setkey(struct crypto_aead *cipher,
 		ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 		ctx->fallback_cipher->base.crt_flags |=
 		    tfm->crt_flags & CRYPTO_TFM_REQ_MASK;
-		ret =
-		    crypto_aead_setkey(ctx->fallback_cipher, origkey,
-				       origkeylen);
+		ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen);
 		if (ret) {
 			flow_log("  fallback setkey() returned:%d\n", ret);
 			tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;

diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
@@ -1131,13 +1131,16 @@ static int ahash_final_no_ctx(struct ahash_request *req)

 	desc = edesc->hw_desc;

-	state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, state->buf_dma)) {
-		dev_err(jrdev, "unable to map src\n");
-		goto unmap;
-	}
+	if (buflen) {
+		state->buf_dma = dma_map_single(jrdev, buf, buflen,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(jrdev, state->buf_dma)) {
+			dev_err(jrdev, "unable to map src\n");
+			goto unmap;
+		}

-	append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+		append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+	}

 	edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
 						digestsize);

diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
@@ -540,13 +540,12 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 			  unsigned int keylen)
 {
 	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
-	struct rtattr *rta = (struct rtattr *)key;
 	struct cc_crypto_req cc_req = {};
-	struct crypto_authenc_key_param *param;
 	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
-	int rc = -EINVAL;
 	unsigned int seq_len = 0;
 	struct device *dev = drvdata_to_dev(ctx->drvdata);
+	const u8 *enckey, *authkey;
+	int rc;

 	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
@@ -554,35 +553,33 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_0: Init and sanity checks */

 	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
-		if (!RTA_OK(rta, keylen))
+		struct crypto_authenc_keys keys;
+
+		rc = crypto_authenc_extractkeys(&keys, key, keylen);
+		if (rc)
 			goto badkey;
-		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-			goto badkey;
-		if (RTA_PAYLOAD(rta) < sizeof(*param))
-			goto badkey;
-		param = RTA_DATA(rta);
-		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
-		key += RTA_ALIGN(rta->rta_len);
-		keylen -= RTA_ALIGN(rta->rta_len);
-		if (keylen < ctx->enc_keylen)
-			goto badkey;
-		ctx->auth_keylen = keylen - ctx->enc_keylen;
+		enckey = keys.enckey;
+		authkey = keys.authkey;
+		ctx->enc_keylen = keys.enckeylen;
+		ctx->auth_keylen = keys.authkeylen;

 		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 			/* the nonce is stored in bytes at end of key */
+			rc = -EINVAL;
 			if (ctx->enc_keylen <
 			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 				goto badkey;
 			/* Copy nonce from last 4 bytes in CTR key to
 			 *  first 4 bytes in CTR IV
 			 */
-			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
-			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
-			       CTR_RFC3686_NONCE_SIZE);
+			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
+			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 			/* Set CTR key size */
 			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 		}
 	} else { /* non-authenc - has just one key */
+		enckey = key;
+		authkey = NULL;
 		ctx->enc_keylen = keylen;
 		ctx->auth_keylen = 0;
 	}
@@ -594,13 +591,14 @@ static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	/* STAT_PHASE_1: Copy key to ctx */

 	/* Get key material */
-	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
+	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 	if (ctx->enc_keylen == 24)
 		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
-		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
+		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
+		       ctx->auth_keylen);
 	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
-		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
+		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 		if (rc)
 			goto badkey;
 	}

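[Illustration, not part of the patch] The authenc() key blob that crypto_authenc_extractkeys() parses above is an rtattr header of type CRYPTO_AUTHENC_KEYA_PARAM carrying a big-endian enckeylen, followed by the authentication key, with the encryption key at the tail. A minimal standalone C sketch of that split, using userspace stand-ins for the kernel rtattr helpers (the *_hdr/*_DEMO names are inventions of this sketch):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>  /* ntohl()/htonl() */

    /* mirrors struct rtattr + struct crypto_authenc_key_param */
    struct rtattr_hdr { uint16_t rta_len; uint16_t rta_type; };
    #define CRYPTO_AUTHENC_KEYA_PARAM 1
    #define RTA_ALIGN_DEMO(len) (((len) + 3U) & ~3U)

    /* split key blob into auth key and enc key; 0 on success */
    static int authenc_extractkeys(const uint8_t *key, unsigned int keylen,
                                   const uint8_t **authkey, unsigned int *authkeylen,
                                   const uint8_t **enckey, unsigned int *enckeylen)
    {
        struct rtattr_hdr rta;
        uint32_t enclen_be;

        if (keylen < sizeof(rta) + sizeof(enclen_be))
            return -1;
        memcpy(&rta, key, sizeof(rta));
        if (rta.rta_type != CRYPTO_AUTHENC_KEYA_PARAM ||
            rta.rta_len < sizeof(rta) + sizeof(enclen_be) ||
            RTA_ALIGN_DEMO(rta.rta_len) > keylen)
            return -1;
        memcpy(&enclen_be, key + sizeof(rta), sizeof(enclen_be));
        *enckeylen = ntohl(enclen_be);            /* stored big-endian */

        key += RTA_ALIGN_DEMO(rta.rta_len);       /* skip the parameter blob */
        keylen -= RTA_ALIGN_DEMO(rta.rta_len);
        if (keylen < *enckeylen)
            return -1;
        *authkeylen = keylen - *enckeylen;        /* enc key sits at the tail */
        *authkey = key;
        *enckey = key + *authkeylen;
        return 0;
    }

    int main(void)
    {
        uint8_t blob[8 + 20 + 16] = { 0 };        /* param + 20B auth + 16B enc */
        struct rtattr_hdr rta = { .rta_len = 8,
                                  .rta_type = CRYPTO_AUTHENC_KEYA_PARAM };
        uint32_t enclen_be = htonl(16);
        const uint8_t *ak, *ek;
        unsigned int akl, ekl;

        memcpy(blob, &rta, sizeof(rta));
        memcpy(blob + sizeof(rta), &enclen_be, sizeof(enclen_be));
        if (!authenc_extractkeys(blob, sizeof(blob), &ak, &akl, &ek, &ekl))
            printf("authkeylen=%u enckeylen=%u\n", akl, ekl);  /* 20 and 16 */
        return 0;
    }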

diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
@@ -1361,23 +1361,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
-	void *err;
 
 	if (cryptlen + authsize > max_len) {
 		dev_err(dev, "length exceeds h/w max limit\n");
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (ivsize)
-		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
-
 	if (!dst || dst == src) {
 		src_len = assoclen + cryptlen + authsize;
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_nents = dst ? src_nents : 0;
@@ -1387,16 +1382,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 		src_nents = sg_nents_for_len(src, src_len);
 		if (src_nents < 0) {
 			dev_err(dev, "Invalid number of src SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		src_nents = (src_nents == 1) ? 0 : src_nents;
 		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
 		dst_nents = sg_nents_for_len(dst, dst_len);
 		if (dst_nents < 0) {
 			dev_err(dev, "Invalid number of dst SG.\n");
-			err = ERR_PTR(-EINVAL);
-			goto error_sg;
+			return ERR_PTR(-EINVAL);
 		}
 		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
 	}
@@ -1423,11 +1416,14 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	/* if its a ahash, add space for a second desc next to the first one */
 	if (is_sec1 && !dst)
 		alloc_len += sizeof(struct talitos_desc);
+	alloc_len += ivsize;
 
 	edesc = kmalloc(alloc_len, GFP_DMA | flags);
-	if (!edesc) {
-		err = ERR_PTR(-ENOMEM);
-		goto error_sg;
+	if (!edesc)
+		return ERR_PTR(-ENOMEM);
+	if (ivsize) {
+		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
+		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
 	}
 	memset(&edesc->desc, 0, sizeof(edesc->desc));
@@ -1445,10 +1441,6 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 					     DMA_BIDIRECTIONAL);
 	}
 	return edesc;
-error_sg:
-	if (iv_dma)
-		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
-	return err;
 }
 
 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,


diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
@@ -1690,9 +1690,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_framebuffer *fb = fb_helper->fb;
 
-	if (var->pixclock != 0 || in_dbg_master())
+	if (in_dbg_master())
 		return -EINVAL;
 
+	if (var->pixclock != 0) {
+		DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+		var->pixclock = 0;
+	}
+
 	/*
 	 * Changes struct fb_var_screeninfo are currently not pushed back
 	 * to KMS, hence fail if different settings are requested.


diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
 
 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }

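[Illustration, not part of the patch] The mmap offset validated above follows the VFIO convention: the region (BAR) index lives in vm_pgoff at and above VFIO_PCI_OFFSET_SHIFT, and only the low bits are a page offset into that region, which must land inside the vGPU aperture before being rebased onto the host aperture. A standalone sketch of the decoding; the example offset and the 4K PAGE_SHIFT are assumptions of this demo:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT            12
    #define VFIO_PCI_OFFSET_SHIFT 40   /* BAR index lives above this bit */

    int main(void)
    {
        /* hypothetical mmap at BAR2 + 0x3000 */
        uint64_t vm_pgoff = (2ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT))
                          | (0x3000ULL >> PAGE_SHIFT);

        uint64_t index = vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
        uint64_t pgoff = vm_pgoff &
                         ((1ULL << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);

        /* req_start is what the patch range-checks against the aperture */
        printf("index=%llu req_start=%#llx\n",
               (unsigned long long)index,
               (unsigned long long)(pgoff << PAGE_SHIFT));
        return 0;
    }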

diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
@@ -579,10 +579,6 @@ static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
 	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
 			      atomic_read(&pd->usecnt), RDMA_NLDEV_ATTR_PAD))
 		goto err;
-	if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
-			pd->unsafe_global_rkey))
-		goto err;
 
 	if (fill_res_name_pid(msg, res))
 		goto err;


diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -427,7 +427,40 @@ static inline enum ib_qp_state pvrdma_qp_state_to_ib(enum pvrdma_qp_state state)
 
 static inline enum pvrdma_wr_opcode ib_wr_opcode_to_pvrdma(enum ib_wr_opcode op)
 {
-	return (enum pvrdma_wr_opcode)op;
+	switch (op) {
+	case IB_WR_RDMA_WRITE:
+		return PVRDMA_WR_RDMA_WRITE;
+	case IB_WR_RDMA_WRITE_WITH_IMM:
+		return PVRDMA_WR_RDMA_WRITE_WITH_IMM;
+	case IB_WR_SEND:
+		return PVRDMA_WR_SEND;
+	case IB_WR_SEND_WITH_IMM:
+		return PVRDMA_WR_SEND_WITH_IMM;
+	case IB_WR_RDMA_READ:
+		return PVRDMA_WR_RDMA_READ;
+	case IB_WR_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_ATOMIC_CMP_AND_SWP;
+	case IB_WR_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_LSO:
+		return PVRDMA_WR_LSO;
+	case IB_WR_SEND_WITH_INV:
+		return PVRDMA_WR_SEND_WITH_INV;
+	case IB_WR_RDMA_READ_WITH_INV:
+		return PVRDMA_WR_RDMA_READ_WITH_INV;
+	case IB_WR_LOCAL_INV:
+		return PVRDMA_WR_LOCAL_INV;
+	case IB_WR_REG_MR:
+		return PVRDMA_WR_FAST_REG_MR;
+	case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+		return PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP;
+	case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
+		return PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+	case IB_WR_REG_SIG_MR:
+		return PVRDMA_WR_REG_SIG_MR;
+	default:
+		return PVRDMA_WR_ERROR;
+	}
 }
 
 static inline enum ib_wc_status pvrdma_wc_status_to_ib(


diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -721,6 +721,12 @@ int pvrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
 			wqe_hdr->ex.imm_data = wr->ex.imm_data;
 
+		if (unlikely(wqe_hdr->opcode == PVRDMA_WR_ERROR)) {
+			*bad_wr = wr;
+			ret = -EINVAL;
+			goto out;
+		}
+
 		switch (qp->ibqp.qp_type) {
 		case IB_QPT_GSI:
 		case IB_QPT_UD:


diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -1933,9 +1933,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 			return -EINVAL;
 		}
 	}
+
+	mutex_lock(&q->mmap_lock);
+
 	if (vb2_fileio_is_active(q)) {
 		dprintk(1, "mmap: file io in progress\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
 	/*
@@ -1943,7 +1947,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 	 */
 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
 	if (ret)
-		return ret;
+		goto unlock;
 
 	vb = q->bufs[buffer];
@@ -1956,11 +1960,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
 	if (length < (vma->vm_end - vma->vm_start)) {
 		dprintk(1,
 			"MMAP invalid, as it would overflow buffer length\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	mutex_lock(&q->mmap_lock);
 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
 	mutex_unlock(&q->mmap_lock);
 	if (ret)
 		return ret;


diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -865,8 +865,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
 			"%s-vid-cap", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_cap)) {
+		int err = PTR_ERR(dev->kthread_vid_cap);
+
+		dev->kthread_vid_cap = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_cap);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);


diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -236,8 +236,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
 			"%s-vid-out", dev->v4l2_dev.name);
 
 	if (IS_ERR(dev->kthread_vid_out)) {
+		int err = PTR_ERR(dev->kthread_vid_out);
+
+		dev->kthread_vid_out = NULL;
 		v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
-		return PTR_ERR(dev->kthread_vid_out);
+		return err;
 	}
 	*pstreaming = true;
 	vivid_grab_controls(dev, true);


diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
 	.type = V4L2_DV_BT_656_1120,
 	/* keep this initialization for compatibility with GCC < 4.4.6 */
 	.reserved = { 0 },
-	V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+	V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
 		V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
 		V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
 		V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)


diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -286,6 +286,7 @@ static void v4l_print_format(const void *arg, bool write_only)
 	const struct v4l2_window *win;
 	const struct v4l2_sdr_format *sdr;
 	const struct v4l2_meta_format *meta;
+	u32 planes;
 	unsigned i;
 
 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -316,7 +317,8 @@ static void v4l_print_format(const void *arg, bool write_only)
 			prt_names(mp->field, v4l2_field_names),
 			mp->colorspace, mp->num_planes, mp->flags,
 			mp->ycbcr_enc, mp->quantization, mp->xfer_func);
-		for (i = 0; i < mp->num_planes; i++)
+		planes = min_t(u32, mp->num_planes, VIDEO_MAX_PLANES);
+		for (i = 0; i < planes; i++)
 			printk(KERN_DEBUG "plane %u: bytesperline=%u sizeimage=%u\n", i,
 					mp->plane_fmt[i].bytesperline,
 					mp->plane_fmt[i].sizeimage);

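[Illustration, not part of the patch] mp->num_planes arrives from userspace, so the debug loop above has to clamp it before indexing plane_fmt[], which is what the min_t() bound does. A trivial standalone sketch of the clamp (demo values made up; VIDEO_MAX_PLANES is 8 in this tree):

    #include <stdio.h>

    #define VIDEO_MAX_PLANES 8

    /* clamp a userspace-supplied count before using it as a loop bound */
    static unsigned clamp_planes(unsigned num_planes)
    {
        return num_planes < VIDEO_MAX_PLANES ? num_planes : VIDEO_MAX_PLANES;
    }

    int main(void)
    {
        unsigned from_user = 4096;   /* bogus value a malicious caller could pass */
        printf("iterate %u planes\n", clamp_planes(from_user));  /* 8, not 4096 */
        return 0;
    }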

diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
@@ -592,6 +592,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
 	return 0;
 }
 
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		disable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+	struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+	if (tps6586x->client->irq)
+		enable_irq(tps6586x->client->irq);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+			 tps6586x_i2c_resume);
+
 static const struct i2c_device_id tps6586x_id_table[] = {
 	{ "tps6586x", 0 },
 	{ },
@@ -602,6 +625,7 @@ static struct i2c_driver tps6586x_driver = {
 	.driver	= {
 		.name	= "tps6586x",
 		.of_match_table = of_match_ptr(tps6586x_of_match),
+		.pm	= &tps6586x_pm_ops,
 	},
 	.probe		= tps6586x_i2c_probe,
 	.remove		= tps6586x_i2c_remove,


diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
@@ -258,6 +258,8 @@ struct sdhci_msm_host {
 	bool mci_removed;
 	const struct sdhci_msm_variant_ops *var_ops;
 	const struct sdhci_msm_offset *offset;
+	bool use_cdr;
+	u32 transfer_mode;
 };
 
 static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
@@ -1025,6 +1027,26 @@ static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
 	return ret;
 }
 
+static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
+{
+	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
+	u32 config, oldconfig = readl_relaxed(host->ioaddr +
+					      msm_offset->core_dll_config);
+
+	config = oldconfig;
+	if (enable) {
+		config |= CORE_CDR_EN;
+		config &= ~CORE_CDR_EXT_EN;
+	} else {
+		config &= ~CORE_CDR_EN;
+		config |= CORE_CDR_EXT_EN;
+	}
+
+	if (config != oldconfig)
+		writel_relaxed(config, host->ioaddr +
+			       msm_offset->core_dll_config);
+}
+
 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct sdhci_host *host = mmc_priv(mmc);
@@ -1042,8 +1064,14 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
 	if (host->clock <= CORE_FREQ_100MHZ ||
 	    !(ios.timing == MMC_TIMING_MMC_HS400 ||
 	    ios.timing == MMC_TIMING_MMC_HS200 ||
-	    ios.timing == MMC_TIMING_UHS_SDR104))
+	    ios.timing == MMC_TIMING_UHS_SDR104)) {
+		msm_host->use_cdr = false;
+		sdhci_msm_set_cdr(host, false);
 		return 0;
+	}
+
+	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
+	msm_host->use_cdr = true;
 
 	/*
 	 * For HS400 tuning in HS200 timing requires:
@@ -1525,6 +1553,19 @@ static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
 	case SDHCI_POWER_CONTROL:
 		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
 		break;
+	case SDHCI_TRANSFER_MODE:
+		msm_host->transfer_mode = val;
+		break;
+	case SDHCI_COMMAND:
+		if (!msm_host->use_cdr)
+			break;
+		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
+		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
+		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
+			sdhci_msm_set_cdr(host, true);
+		else
+			sdhci_msm_set_cdr(host, false);
+		break;
 	}
 
 	if (req_type) {


diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
@@ -1947,6 +1947,9 @@ static int __bond_release_one(struct net_device *bond_dev,
 	if (!bond_has_slaves(bond)) {
 		bond_set_carrier(bond);
 		eth_hw_addr_random(bond_dev);
+		bond->nest_level = SINGLE_DEPTH_NESTING;
+	} else {
+		bond->nest_level = dev_get_nest_level(bond_dev) + 1;
 	}
 
 	unblock_netpoll_tx();


diff --git a/drivers/net/dsa/realtek-smi.c b/drivers/net/dsa/realtek-smi.c
@@ -347,16 +347,17 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
 	struct device_node *mdio_np;
 	int ret;
 
-	mdio_np = of_find_compatible_node(smi->dev->of_node, NULL,
-					  "realtek,smi-mdio");
+	mdio_np = of_get_compatible_child(smi->dev->of_node, "realtek,smi-mdio");
 	if (!mdio_np) {
 		dev_err(smi->dev, "no MDIO bus node\n");
 		return -ENODEV;
 	}
 
 	smi->slave_mii_bus = devm_mdiobus_alloc(smi->dev);
-	if (!smi->slave_mii_bus)
-		return -ENOMEM;
+	if (!smi->slave_mii_bus) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
 	smi->slave_mii_bus->priv = smi;
 	smi->slave_mii_bus->name = "SMI slave MII";
 	smi->slave_mii_bus->read = realtek_smi_mdio_read;
@@ -371,10 +372,15 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
 	if (ret) {
 		dev_err(smi->dev, "unable to register MDIO bus %s\n",
 			smi->slave_mii_bus->id);
-		of_node_put(mdio_np);
+		goto err_put_node;
 	}
 
 	return 0;
+
+err_put_node:
+	of_node_put(mdio_np);
+	return ret;
 }
 
 static int realtek_smi_probe(struct platform_device *pdev)
@@ -457,6 +463,8 @@ static int realtek_smi_remove(struct platform_device *pdev)
 	struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
 
 	dsa_unregister_switch(smi->ds);
+	if (smi->slave_mii_bus)
+		of_node_put(smi->slave_mii_bus->dev.of_node);
 	gpiod_set_value(smi->reset, 1);
 
 	return 0;


diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -962,13 +962,10 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
 
 		memset(&ksettings, 0, sizeof(ksettings));
 		phy_ethtool_get_link_ksettings(netdev, &ksettings);
-		local_advertisement = phy_read(phydev, MII_ADVERTISE);
-		if (local_advertisement < 0)
-			return;
-
-		remote_advertisement = phy_read(phydev, MII_LPA);
-		if (remote_advertisement < 0)
-			return;
+		local_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->advertising);
+		remote_advertisement =
+			ethtool_adv_to_mii_adv_t(phydev->lp_advertising);
 
 		lan743x_phy_update_flowcontrol(adapter,
 					       ksettings.base.duplex,


diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
@@ -717,6 +717,7 @@ module_param(use_dac, int, 0);
 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_SOFTDEP("pre: realtek");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_8168D_1);
 MODULE_FIRMWARE(FIRMWARE_8168D_2);
@@ -1730,11 +1731,13 @@ static bool rtl8169_reset_counters(struct rtl8169_private *tp)
 
 static bool rtl8169_update_counters(struct rtl8169_private *tp)
 {
+	u8 val = RTL_R8(tp, ChipCmd);
+
 	/*
 	 * Some chips are unable to dump tally counters when the receiver
-	 * is disabled.
+	 * is disabled. If 0xff chip may be in a PCI power-save state.
 	 */
-	if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
+	if (!(val & CmdRxEnb) || val == 0xff)
 		return true;
 
 	return rtl8169_do_counters(tp, CounterDump);


diff --git a/drivers/net/tun.c b/drivers/net/tun.c
@@ -859,10 +859,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 		err = 0;
 	}
 
-	rcu_assign_pointer(tfile->tun, tun);
-	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
-	tun->numqueues++;
-
 	if (tfile->detached) {
 		tun_enable_queue(tfile);
 	} else {
@@ -876,6 +872,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
 	 * refcnt.
 	 */
 
+	/* Publish tfile->tun and tun->tfiles only after we've fully
+	 * initialized tfile; otherwise we risk using half-initialized
+	 * object.
+	 */
+	rcu_assign_pointer(tfile->tun, tun);
+	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+	tun->numqueues++;
 out:
 	return err;
 }

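[Illustration, not part of the patch] The reordering works because rcu_assign_pointer() is a release store: every initializing store to tfile is ordered before the pointer publication, so a reader that loads the pointer via rcu_dereference() can never see a half-initialized object. A minimal C11 sketch of the same publish-after-init pattern, with release/acquire atomics standing in for the RCU primitives:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct tfile_demo { int initialized; /* ... queue state ... */ };

    static _Atomic(struct tfile_demo *) published;

    /* publish-after-init: all stores to *t happen-before the pointer store */
    static void attach(void)
    {
        struct tfile_demo *t = malloc(sizeof(*t));
        if (!t)
            return;
        t->initialized = 1;                               /* full init first */
        atomic_store_explicit(&published, t, memory_order_release);
    }

    /* readers pair with an acquire load (rcu_dereference() in the kernel) */
    static int reader_sees_initialized(void)
    {
        struct tfile_demo *t =
            atomic_load_explicit(&published, memory_order_acquire);
        return t ? t->initialized : -1;                   /* never sees 0 */
    }

    int main(void)
    {
        attach();
        return reader_sees_initialized() == 1 ? 0 : 1;
    }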

diff --git a/drivers/of/property.c b/drivers/of/property.c
@@ -806,6 +806,7 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node,
 
 	if (!of_device_is_available(remote)) {
 		pr_debug("not available for remote node\n");
+		of_node_put(remote);
 		return NULL;
 	}


diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
@@ -79,8 +79,22 @@ static int scsi_dev_type_resume(struct device *dev,
 
 	if (err == 0) {
 		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
+		err = pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		/*
+		 * Forcibly set runtime PM status of request queue to "active"
+		 * to make sure we can again get requests from the queue
+		 * (see also blk_pm_peek_request()).
+		 *
+		 * The resume hook will correct runtime PM status of the disk.
+		 */
+		if (!err && scsi_is_sdev_device(dev)) {
+			struct scsi_device *sdev = to_scsi_device(dev);
+
+			if (sdev->request_queue->dev)
+				blk_set_runtime_active(sdev->request_queue);
+		}
 	}
 
 	return err;
@@ -139,16 +153,6 @@ static int scsi_bus_resume_common(struct device *dev,
 	else
 		fn = NULL;
 
-	/*
-	 * Forcibly set runtime PM status of request queue to "active" to
-	 * make sure we can again get requests from the queue (see also
-	 * blk_pm_peek_request()).
-	 *
-	 * The resume hook will correct runtime PM status of the disk.
-	 */
-	if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
-		blk_set_runtime_active(to_scsi_device(dev)->request_queue);
-
 	if (fn) {
 		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);


diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
@@ -205,6 +205,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
 	sp = buffer_data[0] & 0x80 ? 1 : 0;
 	buffer_data[0] &= ~0x80;
 
+	/*
+	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+	 * received mode parameter buffer before doing MODE SELECT.
+	 */
+	data.device_specific = 0;
+
 	if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
 			     SD_MAX_RETRIES, &data, &sshdr)) {
 		if (scsi_sense_valid(&sshdr))


diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@ static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
 	mutex_unlock(&cdev_list_lock);
 }
 
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
 void cxgbit_free_np(struct iscsi_np *np)
 {
 	struct cxgbit_np *cnp = np->np_context;
+	struct cxgbit_sock *csk, *tmp;
 
 	cnp->com.state = CSK_STATE_DEAD;
 	if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
 	else
 		cxgbit_free_all_np(cnp);
 
+	spin_lock_bh(&cnp->np_accept_lock);
+	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+		list_del_init(&csk->accept_node);
+		__cxgbit_free_conn(csk);
+	}
+	spin_unlock_bh(&cnp->np_accept_lock);
+
 	np->np_context = NULL;
 	cxgbit_put_cnp(cnp);
 }
@@ -708,9 +718,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
 			      csk->tid, 600, __func__);
 }
 
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
 {
-	struct cxgbit_sock *csk = conn->context;
+	struct iscsi_conn *conn = csk->conn;
 	bool release = false;
 
 	pr_debug("%s: state %d\n",
@@ -719,7 +729,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
 	spin_lock_bh(&csk->lock);
 	switch (csk->com.state) {
 	case CSK_STATE_ESTABLISHED:
-		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
 			csk->com.state = CSK_STATE_CLOSING;
 			cxgbit_send_halfclose(csk);
 		} else {
@@ -744,6 +754,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
 		cxgbit_put_csk(csk);
 }
 
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+	__cxgbit_free_conn(conn->context);
+}
+
 static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
 {
 	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -806,6 +821,7 @@ void _cxgbit_free_csk(struct kref *kref)
 	spin_unlock_bh(&cdev->cskq.lock);
 
 	cxgbit_free_skb(csk);
+	cxgbit_put_cnp(csk->cnp);
 	cxgbit_put_cdev(cdev);
 
 	kfree(csk);
@@ -1354,6 +1370,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
 		goto rel_skb;
 	}
 
+	cxgbit_get_cnp(cnp);
 	cxgbit_get_cdev(cdev);
 
 	spin_lock(&cdev->cskq.lock);


diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
@@ -1255,7 +1255,8 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
 static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
-	int retval;
+	struct tty_ldisc *ld;
+	int retval = 0;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
 	    driver->subtype == PTY_TYPE_MASTER)
@@ -1267,14 +1268,21 @@ static int tty_reopen(struct tty_struct *tty)
 	if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
 		return -EBUSY;
 
-	tty->count++;
+	ld = tty_ldisc_ref_wait(tty);
+	if (ld) {
+		tty_ldisc_deref(ld);
+	} else {
+		retval = tty_ldisc_lock(tty, 5 * HZ);
+		if (retval)
+			return retval;
 
-	if (tty->ldisc)
-		return 0;
+		if (!tty->ldisc)
+			retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+		tty_ldisc_unlock(tty);
+	}
 
-	retval = tty_ldisc_reinit(tty, tty->termios.c_line);
-	if (retval)
-		tty->count--;
+	if (retval == 0)
+		tty->count++;
 
 	return retval;
 }


diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
@@ -293,6 +293,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
 	if (!locked)
 		atomic_long_add_return(-LDSEM_WAIT_BIAS, &sem->count);
 	list_del(&waiter.list);
+
+	/*
+	 * In case of timeout, wake up every reader who gave the right of way
+	 * to writer. Prevent separation readers into two groups:
+	 * one that helds semaphore and another that sleeps.
+	 * (in case of no contention with a writer)
+	 */
+	if (!locked && list_empty(&sem->write_wait))
+		__ldsem_wake_readers(sem);
+
 	raw_spin_unlock_irq(&sem->wait_lock);
 
 	__set_current_state(TASK_RUNNING);


diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -609,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
 
 	int r = 0;
 
+	memset(&p, 0, sizeof(p));
+
 	switch (cmd) {
 	case OMAPFB_SYNC_GFX:
 		DBG("ioctl SYNC_GFX\n");


diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
@@ -1650,7 +1650,7 @@ void xen_callback_vector(void)
 			xen_have_vector_callback = 0;
 			return;
 		}
-		pr_info("Xen HVM callback vector for event delivery is enabled\n");
+		pr_info_once("Xen HVM callback vector for event delivery is enabled\n");
 		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
 				xen_hvm_callback_vector);
 	}
 }


diff --git a/fs/block_dev.c b/fs/block_dev.c
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
+static void set_init_blocksize(struct block_device *bdev)
+{
+	unsigned bsize = bdev_logical_block_size(bdev);
+	loff_t size = i_size_read(bdev->bd_inode);
+
+	while (bsize < PAGE_SIZE) {
+		if (size & bsize)
+			break;
+		bsize <<= 1;
+	}
+	bdev->bd_block_size = bsize;
+	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
 int set_blocksize(struct block_device *bdev, int size)
 {
 	/* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1408,18 +1422,9 @@ EXPORT_SYMBOL(check_disk_change);
 
 void bd_set_size(struct block_device *bdev, loff_t size)
 {
-	unsigned bsize = bdev_logical_block_size(bdev);
-
 	inode_lock(bdev->bd_inode);
 	i_size_write(bdev->bd_inode, size);
 	inode_unlock(bdev->bd_inode);
-
-	while (bsize < PAGE_SIZE) {
-		if (size & bsize)
-			break;
-		bsize <<= 1;
-	}
-	bdev->bd_block_size = bsize;
-	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 EXPORT_SYMBOL(bd_set_size);
 
@@ -1496,8 +1501,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 				}
 			}
 
-			if (!ret)
+			if (!ret) {
 				bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+				set_init_blocksize(bdev);
+			}
 
 			/*
 			 * If the device is invalidated, rescan partition
@@ -1532,6 +1539,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 				goto out_clear;
 			}
 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+			set_init_blocksize(bdev);
 		}
 
 		if (bdev->bd_bdi == &noop_backing_dev_info)

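[Illustration, not part of the patch] set_init_blocksize() keeps doubling from the logical block size and stops at the first power of two that no longer divides the device size (or at PAGE_SIZE). A standalone sketch of that rule with two worked inputs; the helper name and values exist only for this demo:

    #include <stdio.h>

    #define PAGE_SIZE_DEMO 4096U

    /* largest power-of-two block size <= PAGE_SIZE that divides the size */
    static unsigned pick_blocksize(unsigned logical, unsigned long long size)
    {
        unsigned bsize = logical;

        while (bsize < PAGE_SIZE_DEMO) {
            if (size & bsize)      /* size not a multiple of the next doubling */
                break;
            bsize <<= 1;
        }
        return bsize;
    }

    int main(void)
    {
        /* 7 sectors of 512B: 3584 & 512 != 0, so we stay at 512 */
        printf("%u\n", pick_blocksize(512, 7ULL * 512));   /* 512 */
        /* 8 sectors: doubles all the way to the 4096 page size */
        printf("%u\n", pick_blocksize(512, 8ULL * 512));   /* 4096 */
        return 0;
    }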

diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
@@ -4155,6 +4155,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
 		spin_lock(&fs_info->ordered_root_lock);
 	}
 	spin_unlock(&fs_info->ordered_root_lock);
+
+	/*
+	 * We need this here because if we've been flipped read-only we won't
+	 * get sync() from the umount, so we need to make sure any ordered
+	 * extents that haven't had their dirty pages IO start writeout yet
+	 * actually get run and error out properly.
+	 */
+	btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 }
 
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,


diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
@@ -3151,9 +3151,6 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	/* Try to release some metadata so we don't get an OOM but don't wait */
-	btrfs_btree_balance_dirty_nodelay(fs_info);
-
 	return ret;
 }


diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
@@ -713,18 +713,15 @@ static int ramoops_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct ramoops_platform_data *pdata = dev->platform_data;
+	struct ramoops_platform_data pdata_local;
 	struct ramoops_context *cxt = &oops_cxt;
 	size_t dump_mem_sz;
 	phys_addr_t paddr;
 	int err = -EINVAL;
 
 	if (dev_of_node(dev) && !pdata) {
-		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-		if (!pdata) {
-			pr_err("cannot allocate platform data buffer\n");
-			err = -ENOMEM;
-			goto fail_out;
-		}
+		pdata = &pdata_local;
+		memset(pdata, 0, sizeof(*pdata));
 
 		err = ramoops_parse_dt(pdev, pdata);
 		if (err < 0)


diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
@@ -6,6 +6,7 @@
 
 struct bcma_soc {
 	struct bcma_bus bus;
+	struct device *dev;
 };
 
 int __init bcma_host_soc_register(struct bcma_soc *soc);


diff --git a/include/linux/genhd.h b/include/linux/genhd.h
@@ -129,7 +129,7 @@ struct hd_struct {
 	struct disk_stats dkstats;
 #endif
 	struct percpu_ref ref;
-	struct rcu_head rcu_head;
+	struct rcu_work rcu_work;
 };
 
 #define GENHD_FL_REMOVABLE			1


diff --git a/include/net/netfilter/nf_conncount.h b/include/net/netfilter/nf_conncount.h
@@ -5,17 +5,10 @@
 
 struct nf_conncount_data;
 
-enum nf_conncount_list_add {
-	NF_CONNCOUNT_ADDED, 	/* list add was ok */
-	NF_CONNCOUNT_ERR,	/* -ENOMEM, must drop skb */
-	NF_CONNCOUNT_SKIP,	/* list is already reclaimed by gc */
-};
-
 struct nf_conncount_list {
 	spinlock_t list_lock;
 	struct list_head head;	/* connections with the same filtering key */
 	unsigned int count;	/* length of list */
-	bool dead;
 };
 
 struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
@@ -29,18 +22,12 @@ unsigned int nf_conncount_count(struct net *net,
 				const struct nf_conntrack_tuple *tuple,
 				const struct nf_conntrack_zone *zone);
 
-void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit);
+int nf_conncount_add(struct net *net, struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone);
 
 void nf_conncount_list_init(struct nf_conncount_list *list);
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone);
-
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list);


diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -78,6 +78,7 @@ enum pvrdma_wr_opcode {
 	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
 	PVRDMA_WR_BIND_MW,
 	PVRDMA_WR_REG_SIG_MR,
+	PVRDMA_WR_ERROR,
 };
 
 enum pvrdma_wc_status {


diff --git a/init/Kconfig b/init/Kconfig
@@ -1125,6 +1125,7 @@ config LD_DEAD_CODE_DATA_ELIMINATION
 	bool "Dead code and data elimination (EXPERIMENTAL)"
 	depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
 	depends on EXPERT
+	depends on !(FUNCTION_TRACER && CC_IS_GCC && GCC_VERSION < 40800)
 	depends on $(cc-option,-ffunction-sections -fdata-sections)
 	depends on $(ld-option,--gc-sections)
 	help


diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
@@ -52,7 +52,7 @@ u32 int_sqrt64(u64 x)
 	if (x <= ULONG_MAX)
 		return int_sqrt((unsigned long) x);
 
-	m = 1ULL << (fls64(x) & ~1ULL);
+	m = 1ULL << ((fls64(x) - 1) & ~1ULL);
 	while (m != 0) {
 		b = y + m;
 		y >>= 1;

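[Illustration, not part of the patch] fls64() returns the 1-based index of the most significant set bit, so for any x with bit 63 set the old initializer evaluated 1ULL << 64, which is undefined behaviour; subtracting one first yields a shift of at most 62 while still starting from an even bit position as the algorithm requires. A small demo of the two computations (fls64 reimplemented portably here for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* portable stand-in for the kernel's fls64(): 1-based index of top bit */
    static int fls64_demo(uint64_t x)
    {
        int i = 0;
        while (x) { x >>= 1; i++; }
        return i;
    }

    int main(void)
    {
        uint64_t x = 1ULL << 63;                  /* any value with bit 63 set */
        int old_shift = fls64_demo(x) & ~1;       /* 64: shift is undefined */
        int new_shift = (fls64_demo(x) - 1) & ~1; /* 62: in range and even */

        printf("old shift %d (UB for 64-bit), new shift %d -> m=%llx\n",
               old_shift, new_shift,
               (unsigned long long)(1ULL << new_shift));
        return 0;
    }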

diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
@@ -278,7 +278,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
 		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
 		int ret;
 
-		if (neigh->hh.hh_len) {
+		if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
 			neigh_hh_bridge(&neigh->hh, skb);
 			skb->dev = nf_bridge->physindev;
 			ret = br_handle_frame_finish(net, sk, skb);


diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
@@ -1137,14 +1137,16 @@ static int do_replace(struct net *net, const void __user *user,
 	tmp.name[sizeof(tmp.name) - 1] = 0;
 
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
-	newinfo = vmalloc(sizeof(*newinfo) + countersize);
+	newinfo = __vmalloc(sizeof(*newinfo) + countersize, GFP_KERNEL_ACCOUNT,
+			    PAGE_KERNEL);
 	if (!newinfo)
 		return -ENOMEM;
 
 	if (countersize)
 		memset(newinfo->counters, 0, countersize);
 
-	newinfo->entries = vmalloc(tmp.entries_size);
+	newinfo->entries = __vmalloc(tmp.entries_size, GFP_KERNEL_ACCOUNT,
+				     PAGE_KERNEL);
 	if (!newinfo->entries) {
 		ret = -ENOMEM;
 		goto free_newinfo;


diff --git a/net/can/gw.c b/net/can/gw.c
@@ -416,13 +416,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
 	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
 		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
 
-	/* check for checksum updates when the CAN frame has been modified */
+	/* Has the CAN frame been modified? */
 	if (modidx) {
-		if (gwj->mod.csumfunc.crc8)
+		/* get available space for the processed CAN frame type */
+		int max_len = nskb->len - offsetof(struct can_frame, data);
+
+		/* dlc may have changed, make sure it fits to the CAN frame */
+		if (cf->can_dlc > max_len)
+			goto out_delete;
+
+		/* check for checksum updates in classic CAN length only */
+		if (gwj->mod.csumfunc.crc8) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
+
 			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+		}
+
+		if (gwj->mod.csumfunc.xor) {
+			if (cf->can_dlc > 8)
+				goto out_delete;
 
-		if (gwj->mod.csumfunc.xor)
 			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+		}
 	}
 
 	/* clear the skb timestamp if not configured the other way */
@@ -434,6 +450,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
 		gwj->dropped_frames++;
 	else
 		gwj->handled_frames++;
+
+	return;
+
+ out_delete:
+	/* delete frame due to misconfiguration */
+	gwj->deleted_frames++;
+	kfree_skb(nskb);
+	return;
 }
 
 static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)

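[Illustration, not part of the patch] The new bound is derived from the skb that was actually allocated rather than from the (possibly modified) can_dlc, so a gateway modification rule can no longer make later code index past the frame payload. A standalone sketch of the arithmetic for a classic CAN frame; the struct layout is mirrored here only for the demo:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* field layout mirroring struct can_frame (classic CAN, 8-byte payload) */
    struct can_frame_demo {
        uint32_t can_id;
        uint8_t  can_dlc;
        uint8_t  pad, res0, res1;
        uint8_t  data[8];
    };

    int main(void)
    {
        /* a classic frame's skb carries sizeof(struct can_frame) bytes, so
         * the room left for payload is always 8 -- any modified can_dlc > 8
         * would index past the buffer, which the gateway now rejects
         */
        size_t skb_len = sizeof(struct can_frame_demo);
        size_t max_len = skb_len - offsetof(struct can_frame_demo, data);

        printf("payload room = %zu bytes\n", max_len);  /* prints 8 */
        return 0;
    }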

diff --git a/net/core/filter.c b/net/core/filter.c
@@ -2018,18 +2018,19 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
 				 u32 flags)
 {
-	/* skb->mac_len is not set on normal egress */
-	unsigned int mlen = skb->network_header - skb->mac_header;
+	unsigned int mlen = skb_network_offset(skb);
 
-	__skb_pull(skb, mlen);
+	if (mlen) {
+		__skb_pull(skb, mlen);
 
-	/* At ingress, the mac header has already been pulled once.
-	 * At egress, skb_pospull_rcsum has to be done in case that
-	 * the skb is originated from ingress (i.e. a forwarded skb)
-	 * to ensure that rcsum starts at net header.
-	 */
-	if (!skb_at_tc_ingress(skb))
-		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+		/* At ingress, the mac header has already been pulled once.
+		 * At egress, skb_pospull_rcsum has to be done in case that
+		 * the skb is originated from ingress (i.e. a forwarded skb)
+		 * to ensure that rcsum starts at net header.
+		 */
+		if (!skb_at_tc_ingress(skb))
+			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
+	}
 	skb_pop_mac_header(skb);
 	skb_reset_mac_len(skb);
 	return flags & BPF_F_INGRESS ?


diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
@@ -63,6 +63,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
 				     lwt->name ? : "<unknown>");
 			ret = BPF_OK;
 		} else {
+			skb_reset_mac_header(skb);
 			ret = skb_do_redirect(skb);
 			if (ret == 0)
 				ret = BPF_REDIRECT;


diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
@@ -148,19 +148,17 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
+	__be16 _ports[2], *ports;
 	struct sockaddr_in sin;
-	__be16 *ports;
-	int end;
-
-	end = skb_transport_offset(skb) + 4;
-	if (end > 0 && !pskb_may_pull(skb, end))
-		return;
 
 	/* All current transport protocols have the port numbers in the
 	 * first four bytes of the transport header and this function is
 	 * written with this assumption in mind.
 	 */
-	ports = (__be16 *)skb_transport_header(skb);
+	ports = skb_header_pointer(skb, skb_transport_offset(skb),
+				   sizeof(_ports), &_ports);
+	if (!ports)
+		return;
 
 	sin.sin_family = AF_INET;
 	sin.sin_addr.s_addr = ip_hdr(skb)->daddr;

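[Illustration, not part of the patch] skb_header_pointer() either points into the skb or copies the requested range into a caller-supplied buffer, and returns NULL when the range is out of bounds; unlike pskb_may_pull() it never reallocates the skb, which is what makes it safe on skbs already queued to a socket. A toy model of those semantics over a flat buffer (real skbs also have paged fragments, which this demo ignores):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    /* toy model of skb_header_pointer(): return len bytes at offset, copied
     * into the caller's buffer; NULL if out of range; never mutates the data
     */
    static const void *header_pointer(const unsigned char *data, size_t datalen,
                                      size_t offset, size_t len, void *buffer)
    {
        if (offset + len > datalen || offset + len < offset)
            return NULL;                    /* out of bounds: caller bails out */
        memcpy(buffer, data + offset, len);
        return buffer;
    }

    int main(void)
    {
        unsigned char pkt[6] = { 0, 7, 0, 53, 0xde, 0xad };  /* ports 7 -> 53 */
        unsigned char buf[4];
        const unsigned char *p = header_pointer(pkt, sizeof(pkt), 0, 4, buf);

        if (p)
            printf("src=%u dst=%u\n", (p[0] << 8) | p[1], (p[2] << 8) | p[3]);
        /* an out-of-range request fails instead of reading past the packet */
        printf("oob -> %s\n",
               header_pointer(pkt, sizeof(pkt), 4, 4, buf) ? "ok" : "NULL");
        return 0;
    }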

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
@@ -224,7 +224,7 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits) {
 			dst_negative_advice(sk);
-		} else if (!tp->syn_data && !tp->syn_fastopen) {
+		} else {
 			sk_rethink_txhash(sk);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;


diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
@@ -341,6 +341,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
 	skb_reset_network_header(skb);
 	iph = ipv6_hdr(skb);
 	iph->daddr = fl6->daddr;
+	ip6_flow_hdr(iph, 0, 0);
 
 	serr = SKB_EXT_ERR(skb);
 	serr->ee.ee_errno = err;
@@ -700,17 +701,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
 	}
 	if (np->rxopt.bits.rxorigdstaddr) {
 		struct sockaddr_in6 sin6;
-		__be16 *ports;
-		int end;
+		__be16 _ports[2], *ports;
 
-		end = skb_transport_offset(skb) + 4;
-		if (end <= 0 || pskb_may_pull(skb, end)) {
+		ports = skb_header_pointer(skb, skb_transport_offset(skb),
+					   sizeof(_ports), &_ports);
+		if (ports) {
 			/* All current transport protocols have the port numbers in the
 			 * first four bytes of the transport header and this function is
 			 * written with this assumption in mind.
 			 */
-			ports = (__be16 *)skb_transport_header(skb);
-
 			sin6.sin6_family = AF_INET6;
 			sin6.sin6_addr = ipv6_hdr(skb)->daddr;
 			sin6.sin6_port = ports[1];


diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
@@ -421,10 +421,10 @@ static int icmp6_iif(const struct sk_buff *skb)
 static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 		       const struct in6_addr *force_saddr)
 {
-	struct net *net = dev_net(skb->dev);
 	struct inet6_dev *idev = NULL;
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct sock *sk;
+	struct net *net;
 	struct ipv6_pinfo *np;
 	const struct in6_addr *saddr = NULL;
 	struct dst_entry *dst;
@@ -435,12 +435,16 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 	int iif = 0;
 	int addr_type = 0;
 	int len;
-	u32 mark = IP6_REPLY_MARK(net, skb->mark);
+	u32 mark;
 
 	if ((u8 *)hdr < skb->head ||
 	    (skb_network_header(skb) + sizeof(*hdr)) > skb_tail_pointer(skb))
 		return;
 
+	if (!skb->dev)
+		return;
+	net = dev_net(skb->dev);
+	mark = IP6_REPLY_MARK(net, skb->mark);
+
 	/*
 	 * Make sure we respect the rules
 	 * i.e. RFC 1885 2.4(e)


diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
@@ -33,12 +33,6 @@
 
 #define CONNCOUNT_SLOTS		256U
 
-#ifdef CONFIG_LOCKDEP
-#define CONNCOUNT_LOCK_SLOTS	8U
-#else
-#define CONNCOUNT_LOCK_SLOTS	256U
-#endif
-
 #define CONNCOUNT_GC_MAX_NODES	8
 #define MAX_KEYLEN		5
@@ -49,8 +43,6 @@ struct nf_conncount_tuple {
 	struct nf_conntrack_zone	zone;
 	int				cpu;
 	u32				jiffies32;
-	bool				dead;
-	struct rcu_head			rcu_head;
 };
 
 struct nf_conncount_rb {
@@ -60,7 +52,7 @@ struct nf_conncount_rb {
 	struct rcu_head rcu_head;
 };
 
-static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp;
+static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;
 
 struct nf_conncount_data {
 	unsigned int keylen;
@@ -89,79 +81,25 @@ static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
 	return memcmp(a, b, klen * sizeof(u32));
 }
 
-enum nf_conncount_list_add
-nf_conncount_add(struct nf_conncount_list *list,
-		 const struct nf_conntrack_tuple *tuple,
-		 const struct nf_conntrack_zone *zone)
-{
-	struct nf_conncount_tuple *conn;
-
-	if (WARN_ON_ONCE(list->count > INT_MAX))
-		return NF_CONNCOUNT_ERR;
-
-	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
-	if (conn == NULL)
-		return NF_CONNCOUNT_ERR;
-
-	conn->tuple = *tuple;
-	conn->zone = *zone;
-	conn->cpu = raw_smp_processor_id();
-	conn->jiffies32 = (u32)jiffies;
-	conn->dead = false;
-	spin_lock_bh(&list->list_lock);
-	if (list->dead == true) {
-		kmem_cache_free(conncount_conn_cachep, conn);
-		spin_unlock_bh(&list->list_lock);
-		return NF_CONNCOUNT_SKIP;
-	}
-	list_add_tail(&conn->node, &list->head);
-	list->count++;
-	spin_unlock_bh(&list->list_lock);
-	return NF_CONNCOUNT_ADDED;
-}
-EXPORT_SYMBOL_GPL(nf_conncount_add);
-
-static void __conn_free(struct rcu_head *h)
-{
-	struct nf_conncount_tuple *conn;
-
-	conn = container_of(h, struct nf_conncount_tuple, rcu_head);
-	kmem_cache_free(conncount_conn_cachep, conn);
-}
-
-static bool conn_free(struct nf_conncount_list *list,
+static void conn_free(struct nf_conncount_list *list,
 		      struct nf_conncount_tuple *conn)
 {
-	bool free_entry = false;
-
-	spin_lock_bh(&list->list_lock);
-
-	if (conn->dead) {
-		spin_unlock_bh(&list->list_lock);
-		return free_entry;
-	}
+	lockdep_assert_held(&list->list_lock);
 
 	list->count--;
-	conn->dead = true;
-	list_del_rcu(&conn->node);
-	if (list->count == 0) {
-		list->dead = true;
-		free_entry = true;
-	}
+	list_del(&conn->node);
 
-	spin_unlock_bh(&list->list_lock);
-	call_rcu(&conn->rcu_head, __conn_free);
-	return free_entry;
+	kmem_cache_free(conncount_conn_cachep, conn);
 }
 
 static const struct nf_conntrack_tuple_hash *
 find_or_evict(struct net *net, struct nf_conncount_list *list,
-	      struct nf_conncount_tuple *conn, bool *free_entry)
+	      struct nf_conncount_tuple *conn)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	unsigned long a, b;
 	int cpu = raw_smp_processor_id();
-	__s32 age;
+	u32 age;
 
 	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
 	if (found)
@@ -176,52 +114,45 @@ find_or_evict(struct net *net, struct nf_conncount_list *list,
 	 */
 	age = a - b;
 	if (conn->cpu == cpu || age >= 2) {
-		*free_entry = conn_free(list, conn);
+		conn_free(list, conn);
 		return ERR_PTR(-ENOENT);
 	}
 
 	return ERR_PTR(-EAGAIN);
 }
 
-void nf_conncount_lookup(struct net *net,
-			 struct nf_conncount_list *list,
-			 const struct nf_conntrack_tuple *tuple,
-			 const struct nf_conntrack_zone *zone,
-			 bool *addit)
+static int __nf_conncount_add(struct net *net,
+			      struct nf_conncount_list *list,
+			      const struct nf_conntrack_tuple *tuple,
+			      const struct nf_conntrack_zone *zone)
 {
 	const struct nf_conntrack_tuple_hash *found;
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collect = 0;
-	bool free_entry = false;
-
-	/* best effort only */
-	*addit = tuple ? true : false;
 
 	/* check the saved connections */
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
 		if (collect > CONNCOUNT_GC_MAX_NODES)
 			break;
 
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
 			/* Not found, but might be about to be confirmed */
 			if (PTR_ERR(found) == -EAGAIN) {
-				if (!tuple)
-					continue;
-
 				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
 				    nf_ct_zone_id(zone, zone->dir))
-					*addit = false;
-			} else if (PTR_ERR(found) == -ENOENT)
+					return 0; /* already exists */
+			} else {
 				collect++;
+			}
 			continue;
 		}
 
 		found_ct = nf_ct_tuplehash_to_ctrack(found);
 
-		if (tuple && nf_ct_tuple_equal(&conn->tuple, tuple) &&
+		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
 		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
 			/*
 			 * We should not see tuples twice unless someone hooks
@@ -229,7 +160,8 @@ void nf_conncount_lookup(struct net *net,
 			 *
 			 * Attempt to avoid a re-add in this case.
 			 */
-			*addit = false;
+			nf_ct_put(found_ct);
+			return 0;
 		} else if (already_closed(found_ct)) {
 			/*
 			 * we do not care about connections which are
@@ -243,19 +175,48 @@ void nf_conncount_lookup(struct net *net,
 		nf_ct_put(found_ct);
 	}
 
+	if (WARN_ON_ONCE(list->count > INT_MAX))
+		return -EOVERFLOW;
+
+	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
+	if (conn == NULL)
+		return -ENOMEM;
+
+	conn->tuple = *tuple;
+	conn->zone = *zone;
+	conn->cpu = raw_smp_processor_id();
+	conn->jiffies32 = (u32)jiffies;
+	list_add_tail(&conn->node, &list->head);
+	list->count++;
+	return 0;
 }
-EXPORT_SYMBOL_GPL(nf_conncount_lookup);
+
+int nf_conncount_add(struct net *net,
+		     struct nf_conncount_list *list,
+		     const struct nf_conntrack_tuple *tuple,
+		     const struct nf_conntrack_zone *zone)
+{
+	int ret;
+
+	/* check the saved connections */
+	spin_lock_bh(&list->list_lock);
+	ret = __nf_conncount_add(net, list, tuple, zone);
+	spin_unlock_bh(&list->list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conncount_add);
 
 void nf_conncount_list_init(struct nf_conncount_list *list)
 {
 	spin_lock_init(&list->list_lock);
 	INIT_LIST_HEAD(&list->head);
 	list->count = 0;
-	list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
 
-/* Return true if the list is empty */
+/* Return true if the list is empty. Must be called with BH disabled. */
 bool nf_conncount_gc_list(struct net *net,
 			  struct nf_conncount_list *list)
 {
@@ -263,17 +224,17 @@ bool nf_conncount_gc_list(struct net *net,
 	struct nf_conncount_tuple *conn, *conn_n;
 	struct nf_conn *found_ct;
 	unsigned int collected = 0;
-	bool free_entry = false;
 	bool ret = false;
 
+	/* don't bother if other cpu is already doing GC */
+	if (!spin_trylock(&list->list_lock))
+		return false;
+
 	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
-		found = find_or_evict(net, list, conn, &free_entry);
+		found = find_or_evict(net, list, conn);
 		if (IS_ERR(found)) {
-			if (PTR_ERR(found) == -ENOENT)  {
-				if (free_entry)
-					return true;
+			if (PTR_ERR(found) == -ENOENT)
 				collected++;
-			}
 			continue;
 		}
@@ -284,23 +245,19 @@ bool nf_conncount_gc_list(struct net *net,
 			 * closed already -> ditch it
 			 */
 			nf_ct_put(found_ct);
-			if (conn_free(list, conn))
-				return true;
+			conn_free(list, conn);
 			collected++;
 			continue;
 		}
 
 		nf_ct_put(found_ct);
 		if (collected > CONNCOUNT_GC_MAX_NODES)
-			return false;
+			break;
 	}
 
-	spin_lock_bh(&list->list_lock);
-	if (!list->count) {
-		list->dead = true;
+	if (!list->count)
 		ret = true;
-	}
-	spin_unlock_bh(&list->list_lock);
+	spin_unlock(&list->list_lock);
 
 	return ret;
 }
@@ -314,6 +271,7 @@ static void __tree_nodes_free(struct rcu_head *h)
 	kmem_cache_free(conncount_rb_cachep, rbconn);
 }
 
+/* caller must hold tree nf_conncount_locks[] lock */
 static void tree_nodes_free(struct rb_root *root,
 			    struct nf_conncount_rb *gc_nodes[],
 			    unsigned int gc_count)
@@ -323,8 +281,10 @@ static void tree_nodes_free(struct rb_root *root,
 	while (gc_count) {
 		rbconn = gc_nodes[--gc_count];
 		spin_lock(&rbconn->list.list_lock);
-		rb_erase(&rbconn->node, root);
-		call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		if (!rbconn->list.count) {
+			rb_erase(&rbconn->node, root);
+			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
+		}
 		spin_unlock(&rbconn->list.list_lock);
 	}
 }
@@ -341,20 +301,19 @@ insert_tree(struct net *net,
 	    struct rb_root *root,
 	    unsigned int hash,
 	    const u32 *key,
-	    u8 keylen,
 	    const struct nf_conntrack_tuple *tuple,
 	    const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
 	struct rb_node **rbnode, *parent;
 	struct nf_conncount_rb *rbconn;
 	struct nf_conncount_tuple *conn;
 	unsigned int count = 0, gc_count = 0;
-	bool node_found = false;
-
-	spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+	u8 keylen = data->keylen;
+	bool do_gc = true;
 
+	spin_lock_bh(&nf_conncount_locks[hash]);
+restart:
 	parent = NULL;
 	rbnode = &(root->rb_node);
 	while (*rbnode) {
@@ -368,45 +327,32 @@ insert_tree(struct net *net,
 		} else if (diff > 0) {
 			rbnode = &((*rbnode)->rb_right);
 		} else {
-			/* unlikely: other cpu added node already */
-			node_found = true;
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
+			int ret;
+
+			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
+			if (ret)
 				count = 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
+			else
 				count = rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
-				node_found = false;
-			}
-			break;
+			tree_nodes_free(root, gc_nodes, gc_count);
+			goto out_unlock;
 		}
 
 		if (gc_count >= ARRAY_SIZE(gc_nodes))
 			continue;
 
-		if (nf_conncount_gc_list(net, &rbconn->list))
+		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
 			gc_nodes[gc_count++] = rbconn;
 	}
 
 	if (gc_count) {
 		tree_nodes_free(root, gc_nodes, gc_count);
-		/* tree_node_free before new allocation permits
-		 * allocator to re-use newly free'd object.
-		 *
-		 * This is a rare event; in most cases we will find
-		 * existing node to re-use. (or gc_count is 0).
-		 */
-
-		if (gc_count >= ARRAY_SIZE(gc_nodes))
-			schedule_gc_worker(data, hash);
+		schedule_gc_worker(data, hash);
+		gc_count = 0;
+		do_gc = false;
+		goto restart;
 	}
 
-	if (node_found)
-		goto out_unlock;
-
 	/* expected case: match, insert new node */
 	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
 	if (rbconn == NULL)
@@ -430,7 +376,7 @@ insert_tree(struct net *net,
 	rb_link_node_rcu(&rbconn->node, parent, rbnode);
 	rb_insert_color(&rbconn->node, root);
 out_unlock:
-	spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
+	spin_unlock_bh(&nf_conncount_locks[hash]);
 	return count;
 }
@@ -441,7 +387,6 @@ count_tree(struct net *net,
 	   const struct nf_conntrack_tuple *tuple,
 	   const struct nf_conntrack_zone *zone)
 {
-	enum nf_conncount_list_add ret;
 	struct rb_root *root;
 	struct rb_node *parent;
 	struct nf_conncount_rb *rbconn;
@@ -454,7 +399,6 @@ count_tree(struct net *net,
 	parent = rcu_dereference_raw(root->rb_node);
 	while (parent) {
 		int diff;
-		bool addit;
 
 		rbconn = rb_entry(parent, struct nf_conncount_rb, node);
@@ -464,31 +408,36 @@ count_tree(struct net *net,
 		} else if (diff > 0) {
 			parent = rcu_dereference_raw(parent->rb_right);
 		} else {
-			/* same source network -> be counted! */
-			nf_conncount_lookup(net, &rbconn->list, tuple, zone,
-					    &addit);
+			int ret;
 
-			if (!addit)
+			if (!tuple) {
+				nf_conncount_gc_list(net, &rbconn->list);
 				return rbconn->list.count;
+			}
 
-			ret = nf_conncount_add(&rbconn->list, tuple, zone);
-			if (ret == NF_CONNCOUNT_ERR) {
-				return 0; /* hotdrop */
-			} else if (ret == NF_CONNCOUNT_ADDED) {
-				return rbconn->list.count;
-			} else {
-				/* NF_CONNCOUNT_SKIP, rbconn is already
-				 * reclaimed by gc, insert a new tree node
-				 */
+			spin_lock_bh(&rbconn->list.list_lock);
+			/* Node might be about to be free'd.
+			 * We need to defer to insert_tree() in this case.
+			 */
+			if (rbconn->list.count == 0) {
+				spin_unlock_bh(&rbconn->list.list_lock);
 				break;
 			}
+
+			/* same source network -> be counted! */
+			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
+			spin_unlock_bh(&rbconn->list.list_lock);
+			if (ret)
+				return 0; /* hotdrop */
+			else
+				return rbconn->list.count;
 		}
 	}
 
 	if (!tuple)
 		return 0;
 
-	return insert_tree(net, data, root, hash, key, keylen, tuple, zone);
+	return insert_tree(net, data, root, hash, key, tuple, zone);
} }
static void tree_gc_worker(struct work_struct *work) static void tree_gc_worker(struct work_struct *work)
@ -499,27 +448,47 @@ static void tree_gc_worker(struct work_struct *work)
struct rb_node *node; struct rb_node *node;
unsigned int tree, next_tree, gc_count = 0; unsigned int tree, next_tree, gc_count = 0;
tree = data->gc_tree % CONNCOUNT_LOCK_SLOTS; tree = data->gc_tree % CONNCOUNT_SLOTS;
root = &data->root[tree]; root = &data->root[tree];
local_bh_disable();
rcu_read_lock(); rcu_read_lock();
for (node = rb_first(root); node != NULL; node = rb_next(node)) { for (node = rb_first(root); node != NULL; node = rb_next(node)) {
rbconn = rb_entry(node, struct nf_conncount_rb, node); rbconn = rb_entry(node, struct nf_conncount_rb, node);
if (nf_conncount_gc_list(data->net, &rbconn->list)) if (nf_conncount_gc_list(data->net, &rbconn->list))
gc_nodes[gc_count++] = rbconn; gc_count++;
} }
rcu_read_unlock(); rcu_read_unlock();
local_bh_enable();
cond_resched();
spin_lock_bh(&nf_conncount_locks[tree]); spin_lock_bh(&nf_conncount_locks[tree]);
if (gc_count < ARRAY_SIZE(gc_nodes))
goto next; /* do not bother */
if (gc_count) { gc_count = 0;
tree_nodes_free(root, gc_nodes, gc_count); node = rb_first(root);
while (node != NULL) {
rbconn = rb_entry(node, struct nf_conncount_rb, node);
node = rb_next(node);
if (rbconn->list.count > 0)
continue;
gc_nodes[gc_count++] = rbconn;
if (gc_count >= ARRAY_SIZE(gc_nodes)) {
tree_nodes_free(root, gc_nodes, gc_count);
gc_count = 0;
}
} }
tree_nodes_free(root, gc_nodes, gc_count);
next:
clear_bit(tree, data->pending_trees); clear_bit(tree, data->pending_trees);
next_tree = (tree + 1) % CONNCOUNT_SLOTS; next_tree = (tree + 1) % CONNCOUNT_SLOTS;
next_tree = find_next_bit(data->pending_trees, next_tree, CONNCOUNT_SLOTS); next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);
if (next_tree < CONNCOUNT_SLOTS) { if (next_tree < CONNCOUNT_SLOTS) {
data->gc_tree = next_tree; data->gc_tree = next_tree;
@ -621,10 +590,7 @@ static int __init nf_conncount_modinit(void)
{ {
int i; int i;
BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS); for (i = 0; i < CONNCOUNT_SLOTS; ++i)
BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0);
for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i)
spin_lock_init(&nf_conncount_locks[i]); spin_lock_init(&nf_conncount_locks[i]);
conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple", conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
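
The find_next_bit() change in tree_gc_worker() above is worth spelling out: the kernel helper's prototype is find_next_bit(addr, size, offset), returning the first set bit at or above offset, or size when none is found. The old call passed size and offset swapped, so it scanned an empty range and could never locate the next pending tree. A minimal userspace sketch (a naive stand-in for the kernel helper, illustration only):

/* Illustration only, not kernel code: a naive userspace stand-in for
 * find_next_bit() with the kernel's argument order (addr, size, offset),
 * showing why the swapped call searched an empty range.
 */
#include <stdio.h>

#define SLOTS 256UL
#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Return the index of the first set bit in [offset, size), or size if
 * no bit in that range is set, matching the kernel helper's contract.
 */
static unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset)
{
	unsigned long i;

	for (i = offset; i < size; i++)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

int main(void)
{
	unsigned long pending[SLOTS / BITS_PER_LONG] = { 0 };
	unsigned long next_tree = 10;

	/* pretend tree 42 has pending gc work */
	pending[42 / BITS_PER_LONG] |= 1UL << (42 % BITS_PER_LONG);

	/* fixed order: scans bits [next_tree, SLOTS) and finds 42 */
	printf("fixed: %lu\n", find_next_bit(pending, SLOTS, next_tree));

	/* buggy order: scans [SLOTS, next_tree), an empty range, so the
	 * set bit is never found (the call returns its "size", 10)
	 */
	printf("buggy: %lu\n", find_next_bit(pending, next_tree, SLOTS));
	return 0;
}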


@@ -30,7 +30,6 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
 	enum ip_conntrack_info ctinfo;
 	const struct nf_conn *ct;
 	unsigned int count;
-	bool addit;

 	tuple_ptr = &tuple;

@@ -44,19 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
 		return;
 	}

-	nf_conncount_lookup(nft_net(pkt), &priv->list, tuple_ptr, zone,
-			    &addit);
-	count = priv->list.count;
-
-	if (!addit)
-		goto out;
-
-	if (nf_conncount_add(&priv->list, tuple_ptr, zone) == NF_CONNCOUNT_ERR) {
+	if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
 		regs->verdict.code = NF_DROP;
 		return;
 	}
-	count++;
-out:
+
+	count = priv->list.count;
+
 	if ((count > priv->limit) ^ priv->invert) {
 		regs->verdict.code = NFT_BREAK;


@@ -2628,7 +2628,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 		addr = saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
 		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out;
+			goto out_put;
 	}

 	err = -ENXIO;
@@ -2828,7 +2828,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 		addr = saddr->sll_halen ? saddr->sll_addr : NULL;
 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
 		if (addr && dev && saddr->sll_halen < dev->addr_len)
-			goto out;
+			goto out_unlock;
 	}

 	err = -ENXIO;
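
In both hunks above (tpacket_snd() and packet_snd() in af_packet.c), dev_get_by_index() has already taken a device reference by the time the sll_halen check fails, so the error must exit through the label that drops that reference rather than the plain-return path. A self-contained sketch of the pattern, with hypothetical get_ref()/put_ref() helpers standing in for the kernel calls:

#include <assert.h>
#include <errno.h>
#include <stdio.h>

/* hypothetical refcount helpers standing in for dev_get_by_index()/dev_put() */
static int refs;

static int get_ref(void)
{
	refs++;
	return 1;
}

static void put_ref(void)
{
	refs--;
}

/* mirrors the fixed control flow: once the reference is held, every
 * error path must leave through the label that releases it
 */
static int send_example(int bad_addr)
{
	int err = -ENXIO;

	if (!get_ref())
		goto out;	/* nothing held yet: plain exit */

	err = -EINVAL;
	if (bad_addr)
		goto out_put;	/* the buggy version jumped to "out" here */

	err = 0;		/* ...would transmit here... */
out_put:
	put_ref();
out:
	return err;
}

int main(void)
{
	send_example(1);
	assert(refs == 0);	/* with "goto out" this would be 1: a leak */
	printf("refs held after error path: %d\n", refs);
	return 0;
}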


@@ -97,11 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,

 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
-			addr->a.v6.sin6_flowinfo = 0;
 			addr->a.v6.sin6_addr = ifa->addr;
 			addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
 			addr->valid = 1;
@@ -431,7 +429,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v6.sin6_family = AF_INET6;
-			addr->a.v6.sin6_port = 0;
 			addr->a.v6.sin6_addr = ifp->addr;
 			addr->a.v6.sin6_scope_id = dev->ifindex;
 			addr->valid = 1;


@@ -101,7 +101,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
 		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			INIT_LIST_HEAD(&addr->list);
@@ -776,10 +775,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,

 	switch (ev) {
 	case NETDEV_UP:
-		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
 		if (addr) {
 			addr->a.v4.sin_family = AF_INET;
-			addr->a.v4.sin_port = 0;
 			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
 			addr->valid = 1;
 			spin_lock_bh(&net->sctp.local_addr_lock);
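
The sctp hunks above share one idea: allocating the entry zeroed (kzalloc) guarantees every byte starts at zero, so the hand-written sin_port/sin6_port/sin6_flowinfo clears become redundant, and any field nobody remembered to clear can no longer hold stale kernel memory. A userspace analogy using malloc() versus calloc(); the struct below is a simplified stand-in, not the real sctp_sockaddr_entry:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>

/* simplified stand-in for struct sctp_sockaddr_entry */
struct addr_entry {
	unsigned short family;
	unsigned short port;
	unsigned int   flowinfo;
	unsigned char  addr[16];
};

int main(void)
{
	/* kmalloc-style: every field must be cleared by hand; a missed
	 * field (or struct padding) keeps whatever the allocator left
	 * behind and can leak later
	 */
	struct addr_entry *a = malloc(sizeof(*a));
	if (!a)
		return 1;
	a->family = AF_INET6;
	a->port = 0;
	a->flowinfo = 0;
	memset(a->addr, 0, sizeof(a->addr));

	/* kzalloc-style: the allocation is zeroed up front, so only the
	 * meaningful fields need assignments
	 */
	struct addr_entry *b = calloc(1, sizeof(*b));
	if (!b) {
		free(a);
		return 1;
	}
	b->family = AF_INET6;

	printf("port=%u flowinfo=%u\n", b->port, b->flowinfo);
	free(a);
	free(b);
	return 0;
}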


@@ -144,6 +144,9 @@ static int smc_release(struct socket *sock)
 		sock_set_flag(sk, SOCK_DEAD);
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	}
+
+	sk->sk_prot->unhash(sk);
+
 	if (smc->clcsock) {
 		if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
 			/* wake up clcsock accept */
@@ -168,7 +171,6 @@ static int smc_release(struct socket *sock)
 	smc_conn_free(&smc->conn);
 	release_sock(sk);

-	sk->sk_prot->unhash(sk);
 	sock_put(sk); /* final sock_put */
 out:
 	return rc;


@@ -771,6 +771,12 @@ void rpcb_getport_async(struct rpc_task *task)
 	case RPCBVERS_3:
 		map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
 		map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+		if (!map->r_addr) {
+			status = -ENOMEM;
+			dprintk("RPC: %5u %s: no memory available\n",
+				task->tk_pid, __func__);
+			goto bailout_free_args;
+		}
 		map->r_owner = "";
 		break;
 	case RPCBVERS_2:
@@ -793,6 +799,8 @@ void rpcb_getport_async(struct rpc_task *task)
 	rpc_put_task(child);
 	return;

+bailout_free_args:
+	kfree(map);
 bailout_release_client:
 	rpc_release_client(rpcb_clnt);
 bailout_nofree:
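
The rpcbind change above pairs the new NULL check for rpc_sockaddr2uaddr(), which allocates, with a bailout_free_args label placed so that later failures free strictly more than earlier ones. A sketch of that stacked-label unwind idiom; all names and the failure-injecting wrapper are illustrative, not the rpcbind code:

#include <stdio.h>
#include <stdlib.h>

/* malloc wrapper with an injectable failure, for demonstration only */
static char *xmalloc(size_t n, int fail)
{
	return fail ? NULL : malloc(n);
}

static int do_setup(int fail_addr)
{
	char *clnt, *map, *addr;
	int status = -1;

	clnt = xmalloc(16, 0);
	if (!clnt)
		goto bailout_nofree;

	map = xmalloc(16, 0);
	if (!map)
		goto bailout_release_clnt;

	addr = xmalloc(16, fail_addr);	/* the allocation the fix checks */
	if (!addr)
		goto bailout_free_args;	/* new label: also frees map */

	/* success: release everything and return before the unwind */
	free(addr);
	free(map);
	free(clnt);
	return 0;

bailout_free_args:
	free(map);
bailout_release_clnt:
	free(clnt);
bailout_nofree:
	return status;
}

int main(void)
{
	printf("forced failure -> %d (map and clnt still freed)\n",
	       do_setup(1));
	return 0;
}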


@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
 	return limit;
 }

+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+	return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
 static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
 {
 	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
 	return buf;
 }

+static inline bool string_is_valid(char *s, int len)
+{
+	return memchr(s, '\0', len) ? true : false;
+}
+
 static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 				   struct tipc_nl_compat_msg *msg,
 				   struct sk_buff *arg)
@@ -379,6 +389,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_bearer_config *b;
+	int len;

 	b = (struct tipc_bearer_config *)TLV_DATA(msg->req);

@@ -386,6 +397,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!bearer)
 		return -EMSGSIZE;

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(b->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
 		return -EMSGSIZE;

@@ -411,6 +426,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
 {
 	char *name;
 	struct nlattr *bearer;
+	int len;

 	name = (char *)TLV_DATA(msg->req);

@@ -418,6 +434,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!bearer)
 		return -EMSGSIZE;

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
 		return -EMSGSIZE;

@@ -478,6 +498,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
 	struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
 	struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
 	int err;
+	int len;

 	if (!attrs[TIPC_NLA_LINK])
 		return -EINVAL;
@@ -504,6 +525,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
 		return err;

 	name = (char *)TLV_DATA(msg->req);
+
+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
 		return 0;

@@ -644,6 +670,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
 	struct nlattr *prop;
 	struct nlattr *media;
 	struct tipc_link_config *lc;
+	int len;

 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);

@@ -651,6 +678,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
 	if (!media)
 		return -EMSGSIZE;

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
 		return -EMSGSIZE;

@@ -671,6 +702,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
 	struct nlattr *prop;
 	struct nlattr *bearer;
 	struct tipc_link_config *lc;
+	int len;

 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);

@@ -678,6 +710,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
 	if (!bearer)
 		return -EMSGSIZE;

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
 		return -EMSGSIZE;

@@ -726,9 +762,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 	struct tipc_link_config *lc;
 	struct tipc_bearer *bearer;
 	struct tipc_media *media;
+	int len;

 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(lc->name, len))
+		return -EINVAL;
+
 	media = tipc_media_find(lc->name);
 	if (media) {
 		cmd->doit = &__tipc_nl_media_set;
@@ -750,6 +791,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
 {
 	char *name;
 	struct nlattr *link;
+	int len;

 	name = (char *)TLV_DATA(msg->req);

@@ -757,6 +799,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!link)
 		return -EMSGSIZE;

+	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	if (!string_is_valid(name, len))
+		return -EINVAL;
+
 	if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
 		return -EMSGSIZE;

@@ -778,6 +824,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
 	};

 	ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+	if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+		return -EINVAL;

 	depth = ntohl(ntq->depth);

@@ -1201,7 +1249,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
 	}

 	len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
-	if (len && !TLV_OK(msg.req, len)) {
+	if (!len || !TLV_OK(msg.req, len)) {
 		msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
 		err = -EOPNOTSUPP;
 		goto send;
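
The pattern repeated through the TIPC compat handlers above: clamp the claimed TLV payload length against the relevant TIPC_MAX_*_NAME bound with min_t(), then refuse to treat the buffer as a C string unless a NUL terminator is present within that length. A userspace sketch mirroring the new string_is_valid() helper, with the TLV plumbing simplified away:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TIPC_MAX_BEARER_NAME 32

/* mirrors the helper the patch adds */
static bool string_is_valid(const char *s, int len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	char name[TIPC_MAX_BEARER_NAME];
	int tlv_data_len = 8;	/* pretend the TLV payload is 8 bytes */
	int len;

	memset(name, 'A', sizeof(name));	/* hostile: no terminator */

	/* the min_t() clamp: never look past what the TLV carries */
	len = tlv_data_len < TIPC_MAX_BEARER_NAME ?
	      tlv_data_len : TIPC_MAX_BEARER_NAME;

	if (!string_is_valid(name, len)) {
		puts("rejected: unterminated name");	/* -EINVAL in-kernel */
		return 1;
	}
	printf("ok: %s\n", name);
	return 0;
}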


@@ -404,7 +404,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
 	ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
 	if (ret == -EWOULDBLOCK)
 		return -EWOULDBLOCK;
-	if (ret > 0) {
+	if (ret == sizeof(s)) {
 		read_lock_bh(&sk->sk_callback_lock);
 		ret = tipc_conn_rcv_sub(srv, con, &s);
 		read_unlock_bh(&sk->sk_callback_lock);
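
This hunk tightens the subscription read path: only a read of exactly sizeof(s) bytes reaches tipc_conn_rcv_sub(), so a truncated request can no longer be handled with a partially filled struct. Sketched below with a fake receive function; the struct and names are illustrative:

#include <stdio.h>
#include <string.h>

struct sub_req {	/* stand-in for the wire-format subscription struct */
	unsigned int type;
	unsigned int lower;
	unsigned int upper;
};

/* pretend recv(): copies up to n bytes of wire data into buf */
static int fake_recv(void *buf, int n, const void *wire, int wire_len)
{
	int c = n < wire_len ? n : wire_len;

	memcpy(buf, wire, c);
	return c;
}

int main(void)
{
	unsigned char wire[5] = { 1, 2, 3, 4, 5 };	/* truncated request */
	struct sub_req s = { 0 };
	int ret = fake_recv(&s, sizeof(s), wire, sizeof(wire));

	if (ret == (int)sizeof(s))	/* the fix: exact size, not ret > 0 */
		puts("handle subscription");
	else
		printf("drop: short read (%d of %zu bytes)\n",
		       ret, sizeof(s));
	return 0;
}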


@@ -1004,6 +1004,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)

 void security_cred_free(struct cred *cred)
 {
+	/*
+	 * There is a failure case in prepare_creds() that
+	 * may result in a call here with ->security being NULL.
+	 */
+	if (unlikely(cred->security == NULL))
+		return;
+
 	call_void_hook(cred_free, cred);
 }


@@ -732,7 +732,8 @@ static int sens_destroy(void *key, void *datum, void *p)
 	kfree(key);
 	if (datum) {
 		levdatum = datum;
-		ebitmap_destroy(&levdatum->level->cat);
+		if (levdatum->level)
+			ebitmap_destroy(&levdatum->level->cat);
 		kfree(levdatum->level);
 	}
 	kfree(datum);


@@ -368,7 +368,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
 		break;
 	case YAMA_SCOPE_RELATIONAL:
 		rcu_read_lock();
-		if (!task_is_descendant(current, child) &&
+		if (!pid_alive(child))
+			rc = -EPERM;
+		if (!rc && !task_is_descendant(current, child) &&
 		    !ptracer_exception_found(current, child) &&
 		    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
 			rc = -EPERM;


@@ -6,7 +6,7 @@ TEST_PROGS := run.sh

 include ../lib.mk

-all: khdr
+all:
 	@for DIR in $(SUBDIRS); do		\
 		BUILD_TARGET=$(OUTPUT)/$$DIR;	\
 		mkdir $$BUILD_TARGET -p;	\


@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
 TEST_PROGS := run.sh

 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk

 $(TEST_GEN_FILES): $(HEADERS)


@@ -9,6 +9,7 @@ EXTRA_OBJS := ../gpiogpio-event-mon-in.o ../gpiogpio-event-mon.o
 EXTRA_OBJS += ../gpiogpio-hammer-in.o ../gpiogpio-utils.o ../gpiolsgpio-in.o
 EXTRA_OBJS += ../gpiolsgpio.o

+KSFT_KHDR_INSTALL := 1
 include ../lib.mk

 all: $(BINARIES)


@@ -1,6 +1,7 @@
 all:

 top_srcdir = ../../../../
+KSFT_KHDR_INSTALL := 1
 UNAME_M := $(shell uname -m)

 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
@@ -40,4 +41,3 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)

 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(STATIC_LIBS):| khdr


@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))

+ifdef KSFT_KHDR_INSTALL
 top_srcdir ?= ../../../..
 include $(top_srcdir)/scripts/subarch.include
 ARCH ?= $(SUBARCH)

-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-
 .PHONY: khdr
 khdr:
 	make ARCH=$(ARCH) -C $(top_srcdir) headers_install

-ifdef KSFT_KHDR_INSTALL
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+else
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 endif

 .ONESHELL:


@@ -6,6 +6,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
 all: $(TEST_PROGS)

 top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
 include ../../lib.mk

 clean:


@@ -24,6 +24,7 @@ TEST_GEN_FILES += virtual_address_range

 TEST_PROGS := run_vmtests

+KSFT_KHDR_INSTALL := 1
 include ../lib.mk

 $(OUTPUT)/userfaultfd: LDLIBS += -lpthread