Merge android-4.19.48 (01f5de3) into msm-4.19

* refs/heads/tmp-01f5de3:
  Linux 4.19.48
  tipc: fix modprobe tipc failed after switch order of device registration
  Revert "tipc: fix modprobe tipc failed after switch order of device registration"
  xen/pciback: Don't disable PCI_COMMAND on PCI device reset.
  jump_label: move 'asm goto' support test to Kconfig
  compiler.h: give up __compiletime_assert_fallback()
  include/linux/compiler*.h: define asm_volatile_goto
  crypto: vmx - ghash: do nosimd fallback manually
  net/tls: don't ignore netdev notifications if no TLS features
  net/tls: fix state removal with feature flags off
  bnxt_en: Fix aggregation buffer leak under OOM condition.
  net: stmmac: dma channel control register need to be init first
  net/mlx5e: Disable rxhash when CQE compress is enabled
  net/mlx5: Allocate root ns memory using kzalloc to match kfree
  tipc: Avoid copying bytes beyond the supplied data
  net/mlx5: Avoid double free in fs init error unwinding path
  usbnet: fix kernel crash after disconnect
  net: stmmac: fix reset gpio free missing
  net: sched: don't use tc_action->order during action dump
  net: phy: marvell10g: report if the PHY fails to boot firmware
  net: mvpp2: fix bad MVPP2_TXQ_SCHED_TOKEN_CNTR_REG queue value
  net: mvneta: Fix err code path of probe
  net-gro: fix use-after-free read in napi_gro_frags()
  net: fec: fix the clk mismatch in failed_reset path
  net: dsa: mv88e6xxx: fix handling of upper half of STATS_TYPE_PORT
  llc: fix skb leak in llc_build_and_send_ui_pkt()
  ipv6: Fix redirect with VRF
  ipv6: Consider sk_bound_dev_if when binding a raw socket to an address
  ipv4/igmp: fix build error if !CONFIG_IP_MULTICAST
  ipv4/igmp: fix another memory leak in igmpv3_del_delrec()
  inet: switch IP ID generator to siphash
  cxgb4: offload VLAN flows regardless of VLAN ethtype
  bonding/802.3ad: fix slave link initialization transition states

Conflicts:
	include/linux/compiler.h

Change-Id: I43dd2908aa00a247ac985c36d210e83370361315
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
commit d7864ac281
70 changed files with 335 additions and 326 deletions
Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 47
+SUBLEVEL = 48
 EXTRAVERSION =
 NAME = "People's Front"
 
@@ -71,6 +71,7 @@ config KPROBES
 config JUMP_LABEL
 	bool "Optimize very unlikely/likely branches"
 	depends on HAVE_ARCH_JUMP_LABEL
+	depends on CC_HAS_ASM_GOTO
 	help
 	  This option enables a transparent branch optimization that
 	  makes certain almost-always-true or almost-always-false branch
@@ -4,8 +4,6 @@
 #include <asm/patch.h>
 #include <asm/insn.h>
 
-#ifdef HAVE_JUMP_LABEL
-
 static void __arch_jump_label_transform(struct jump_entry *entry,
 					enum jump_label_type type,
 					bool is_static)
@@ -35,5 +33,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry,
 {
 	__arch_jump_label_transform(entry, type, true);
 }
-
-#endif
@@ -20,8 +20,6 @@
 #include <linux/jump_label.h>
 #include <asm/insn.h>
 
-#ifdef HAVE_JUMP_LABEL
-
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
@@ -49,5 +47,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry,
 	 * NOP needs to be replaced by a branch.
 	 */
 }
-
-#endif	/* HAVE_JUMP_LABEL */
@@ -16,8 +16,6 @@
 #include <asm/cacheflush.h>
 #include <asm/inst.h>
 
-#ifdef HAVE_JUMP_LABEL
-
 /*
  * Define parameters for the standard MIPS and the microMIPS jump
  * instruction encoding respectively:
@@ -70,5 +68,3 @@ void arch_jump_label_transform(struct jump_entry *e,
 
 	mutex_unlock(&text_mutex);
 }
-
-#endif /* HAVE_JUMP_LABEL */
@@ -38,7 +38,7 @@ extern struct static_key hcall_tracepoint_key;
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
 /* OPAL tracing */
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
 extern struct static_key opal_tracepoint_key;
 #endif
 
@@ -11,7 +11,6 @@
 #include <linux/jump_label.h>
 #include <asm/code-patching.h>
 
-#ifdef HAVE_JUMP_LABEL
 void arch_jump_label_transform(struct jump_entry *entry,
 			       enum jump_label_type type)
 {
@@ -22,4 +21,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
 	else
 		patch_instruction(addr, PPC_INST_NOP);
 }
-#endif
@@ -4,7 +4,7 @@
 #include <asm/trace.h>
 #include <asm/asm-prototypes.h>
 
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
 
 int opal_tracepoint_regfunc(void)
@@ -20,7 +20,7 @@
 	.section	".text"
 
 #ifdef CONFIG_TRACEPOINTS
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
 #define OPAL_BRANCH(LABEL)					\
 	ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
 #else
@@ -19,7 +19,7 @@
 
 #ifdef CONFIG_TRACEPOINTS
 
-#ifndef HAVE_JUMP_LABEL
+#ifndef CONFIG_JUMP_LABEL
 	.section	".toc","aw"
 
 	.globl hcall_tracepoint_refcount
@@ -79,7 +79,7 @@ hcall_tracepoint_refcount:
 	mr	r5,BUFREG;					\
 	__HCALL_INST_POSTCALL
 
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
 #define HCALL_BRANCH(LABEL)					\
 	ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
 #else
@@ -828,7 +828,7 @@ EXPORT_SYMBOL(arch_free_page);
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 #ifdef CONFIG_TRACEPOINTS
-#ifdef HAVE_JUMP_LABEL
+#ifdef CONFIG_JUMP_LABEL
 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
 
 int hcall_tracepoint_regfunc(void)
@@ -44,7 +44,7 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 obj-y	:= traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
 obj-y	+= processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y	+= debug.o irq.o ipl.o dis.o diag.o vdso.o early_nobss.o
-obj-y	+= sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y	+= sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
 obj-y	+= runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
 obj-y	+= entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
 obj-y	+= nospec-branch.o
@@ -68,6 +68,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_FUNCTION_TRACER)	+= mcount.o ftrace.o
 obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o
+obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
 obj-$(CONFIG_KEXEC_FILE)	+= machine_kexec_file.o kexec_image.o
 obj-$(CONFIG_KEXEC_FILE)	+= kexec_elf.o
@ -10,8 +10,6 @@
|
|||
#include <linux/jump_label.h>
|
||||
#include <asm/ipl.h>
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
struct insn {
|
||||
u16 opcode;
|
||||
s32 offset;
|
||||
|
@ -102,5 +100,3 @@ void arch_jump_label_transform_static(struct jump_entry *entry,
|
|||
{
|
||||
__jump_label_transform(entry, type, 1);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -118,4 +118,4 @@ pc--$(CONFIG_PERF_EVENTS) := perf_event.o
|
|||
obj-$(CONFIG_SPARC64) += $(pc--y)
|
||||
|
||||
obj-$(CONFIG_UPROBES) += uprobes.o
|
||||
obj-$(CONFIG_SPARC64) += jump_label.o
|
||||
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
||||
|
|
|
@ -9,8 +9,6 @@
|
|||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
void arch_jump_label_transform(struct jump_entry *entry,
|
||||
enum jump_label_type type)
|
||||
{
|
||||
|
@ -47,5 +45,3 @@ void arch_jump_label_transform(struct jump_entry *entry,
|
|||
flushi(insn);
|
||||
mutex_unlock(&text_mutex);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -337,7 +337,7 @@ For 32-bit we have the following conventions - kernel is built with
|
|||
*/
|
||||
.macro CALL_enter_from_user_mode
|
||||
#ifdef CONFIG_CONTEXT_TRACKING
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
|
||||
#endif
|
||||
call enter_from_user_mode
|
||||
|
|
|
@ -140,7 +140,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
|
|||
|
||||
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
|
||||
|
||||
#if defined(__clang__) && !defined(CC_HAVE_ASM_GOTO)
|
||||
#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
|
||||
|
||||
/*
|
||||
* Workaround for the sake of BPF compilation which utilizes kernel
|
||||
|
|
|
@@ -2,19 +2,6 @@
 #ifndef _ASM_X86_JUMP_LABEL_H
 #define _ASM_X86_JUMP_LABEL_H
 
-#ifndef HAVE_JUMP_LABEL
-/*
- * For better or for worse, if jump labels (the gcc extension) are missing,
- * then the entire static branch patching infrastructure is compiled out.
- * If that happens, the code in here will malfunction.  Raise a compiler
- * error instead.
- *
- * In theory, jump labels and the static branch patching infrastructure
- * could be decoupled to fix this.
- */
-#error asm/jump_label.h included on a non-jump-label kernel
-#endif
-
 #define JUMP_LABEL_NOP_SIZE 5
 
 #ifdef CONFIG_X86_64
@ -4,7 +4,7 @@
|
|||
|
||||
#define __CLOBBERS_MEM(clb...) "memory", ## clb
|
||||
|
||||
#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
|
||||
#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
|
||||
|
||||
/* Use asm goto */
|
||||
|
||||
|
@ -21,7 +21,7 @@ cc_label: \
|
|||
#define __BINARY_RMWcc_ARG " %1, "
|
||||
|
||||
|
||||
#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
|
||||
#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
|
||||
|
||||
/* Use flags output or a set instruction */
|
||||
|
||||
|
@ -36,7 +36,7 @@ do { \
|
|||
|
||||
#define __BINARY_RMWcc_ARG " %2, "
|
||||
|
||||
#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
|
||||
#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
|
||||
|
||||
#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
|
||||
__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
|
||||
|
|
|
@ -49,7 +49,8 @@ obj-$(CONFIG_COMPAT) += signal_compat.o
|
|||
obj-y += traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
|
||||
obj-y += time.o ioport.o dumpstack.o nmi.o
|
||||
obj-$(CONFIG_MODIFY_LDT_SYSCALL) += ldt.o
|
||||
obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
|
||||
obj-y += setup.o x86_init.o i8259.o irqinit.o
|
||||
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
|
||||
obj-$(CONFIG_IRQ_WORK) += irq_work.o
|
||||
obj-y += probe_roms.o
|
||||
obj-$(CONFIG_X86_64) += sys_x86_64.o
|
||||
|
|
|
@ -16,8 +16,6 @@
|
|||
#include <asm/alternative.h>
|
||||
#include <asm/text-patching.h>
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
union jump_code_union {
|
||||
char code[JUMP_LABEL_NOP_SIZE];
|
||||
struct {
|
||||
|
@ -142,5 +140,3 @@ __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
|
|||
if (jlstate == JL_STATE_UPDATE)
|
||||
__jump_label_transform(entry, type, text_poke_early, 1);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -456,7 +456,7 @@ FOP_END;
|
|||
|
||||
/*
|
||||
* XXX: inoutclob user must know where the argument is being expanded.
|
||||
* Relying on CC_HAVE_ASM_GOTO would allow us to remove _fault.
|
||||
* Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
|
||||
*/
|
||||
#define asm_safe(insn, inoutclob...) \
|
||||
({ \
|
||||
|
|
|
@ -1,22 +1,14 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/**
|
||||
* GHASH routines supporting VMX instructions on the Power 8
|
||||
*
|
||||
* Copyright (C) 2015 International Business Machines Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; version 2 only.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
* Copyright (C) 2015, 2019 International Business Machines Inc.
|
||||
*
|
||||
* Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
|
||||
*
|
||||
* Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
|
||||
* mechanism. The new approach is based on arm64 code, which is:
|
||||
* Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
|
@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
|
|||
const u8 *in, size_t len);
|
||||
|
||||
struct p8_ghash_ctx {
|
||||
/* key used by vector asm */
|
||||
u128 htable[16];
|
||||
struct crypto_shash *fallback;
|
||||
/* key used by software fallback */
|
||||
be128 key;
|
||||
};
|
||||
|
||||
struct p8_ghash_desc_ctx {
|
||||
u64 shash[2];
|
||||
u8 buffer[GHASH_DIGEST_SIZE];
|
||||
int bytes;
|
||||
struct shash_desc fallback_desc;
|
||||
};
|
||||
|
||||
static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
const char *alg = "ghash-generic";
|
||||
struct crypto_shash *fallback;
|
||||
struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
|
||||
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
|
||||
if (IS_ERR(fallback)) {
|
||||
printk(KERN_ERR
|
||||
"Failed to allocate transformation for '%s': %ld\n",
|
||||
alg, PTR_ERR(fallback));
|
||||
return PTR_ERR(fallback);
|
||||
}
|
||||
|
||||
crypto_shash_set_flags(fallback,
|
||||
crypto_shash_get_flags((struct crypto_shash
|
||||
*) tfm));
|
||||
|
||||
/* Check if the descsize defined in the algorithm is still enough. */
|
||||
if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
|
||||
+ crypto_shash_descsize(fallback)) {
|
||||
printk(KERN_ERR
|
||||
"Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
|
||||
alg,
|
||||
shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
|
||||
crypto_shash_descsize(fallback));
|
||||
return -EINVAL;
|
||||
}
|
||||
ctx->fallback = fallback;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if (ctx->fallback) {
|
||||
crypto_free_shash(ctx->fallback);
|
||||
ctx->fallback = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int p8_ghash_init(struct shash_desc *desc)
|
||||
{
|
||||
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
|
||||
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
dctx->bytes = 0;
|
||||
memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
|
||||
dctx->fallback_desc.tfm = ctx->fallback;
|
||||
dctx->fallback_desc.flags = desc->flags;
|
||||
return crypto_shash_init(&dctx->fallback_desc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
|
||||
|
@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
|
|||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
return crypto_shash_setkey(ctx->fallback, key, keylen);
|
||||
|
||||
memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void __ghash_block(struct p8_ghash_ctx *ctx,
|
||||
struct p8_ghash_desc_ctx *dctx)
|
||||
{
|
||||
if (!IN_INTERRUPT) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
gcm_ghash_p8(dctx->shash, ctx->htable,
|
||||
dctx->buffer, GHASH_DIGEST_SIZE);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
} else {
|
||||
crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
|
||||
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
|
||||
struct p8_ghash_desc_ctx *dctx,
|
||||
const u8 *src, unsigned int srclen)
|
||||
{
|
||||
if (!IN_INTERRUPT) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
gcm_ghash_p8(dctx->shash, ctx->htable,
|
||||
src, srclen);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
} else {
|
||||
while (srclen >= GHASH_BLOCK_SIZE) {
|
||||
crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
|
||||
gf128mul_lle((be128 *)dctx->shash, &ctx->key);
|
||||
srclen -= GHASH_BLOCK_SIZE;
|
||||
src += GHASH_BLOCK_SIZE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static int p8_ghash_update(struct shash_desc *desc,
|
||||
|
@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_desc *desc,
|
|||
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
|
||||
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (IN_INTERRUPT) {
|
||||
return crypto_shash_update(&dctx->fallback_desc, src,
|
||||
srclen);
|
||||
} else {
|
||||
if (dctx->bytes) {
|
||||
if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
|
||||
memcpy(dctx->buffer + dctx->bytes, src,
|
||||
srclen);
|
||||
dctx->bytes += srclen;
|
||||
return 0;
|
||||
}
|
||||
if (dctx->bytes) {
|
||||
if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
|
||||
memcpy(dctx->buffer + dctx->bytes, src,
|
||||
GHASH_DIGEST_SIZE - dctx->bytes);
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
gcm_ghash_p8(dctx->shash, ctx->htable,
|
||||
dctx->buffer, GHASH_DIGEST_SIZE);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
src += GHASH_DIGEST_SIZE - dctx->bytes;
|
||||
srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
|
||||
dctx->bytes = 0;
|
||||
srclen);
|
||||
dctx->bytes += srclen;
|
||||
return 0;
|
||||
}
|
||||
len = srclen & ~(GHASH_DIGEST_SIZE - 1);
|
||||
if (len) {
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
src += len;
|
||||
srclen -= len;
|
||||
}
|
||||
if (srclen) {
|
||||
memcpy(dctx->buffer, src, srclen);
|
||||
dctx->bytes = srclen;
|
||||
}
|
||||
return 0;
|
||||
memcpy(dctx->buffer + dctx->bytes, src,
|
||||
GHASH_DIGEST_SIZE - dctx->bytes);
|
||||
|
||||
__ghash_block(ctx, dctx);
|
||||
|
||||
src += GHASH_DIGEST_SIZE - dctx->bytes;
|
||||
srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
|
||||
dctx->bytes = 0;
|
||||
}
|
||||
len = srclen & ~(GHASH_DIGEST_SIZE - 1);
|
||||
if (len) {
|
||||
__ghash_blocks(ctx, dctx, src, len);
|
||||
src += len;
|
||||
srclen -= len;
|
||||
}
|
||||
if (srclen) {
|
||||
memcpy(dctx->buffer, src, srclen);
|
||||
dctx->bytes = srclen;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int p8_ghash_final(struct shash_desc *desc, u8 *out)
|
||||
|
@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
|
|||
struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
|
||||
struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
|
||||
|
||||
if (IN_INTERRUPT) {
|
||||
return crypto_shash_final(&dctx->fallback_desc, out);
|
||||
} else {
|
||||
if (dctx->bytes) {
|
||||
for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
|
||||
dctx->buffer[i] = 0;
|
||||
preempt_disable();
|
||||
pagefault_disable();
|
||||
enable_kernel_vsx();
|
||||
gcm_ghash_p8(dctx->shash, ctx->htable,
|
||||
dctx->buffer, GHASH_DIGEST_SIZE);
|
||||
disable_kernel_vsx();
|
||||
pagefault_enable();
|
||||
preempt_enable();
|
||||
dctx->bytes = 0;
|
||||
}
|
||||
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
|
||||
return 0;
|
||||
if (dctx->bytes) {
|
||||
for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
|
||||
dctx->buffer[i] = 0;
|
||||
__ghash_block(ctx, dctx);
|
||||
dctx->bytes = 0;
|
||||
}
|
||||
memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct shash_alg p8_ghash_alg = {
|
||||
|
@ -215,11 +178,8 @@ struct shash_alg p8_ghash_alg = {
|
|||
.cra_name = "ghash",
|
||||
.cra_driver_name = "p8_ghash",
|
||||
.cra_priority = 1000,
|
||||
.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
|
||||
.cra_blocksize = GHASH_BLOCK_SIZE,
|
||||
.cra_ctxsize = sizeof(struct p8_ghash_ctx),
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_init = p8_ghash_init_tfm,
|
||||
.cra_exit = p8_ghash_exit_tfm,
|
||||
},
|
||||
};
|
||||
|
|
|
@@ -3107,13 +3107,18 @@ static int bond_slave_netdev_event(unsigned long event,
 	case NETDEV_CHANGE:
 		/* For 802.3ad mode only:
 		 * Getting invalid Speed/Duplex values here will put slave
-		 * in weird state. So mark it as link-fail for the time
-		 * being and let link-monitoring (miimon) set it right when
-		 * correct speeds/duplex are available.
+		 * in weird state. Mark it as link-fail if the link was
+		 * previously up or link-down if it hasn't yet come up, and
+		 * let link-monitoring (miimon) set it right when correct
+		 * speeds/duplex are available.
 		 */
 		if (bond_update_speed_duplex(slave) &&
-		    BOND_MODE(bond) == BOND_MODE_8023AD)
-			slave->link = BOND_LINK_FAIL;
+		    BOND_MODE(bond) == BOND_MODE_8023AD) {
+			if (slave->last_link_up)
+				slave->link = BOND_LINK_FAIL;
+			else
+				slave->link = BOND_LINK_DOWN;
+		}
 
 		if (BOND_MODE(bond) == BOND_MODE_8023AD)
 			bond_3ad_adapter_speed_duplex_changed(slave);
@ -877,7 +877,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
|
|||
err = mv88e6xxx_port_read(chip, port, s->reg + 1, ®);
|
||||
if (err)
|
||||
return U64_MAX;
|
||||
high = reg;
|
||||
low |= ((u32)reg) << 16;
|
||||
}
|
||||
break;
|
||||
case STATS_TYPE_BANK1:
|
||||
|
|
|
@ -1599,6 +1599,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
|
|||
skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
|
||||
bnxt_reuse_rx_data(rxr, cons, data);
|
||||
if (!skb) {
|
||||
if (agg_bufs)
|
||||
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
|
||||
rc = -ENOMEM;
|
||||
goto next_rx;
|
||||
}
|
||||
|
|
|
@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
|
|||
fs->val.ivlan = vlan_tci;
|
||||
fs->mask.ivlan = vlan_tci_mask;
|
||||
|
||||
fs->val.ivlan_vld = 1;
|
||||
fs->mask.ivlan_vld = 1;
|
||||
|
||||
/* Chelsio adapters use ivlan_vld bit to match vlan packets
|
||||
* as 802.1Q. Also, when vlan tag is present in packets,
|
||||
* ethtype match is used then to match on ethtype of inner
|
||||
|
@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(struct net_device *dev,
|
|||
* ethtype value with ethtype of inner header.
|
||||
*/
|
||||
if (fs->val.ethtype == ETH_P_8021Q) {
|
||||
fs->val.ivlan_vld = 1;
|
||||
fs->mask.ivlan_vld = 1;
|
||||
fs->val.ethtype = 0;
|
||||
fs->mask.ethtype = 0;
|
||||
}
|
||||
|
|
|
@ -3571,7 +3571,7 @@ fec_probe(struct platform_device *pdev)
|
|||
if (fep->reg_phy)
|
||||
regulator_disable(fep->reg_phy);
|
||||
failed_reset:
|
||||
pm_runtime_put(&pdev->dev);
|
||||
pm_runtime_put_noidle(&pdev->dev);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
failed_regulator:
|
||||
clk_disable_unprepare(fep->clk_ahb);
|
||||
|
|
|
@ -4611,7 +4611,7 @@ static int mvneta_probe(struct platform_device *pdev)
|
|||
err = register_netdev(dev);
|
||||
if (err < 0) {
|
||||
dev_err(&pdev->dev, "failed to register\n");
|
||||
goto err_free_stats;
|
||||
goto err_netdev;
|
||||
}
|
||||
|
||||
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
|
||||
|
@ -4622,14 +4622,12 @@ static int mvneta_probe(struct platform_device *pdev)
|
|||
return 0;
|
||||
|
||||
err_netdev:
|
||||
unregister_netdev(dev);
|
||||
if (pp->bm_priv) {
|
||||
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
|
||||
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
|
||||
1 << pp->id);
|
||||
mvneta_bm_put(pp->bm_priv);
|
||||
}
|
||||
err_free_stats:
|
||||
free_percpu(pp->stats);
|
||||
err_free_ports:
|
||||
free_percpu(pp->ports);
|
||||
|
|
|
@ -1404,7 +1404,7 @@ static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
|
|||
/* Set defaults to the MVPP2 port */
|
||||
static void mvpp2_defaults_set(struct mvpp2_port *port)
|
||||
{
|
||||
int tx_port_num, val, queue, ptxq, lrxq;
|
||||
int tx_port_num, val, queue, lrxq;
|
||||
|
||||
if (port->priv->hw_version == MVPP21) {
|
||||
/* Update TX FIFO MIN Threshold */
|
||||
|
@ -1422,11 +1422,9 @@ static void mvpp2_defaults_set(struct mvpp2_port *port)
|
|||
mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
|
||||
|
||||
/* Close bandwidth for all queues */
|
||||
for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
|
||||
ptxq = mvpp2_txq_phys(port->id, queue);
|
||||
for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
|
||||
mvpp2_write(port->priv,
|
||||
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
|
||||
}
|
||||
MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
|
||||
|
||||
/* Set refill period to 1 usec, refill tokens
|
||||
* and bucket size to maximum
|
||||
|
@ -2271,7 +2269,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
|
|||
txq->descs_dma = 0;
|
||||
|
||||
/* Set minimum bandwidth for disabled TXQs */
|
||||
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
|
||||
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
|
||||
|
||||
/* Set Tx descriptors queue starting address and size */
|
||||
cpu = get_cpu();
|
||||
|
|
|
@ -3734,6 +3734,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
|
|||
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
|
||||
}
|
||||
|
||||
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
|
||||
features &= ~NETIF_F_RXHASH;
|
||||
if (netdev->features & NETIF_F_RXHASH)
|
||||
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
|
||||
}
|
||||
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
return features;
|
||||
|
@ -3860,6 +3866,9 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
|
|||
memcpy(&priv->tstamp, &config, sizeof(config));
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
/* might need to fix some features */
|
||||
netdev_update_features(priv->netdev);
|
||||
|
||||
return copy_to_user(ifr->ifr_data, &config,
|
||||
sizeof(config)) ? -EFAULT : 0;
|
||||
}
|
||||
|
@ -4702,6 +4711,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
|
|||
if (!priv->channels.params.scatter_fcs_en)
|
||||
netdev->features &= ~NETIF_F_RXFCS;
|
||||
|
||||
/* prefere CQE compression over rxhash */
|
||||
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
|
||||
netdev->features &= ~NETIF_F_RXHASH;
|
||||
|
||||
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
|
||||
if (FT_CAP(flow_modify_en) &&
|
||||
FT_CAP(modify_root) &&
|
||||
|
|
|
@ -2220,7 +2220,7 @@ static struct mlx5_flow_root_namespace
|
|||
cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
|
||||
|
||||
/* Create the root namespace */
|
||||
root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
|
||||
root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
|
||||
if (!root_ns)
|
||||
return NULL;
|
||||
|
||||
|
@ -2363,6 +2363,7 @@ static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
|
|||
cleanup_root_ns(steering->esw_egress_root_ns[i]);
|
||||
|
||||
kfree(steering->esw_egress_root_ns);
|
||||
steering->esw_egress_root_ns = NULL;
|
||||
}
|
||||
|
||||
static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
|
||||
|
@ -2377,6 +2378,7 @@ static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
|
|||
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
|
||||
|
||||
kfree(steering->esw_ingress_root_ns);
|
||||
steering->esw_ingress_root_ns = NULL;
|
||||
}
|
||||
|
||||
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
|
||||
|
@ -2505,6 +2507,7 @@ static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
|
|||
for (i--; i >= 0; i--)
|
||||
cleanup_root_ns(steering->esw_egress_root_ns[i]);
|
||||
kfree(steering->esw_egress_root_ns);
|
||||
steering->esw_egress_root_ns = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -2532,6 +2535,7 @@ static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
|
|||
for (i--; i >= 0; i--)
|
||||
cleanup_root_ns(steering->esw_ingress_root_ns[i]);
|
||||
kfree(steering->esw_ingress_root_ns);
|
||||
steering->esw_ingress_root_ns = NULL;
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -2195,6 +2195,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
|
|||
if (priv->plat->axi)
|
||||
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
|
||||
|
||||
/* DMA CSR Channel configuration */
|
||||
for (chan = 0; chan < dma_csr_ch; chan++)
|
||||
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
|
||||
|
||||
/* DMA RX Channel Configuration */
|
||||
for (chan = 0; chan < rx_channels_count; chan++) {
|
||||
rx_q = &priv->rx_queue[chan];
|
||||
|
@ -2220,10 +2224,6 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
|
|||
tx_q->tx_tail_addr, chan);
|
||||
}
|
||||
|
||||
/* DMA CSR Channel configuration */
|
||||
for (chan = 0; chan < dma_csr_ch; chan++)
|
||||
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -267,7 +267,8 @@ int stmmac_mdio_reset(struct mii_bus *bus)
|
|||
of_property_read_u32_array(np,
|
||||
"snps,reset-delays-us", data->delays, 3);
|
||||
|
||||
if (gpio_request(data->reset_gpio, "mdio-reset"))
|
||||
if (devm_gpio_request(priv->device, data->reset_gpio,
|
||||
"mdio-reset"))
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,9 @@
|
|||
#include <linux/phy.h>
|
||||
|
||||
enum {
|
||||
MV_PMA_BOOT = 0xc050,
|
||||
MV_PMA_BOOT_FATAL = BIT(0),
|
||||
|
||||
MV_PCS_BASE_T = 0x0000,
|
||||
MV_PCS_BASE_R = 0x1000,
|
||||
MV_PCS_1000BASEX = 0x2000,
|
||||
|
@ -226,6 +229,16 @@ static int mv3310_probe(struct phy_device *phydev)
|
|||
(phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask)
|
||||
return -ENODEV;
|
||||
|
||||
ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MV_PMA_BOOT);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ret & MV_PMA_BOOT_FATAL) {
|
||||
dev_warn(&phydev->mdio.dev,
|
||||
"PHY failed to boot firmware, status=%04x\n", ret);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -506,6 +506,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
|
|||
|
||||
if (netif_running (dev->net) &&
|
||||
netif_device_present (dev->net) &&
|
||||
test_bit(EVENT_DEV_OPEN, &dev->flags) &&
|
||||
!test_bit (EVENT_RX_HALT, &dev->flags) &&
|
||||
!test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
|
||||
switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
|
||||
|
@ -1431,6 +1432,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
|
|||
spin_unlock_irqrestore(&dev->txq.lock, flags);
|
||||
goto drop;
|
||||
}
|
||||
if (netif_queue_stopped(net)) {
|
||||
usb_autopm_put_interface_async(dev->intf);
|
||||
spin_unlock_irqrestore(&dev->txq.lock, flags);
|
||||
goto drop;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/* if this triggers the device is still a sleep */
|
||||
|
|
|
@ -127,8 +127,6 @@ void xen_pcibk_reset_device(struct pci_dev *dev)
|
|||
if (pci_is_enabled(dev))
|
||||
pci_disable_device(dev);
|
||||
|
||||
pci_write_config_word(dev, PCI_COMMAND, 0);
|
||||
|
||||
dev->is_busmaster = 0;
|
||||
} else {
|
||||
pci_read_config_word(dev, PCI_COMMAND, &cmd);
|
||||
|
|
|
@ -155,6 +155,10 @@ struct ftrace_likely_data {
|
|||
#define __assume_aligned(a, ...)
|
||||
#endif
|
||||
|
||||
#ifndef asm_volatile_goto
|
||||
#define asm_volatile_goto(x...) asm goto(x)
|
||||
#endif
|
||||
|
||||
/* Are two types/vars the same type (ignoring qualifiers)? */
|
||||
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#ifndef _DYNAMIC_DEBUG_H
|
||||
#define _DYNAMIC_DEBUG_H
|
||||
|
||||
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
|
||||
#if defined(CONFIG_JUMP_LABEL)
|
||||
#include <linux/jump_label.h>
|
||||
#endif
|
||||
|
||||
|
@ -38,7 +38,7 @@ struct _ddebug {
|
|||
#define _DPRINTK_FLAGS_DEFAULT 0
|
||||
#endif
|
||||
unsigned int flags:8;
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
union {
|
||||
struct static_key_true dd_key_true;
|
||||
struct static_key_false dd_key_false;
|
||||
|
@ -83,7 +83,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
|
|||
dd_key_init(key, init) \
|
||||
}
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
|
||||
#define dd_key_init(key, init) key = (init)
|
||||
|
||||
|
|
|
@ -71,10 +71,6 @@
|
|||
* Additional babbling in: Documentation/static-keys.txt
|
||||
*/
|
||||
|
||||
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
|
||||
# define HAVE_JUMP_LABEL
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
@ -86,7 +82,7 @@ extern bool static_key_initialized;
|
|||
"%s(): static key '%pS' used before call to jump_label_init()", \
|
||||
__func__, (key))
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
|
||||
struct static_key {
|
||||
atomic_t enabled;
|
||||
|
@ -114,10 +110,10 @@ struct static_key {
|
|||
struct static_key {
|
||||
atomic_t enabled;
|
||||
};
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
#include <asm/jump_label.h>
|
||||
#endif
|
||||
|
||||
|
@ -130,7 +126,7 @@ enum jump_label_type {
|
|||
|
||||
struct module;
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
|
||||
#define JUMP_TYPE_FALSE 0UL
|
||||
#define JUMP_TYPE_TRUE 1UL
|
||||
|
@ -184,7 +180,7 @@ extern void static_key_disable_cpuslocked(struct static_key *key);
|
|||
{ .enabled = { 0 }, \
|
||||
{ .entries = (void *)JUMP_TYPE_FALSE } }
|
||||
|
||||
#else /* !HAVE_JUMP_LABEL */
|
||||
#else /* !CONFIG_JUMP_LABEL */
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bug.h>
|
||||
|
@ -271,7 +267,7 @@ static inline void static_key_disable(struct static_key *key)
|
|||
#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
|
||||
#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
|
||||
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
|
||||
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
|
||||
#define jump_label_enabled static_key_enabled
|
||||
|
@ -335,7 +331,7 @@ extern bool ____wrong_branch_error(void);
|
|||
static_key_count((struct static_key *)x) > 0; \
|
||||
})
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
|
||||
/*
|
||||
* Combine the right initial value (type) with the right branch order
|
||||
|
@ -417,12 +413,12 @@ extern bool ____wrong_branch_error(void);
|
|||
unlikely(branch); \
|
||||
})
|
||||
|
||||
#else /* !HAVE_JUMP_LABEL */
|
||||
#else /* !CONFIG_JUMP_LABEL */
|
||||
|
||||
#define static_branch_likely(x) likely(static_key_enabled(&(x)->key))
|
||||
#define static_branch_unlikely(x) unlikely(static_key_enabled(&(x)->key))
|
||||
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
|
||||
/*
|
||||
* Advanced usage; refcount, branch is enabled when: count != 0
|
||||
|
|
|
@ -5,21 +5,19 @@
|
|||
#include <linux/jump_label.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
|
||||
#if defined(CONFIG_JUMP_LABEL)
|
||||
struct static_key_deferred {
|
||||
struct static_key key;
|
||||
unsigned long timeout;
|
||||
struct delayed_work work;
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
|
||||
extern void static_key_deferred_flush(struct static_key_deferred *key);
|
||||
extern void
|
||||
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
|
||||
|
||||
#else /* !HAVE_JUMP_LABEL */
|
||||
#else /* !CONFIG_JUMP_LABEL */
|
||||
struct static_key_deferred {
|
||||
struct static_key key;
|
||||
};
|
||||
|
@ -38,5 +36,5 @@ jump_label_rate_limit(struct static_key_deferred *key,
|
|||
{
|
||||
STATIC_KEY_CHECK_USE(key);
|
||||
}
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
|
||||
|
|
|
@ -439,7 +439,7 @@ struct module {
|
|||
unsigned int num_tracepoints;
|
||||
tracepoint_ptr_t *tracepoints_ptrs;
|
||||
#endif
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
struct jump_entry *jump_entries;
|
||||
unsigned int num_jump_entries;
|
||||
#endif
|
||||
|
|
|
@ -176,7 +176,7 @@ void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
|
|||
int nf_register_sockopt(struct nf_sockopt_ops *reg);
|
||||
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
|
||||
#endif
|
||||
|
||||
|
@ -198,7 +198,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
|
|||
struct nf_hook_entries *hook_head = NULL;
|
||||
int ret = 1;
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
if (__builtin_constant_p(pf) &&
|
||||
__builtin_constant_p(hook) &&
|
||||
!static_key_false(&nf_hooks_needed[pf][hook]))
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
#ifdef CONFIG_NETFILTER_INGRESS
|
||||
static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
|
||||
{
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
|
||||
return false;
|
||||
#endif
|
||||
|
|
|
@ -21,6 +21,11 @@ typedef struct {
|
|||
u64 key[2];
|
||||
} siphash_key_t;
|
||||
|
||||
static inline bool siphash_key_is_zero(const siphash_key_t *key)
|
||||
{
|
||||
return !(key->key[0] | key->key[1]);
|
||||
}
|
||||
|
||||
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
|
||||
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include <linux/uidgid.h>
|
||||
#include <net/inet_frag.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/siphash.h>
|
||||
|
||||
struct tcpm_hash_bucket;
|
||||
struct ctl_table_header;
|
||||
|
@ -215,5 +216,6 @@ struct netns_ipv4 {
|
|||
unsigned int ipmr_seq; /* protected by rtnl_mutex */
|
||||
|
||||
atomic_t rt_genid;
|
||||
siphash_key_t ip_id_key;
|
||||
};
|
||||
#endif
|
||||
|
|
|
@ -307,8 +307,10 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
|
|||
tlv_ptr = (struct tlv_desc *)tlv;
|
||||
tlv_ptr->tlv_type = htons(type);
|
||||
tlv_ptr->tlv_len = htons(tlv_len);
|
||||
if (len && data)
|
||||
memcpy(TLV_DATA(tlv_ptr), data, tlv_len);
|
||||
if (len && data) {
|
||||
memcpy(TLV_DATA(tlv_ptr), data, len);
|
||||
memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
|
||||
}
|
||||
return TLV_SPACE(len);
|
||||
}
|
||||
|
||||
|
@ -405,8 +407,10 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
|
|||
tcm_hdr->tcm_len = htonl(msg_len);
|
||||
tcm_hdr->tcm_type = htons(cmd);
|
||||
tcm_hdr->tcm_flags = htons(flags);
|
||||
if (data_len && data)
|
||||
if (data_len && data) {
|
||||
memcpy(TCM_DATA(msg), data, data_len);
|
||||
memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
|
||||
}
|
||||
return TCM_SPACE(data_len);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,9 @@ config CLANG_VERSION
|
|||
int
|
||||
default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
|
||||
|
||||
config CC_HAS_ASM_GOTO
|
||||
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
|
||||
|
||||
config CONSTRUCTORS
|
||||
bool
|
||||
depends on !UML
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
#include <linux/cpu.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
|
||||
/* mutex to protect coming/going of the the jump_label table */
|
||||
static DEFINE_MUTEX(jump_label_mutex);
|
||||
|
||||
|
@ -60,13 +58,13 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
|
|||
static void jump_label_update(struct static_key *key);
|
||||
|
||||
/*
|
||||
* There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
|
||||
* There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
|
||||
* The use of 'atomic_read()' requires atomic.h and its problematic for some
|
||||
* kernel headers such as kernel.h and others. Since static_key_count() is not
|
||||
* used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok
|
||||
* used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok
|
||||
* to have it be a function here. Similarly, for 'static_key_enable()' and
|
||||
* 'static_key_disable()', which require bug.h. This should allow jump_label.h
|
||||
* to be included from most/all places for HAVE_JUMP_LABEL.
|
||||
* to be included from most/all places for CONFIG_JUMP_LABEL.
|
||||
*/
|
||||
int static_key_count(struct static_key *key)
|
||||
{
|
||||
|
@ -796,5 +794,3 @@ static __init int jump_label_test(void)
|
|||
}
|
||||
early_initcall(jump_label_test);
|
||||
#endif /* STATIC_KEYS_SELFTEST */
|
||||
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
|
|
|
@ -3106,7 +3106,7 @@ static int find_module_sections(struct module *mod, struct load_info *info)
|
|||
sizeof(*mod->tracepoints_ptrs),
|
||||
&mod->num_tracepoints);
|
||||
#endif
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
mod->jump_entries = section_objs(info, "__jump_table",
|
||||
sizeof(*mod->jump_entries),
|
||||
&mod->num_jump_entries);
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
|
||||
|
||||
#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
|
||||
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
|
||||
/*
|
||||
* Debugging: various feature bits
|
||||
*
|
||||
|
|
|
@ -73,7 +73,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
|
||||
#define jump_label_key__true STATIC_KEY_INIT_TRUE
|
||||
#define jump_label_key__false STATIC_KEY_INIT_FALSE
|
||||
|
@ -99,7 +99,7 @@ static void sched_feat_enable(int i)
|
|||
#else
|
||||
static void sched_feat_disable(int i) { };
|
||||
static void sched_feat_enable(int i) { };
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
|
||||
static int sched_feat_set(char *cmp)
|
||||
{
|
||||
|
|
|
@ -4411,7 +4411,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
|
|||
|
||||
#ifdef CONFIG_CFS_BANDWIDTH
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
static struct static_key __cfs_bandwidth_used;
|
||||
|
||||
static inline bool cfs_bandwidth_used(void)
|
||||
|
@ -4428,7 +4428,7 @@ void cfs_bandwidth_usage_dec(void)
|
|||
{
|
||||
static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
|
||||
}
|
||||
#else /* HAVE_JUMP_LABEL */
|
||||
#else /* CONFIG_JUMP_LABEL */
|
||||
static bool cfs_bandwidth_used(void)
|
||||
{
|
||||
return true;
|
||||
|
@ -4436,7 +4436,7 @@ static bool cfs_bandwidth_used(void)
|
|||
|
||||
void cfs_bandwidth_usage_inc(void) {}
|
||||
void cfs_bandwidth_usage_dec(void) {}
|
||||
#endif /* HAVE_JUMP_LABEL */
|
||||
#endif /* CONFIG_JUMP_LABEL */
|
||||
|
||||
/*
|
||||
* default period for cfs group bandwidth.
|
||||
|
|
|
@ -1617,7 +1617,7 @@ enum {
|
|||
|
||||
#undef SCHED_FEAT
|
||||
|
||||
#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
|
||||
#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_JUMP_LABEL)
|
||||
|
||||
/*
|
||||
* To support run-time toggling of sched features, all the translation units
|
||||
|
@ -1637,7 +1637,7 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
|
|||
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
|
||||
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
|
||||
|
||||
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
|
||||
#else /* !(SCHED_DEBUG && CONFIG_JUMP_LABEL) */
|
||||
|
||||
/*
|
||||
* Each translation unit has its own copy of sysctl_sched_features to allow
|
||||
|
@ -1653,7 +1653,7 @@ static const_debug __maybe_unused unsigned int sysctl_sched_features =
|
|||
|
||||
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
|
||||
|
||||
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
|
||||
#endif /* SCHED_DEBUG && CONFIG_JUMP_LABEL */
|
||||
|
||||
extern struct static_key_false sched_numa_balancing;
|
||||
extern struct static_key_false sched_schedstats;
|
||||
|
|
|
@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query,
|
|||
newflags = (dp->flags & mask) | flags;
|
||||
if (newflags == dp->flags)
|
||||
continue;
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
if (dp->flags & _DPRINTK_FLAGS_PRINT) {
|
||||
if (!(flags & _DPRINTK_FLAGS_PRINT))
|
||||
static_branch_disable(&dp->key.dd_key_true);
|
||||
|
|
|
@ -1823,7 +1823,7 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
|
|||
#endif
|
||||
|
||||
static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
static atomic_t netstamp_needed_deferred;
|
||||
static atomic_t netstamp_wanted;
|
||||
static void netstamp_clear(struct work_struct *work)
|
||||
|
@ -1842,7 +1842,7 @@ static DECLARE_WORK(netstamp_work, netstamp_clear);
|
|||
|
||||
void net_enable_timestamp(void)
|
||||
{
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
int wanted;
|
||||
|
||||
while (1) {
|
||||
|
@ -1862,7 +1862,7 @@ EXPORT_SYMBOL(net_enable_timestamp);
|
|||
|
||||
void net_disable_timestamp(void)
|
||||
{
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
int wanted;
|
||||
|
||||
while (1) {
|
||||
|
@ -5732,7 +5732,6 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
|
|||
skb_reset_mac_header(skb);
|
||||
skb_gro_reset_offset(skb);
|
||||
|
||||
eth = skb_gro_header_fast(skb, 0);
|
||||
if (unlikely(skb_gro_header_hard(skb, hlen))) {
|
||||
eth = skb_gro_header_slow(skb, hlen, 0);
|
||||
if (unlikely(!eth)) {
|
||||
|
@ -5742,6 +5741,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
|
|||
return NULL;
|
||||
}
|
||||
} else {
|
||||
eth = (const struct ethhdr *)skb->data;
|
||||
gro_pull_from_frag0(skb, hlen);
|
||||
NAPI_GRO_CB(skb)->frag0 += hlen;
|
||||
NAPI_GRO_CB(skb)->frag0_len -= hlen;
|
||||
|
|
|
@ -190,6 +190,17 @@ static void ip_ma_put(struct ip_mc_list *im)
|
|||
pmc != NULL; \
|
||||
pmc = rtnl_dereference(pmc->next_rcu))
|
||||
|
||||
static void ip_sf_list_clear_all(struct ip_sf_list *psf)
|
||||
{
|
||||
struct ip_sf_list *next;
|
||||
|
||||
while (psf) {
|
||||
next = psf->sf_next;
|
||||
kfree(psf);
|
||||
psf = next;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IP_MULTICAST
|
||||
|
||||
/*
|
||||
|
@ -635,6 +646,13 @@ static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
|
|||
}
|
||||
}
|
||||
|
||||
static void kfree_pmc(struct ip_mc_list *pmc)
|
||||
{
|
||||
ip_sf_list_clear_all(pmc->sources);
|
||||
ip_sf_list_clear_all(pmc->tomb);
|
||||
kfree(pmc);
|
||||
}
|
||||
|
||||
static void igmpv3_send_cr(struct in_device *in_dev)
|
||||
{
|
||||
struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
|
||||
|
@ -671,7 +689,7 @@ static void igmpv3_send_cr(struct in_device *in_dev)
|
|||
else
|
||||
in_dev->mc_tomb = pmc_next;
|
||||
in_dev_put(pmc->interface);
|
||||
kfree(pmc);
|
||||
kfree_pmc(pmc);
|
||||
} else
|
||||
pmc_prev = pmc;
|
||||
}
|
||||
|
@ -1201,14 +1219,18 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
|
|||
im->interface = pmc->interface;
|
||||
if (im->sfmode == MCAST_INCLUDE) {
|
||||
im->tomb = pmc->tomb;
|
||||
pmc->tomb = NULL;
|
||||
|
||||
im->sources = pmc->sources;
|
||||
pmc->sources = NULL;
|
||||
|
||||
for (psf = im->sources; psf; psf = psf->sf_next)
|
||||
psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
|
||||
} else {
|
||||
im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
|
||||
}
|
||||
in_dev_put(pmc->interface);
|
||||
kfree(pmc);
|
||||
kfree_pmc(pmc);
|
||||
}
|
||||
spin_unlock_bh(&im->lock);
|
||||
}
|
||||
|
@ -1229,21 +1251,18 @@ static void igmpv3_clear_delrec(struct in_device *in_dev)
|
|||
nextpmc = pmc->next;
|
||||
ip_mc_clear_src(pmc);
|
||||
in_dev_put(pmc->interface);
|
||||
kfree(pmc);
|
||||
kfree_pmc(pmc);
|
||||
}
|
||||
/* clear dead sources, too */
|
||||
rcu_read_lock();
|
||||
for_each_pmc_rcu(in_dev, pmc) {
|
||||
struct ip_sf_list *psf, *psf_next;
|
||||
struct ip_sf_list *psf;
|
||||
|
||||
spin_lock_bh(&pmc->lock);
|
||||
psf = pmc->tomb;
|
||||
pmc->tomb = NULL;
|
||||
spin_unlock_bh(&pmc->lock);
|
||||
for (; psf; psf = psf_next) {
|
||||
psf_next = psf->sf_next;
|
||||
kfree(psf);
|
||||
}
|
||||
ip_sf_list_clear_all(psf);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
@ -2114,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
|
|||
|
||||
static void ip_mc_clear_src(struct ip_mc_list *pmc)
|
||||
{
|
||||
struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
|
||||
struct ip_sf_list *tomb, *sources;
|
||||
|
||||
spin_lock_bh(&pmc->lock);
|
||||
tomb = pmc->tomb;
|
||||
|
@ -2126,14 +2145,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
|
|||
pmc->sfcount[MCAST_EXCLUDE] = 1;
|
||||
spin_unlock_bh(&pmc->lock);
|
||||
|
||||
for (psf = tomb; psf; psf = nextpsf) {
|
||||
nextpsf = psf->sf_next;
|
||||
kfree(psf);
|
||||
}
|
||||
for (psf = sources; psf; psf = nextpsf) {
|
||||
nextpsf = psf->sf_next;
|
||||
kfree(psf);
|
||||
}
|
||||
ip_sf_list_clear_all(tomb);
|
||||
ip_sf_list_clear_all(sources);
|
||||
}
|
||||
|
||||
/* Join a multicast group
|
||||
|
|
|
@ -500,15 +500,17 @@ EXPORT_SYMBOL(ip_idents_reserve);
|
|||
|
||||
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
|
||||
{
|
||||
static u32 ip_idents_hashrnd __read_mostly;
|
||||
u32 hash, id;
|
||||
|
||||
net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
|
||||
/* Note the following code is not safe, but this is okay. */
|
||||
if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
|
||||
get_random_bytes(&net->ipv4.ip_id_key,
|
||||
sizeof(net->ipv4.ip_id_key));
|
||||
|
||||
hash = jhash_3words((__force u32)iph->daddr,
|
||||
hash = siphash_3u32((__force u32)iph->daddr,
|
||||
(__force u32)iph->saddr,
|
||||
iph->protocol ^ net_hash_mix(net),
|
||||
ip_idents_hashrnd);
|
||||
iph->protocol,
|
||||
&net->ipv4.ip_id_key);
|
||||
id = ip_idents_reserve(hash, segs);
|
||||
iph->id = htons(id);
|
||||
}
|
||||
|
|
|
@ -10,15 +10,25 @@
|
|||
#include <net/secure_seq.h>
|
||||
#include <linux/netfilter.h>
|
||||
|
||||
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
|
||||
static u32 __ipv6_select_ident(struct net *net,
|
||||
const struct in6_addr *dst,
|
||||
const struct in6_addr *src)
|
||||
{
|
||||
const struct {
|
||||
struct in6_addr dst;
|
||||
struct in6_addr src;
|
||||
} __aligned(SIPHASH_ALIGNMENT) combined = {
|
||||
.dst = *dst,
|
||||
.src = *src,
|
||||
};
|
||||
u32 hash, id;
|
||||
|
||||
hash = __ipv6_addr_jhash(dst, hashrnd);
|
||||
hash = __ipv6_addr_jhash(src, hash);
|
||||
hash ^= net_hash_mix(net);
|
||||
/* Note the following code is not safe, but this is okay. */
|
||||
if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
|
||||
get_random_bytes(&net->ipv4.ip_id_key,
|
||||
sizeof(net->ipv4.ip_id_key));
|
||||
|
||||
hash = siphash(&combined, sizeof(combined), &net->ipv4.ip_id_key);
|
||||
|
||||
/* Treat id of 0 as unset and if we get 0 back from ip_idents_reserve,
|
||||
* set the hight order instead thus minimizing possible future
|
||||
|
@ -41,7 +51,6 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
|
|||
*/
|
||||
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
|
||||
{
|
||||
static u32 ip6_proxy_idents_hashrnd __read_mostly;
|
||||
struct in6_addr buf[2];
|
||||
struct in6_addr *addrs;
|
||||
u32 id;
|
||||
|
@ -53,11 +62,7 @@ __be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
|
|||
if (!addrs)
|
||||
return 0;
|
||||
|
||||
net_get_random_once(&ip6_proxy_idents_hashrnd,
|
||||
sizeof(ip6_proxy_idents_hashrnd));
|
||||
|
||||
id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
|
||||
&addrs[1], &addrs[0]);
|
||||
id = __ipv6_select_ident(net, &addrs[1], &addrs[0]);
|
||||
return htonl(id);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
|
||||
|
@ -66,12 +71,9 @@ __be32 ipv6_select_ident(struct net *net,
|
|||
const struct in6_addr *daddr,
|
||||
const struct in6_addr *saddr)
|
||||
{
|
||||
static u32 ip6_idents_hashrnd __read_mostly;
|
||||
u32 id;
|
||||
|
||||
net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
|
||||
|
||||
id = __ipv6_select_ident(net, ip6_idents_hashrnd, daddr, saddr);
|
||||
id = __ipv6_select_ident(net, daddr, saddr);
|
||||
return htonl(id);
|
||||
}
|
||||
EXPORT_SYMBOL(ipv6_select_ident);
|
||||
|
|
|
@ -288,7 +288,9 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
|
|||
/* Binding to link-local address requires an interface */
|
||||
if (!sk->sk_bound_dev_if)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (sk->sk_bound_dev_if) {
|
||||
err = -ENODEV;
|
||||
dev = dev_get_by_index_rcu(sock_net(sk),
|
||||
sk->sk_bound_dev_if);
|
||||
|
|
|
@ -2480,6 +2480,12 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
|
|||
struct fib6_info *rt;
|
||||
struct fib6_node *fn;
|
||||
|
||||
/* l3mdev_update_flow overrides oif if the device is enslaved; in
|
||||
* this case we must match on the real ingress device, so reset it
|
||||
*/
|
||||
if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
|
||||
fl6->flowi6_oif = skb->dev->ifindex;
|
||||
|
||||
/* Get the "current" route for this destination and
|
||||
* check if the redirect has come from appropriate router.
|
||||
*
|
||||
|
|
|
@ -72,6 +72,8 @@ int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
|
|||
rc = llc_mac_hdr_init(skb, skb->dev->dev_addr, dmac);
|
||||
if (likely(!rc))
|
||||
rc = dev_queue_xmit(skb);
|
||||
else
|
||||
kfree_skb(skb);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(nf_ipv6_ops);
|
|||
DEFINE_PER_CPU(bool, nf_skb_duplicated);
|
||||
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
|
||||
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
|
||||
EXPORT_SYMBOL(nf_hooks_needed);
|
||||
#endif
|
||||
|
@ -347,7 +347,7 @@ static int __nf_register_net_hook(struct net *net, int pf,
|
|||
if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
|
||||
net_inc_ingress_queue();
|
||||
#endif
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
|
||||
#endif
|
||||
BUG_ON(p == new_hooks);
|
||||
|
@ -405,7 +405,7 @@ static void __nf_unregister_net_hook(struct net *net, int pf,
|
|||
if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
|
||||
net_dec_ingress_queue();
|
||||
#endif
|
||||
#ifdef HAVE_JUMP_LABEL
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
|
||||
#endif
|
||||
} else {
|
||||
|
|
|
@ -744,7 +744,7 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
|
|||
|
||||
for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
|
||||
a = actions[i];
|
||||
nest = nla_nest_start(skb, a->order);
|
||||
nest = nla_nest_start(skb, i + 1);
|
||||
if (nest == NULL)
|
||||
goto nla_put_failure;
|
||||
err = tcf_action_dump_1(skb, a, bind, ref);
|
||||
|
@ -1257,7 +1257,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
|
|||
ret = PTR_ERR(act);
|
||||
goto err;
|
||||
}
|
||||
act->order = i;
|
||||
attr_size += tcf_action_fill_size(act);
|
||||
actions[i - 1] = act;
|
||||
}
|
||||
|
|
|
@ -66,10 +66,6 @@ static int __net_init tipc_init_net(struct net *net)
|
|||
INIT_LIST_HEAD(&tn->node_list);
|
||||
spin_lock_init(&tn->node_list_lock);
|
||||
|
||||
err = tipc_socket_init();
|
||||
if (err)
|
||||
goto out_socket;
|
||||
|
||||
err = tipc_sk_rht_init(net);
|
||||
if (err)
|
||||
goto out_sk_rht;
|
||||
|
@ -79,9 +75,6 @@ static int __net_init tipc_init_net(struct net *net)
|
|||
goto out_nametbl;
|
||||
|
||||
INIT_LIST_HEAD(&tn->dist_queue);
|
||||
err = tipc_topsrv_start(net);
|
||||
if (err)
|
||||
goto out_subscr;
|
||||
|
||||
err = tipc_bcast_init(net);
|
||||
if (err)
|
||||
|
@ -90,25 +83,19 @@ static int __net_init tipc_init_net(struct net *net)
|
|||
return 0;
|
||||
|
||||
out_bclink:
|
||||
tipc_bcast_stop(net);
|
||||
out_subscr:
|
||||
tipc_nametbl_stop(net);
|
||||
out_nametbl:
|
||||
tipc_sk_rht_destroy(net);
|
||||
out_sk_rht:
|
||||
tipc_socket_stop();
|
||||
out_socket:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void __net_exit tipc_exit_net(struct net *net)
|
||||
{
|
||||
tipc_topsrv_stop(net);
|
||||
tipc_net_stop(net);
|
||||
tipc_bcast_stop(net);
|
||||
tipc_nametbl_stop(net);
|
||||
tipc_sk_rht_destroy(net);
|
||||
tipc_socket_stop();
|
||||
}
|
||||
|
||||
static struct pernet_operations tipc_net_ops = {
|
||||
|
@ -118,6 +105,11 @@ static struct pernet_operations tipc_net_ops = {
|
|||
.size = sizeof(struct tipc_net),
|
||||
};
|
||||
|
||||
static struct pernet_operations tipc_topsrv_net_ops = {
|
||||
.init = tipc_topsrv_init_net,
|
||||
.exit = tipc_topsrv_exit_net,
|
||||
};
|
||||
|
||||
static int __init tipc_init(void)
|
||||
{
|
||||
int err;
|
||||
|
@ -144,6 +136,14 @@ static int __init tipc_init(void)
|
|||
if (err)
|
||||
goto out_pernet;
|
||||
|
||||
err = tipc_socket_init();
|
||||
if (err)
|
||||
goto out_socket;
|
||||
|
||||
err = register_pernet_subsys(&tipc_topsrv_net_ops);
|
||||
if (err)
|
||||
goto out_pernet_topsrv;
|
||||
|
||||
err = tipc_bearer_setup();
|
||||
if (err)
|
||||
goto out_bearer;
|
||||
|
@ -151,6 +151,10 @@ static int __init tipc_init(void)
|
|||
pr_info("Started in single node mode\n");
|
||||
return 0;
|
||||
out_bearer:
|
||||
unregister_pernet_subsys(&tipc_topsrv_net_ops);
|
||||
out_pernet_topsrv:
|
||||
tipc_socket_stop();
|
||||
out_socket:
|
||||
unregister_pernet_subsys(&tipc_net_ops);
|
||||
out_pernet:
|
||||
tipc_unregister_sysctl();
|
||||
|
@ -166,6 +170,8 @@ static int __init tipc_init(void)
|
|||
static void __exit tipc_exit(void)
|
||||
{
|
||||
tipc_bearer_cleanup();
|
||||
unregister_pernet_subsys(&tipc_topsrv_net_ops);
|
||||
tipc_socket_stop();
|
||||
unregister_pernet_subsys(&tipc_net_ops);
|
||||
tipc_netlink_stop();
|
||||
tipc_netlink_compat_stop();
|
||||
|
|
|
@ -77,8 +77,9 @@ void tipc_sub_report_overlap(struct tipc_subscription *sub,
|
|||
u32 found_lower, u32 found_upper,
|
||||
u32 event, u32 port, u32 node,
|
||||
u32 scope, int must);
|
||||
int tipc_topsrv_start(struct net *net);
|
||||
void tipc_topsrv_stop(struct net *net);
|
||||
|
||||
int __net_init tipc_topsrv_init_net(struct net *net);
|
||||
void __net_exit tipc_topsrv_exit_net(struct net *net);
|
||||
|
||||
void tipc_sub_put(struct tipc_subscription *subscription);
|
||||
void tipc_sub_get(struct tipc_subscription *subscription);
|
||||
|
|
|
@ -643,7 +643,7 @@ static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
|
|||
destroy_workqueue(s->send_wq);
|
||||
}
|
||||
|
||||
int tipc_topsrv_start(struct net *net)
|
||||
static int tipc_topsrv_start(struct net *net)
|
||||
{
|
||||
struct tipc_net *tn = tipc_net(net);
|
||||
const char name[] = "topology_server";
|
||||
|
@ -677,7 +677,7 @@ int tipc_topsrv_start(struct net *net)
|
|||
return ret;
|
||||
}
|
||||
|
||||
void tipc_topsrv_stop(struct net *net)
|
||||
static void tipc_topsrv_stop(struct net *net)
|
||||
{
|
||||
struct tipc_topsrv *srv = tipc_topsrv(net);
|
||||
struct socket *lsock = srv->listener;
|
||||
|
@ -702,3 +702,13 @@ void tipc_topsrv_stop(struct net *net)
|
|||
idr_destroy(&srv->conn_idr);
|
||||
kfree(srv);
|
||||
}
|
||||
|
||||
int __net_init tipc_topsrv_init_net(struct net *net)
|
||||
{
|
||||
return tipc_topsrv_start(net);
|
||||
}
|
||||
|
||||
void __net_exit tipc_topsrv_exit_net(struct net *net)
|
||||
{
|
||||
tipc_topsrv_stop(net);
|
||||
}
|
||||
|
|
|
@ -916,12 +916,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
|
|||
if (!netdev)
|
||||
goto out;
|
||||
|
||||
if (!(netdev->features & NETIF_F_HW_TLS_RX)) {
|
||||
pr_err_ratelimited("%s: device is missing NETIF_F_HW_TLS_RX cap\n",
|
||||
__func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
netdev->tlsdev_ops->tls_dev_del(netdev, tls_ctx,
|
||||
TLS_OFFLOAD_CTX_DIR_RX);
|
||||
|
||||
|
@ -980,7 +974,8 @@ static int tls_dev_event(struct notifier_block *this, unsigned long event,
|
|||
{
|
||||
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
||||
|
||||
if (!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
|
||||
if (!dev->tlsdev_ops &&
|
||||
!(dev->features & (NETIF_F_HW_TLS_RX | NETIF_F_HW_TLS_TX)))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (event) {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
# Test for gcc 'asm goto' support
|
||||
# Copyright (C) 2010, Jason Baron <jbaron@redhat.com>
|
||||
|
||||
cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
|
||||
cat << "END" | $@ -x c - -fno-PIE -c -o /dev/null
|
||||
int main(void)
|
||||
{
|
||||
#if defined(__arm__) || defined(__aarch64__)
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
|
||||
#define _TOOLS_LINUX_ASM_X86_RMWcc
|
||||
|
||||
#ifdef CC_HAVE_ASM_GOTO
|
||||
#ifdef CONFIG_CC_HAS_ASM_GOTO
|
||||
|
||||
#define __GEN_RMWcc(fullop, var, cc, ...) \
|
||||
do { \
|
||||
|
@ -20,7 +20,7 @@ cc_label: \
|
|||
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
|
||||
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
|
||||
|
||||
#else /* !CC_HAVE_ASM_GOTO */
|
||||
#else /* !CONFIG_CC_HAS_ASM_GOTO */
|
||||
|
||||
#define __GEN_RMWcc(fullop, var, cc, ...) \
|
||||
do { \
|
||||
|
@ -37,6 +37,6 @@ do { \
|
|||
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
|
||||
__GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
|
||||
|
||||
#endif /* CC_HAVE_ASM_GOTO */
|
||||
#endif /* CONFIG_CC_HAS_ASM_GOTO */
|
||||
|
||||
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
|
||||
|
|