This is the 4.19.37 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlzEBokACgkQONu9yGCS
aT7G7w/8C93URGM67H7ynkCHTo8y3hkRE2rUJPckJNdS+IJKuecmOphak4tF0h07
qPWDPya70Q1S0cNu661TuVAGrhmE5jBx8/xfZaAOeaaU0xtZive+TfSHdAQQaHct
tDk32O85N1aZ49rDEz9ibr7CGLVFDZtyhxV5gFMYQpjbqA7MzJC61zQg1jHyPSCz
sKjQzW+uXMuSLru8jXHMvp41K5sFFp5gYdQbAVKlWtt79qPxWdxZPJbLbM0LBbtz
XHt9E45Ink3ALF9P6tZ4e6gi4zzlNbh9yR92+X5NK5/8AP57yWba4W9JHWIfMBpC
yyDYTOEAzdxqa2Jrgwr4WTdKH6U7FbQZFmWfTBB4VotbHLBWkVXj0OnF10qxP9eQ
p5wGDTJAlWezhX1BTCfYroglDsvqhj+gHfwHzDRF1Del1dRgydRMQc0qLD1d9tul
ovzwOkx1xyJrM2wq05I5gc0FoVyOL6/KCwqMrpVfKa3WKY7Uttjgf56bMqdIIkns
i/6opzF+wtvwlLlCoXgYPXdm6kbWdgvS+skVHfWcHmZFMuGrFGGzJNwzXb7qnVjK
T0hD1OestsfTyD/amnDNYkNeCkoOZqtHAi+xYOQR4kGY5cxP1lQJf85MgAy6RZSY
h+rjys76Qf6+hTCtrowLr8SgksX4ACWxm+UarfAiiNnnDXwGfu8=
=SrFV
-----END PGP SIGNATURE-----

Merge 4.19.37 into android-4.19-q

Changes in 4.19.37
    bonding: fix event handling for stacked bonds
    failover: allow name change on IFF_UP slave interfaces
    net: atm: Fix potential Spectre v1 vulnerabilities
    net: bridge: fix per-port af_packet sockets
    net: bridge: multicast: use rcu to access port list from br_multicast_start_querier
    net: Fix missing meta data in skb with vlan packet
    net: fou: do not use guehdr after iptunnel_pull_offloads in gue_udp_recv
    tcp: tcp_grow_window() needs to respect tcp_space()
    team: set slave to promisc if team is already in promisc mode
    tipc: missing entries in name table of publications
    vhost: reject zero size iova range
    ipv4: recompile ip options in ipv4_link_failure
    ipv4: ensure rcu_read_lock() in ipv4_link_failure()
    net: thunderx: raise XDP MTU to 1508
    net: thunderx: don't allow jumbo frames with XDP
    net/mlx5: FPGA, tls, hold rcu read lock a bit longer
    net/tls: prevent bad memory access in tls_is_sk_tx_device_offloaded()
    net/mlx5: FPGA, tls, idr remove on flow delete
    route: Avoid crash from dereferencing NULL rt->from
    sch_cake: Use tc_skb_protocol() helper for getting packet protocol
    sch_cake: Make sure we can write the IP header before changing DSCP bits
    nfp: flower: replace CFI with vlan present
    nfp: flower: remove vlan CFI bit from push vlan action
    sch_cake: Simplify logic in cake_select_tin()
    net: IP defrag: encapsulate rbtree defrag code into callable functions
    net: IP6 defrag: use rbtrees for IPv6 defrag
    net: IP6 defrag: use rbtrees in nf_conntrack_reasm.c
    CIFS: keep FileInfo handle live during oplock break
    cifs: Fix use-after-free in SMB2_write
    cifs: Fix use-after-free in SMB2_read
    cifs: fix handle leak in smb2_query_symlink()
    KVM: x86: Don't clear EFER during SMM transitions for 32-bit vCPU
    KVM: x86: svm: make sure NMI is injected after nmi_singlestep
    Staging: iio: meter: fixed typo
    staging: iio: ad7192: Fix ad7193 channel address
    iio: gyro: mpu3050: fix chip ID reading
    iio/gyro/bmg160: Use millidegrees for temperature scale
    iio:chemical:bme680: Fix, report temperature in millidegrees
    iio:chemical:bme680: Fix SPI read interface
    iio: cros_ec: Fix the maths for gyro scale calculation
    iio: ad_sigma_delta: select channel when reading register
    iio: dac: mcp4725: add missing powerdown bits in store eeprom
    iio: Fix scan mask selection
    iio: adc: at91: disable adc channel interrupt in timeout case
    iio: core: fix a possible circular locking dependency
    io: accel: kxcjk1013: restore the range after resume.
    staging: most: core: use device description as name
    staging: comedi: vmk80xx: Fix use of uninitialized semaphore
    staging: comedi: vmk80xx: Fix possible double-free of ->usb_rx_buf
    staging: comedi: ni_usb6501: Fix use of uninitialized mutex
    staging: comedi: ni_usb6501: Fix possible double-free of ->usb_rx_buf
    ALSA: hda/realtek - add two more pin configuration sets to quirk table
    ALSA: core: Fix card races between register and disconnect
    Input: elan_i2c - add hardware ID for multiple Lenovo laptops
    serial: sh-sci: Fix HSCIF RX sampling point adjustment
    serial: sh-sci: Fix HSCIF RX sampling point calculation
    vt: fix cursor when clearing the screen
    scsi: core: set result when the command cannot be dispatched
    Revert "scsi: fcoe: clear FC_RP_STARTED flags when receiving a LOGO"
    Revert "svm: Fix AVIC incomplete IPI emulation"
    coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
    ipmi: fix sleep-in-atomic in free_user at cleanup SRCU user->release_barrier
    crypto: x86/poly1305 - fix overflow during partial reduction
    drm/ttm: fix out-of-bounds read in ttm_put_pages() v2
    arm64: futex: Restore oldval initialization to work around buggy compilers
    x86/kprobes: Verify stack frame on kretprobe
    kprobes: Mark ftrace mcount handler functions nokprobe
    kprobes: Fix error check when reusing optimized probes
    rt2x00: do not increment sequence number while re-transmitting
    mac80211: do not call driver wake_tx_queue op during reconfig
    drm/amdgpu/gmc9: fix VM_L2_CNTL3 programming
    perf/x86/amd: Add event map for AMD Family 17h
    x86/cpu/bugs: Use __initconst for 'const' init data
    perf/x86: Fix incorrect PEBS_REGS
    x86/speculation: Prevent deadlock on ssb_state::lock
    timers/sched_clock: Prevent generic sched_clock wrap caused by tick_freeze()
    nfit/ars: Remove ars_start_flags
    nfit/ars: Introduce scrub_flags
    nfit/ars: Allow root to busy-poll the ARS state machine
    nfit/ars: Avoid stale ARS results
    mmc: sdhci: Fix data command CRC error handling
    mmc: sdhci: Rename SDHCI_ACMD12_ERR and SDHCI_INT_ACMD12ERR
    mmc: sdhci: Handle auto-command errors
    modpost: file2alias: go back to simple devtable lookup
    modpost: file2alias: check prototype of handler
    tpm/tpm_i2c_atmel: Return -E2BIG when the transfer is incomplete
    tpm: Fix the type of the return value in calc_tpm2_event_size()
    Revert "kbuild: use -Oz instead of -Os when using clang"
    sched/fair: Limit sched_cfs_period_timer() loop to avoid hard lockup
    device_cgroup: fix RCU imbalance in error case
    mm/vmstat.c: fix /proc/vmstat format for CONFIG_DEBUG_TLBFLUSH=y CONFIG_SMP=n
    ALSA: info: Fix racy addition/deletion of nodes
    percpu: stop printing kernel addresses
    tools include: Adopt linux/bits.h
    ASoC: rockchip: add missing INTERLEAVED PCM attribute
    i2c-hid: properly terminate i2c_hid_dmi_desc_override_table[] array
    Revert "locking/lockdep: Add debug_locks check in __lock_downgrade()"
    kernel/sysctl.c: fix out-of-bounds access when setting file-max
    Linux 4.19.37

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 135cae83e1
109 changed files with 1504 additions and 1152 deletions
Makefile | 5 ++---

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 36
+SUBLEVEL = 37
 EXTRAVERSION =
 NAME = "People's Front"

@@ -658,8 +658,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
 else
 ifdef CONFIG_PROFILE_ALL_BRANCHES
 KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
arch/arm64/include/asm/futex.h

@@ -50,7 +50,7 @@ do { \
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
 {
-	int oldval, ret, tmp;
+	int oldval = 0, ret, tmp;
 	u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
 
 	pagefault_disable();
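A note on the arm64 futex hunk above: the "= 0" initializer is restored because arch_futex_atomic_op_inuser() can return through the fault path without the inline asm ever storing to oldval, and the changelog entry ("arm64: futex: Restore oldval initialization to work around buggy compilers") blames compilers that warn or miscompile on that pattern. A minimal user-space C sketch of the pattern, with a hypothetical helper name that is not kernel code:

/*
 * Sketch only: do_op() stands in for the futex atomic helper. If it can
 * bail out before writing *out, the caller reads a value the compiler
 * cannot prove was ever stored, which is what -Wmaybe-uninitialized (and
 * the buggy compilers mentioned in the changelog) trips over.
 */
#include <stdio.h>

static int do_op(int op, int *out)
{
	if (op < 0)
		return -1;	/* early exit: *out is never written */
	*out = op * 2;		/* normal path stores the result */
	return 0;
}

int main(void)
{
	int oldval = 0;		/* the initializer the patch restores */
	int ret = do_op(-1, &oldval);

	/* Without "= 0" above, oldval would be read uninitialized here. */
	printf("ret=%d oldval=%d\n", ret, oldval);
	return 0;
}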
@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
|
|||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d4
|
||||
|
||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
||||
# integers. It's true in a single-block implementation, but not here.
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
|
@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
|
|||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
|
|
|
@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
|
|||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
|
@ -520,6 +520,12 @@ ENTRY(poly1305_2block_sse2)
|
|||
paddq t2,t1
|
||||
movq t1,d4
|
||||
|
||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
||||
# integers. It's true in a single-block implementation, but not here.
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
|
@ -558,16 +564,16 @@ ENTRY(poly1305_2block_sse2)
|
|||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
|
|
|
@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
|
|||
};
|
||||
|
||||
/*
|
||||
* AMD Performance Monitor K7 and later.
|
||||
* AMD Performance Monitor K7 and later, up to and including Family 16h:
|
||||
*/
|
||||
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
|
||||
{
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
|
||||
};
|
||||
|
||||
/*
|
||||
* AMD Performance Monitor Family 17h and later:
|
||||
*/
|
||||
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
|
||||
{
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
|
||||
};
|
||||
|
||||
static u64 amd_pmu_event_map(int hw_event)
|
||||
{
|
||||
if (boot_cpu_data.x86 >= 0x17)
|
||||
return amd_f17h_perfmon_event_map[hw_event];
|
||||
|
||||
return amd_perfmon_event_map[hw_event];
|
||||
}
|
||||
|
||||
|
|
|
arch/x86/events/intel/core.c

@@ -3014,7 +3014,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
 	flags &= ~PERF_SAMPLE_TIME;
 	if (!event->attr.exclude_kernel)
 		flags &= ~PERF_SAMPLE_REGS_USER;
-	if (event->attr.sample_regs_user & ~PEBS_REGS)
+	if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
 		flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
 	return flags;
 }
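Context for the PEBS_REGS -> PEBS_GP_REGS rename above (the new macro itself is defined in the perf_event.h hunk further down): the PERF_REG_X86_* constants are register indices, not single-bit masks, so a register bitmap has to be built from 1ULL << index terms. A small stand-alone C sketch of the difference; the enum values here are illustrative, not the kernel's exact numbering:

/*
 * Sketch of why the mask must use (1ULL << reg): OR-ing raw indices only
 * mixes small integers, so high-numbered registers are never representable
 * and valid requests get rejected.
 */
#include <stdio.h>

enum { REG_AX = 0, REG_BX = 1, REG_CX = 2, REG_R15 = 23 };	/* illustrative */

int main(void)
{
	unsigned long long broken = REG_AX | REG_BX | REG_CX | REG_R15;	/* == 23 */
	unsigned long long fixed  = (1ULL << REG_AX) | (1ULL << REG_BX) |
				    (1ULL << REG_CX) | (1ULL << REG_R15);
	unsigned long long requested = 1ULL << REG_R15;	/* user asks for R15 */

	printf("broken mask rejects R15: %d\n", !!(requested & ~broken));
	printf("fixed mask rejects R15:  %d\n", !!(requested & ~fixed));
	return 0;
}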
|
|
|
@ -96,25 +96,25 @@ struct amd_nb {
|
|||
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
|
||||
PERF_SAMPLE_PERIOD)
|
||||
|
||||
#define PEBS_REGS \
|
||||
(PERF_REG_X86_AX | \
|
||||
PERF_REG_X86_BX | \
|
||||
PERF_REG_X86_CX | \
|
||||
PERF_REG_X86_DX | \
|
||||
PERF_REG_X86_DI | \
|
||||
PERF_REG_X86_SI | \
|
||||
PERF_REG_X86_SP | \
|
||||
PERF_REG_X86_BP | \
|
||||
PERF_REG_X86_IP | \
|
||||
PERF_REG_X86_FLAGS | \
|
||||
PERF_REG_X86_R8 | \
|
||||
PERF_REG_X86_R9 | \
|
||||
PERF_REG_X86_R10 | \
|
||||
PERF_REG_X86_R11 | \
|
||||
PERF_REG_X86_R12 | \
|
||||
PERF_REG_X86_R13 | \
|
||||
PERF_REG_X86_R14 | \
|
||||
PERF_REG_X86_R15)
|
||||
#define PEBS_GP_REGS \
|
||||
((1ULL << PERF_REG_X86_AX) | \
|
||||
(1ULL << PERF_REG_X86_BX) | \
|
||||
(1ULL << PERF_REG_X86_CX) | \
|
||||
(1ULL << PERF_REG_X86_DX) | \
|
||||
(1ULL << PERF_REG_X86_DI) | \
|
||||
(1ULL << PERF_REG_X86_SI) | \
|
||||
(1ULL << PERF_REG_X86_SP) | \
|
||||
(1ULL << PERF_REG_X86_BP) | \
|
||||
(1ULL << PERF_REG_X86_IP) | \
|
||||
(1ULL << PERF_REG_X86_FLAGS) | \
|
||||
(1ULL << PERF_REG_X86_R8) | \
|
||||
(1ULL << PERF_REG_X86_R9) | \
|
||||
(1ULL << PERF_REG_X86_R10) | \
|
||||
(1ULL << PERF_REG_X86_R11) | \
|
||||
(1ULL << PERF_REG_X86_R12) | \
|
||||
(1ULL << PERF_REG_X86_R13) | \
|
||||
(1ULL << PERF_REG_X86_R14) | \
|
||||
(1ULL << PERF_REG_X86_R15))
|
||||
|
||||
/*
|
||||
* Per register state.
|
||||
|
|
|
arch/x86/kernel/cpu/bugs.c

@@ -272,7 +272,7 @@ static const struct {
 	const char			*option;
 	enum spectre_v2_user_cmd	cmd;
 	bool				secure;
-} v2_user_options[] __initdata = {
+} v2_user_options[] __initconst = {
 	{ "auto",	SPECTRE_V2_USER_CMD_AUTO,	false },
 	{ "off",	SPECTRE_V2_USER_CMD_NONE,	false },
 	{ "on",		SPECTRE_V2_USER_CMD_FORCE,	true },

@@ -407,7 +407,7 @@ static const struct {
 	const char *option;
 	enum spectre_v2_mitigation_cmd cmd;
 	bool secure;
-} mitigation_options[] __initdata = {
+} mitigation_options[] __initconst = {
 	{ "off",	SPECTRE_V2_CMD_NONE,		false },
 	{ "on",		SPECTRE_V2_CMD_FORCE,		true },
 	{ "retpoline",	SPECTRE_V2_CMD_RETPOLINE,	false },

@@ -643,7 +643,7 @@ static const char * const ssb_strings[] = {
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] __initdata = {
+} ssb_mitigation_options[] __initconst = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },	/* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },	/* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },	/* Don't touch Speculative Store Bypass */
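The three bugs.c hunks above move const option tables from __initdata to __initconst; in the kernel, const init data has to use the read-only variant, otherwise the const qualifier and the writable .init.data section can clash (a section type conflict with some toolchains). A minimal user-space sketch of the annotation pattern, with simplified macro definitions standing in for the real ones in include/linux/init.h:

#include <stdio.h>

/* Simplified stand-ins for the kernel's markers, for illustration only. */
#define __initdata  __attribute__((__section__(".init.data")))
#define __initconst __attribute__((__section__(".init.rodata")))

struct opt { const char *name; int value; };

/* const data paired with the read-only init section, as in the hunks above */
static const struct opt options[] __initconst = {
	{ "on",  1 },
	{ "off", 0 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(options) / sizeof(options[0]); i++)
		printf("%s -> %d\n", options[i].name, options[i].value);
	return 0;
}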
|
|
@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
|
|||
unsigned long *sara = stack_addr(regs);
|
||||
|
||||
ri->ret_addr = (kprobe_opcode_t *) *sara;
|
||||
ri->fp = sara;
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
*sara = (unsigned long) &kretprobe_trampoline;
|
||||
|
@ -759,15 +760,21 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
|
|||
unsigned long flags, orig_ret_address = 0;
|
||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||
kprobe_opcode_t *correct_ret_addr = NULL;
|
||||
void *frame_pointer;
|
||||
bool skipped = false;
|
||||
|
||||
INIT_HLIST_HEAD(&empty_rp);
|
||||
kretprobe_hash_lock(current, &head, &flags);
|
||||
/* fixup registers */
|
||||
#ifdef CONFIG_X86_64
|
||||
regs->cs = __KERNEL_CS;
|
||||
/* On x86-64, we use pt_regs->sp for return address holder. */
|
||||
frame_pointer = ®s->sp;
|
||||
#else
|
||||
regs->cs = __KERNEL_CS | get_kernel_rpl();
|
||||
regs->gs = 0;
|
||||
/* On x86-32, we use pt_regs->flags for return address holder. */
|
||||
frame_pointer = ®s->flags;
|
||||
#endif
|
||||
regs->ip = trampoline_address;
|
||||
regs->orig_ax = ~0UL;
|
||||
|
@ -789,8 +796,25 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
|
|||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
/*
|
||||
* Return probes must be pushed on this hash list correct
|
||||
* order (same as return order) so that it can be poped
|
||||
* correctly. However, if we find it is pushed it incorrect
|
||||
* order, this means we find a function which should not be
|
||||
* probed, because the wrong order entry is pushed on the
|
||||
* path of processing other kretprobe itself.
|
||||
*/
|
||||
if (ri->fp != frame_pointer) {
|
||||
if (!skipped)
|
||||
pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
|
||||
skipped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (skipped)
|
||||
pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
|
||||
ri->rp->kp.addr);
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
|
@ -808,6 +832,8 @@ __visible __used void *trampoline_handler(struct pt_regs *regs)
|
|||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
if (ri->fp != frame_pointer)
|
||||
continue;
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
|
|
|
@ -411,6 +411,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
|
|||
u64 msr = x86_spec_ctrl_base;
|
||||
bool updmsr = false;
|
||||
|
||||
lockdep_assert_irqs_disabled();
|
||||
|
||||
/*
|
||||
* If TIF_SSBD is different, select the proper mitigation
|
||||
* method. Note that if SSBD mitigation is disabled or permanentely
|
||||
|
@ -462,10 +464,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
|
|||
|
||||
void speculation_ctrl_update(unsigned long tif)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* Forced update. Make sure all relevant TIF flags are different */
|
||||
preempt_disable();
|
||||
local_irq_save(flags);
|
||||
__speculation_ctrl_update(~tif, tif);
|
||||
preempt_enable();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/* Called from seccomp/prctl update */
|
||||
|
|
|
@ -2575,15 +2575,13 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
|
|||
* CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
|
||||
* supports long mode.
|
||||
*/
|
||||
cr4 = ctxt->ops->get_cr(ctxt, 4);
|
||||
if (emulator_has_longmode(ctxt)) {
|
||||
struct desc_struct cs_desc;
|
||||
|
||||
/* Zero CR4.PCIDE before CR0.PG. */
|
||||
if (cr4 & X86_CR4_PCIDE) {
|
||||
cr4 = ctxt->ops->get_cr(ctxt, 4);
|
||||
if (cr4 & X86_CR4_PCIDE)
|
||||
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
|
||||
cr4 &= ~X86_CR4_PCIDE;
|
||||
}
|
||||
|
||||
/* A 32-bit code segment is required to clear EFER.LMA. */
|
||||
memset(&cs_desc, 0, sizeof(cs_desc));
|
||||
|
@ -2597,13 +2595,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
|
|||
if (cr0 & X86_CR0_PE)
|
||||
ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
|
||||
|
||||
/* Now clear CR4.PAE (which must be done before clearing EFER.LME). */
|
||||
if (cr4 & X86_CR4_PAE)
|
||||
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
|
||||
if (emulator_has_longmode(ctxt)) {
|
||||
/* Clear CR4.PAE before clearing EFER.LME. */
|
||||
cr4 = ctxt->ops->get_cr(ctxt, 4);
|
||||
if (cr4 & X86_CR4_PAE)
|
||||
ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
|
||||
|
||||
/* And finally go back to 32-bit mode. */
|
||||
efer = 0;
|
||||
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
|
||||
/* And finally go back to 32-bit mode. */
|
||||
efer = 0;
|
||||
ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
|
||||
}
|
||||
|
||||
smbase = ctxt->ops->get_smbase(ctxt);
|
||||
|
||||
|
|
|
@ -2679,6 +2679,7 @@ static int npf_interception(struct vcpu_svm *svm)
|
|||
static int db_interception(struct vcpu_svm *svm)
|
||||
{
|
||||
struct kvm_run *kvm_run = svm->vcpu.run;
|
||||
struct kvm_vcpu *vcpu = &svm->vcpu;
|
||||
|
||||
if (!(svm->vcpu.guest_debug &
|
||||
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
|
||||
|
@ -2689,6 +2690,8 @@ static int db_interception(struct vcpu_svm *svm)
|
|||
|
||||
if (svm->nmi_singlestep) {
|
||||
disable_nmi_singlestep(svm);
|
||||
/* Make sure we check for pending NMIs upon entry */
|
||||
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
||||
}
|
||||
|
||||
if (svm->vcpu.guest_debug &
|
||||
|
@ -4493,14 +4496,25 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
|
|||
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
|
||||
break;
|
||||
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
|
||||
int i;
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm *kvm = svm->vcpu.kvm;
|
||||
struct kvm_lapic *apic = svm->vcpu.arch.apic;
|
||||
|
||||
/*
|
||||
* Update ICR high and low, then emulate sending IPI,
|
||||
* which is handled when writing APIC_ICR.
|
||||
* At this point, we expect that the AVIC HW has already
|
||||
* set the appropriate IRR bits on the valid target
|
||||
* vcpus. So, we just need to kick the appropriate vcpu.
|
||||
*/
|
||||
kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
|
||||
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
|
||||
kvm_for_each_vcpu(i, vcpu, kvm) {
|
||||
bool m = kvm_apic_match_dest(vcpu, apic,
|
||||
icrl & KVM_APIC_SHORT_MASK,
|
||||
GET_APIC_DEST_FIELD(icrh),
|
||||
icrl & KVM_APIC_DEST_MASK);
|
||||
|
||||
if (m && !avic_vcpu_is_running(vcpu))
|
||||
kvm_vcpu_wake_up(vcpu);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case AVIC_IPI_FAILURE_INVALID_TARGET:
|
||||
|
|
|
@ -5592,7 +5592,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
|
|||
.psize = 80,
|
||||
.digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
|
||||
"\x00\x00\x00\x00\x00\x00\x00\x00",
|
||||
},
|
||||
}, { /* Regression test for overflow in AVX2 implementation */
|
||||
.plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff",
|
||||
.psize = 300,
|
||||
.digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
|
||||
"\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
|
||||
}
|
||||
};
|
||||
|
||||
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
|
||||
|
|
|
@ -1298,19 +1298,30 @@ static ssize_t scrub_show(struct device *dev,
|
|||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct nvdimm_bus_descriptor *nd_desc;
|
||||
struct acpi_nfit_desc *acpi_desc;
|
||||
ssize_t rc = -ENXIO;
|
||||
bool busy;
|
||||
|
||||
device_lock(dev);
|
||||
nd_desc = dev_get_drvdata(dev);
|
||||
if (nd_desc) {
|
||||
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
|
||||
|
||||
mutex_lock(&acpi_desc->init_mutex);
|
||||
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
|
||||
acpi_desc->scrub_busy
|
||||
&& !acpi_desc->cancel ? "+\n" : "\n");
|
||||
mutex_unlock(&acpi_desc->init_mutex);
|
||||
if (!nd_desc) {
|
||||
device_unlock(dev);
|
||||
return rc;
|
||||
}
|
||||
acpi_desc = to_acpi_desc(nd_desc);
|
||||
|
||||
mutex_lock(&acpi_desc->init_mutex);
|
||||
busy = test_bit(ARS_BUSY, &acpi_desc->scrub_flags)
|
||||
&& !test_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
|
||||
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, busy ? "+\n" : "\n");
|
||||
/* Allow an admin to poll the busy state at a higher rate */
|
||||
if (busy && capable(CAP_SYS_RAWIO) && !test_and_set_bit(ARS_POLL,
|
||||
&acpi_desc->scrub_flags)) {
|
||||
acpi_desc->scrub_tmo = 1;
|
||||
mod_delayed_work(nfit_wq, &acpi_desc->dwork, HZ);
|
||||
}
|
||||
|
||||
mutex_unlock(&acpi_desc->init_mutex);
|
||||
device_unlock(dev);
|
||||
return rc;
|
||||
}
|
||||
|
@ -2529,7 +2540,10 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc,
|
|||
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
return cmd_rc;
|
||||
if (cmd_rc < 0)
|
||||
return cmd_rc;
|
||||
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ars_continue(struct acpi_nfit_desc *acpi_desc)
|
||||
|
@ -2539,11 +2553,11 @@ static int ars_continue(struct acpi_nfit_desc *acpi_desc)
|
|||
struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
|
||||
struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
|
||||
|
||||
memset(&ars_start, 0, sizeof(ars_start));
|
||||
ars_start.address = ars_status->restart_address;
|
||||
ars_start.length = ars_status->restart_length;
|
||||
ars_start.type = ars_status->type;
|
||||
ars_start.flags = acpi_desc->ars_start_flags;
|
||||
ars_start = (struct nd_cmd_ars_start) {
|
||||
.address = ars_status->restart_address,
|
||||
.length = ars_status->restart_length,
|
||||
.type = ars_status->type,
|
||||
};
|
||||
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
|
||||
sizeof(ars_start), &cmd_rc);
|
||||
if (rc < 0)
|
||||
|
@ -2622,6 +2636,17 @@ static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
|
|||
*/
|
||||
if (ars_status->out_length < 44)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Ignore potentially stale results that are only refreshed
|
||||
* after a start-ARS event.
|
||||
*/
|
||||
if (!test_and_clear_bit(ARS_VALID, &acpi_desc->scrub_flags)) {
|
||||
dev_dbg(acpi_desc->dev, "skip %d stale records\n",
|
||||
ars_status->num_records);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < ars_status->num_records; i++) {
|
||||
/* only process full records */
|
||||
if (ars_status->out_length
|
||||
|
@ -2960,7 +2985,7 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
|
|||
|
||||
lockdep_assert_held(&acpi_desc->init_mutex);
|
||||
|
||||
if (acpi_desc->cancel)
|
||||
if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags))
|
||||
return 0;
|
||||
|
||||
if (query_rc == -EBUSY) {
|
||||
|
@ -3034,7 +3059,7 @@ static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
|
|||
{
|
||||
lockdep_assert_held(&acpi_desc->init_mutex);
|
||||
|
||||
acpi_desc->scrub_busy = 1;
|
||||
set_bit(ARS_BUSY, &acpi_desc->scrub_flags);
|
||||
/* note this should only be set from within the workqueue */
|
||||
if (tmo)
|
||||
acpi_desc->scrub_tmo = tmo;
|
||||
|
@ -3050,7 +3075,7 @@ static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
|
|||
{
|
||||
lockdep_assert_held(&acpi_desc->init_mutex);
|
||||
|
||||
acpi_desc->scrub_busy = 0;
|
||||
clear_bit(ARS_BUSY, &acpi_desc->scrub_flags);
|
||||
acpi_desc->scrub_count++;
|
||||
if (acpi_desc->scrub_count_state)
|
||||
sysfs_notify_dirent(acpi_desc->scrub_count_state);
|
||||
|
@ -3071,6 +3096,7 @@ static void acpi_nfit_scrub(struct work_struct *work)
|
|||
else
|
||||
notify_ars_done(acpi_desc);
|
||||
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
|
||||
clear_bit(ARS_POLL, &acpi_desc->scrub_flags);
|
||||
mutex_unlock(&acpi_desc->init_mutex);
|
||||
}
|
||||
|
||||
|
@ -3105,6 +3131,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
|
|||
struct nfit_spa *nfit_spa;
|
||||
int rc;
|
||||
|
||||
set_bit(ARS_VALID, &acpi_desc->scrub_flags);
|
||||
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
|
||||
switch (nfit_spa_type(nfit_spa->spa)) {
|
||||
case NFIT_SPA_VOLATILE:
|
||||
|
@ -3322,7 +3349,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
|
|||
struct nfit_spa *nfit_spa;
|
||||
|
||||
mutex_lock(&acpi_desc->init_mutex);
|
||||
if (acpi_desc->cancel) {
|
||||
if (test_bit(ARS_CANCEL, &acpi_desc->scrub_flags)) {
|
||||
mutex_unlock(&acpi_desc->init_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
@ -3401,7 +3428,7 @@ void acpi_nfit_shutdown(void *data)
|
|||
mutex_unlock(&acpi_desc_lock);
|
||||
|
||||
mutex_lock(&acpi_desc->init_mutex);
|
||||
acpi_desc->cancel = 1;
|
||||
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
|
||||
cancel_delayed_work_sync(&acpi_desc->dwork);
|
||||
mutex_unlock(&acpi_desc->init_mutex);
|
||||
|
||||
|
|
|
@ -181,6 +181,13 @@ struct nfit_mem {
|
|||
bool has_lsw;
|
||||
};
|
||||
|
||||
enum scrub_flags {
|
||||
ARS_BUSY,
|
||||
ARS_CANCEL,
|
||||
ARS_VALID,
|
||||
ARS_POLL,
|
||||
};
|
||||
|
||||
struct acpi_nfit_desc {
|
||||
struct nvdimm_bus_descriptor nd_desc;
|
||||
struct acpi_table_header acpi_header;
|
||||
|
@ -194,7 +201,6 @@ struct acpi_nfit_desc {
|
|||
struct list_head idts;
|
||||
struct nvdimm_bus *nvdimm_bus;
|
||||
struct device *dev;
|
||||
u8 ars_start_flags;
|
||||
struct nd_cmd_ars_status *ars_status;
|
||||
struct nfit_spa *scrub_spa;
|
||||
struct delayed_work dwork;
|
||||
|
@ -203,8 +209,7 @@ struct acpi_nfit_desc {
|
|||
unsigned int max_ars;
|
||||
unsigned int scrub_count;
|
||||
unsigned int scrub_mode;
|
||||
unsigned int scrub_busy:1;
|
||||
unsigned int cancel:1;
|
||||
unsigned long scrub_flags;
|
||||
unsigned long dimm_cmd_force_en;
|
||||
unsigned long bus_cmd_force_en;
|
||||
unsigned long bus_nfit_cmd_force_en;
|
||||
|
|
|
@ -213,6 +213,9 @@ struct ipmi_user {
|
|||
|
||||
/* Does this interface receive IPMI events? */
|
||||
bool gets_events;
|
||||
|
||||
/* Free must run in process context for RCU cleanup. */
|
||||
struct work_struct remove_work;
|
||||
};
|
||||
|
||||
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
|
||||
|
@ -1078,6 +1081,15 @@ static int intf_err_seq(struct ipmi_smi *intf,
|
|||
}
|
||||
|
||||
|
||||
static void free_user_work(struct work_struct *work)
|
||||
{
|
||||
struct ipmi_user *user = container_of(work, struct ipmi_user,
|
||||
remove_work);
|
||||
|
||||
cleanup_srcu_struct(&user->release_barrier);
|
||||
kfree(user);
|
||||
}
|
||||
|
||||
int ipmi_create_user(unsigned int if_num,
|
||||
const struct ipmi_user_hndl *handler,
|
||||
void *handler_data,
|
||||
|
@ -1121,6 +1133,8 @@ int ipmi_create_user(unsigned int if_num,
|
|||
goto out_kfree;
|
||||
|
||||
found:
|
||||
INIT_WORK(&new_user->remove_work, free_user_work);
|
||||
|
||||
rv = init_srcu_struct(&new_user->release_barrier);
|
||||
if (rv)
|
||||
goto out_kfree;
|
||||
|
@ -1183,8 +1197,9 @@ EXPORT_SYMBOL(ipmi_get_smi_info);
|
|||
static void free_user(struct kref *ref)
|
||||
{
|
||||
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
|
||||
cleanup_srcu_struct(&user->release_barrier);
|
||||
kfree(user);
|
||||
|
||||
/* SRCU cleanup must happen in task context. */
|
||||
schedule_work(&user->remove_work);
|
||||
}
|
||||
|
||||
static void _ipmi_destroy_user(struct ipmi_user *user)
|
||||
|
|
|
drivers/char/tpm/eventlog/tpm2.c

@@ -37,8 +37,8 @@
  *
  * Returns size of the event. If it is an invalid event, returns 0.
  */
-static int calc_tpm2_event_size(struct tcg_pcr_event2 *event,
-				struct tcg_pcr_event *event_header)
+static size_t calc_tpm2_event_size(struct tcg_pcr_event2 *event,
+				   struct tcg_pcr_event *event_header)
 {
 	struct tcg_efi_specid_event *efispecid;
 	struct tcg_event_field *event_field;
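The eventlog hunk above widens the return type of calc_tpm2_event_size() from int to size_t: the function hands back a length that callers feed into size_t offset arithmetic, and squeezing a large (for example corrupt) event size through a signed int can wrap or truncate before the sanity checks run. A small C sketch of the general pitfall, using made-up sizes rather than real TPM event data and assuming a 64-bit size_t:

#include <stdio.h>
#include <stddef.h>

/* Illustrative only: the 32-bit product wraps before the int is returned. */
static int    size_as_int(unsigned int count)    { return count * 1024u; }
static size_t size_as_size_t(unsigned int count) { return (size_t)count * 1024u; }

int main(void)
{
	unsigned int bogus_count = 0x00400000;	/* oversized on-disk field */
	size_t limit = 4096;

	int    s1 = size_as_int(bogus_count);	/* wraps to 0 in 32 bits */
	size_t s2 = size_as_size_t(bogus_count);

	printf("int return:    size=%d   passes limit check: %d\n", s1, (size_t)s1 <= limit);
	printf("size_t return: size=%zu  passes limit check: %d\n", s2, s2 <= limit);
	return 0;
}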
drivers/char/tpm/tpm_i2c_atmel.c

@@ -69,6 +69,10 @@ static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
 	if (status < 0)
 		return status;
 
+	/* The upper layer does not support incomplete sends. */
+	if (status != len)
+		return -E2BIG;
+
 	return 0;
 }
 
@ -164,6 +164,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
|
|||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
|
||||
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
|
||||
}
|
||||
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
|
||||
|
||||
tmp = mmVM_L2_CNTL4_DEFAULT;
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
|
||||
|
|
|
@ -730,7 +730,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
|||
}
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
if (!(flags & TTM_PAGE_FLAG_DMA32)) {
|
||||
if (!(flags & TTM_PAGE_FLAG_DMA32) &&
|
||||
(npages - i) >= HPAGE_PMD_NR) {
|
||||
for (j = 0; j < HPAGE_PMD_NR; ++j)
|
||||
if (p++ != pages[i + j])
|
||||
break;
|
||||
|
@ -759,7 +760,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
|||
unsigned max_size, n2free;
|
||||
|
||||
spin_lock_irqsave(&huge->lock, irq_flags);
|
||||
while (i < npages) {
|
||||
while ((npages - i) >= HPAGE_PMD_NR) {
|
||||
struct page *p = pages[i];
|
||||
unsigned j;
|
||||
|
||||
|
|
|
@ -337,7 +337,8 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
|
|||
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
|
||||
},
|
||||
.driver_data = (void *)&sipodev_desc
|
||||
}
|
||||
},
|
||||
{ } /* Terminate list */
|
||||
};
|
||||
|
||||
|
||||
|
|
|
drivers/iio/accel/kxcjk-1013.c

@@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
 
 	mutex_lock(&data->mutex);
 	ret = kxcjk1013_set_mode(data, OPERATION);
+	if (ret == 0)
+		ret = kxcjk1013_set_range(data, data->range);
 	mutex_unlock(&data->mutex);
 
 	return ret;
drivers/iio/adc/ad_sigma_delta.c

@@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
 	if (sigma_delta->info->has_registers) {
 		data[0] = reg << sigma_delta->info->addr_shift;
 		data[0] |= sigma_delta->info->read_mask;
+		data[0] |= sigma_delta->comm;
 		spi_message_add_tail(&t[0], &m);
 	}
 	spi_message_add_tail(&t[1], &m);
@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
|
|||
ret = wait_event_interruptible_timeout(st->wq_data_avail,
|
||||
st->done,
|
||||
msecs_to_jiffies(1000));
|
||||
if (ret == 0)
|
||||
ret = -ETIMEDOUT;
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&st->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*val = st->last_value;
|
||||
|
||||
/* Disable interrupts, regardless if adc conversion was
|
||||
* successful or not
|
||||
*/
|
||||
at91_adc_writel(st, AT91_ADC_CHDR,
|
||||
AT91_ADC_CH(chan->channel));
|
||||
at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
|
||||
|
||||
st->last_value = 0;
|
||||
st->done = false;
|
||||
if (ret > 0) {
|
||||
/* a valid conversion took place */
|
||||
*val = st->last_value;
|
||||
st->last_value = 0;
|
||||
st->done = false;
|
||||
ret = IIO_VAL_INT;
|
||||
} else if (ret == 0) {
|
||||
/* conversion timeout */
|
||||
dev_err(&idev->dev, "ADC Channel %d timeout.\n",
|
||||
chan->channel);
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
mutex_unlock(&st->lock);
|
||||
return IIO_VAL_INT;
|
||||
return ret;
|
||||
|
||||
case IIO_CHAN_INFO_SCALE:
|
||||
*val = st->vref_mv;
|
||||
|
|
|
@ -2,11 +2,9 @@
|
|||
#ifndef BME680_H_
|
||||
#define BME680_H_
|
||||
|
||||
#define BME680_REG_CHIP_I2C_ID 0xD0
|
||||
#define BME680_REG_CHIP_SPI_ID 0x50
|
||||
#define BME680_REG_CHIP_ID 0xD0
|
||||
#define BME680_CHIP_ID_VAL 0x61
|
||||
#define BME680_REG_SOFT_RESET_I2C 0xE0
|
||||
#define BME680_REG_SOFT_RESET_SPI 0x60
|
||||
#define BME680_REG_SOFT_RESET 0xE0
|
||||
#define BME680_CMD_SOFTRESET 0xB6
|
||||
#define BME680_REG_STATUS 0x73
|
||||
#define BME680_SPI_MEM_PAGE_BIT BIT(4)
|
||||
|
|
|
@ -63,9 +63,23 @@ struct bme680_data {
|
|||
s32 t_fine;
|
||||
};
|
||||
|
||||
static const struct regmap_range bme680_volatile_ranges[] = {
|
||||
regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
|
||||
regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
|
||||
regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
|
||||
};
|
||||
|
||||
static const struct regmap_access_table bme680_volatile_table = {
|
||||
.yes_ranges = bme680_volatile_ranges,
|
||||
.n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
|
||||
};
|
||||
|
||||
const struct regmap_config bme680_regmap_config = {
|
||||
.reg_bits = 8,
|
||||
.val_bits = 8,
|
||||
.max_register = 0xef,
|
||||
.volatile_table = &bme680_volatile_table,
|
||||
.cache_type = REGCACHE_RBTREE,
|
||||
};
|
||||
EXPORT_SYMBOL(bme680_regmap_config);
|
||||
|
||||
|
@ -330,6 +344,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
|
|||
s64 var1, var2, var3;
|
||||
s16 calc_temp;
|
||||
|
||||
/* If the calibration is invalid, attempt to reload it */
|
||||
if (!calib->par_t2)
|
||||
bme680_read_calib(data, calib);
|
||||
|
||||
var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
|
||||
var2 = (var1 * calib->par_t2) >> 11;
|
||||
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
|
||||
|
@ -591,8 +609,7 @@ static int bme680_gas_config(struct bme680_data *data)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int bme680_read_temp(struct bme680_data *data,
|
||||
int *val, int *val2)
|
||||
static int bme680_read_temp(struct bme680_data *data, int *val)
|
||||
{
|
||||
struct device *dev = regmap_get_device(data->regmap);
|
||||
int ret;
|
||||
|
@ -625,10 +642,9 @@ static int bme680_read_temp(struct bme680_data *data,
|
|||
* compensate_press/compensate_humid to get compensated
|
||||
* pressure/humidity readings.
|
||||
*/
|
||||
if (val && val2) {
|
||||
*val = comp_temp;
|
||||
*val2 = 100;
|
||||
return IIO_VAL_FRACTIONAL;
|
||||
if (val) {
|
||||
*val = comp_temp * 10; /* Centidegrees to millidegrees */
|
||||
return IIO_VAL_INT;
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -643,7 +659,7 @@ static int bme680_read_press(struct bme680_data *data,
|
|||
s32 adc_press;
|
||||
|
||||
/* Read and compensate temperature to get a reading of t_fine */
|
||||
ret = bme680_read_temp(data, NULL, NULL);
|
||||
ret = bme680_read_temp(data, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -676,7 +692,7 @@ static int bme680_read_humid(struct bme680_data *data,
|
|||
u32 comp_humidity;
|
||||
|
||||
/* Read and compensate temperature to get a reading of t_fine */
|
||||
ret = bme680_read_temp(data, NULL, NULL);
|
||||
ret = bme680_read_temp(data, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
@ -769,7 +785,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
|
|||
case IIO_CHAN_INFO_PROCESSED:
|
||||
switch (chan->type) {
|
||||
case IIO_TEMP:
|
||||
return bme680_read_temp(data, val, val2);
|
||||
return bme680_read_temp(data, val);
|
||||
case IIO_PRESSURE:
|
||||
return bme680_read_press(data, val, val2);
|
||||
case IIO_HUMIDITYRELATIVE:
|
||||
|
@ -905,8 +921,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
|
|||
{
|
||||
struct iio_dev *indio_dev;
|
||||
struct bme680_data *data;
|
||||
unsigned int val;
|
||||
int ret;
|
||||
|
||||
ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
|
||||
BME680_CMD_SOFTRESET);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to reset chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Error reading chip ID\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (val != BME680_CHIP_ID_VAL) {
|
||||
dev_err(dev, "Wrong chip ID, got %x expected %x\n",
|
||||
val, BME680_CHIP_ID_VAL);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
|
||||
if (!indio_dev)
|
||||
return -ENOMEM;
|
||||
|
|
|
@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
|
|||
{
|
||||
struct regmap *regmap;
|
||||
const char *name = NULL;
|
||||
unsigned int val;
|
||||
int ret;
|
||||
|
||||
regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
|
||||
if (IS_ERR(regmap)) {
|
||||
|
@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
|
|||
return PTR_ERR(regmap);
|
||||
}
|
||||
|
||||
ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
|
||||
BME680_CMD_SOFTRESET);
|
||||
if (ret < 0) {
|
||||
dev_err(&client->dev, "Failed to reset chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
|
||||
if (ret < 0) {
|
||||
dev_err(&client->dev, "Error reading I2C chip ID\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (val != BME680_CHIP_ID_VAL) {
|
||||
dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
|
||||
val, BME680_CHIP_ID_VAL);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (id)
|
||||
name = id->name;
|
||||
|
||||
|
|
|
@ -11,28 +11,93 @@
|
|||
|
||||
#include "bme680.h"
|
||||
|
||||
struct bme680_spi_bus_context {
|
||||
struct spi_device *spi;
|
||||
u8 current_page;
|
||||
};
|
||||
|
||||
/*
|
||||
* In SPI mode there are only 7 address bits, a "page" register determines
|
||||
* which part of the 8-bit range is active. This function looks at the address
|
||||
* and writes the page selection bit if needed
|
||||
*/
|
||||
static int bme680_regmap_spi_select_page(
|
||||
struct bme680_spi_bus_context *ctx, u8 reg)
|
||||
{
|
||||
struct spi_device *spi = ctx->spi;
|
||||
int ret;
|
||||
u8 buf[2];
|
||||
u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */
|
||||
|
||||
if (page == ctx->current_page)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Data sheet claims we're only allowed to change bit 4, so we must do
|
||||
* a read-modify-write on each and every page select
|
||||
*/
|
||||
buf[0] = BME680_REG_STATUS;
|
||||
ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
|
||||
if (ret < 0) {
|
||||
dev_err(&spi->dev, "failed to set page %u\n", page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
buf[0] = BME680_REG_STATUS;
|
||||
if (page)
|
||||
buf[1] |= BME680_SPI_MEM_PAGE_BIT;
|
||||
else
|
||||
buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;
|
||||
|
||||
ret = spi_write(spi, buf, 2);
|
||||
if (ret < 0) {
|
||||
dev_err(&spi->dev, "failed to set page %u\n", page);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ctx->current_page = page;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bme680_regmap_spi_write(void *context, const void *data,
|
||||
size_t count)
|
||||
{
|
||||
struct spi_device *spi = context;
|
||||
struct bme680_spi_bus_context *ctx = context;
|
||||
struct spi_device *spi = ctx->spi;
|
||||
int ret;
|
||||
u8 buf[2];
|
||||
|
||||
memcpy(buf, data, 2);
|
||||
|
||||
ret = bme680_regmap_spi_select_page(ctx, buf[0]);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* The SPI register address (= full register address without bit 7)
|
||||
* and the write command (bit7 = RW = '0')
|
||||
*/
|
||||
buf[0] &= ~0x80;
|
||||
|
||||
return spi_write_then_read(spi, buf, 2, NULL, 0);
|
||||
return spi_write(spi, buf, 2);
|
||||
}
|
||||
|
||||
static int bme680_regmap_spi_read(void *context, const void *reg,
|
||||
size_t reg_size, void *val, size_t val_size)
|
||||
{
|
||||
struct spi_device *spi = context;
|
||||
struct bme680_spi_bus_context *ctx = context;
|
||||
struct spi_device *spi = ctx->spi;
|
||||
int ret;
|
||||
u8 addr = *(const u8 *)reg;
|
||||
|
||||
return spi_write_then_read(spi, reg, reg_size, val, val_size);
|
||||
ret = bme680_regmap_spi_select_page(ctx, addr);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
addr |= 0x80; /* bit7 = RW = '1' */
|
||||
|
||||
return spi_write_then_read(spi, &addr, 1, val, val_size);
|
||||
}
|
||||
|
||||
static struct regmap_bus bme680_regmap_bus = {
|
||||
|
@ -45,8 +110,8 @@ static struct regmap_bus bme680_regmap_bus = {
|
|||
static int bme680_spi_probe(struct spi_device *spi)
|
||||
{
|
||||
const struct spi_device_id *id = spi_get_device_id(spi);
|
||||
struct bme680_spi_bus_context *bus_context;
|
||||
struct regmap *regmap;
|
||||
unsigned int val;
|
||||
int ret;
|
||||
|
||||
spi->bits_per_word = 8;
|
||||
|
@ -56,45 +121,21 @@ static int bme680_spi_probe(struct spi_device *spi)
|
|||
return ret;
|
||||
}
|
||||
|
||||
bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
|
||||
if (!bus_context)
|
||||
return -ENOMEM;
|
||||
|
||||
bus_context->spi = spi;
|
||||
bus_context->current_page = 0xff; /* Undefined on warm boot */
|
||||
|
||||
regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
|
||||
&spi->dev, &bme680_regmap_config);
|
||||
bus_context, &bme680_regmap_config);
|
||||
if (IS_ERR(regmap)) {
|
||||
dev_err(&spi->dev, "Failed to register spi regmap %d\n",
|
||||
(int)PTR_ERR(regmap));
|
||||
return PTR_ERR(regmap);
|
||||
}
|
||||
|
||||
ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
|
||||
BME680_CMD_SOFTRESET);
|
||||
if (ret < 0) {
|
||||
dev_err(&spi->dev, "Failed to reset chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
|
||||
ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
|
||||
if (ret < 0) {
|
||||
dev_err(&spi->dev, "Error reading SPI chip ID\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (val != BME680_CHIP_ID_VAL) {
|
||||
dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
|
||||
val, BME680_CHIP_ID_VAL);
|
||||
return -ENODEV;
|
||||
}
|
||||
/*
|
||||
* select Page 1 of spi_mem_page to enable access to
|
||||
* to registers from address 0x00 to 0x7F.
|
||||
*/
|
||||
ret = regmap_write_bits(regmap, BME680_REG_STATUS,
|
||||
BME680_SPI_MEM_PAGE_BIT,
|
||||
BME680_SPI_MEM_PAGE_1_VAL);
|
||||
if (ret < 0) {
|
||||
dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return bme680_core_probe(&spi->dev, regmap, id->name);
|
||||
}
|
||||
|
||||
|
|
|
@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
|
|||
* Do not use IIO_DEGREE_TO_RAD to avoid precision
|
||||
* loss. Round to the nearest integer.
|
||||
*/
|
||||
*val = div_s64(val64 * 314159 + 9000000ULL, 1000);
|
||||
*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
|
||||
ret = IIO_VAL_FRACTIONAL;
|
||||
*val = 0;
|
||||
*val2 = div_s64(val64 * 3141592653ULL,
|
||||
180 << (CROS_EC_SENSOR_BITS - 1));
|
||||
ret = IIO_VAL_INT_PLUS_NANO;
|
||||
break;
|
||||
case MOTIONSENSE_TYPE_MAG:
|
||||
/*
|
||||
|
|
|
@ -98,6 +98,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,
|
|||
|
||||
inoutbuf[0] = 0x60; /* write EEPROM */
|
||||
inoutbuf[0] |= data->ref_mode << 3;
|
||||
inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
|
||||
inoutbuf[1] = data->dac_value >> 4;
|
||||
inoutbuf[2] = (data->dac_value & 0xf) << 4;
|
||||
|
||||
|
|
|
@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
|
|||
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
|
||||
return bmg160_get_filter(data, val);
|
||||
case IIO_CHAN_INFO_SCALE:
|
||||
*val = 0;
|
||||
switch (chan->type) {
|
||||
case IIO_TEMP:
|
||||
*val2 = 500000;
|
||||
return IIO_VAL_INT_PLUS_MICRO;
|
||||
*val = 500;
|
||||
return IIO_VAL_INT;
|
||||
case IIO_ANGL_VEL:
|
||||
{
|
||||
int i;
|
||||
|
@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
|
|||
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
|
||||
if (bmg160_scale_table[i].dps_range ==
|
||||
data->dps_range) {
|
||||
*val = 0;
|
||||
*val2 = bmg160_scale_table[i].scale;
|
||||
return IIO_VAL_INT_PLUS_MICRO;
|
||||
}
|
||||
|
|
|
@ -29,7 +29,8 @@
|
|||
|
||||
#include "mpu3050.h"
|
||||
|
||||
#define MPU3050_CHIP_ID 0x69
|
||||
#define MPU3050_CHIP_ID 0x68
|
||||
#define MPU3050_CHIP_ID_MASK 0x7E
|
||||
|
||||
/*
|
||||
* Register map: anything suffixed *_H is a big-endian high byte and always
|
||||
|
@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
|
|||
goto err_power_down;
|
||||
}
|
||||
|
||||
if (val != MPU3050_CHIP_ID) {
|
||||
dev_err(dev, "unsupported chip id %02x\n", (u8)val);
|
||||
if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
|
||||
dev_err(dev, "unsupported chip id %02x\n",
|
||||
(u8)(val & MPU3050_CHIP_ID_MASK));
|
||||
ret = -ENODEV;
|
||||
goto err_power_down;
|
||||
}
|
||||
|
|
|
drivers/iio/industrialio-buffer.c

@@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
 	const unsigned long *mask;
 	unsigned long *trialmask;
 
-	trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
-				  sizeof(*trialmask),
-				  GFP_KERNEL);
+	trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+			    sizeof(*trialmask), GFP_KERNEL);
 	if (trialmask == NULL)
 		return -ENOMEM;
 	if (!indio_dev->masklength) {
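The industrialio-buffer hunk above swaps kmalloc_array() for kcalloc() so the trial scan mask starts out zeroed; bits the code never explicitly sets must not inherit whatever the allocator happened to hand back. A user-space sketch of the same idea, using calloc() as a stand-in for kcalloc() and a toy set_bit():

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit(unsigned int bit, unsigned long *map)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

int main(void)
{
	unsigned int masklength = 70;	/* illustrative channel count */

	/* zeroed allocation: only bits that are set on purpose are observable */
	unsigned long *trialmask = calloc(BITS_TO_LONGS(masklength),
					  sizeof(*trialmask));
	if (!trialmask)
		return 1;

	set_bit(3, trialmask);
	printf("word0 = %#lx\n", trialmask[0]);	/* 0x8 and nothing else */

	free(trialmask);
	return 0;
}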
drivers/iio/industrialio-core.c

@@ -1735,10 +1735,10 @@ EXPORT_SYMBOL(__iio_device_register);
  **/
 void iio_device_unregister(struct iio_dev *indio_dev)
 {
-	mutex_lock(&indio_dev->info_exist_lock);
-
 	cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
 
+	mutex_lock(&indio_dev->info_exist_lock);
+
 	iio_device_unregister_debugfs(indio_dev);
 
 	iio_disable_all_buffers(indio_dev);
@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
|
|||
{ "ELAN0600", 0 },
|
||||
{ "ELAN0601", 0 },
|
||||
{ "ELAN0602", 0 },
|
||||
{ "ELAN0603", 0 },
|
||||
{ "ELAN0604", 0 },
|
||||
{ "ELAN0605", 0 },
|
||||
{ "ELAN0606", 0 },
|
||||
{ "ELAN0607", 0 },
|
||||
{ "ELAN0608", 0 },
|
||||
{ "ELAN0609", 0 },
|
||||
{ "ELAN060B", 0 },
|
||||
{ "ELAN060C", 0 },
|
||||
{ "ELAN060F", 0 },
|
||||
{ "ELAN0610", 0 },
|
||||
{ "ELAN0611", 0 },
|
||||
{ "ELAN0612", 0 },
|
||||
{ "ELAN0615", 0 },
|
||||
{ "ELAN0616", 0 },
|
||||
{ "ELAN0617", 0 },
|
||||
{ "ELAN0618", 0 },
|
||||
{ "ELAN0619", 0 },
|
||||
{ "ELAN061A", 0 },
|
||||
{ "ELAN061B", 0 },
|
||||
{ "ELAN061C", 0 },
|
||||
{ "ELAN061D", 0 },
|
||||
{ "ELAN061E", 0 },
|
||||
{ "ELAN061F", 0 },
|
||||
{ "ELAN0620", 0 },
|
||||
{ "ELAN0621", 0 },
|
||||
{ "ELAN0622", 0 },
|
||||
{ "ELAN0623", 0 },
|
||||
{ "ELAN0624", 0 },
|
||||
{ "ELAN0625", 0 },
|
||||
{ "ELAN0626", 0 },
|
||||
{ "ELAN0627", 0 },
|
||||
{ "ELAN0628", 0 },
|
||||
{ "ELAN0629", 0 },
|
||||
{ "ELAN062A", 0 },
|
||||
{ "ELAN062B", 0 },
|
||||
{ "ELAN062C", 0 },
|
||||
{ "ELAN062D", 0 },
|
||||
{ "ELAN0631", 0 },
|
||||
{ "ELAN0632", 0 },
|
||||
{ "ELAN1000", 0 },
|
||||
{ }
|
||||
};
|
||||
|
|
|
@ -429,7 +429,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
|
|||
val = readl(host->ioaddr + ESDHC_MIX_CTRL);
|
||||
else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
|
||||
/* the std tuning bits is in ACMD12_ERR for imx6sl */
|
||||
val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
}
|
||||
|
||||
if (val & ESDHC_MIX_CTRL_EXE_TUNE)
|
||||
|
@ -494,7 +494,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
|
|||
}
|
||||
writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
|
||||
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
|
||||
u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
|
||||
if (val & SDHCI_CTRL_TUNED_CLK) {
|
||||
v |= ESDHC_MIX_CTRL_SMPCLK_SEL;
|
||||
|
@ -512,7 +512,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
|
|||
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
|
||||
}
|
||||
|
||||
writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
|
||||
}
|
||||
return;
|
||||
|
@ -957,9 +957,9 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
|
|||
writel(ctrl, host->ioaddr + ESDHC_MIX_CTRL);
|
||||
writel(0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
|
||||
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
|
||||
ctrl = readl(host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
|
||||
writel(ctrl, host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1319,7 +1319,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
|
||||
/* clear tuning bits in case ROM has set it already */
|
||||
writel(0x0, host->ioaddr + ESDHC_MIX_CTRL);
|
||||
writel(0x0, host->ioaddr + SDHCI_ACMD12_ERR);
|
||||
writel(0x0, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
|
||||
writel(0x0, host->ioaddr + ESDHC_TUNE_CTRL_STATUS);
|
||||
}
|
||||
|
||||
|
|
|
@ -82,8 +82,8 @@ void sdhci_dumpregs(struct sdhci_host *host)
|
|||
SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
|
||||
sdhci_readl(host, SDHCI_INT_ENABLE),
|
||||
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
|
||||
SDHCI_DUMP("AC12 err: 0x%08x | Slot int: 0x%08x\n",
|
||||
sdhci_readw(host, SDHCI_ACMD12_ERR),
|
||||
SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
|
||||
sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
|
||||
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
|
||||
SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
|
||||
sdhci_readl(host, SDHCI_CAPABILITIES),
|
||||
|
@ -841,6 +841,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
|
|||
else
|
||||
host->ier = (host->ier & ~dma_irqs) | pio_irqs;
|
||||
|
||||
if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
|
||||
host->ier |= SDHCI_INT_AUTO_CMD_ERR;
|
||||
else
|
||||
host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
|
||||
|
||||
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
|
||||
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
|
||||
}
|
||||
|
@ -1078,8 +1083,7 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
|
|||
return (!(host->flags & SDHCI_DEVICE_DEAD) &&
|
||||
((mrq->cmd && mrq->cmd->error) ||
|
||||
(mrq->sbc && mrq->sbc->error) ||
|
||||
(mrq->data && ((mrq->data->error && !mrq->data->stop) ||
|
||||
(mrq->data->stop && mrq->data->stop->error))) ||
|
||||
(mrq->data && mrq->data->stop && mrq->data->stop->error) ||
|
||||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
|
||||
}
|
||||
|
||||
|
@ -1131,6 +1135,16 @@ static void sdhci_finish_data(struct sdhci_host *host)
|
|||
host->data = NULL;
|
||||
host->data_cmd = NULL;
|
||||
|
||||
/*
|
||||
* The controller needs a reset of internal state machines upon error
|
||||
* conditions.
|
||||
*/
|
||||
if (data->error) {
|
||||
if (!host->cmd || host->cmd == data_cmd)
|
||||
sdhci_do_reset(host, SDHCI_RESET_CMD);
|
||||
sdhci_do_reset(host, SDHCI_RESET_DATA);
|
||||
}
|
||||
|
||||
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
|
||||
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
|
||||
sdhci_adma_table_post(host, data);
|
||||
|
@ -1155,17 +1169,6 @@ static void sdhci_finish_data(struct sdhci_host *host)
|
|||
if (data->stop &&
|
||||
(data->error ||
|
||||
!data->mrq->sbc)) {
|
||||
|
||||
/*
|
||||
* The controller needs a reset of internal state machines
|
||||
* upon error conditions.
|
||||
*/
|
||||
if (data->error) {
|
||||
if (!host->cmd || host->cmd == data_cmd)
|
||||
sdhci_do_reset(host, SDHCI_RESET_CMD);
|
||||
sdhci_do_reset(host, SDHCI_RESET_DATA);
|
||||
}
|
||||
|
||||
/*
|
||||
* 'cap_cmd_during_tfr' request must not use the command line
|
||||
* after mmc_command_done() has been called. It is upper layer's
|
||||
|
@ -2642,8 +2645,23 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
|
|||
* *
|
||||
\*****************************************************************************/
|
||||
|
||||
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
|
||||
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
|
||||
{
|
||||
/* Handle auto-CMD12 error */
|
||||
if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
|
||||
struct mmc_request *mrq = host->data_cmd->mrq;
|
||||
u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
|
||||
int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
|
||||
SDHCI_INT_DATA_TIMEOUT :
|
||||
SDHCI_INT_DATA_CRC;
|
||||
|
||||
/* Treat auto-CMD12 error the same as data error */
|
||||
if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
|
||||
*intmask_p |= data_err_bit;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (!host->cmd) {
|
||||
/*
|
||||
* SDHCI recovers from errors by resetting the cmd and data
|
||||
|
@ -2665,20 +2683,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
|
|||
else
|
||||
host->cmd->error = -EILSEQ;
|
||||
|
||||
/*
|
||||
* If this command initiates a data phase and a response
|
||||
* CRC error is signalled, the card can start transferring
|
||||
* data - the card may have received the command without
|
||||
* error. We must not terminate the mmc_request early.
|
||||
*
|
||||
* If the card did not receive the command or returned an
|
||||
* error which prevented it sending data, the data phase
|
||||
* will time out.
|
||||
*/
|
||||
/* Treat data command CRC error the same as data CRC error */
|
||||
if (host->cmd->data &&
|
||||
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
|
||||
SDHCI_INT_CRC) {
|
||||
host->cmd = NULL;
|
||||
*intmask_p |= SDHCI_INT_DATA_CRC;
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2686,6 +2696,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
|
|||
return;
|
||||
}
|
||||
|
||||
/* Handle auto-CMD23 error */
|
||||
if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
|
||||
struct mmc_request *mrq = host->cmd->mrq;
|
||||
u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
|
||||
int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
|
||||
-ETIMEDOUT :
|
||||
-EILSEQ;
|
||||
|
||||
if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
|
||||
mrq->sbc->error = err;
|
||||
sdhci_finish_mrq(host, mrq);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (intmask & SDHCI_INT_RESPONSE)
|
||||
sdhci_finish_command(host);
|
||||
}
|
||||
|
@ -2906,7 +2931,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
if (intmask & SDHCI_INT_CMD_MASK)
|
||||
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
|
||||
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
|
||||
|
||||
if (intmask & SDHCI_INT_DATA_MASK)
|
||||
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
@ -144,14 +144,15 @@
|
|||
#define SDHCI_INT_DATA_CRC 0x00200000
|
||||
#define SDHCI_INT_DATA_END_BIT 0x00400000
|
||||
#define SDHCI_INT_BUS_POWER 0x00800000
|
||||
#define SDHCI_INT_ACMD12ERR 0x01000000
|
||||
#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
|
||||
#define SDHCI_INT_ADMA_ERROR 0x02000000
|
||||
|
||||
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
|
||||
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
|
||||
|
||||
#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
|
||||
SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
|
||||
SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
|
||||
SDHCI_INT_AUTO_CMD_ERR)
|
||||
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
|
||||
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
|
||||
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
|
||||
|
@ -166,7 +167,11 @@
|
|||
|
||||
#define SDHCI_CQE_INT_MASK (SDHCI_CQE_INT_ERR_MASK | SDHCI_INT_CQE)
|
||||
|
||||
#define SDHCI_ACMD12_ERR 0x3C
|
||||
#define SDHCI_AUTO_CMD_STATUS 0x3C
|
||||
#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
|
||||
#define SDHCI_AUTO_CMD_CRC 0x00000004
|
||||
#define SDHCI_AUTO_CMD_END_BIT 0x00000008
|
||||
#define SDHCI_AUTO_CMD_INDEX 0x00000010
|
||||
|
||||
#define SDHCI_HOST_CONTROL2 0x3E
|
||||
#define SDHCI_CTRL_UHS_MASK 0x0007
@ -3198,8 +3198,12 @@ static int bond_netdev_event(struct notifier_block *this,
|
|||
return NOTIFY_DONE;
|
||||
|
||||
if (event_dev->flags & IFF_MASTER) {
|
||||
int ret;
|
||||
|
||||
netdev_dbg(event_dev, "IFF_MASTER\n");
|
||||
return bond_master_netdev_event(event, event_dev);
|
||||
ret = bond_master_netdev_event(event, event_dev);
|
||||
if (ret != NOTIFY_DONE)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (event_dev->flags & IFF_SLAVE) {
@ -32,6 +32,13 @@
|
|||
#define DRV_NAME "nicvf"
|
||||
#define DRV_VERSION "1.0"
|
||||
|
||||
/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
|
||||
* the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
|
||||
* this value, keeping headroom for the 14 byte Ethernet header and two
|
||||
* VLAN tags (for QinQ)
|
||||
*/
|
||||
#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
|
||||
|
||||
/* Supported devices */
|
||||
static const struct pci_device_id nicvf_id_table[] = {
|
||||
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
|
||||
|
@ -1547,6 +1554,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
|
|||
struct nicvf *nic = netdev_priv(netdev);
|
||||
int orig_mtu = netdev->mtu;
|
||||
|
||||
/* For now just support only the usual MTU sized frames,
|
||||
* plus some headroom for VLAN, QinQ.
|
||||
*/
|
||||
if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
|
||||
netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
|
||||
netdev->mtu);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
netdev->mtu = new_mtu;
|
||||
|
||||
if (!netif_running(netdev))
|
||||
|
@ -1795,8 +1811,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
|
|||
bool bpf_attached = false;
|
||||
int ret = 0;
|
||||
|
||||
/* For now just support only the usual MTU sized frames */
|
||||
if (prog && (dev->mtu > 1500)) {
|
||||
/* For now just support only the usual MTU sized frames,
|
||||
* plus some headroom for VLAN, QinQ.
|
||||
*/
|
||||
if (prog && dev->mtu > MAX_XDP_MTU) {
|
||||
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
|
||||
dev->mtu);
|
||||
return -EOPNOTSUPP;
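For reference, the MAX_XDP_MTU added above evaluates to 1508 bytes: 1530 minus the 14-byte Ethernet header minus two 4-byte VLAN tags. A minimal stand-alone check of that arithmetic, with ETH_HLEN and VLAN_HLEN redefined locally at their usual values (illustration only, not driver code):

#include <stdio.h>

#define ETH_HLEN  14 /* Ethernet header */
#define VLAN_HLEN  4 /* one 802.1Q tag  */

int main(void)
{
	/* XDP needs the whole packet in one contiguous buffer; packets
	 * above 1530 bytes are split across pages, so cap the MTU. */
	int max_xdp_mtu = 1530 - ETH_HLEN - VLAN_HLEN * 2;

	printf("MAX_XDP_MTU = %d\n", max_xdp_mtu); /* prints 1508 */
	return 0;
}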
@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void mlx5_fpga_tls_release_swid(struct idr *idr,
|
||||
spinlock_t *idr_spinlock, u32 swid)
|
||||
static void *mlx5_fpga_tls_release_swid(struct idr *idr,
|
||||
spinlock_t *idr_spinlock, u32 swid)
|
||||
{
|
||||
unsigned long flags;
|
||||
void *ptr;
|
||||
|
||||
spin_lock_irqsave(idr_spinlock, flags);
|
||||
idr_remove(idr, swid);
|
||||
ptr = idr_remove(idr, swid);
|
||||
spin_unlock_irqrestore(idr_spinlock, flags);
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
|
||||
|
@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
|
|||
kfree(buf);
|
||||
}
|
||||
|
||||
struct mlx5_teardown_stream_context {
|
||||
struct mlx5_fpga_tls_command_context cmd;
|
||||
u32 swid;
|
||||
};
|
||||
|
||||
static void
|
||||
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
|
||||
struct mlx5_fpga_device *fdev,
|
||||
struct mlx5_fpga_tls_command_context *cmd,
|
||||
struct mlx5_fpga_dma_buf *resp)
|
||||
{
|
||||
struct mlx5_teardown_stream_context *ctx =
|
||||
container_of(cmd, struct mlx5_teardown_stream_context, cmd);
|
||||
|
||||
if (resp) {
|
||||
u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
|
||||
|
||||
|
@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
|
|||
mlx5_fpga_err(fdev,
|
||||
"Teardown stream failed with syndrome = %d",
|
||||
syndrome);
|
||||
else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
|
||||
mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
|
||||
&fdev->tls->tx_idr_spinlock,
|
||||
ctx->swid);
|
||||
else
|
||||
mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
|
||||
&fdev->tls->rx_idr_spinlock,
|
||||
ctx->swid);
|
||||
}
|
||||
mlx5_fpga_tls_put_command_ctx(cmd);
|
||||
}
|
||||
|
@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
|
|||
void *cmd;
|
||||
int ret;
|
||||
|
||||
rcu_read_lock();
|
||||
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!flow) {
|
||||
WARN_ONCE(1, "Received NULL pointer for handle\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
buf = kzalloc(size, GFP_ATOMIC);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
cmd = (buf + 1);
|
||||
|
||||
rcu_read_lock();
|
||||
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
|
||||
if (unlikely(!flow)) {
|
||||
rcu_read_unlock();
|
||||
WARN_ONCE(1, "Received NULL pointer for handle\n");
|
||||
kfree(buf);
|
||||
return -EINVAL;
|
||||
}
|
||||
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
|
||||
rcu_read_unlock();
|
||||
|
||||
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
|
||||
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
|
||||
|
@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
|
|||
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
|
||||
void *flow, u32 swid, gfp_t flags)
|
||||
{
|
||||
struct mlx5_teardown_stream_context *ctx;
|
||||
struct mlx5_fpga_tls_command_context *ctx;
|
||||
struct mlx5_fpga_dma_buf *buf;
|
||||
void *cmd;
|
||||
|
||||
|
@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
|
|||
if (!ctx)
|
||||
return;
|
||||
|
||||
buf = &ctx->cmd.buf;
|
||||
buf = &ctx->buf;
|
||||
cmd = (ctx + 1);
|
||||
MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
|
||||
MLX5_SET(tls_cmd, cmd, swid, swid);
|
||||
|
@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
|
|||
buf->sg[0].data = cmd;
|
||||
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
|
||||
|
||||
ctx->swid = swid;
|
||||
mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
|
||||
mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
|
||||
mlx5_fpga_tls_teardown_completion);
|
||||
}
|
||||
|
||||
|
@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
|
|||
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
|
||||
void *flow;
|
||||
|
||||
rcu_read_lock();
|
||||
if (direction_sx)
|
||||
flow = idr_find(&tls->tx_idr, swid);
|
||||
flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
|
||||
&tls->tx_idr_spinlock,
|
||||
swid);
|
||||
else
|
||||
flow = idr_find(&tls->rx_idr, swid);
|
||||
|
||||
rcu_read_unlock();
|
||||
flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
|
||||
&tls->rx_idr_spinlock,
|
||||
swid);
|
||||
|
||||
if (!flow) {
|
||||
mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
|
||||
|
@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
|
|||
return;
|
||||
}
|
||||
|
||||
synchronize_rcu(); /* before kfree(flow) */
|
||||
mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
|
||||
}
@ -80,8 +80,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
|
|||
|
||||
tmp_push_vlan_tci =
|
||||
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
|
||||
FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
|
||||
NFP_FL_PUSH_VLAN_CFI;
|
||||
FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
|
||||
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
|
||||
}
@ -55,7 +55,7 @@
|
|||
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
|
||||
|
||||
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
|
||||
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
|
||||
#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
|
||||
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
|
||||
|
||||
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
|
||||
|
@ -109,7 +109,6 @@
|
|||
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
|
||||
|
||||
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
|
||||
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
|
||||
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
|
||||
|
||||
/* LAG ports */
@ -56,14 +56,12 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
|
|||
FLOW_DISSECTOR_KEY_VLAN,
|
||||
target);
|
||||
/* Populate the tci field. */
|
||||
if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
|
||||
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
|
||||
flow_vlan->vlan_priority) |
|
||||
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
|
||||
flow_vlan->vlan_id) |
|
||||
NFP_FLOWER_MASK_VLAN_CFI;
|
||||
frame->tci = cpu_to_be16(tmp_tci);
|
||||
}
|
||||
tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
|
||||
tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
|
||||
flow_vlan->vlan_priority) |
|
||||
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
|
||||
flow_vlan->vlan_id);
|
||||
frame->tci = cpu_to_be16(tmp_tci);
|
||||
}
|
||||
}
@ -1250,6 +1250,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
|
|||
goto err_option_port_add;
|
||||
}
|
||||
|
||||
/* set promiscuity level to new slave */
|
||||
if (dev->flags & IFF_PROMISC) {
|
||||
err = dev_set_promiscuity(port_dev, 1);
|
||||
if (err)
|
||||
goto err_set_slave_promisc;
|
||||
}
|
||||
|
||||
/* set allmulti level to new slave */
|
||||
if (dev->flags & IFF_ALLMULTI) {
|
||||
err = dev_set_allmulti(port_dev, 1);
|
||||
if (err) {
|
||||
if (dev->flags & IFF_PROMISC)
|
||||
dev_set_promiscuity(port_dev, -1);
|
||||
goto err_set_slave_promisc;
|
||||
}
|
||||
}
|
||||
|
||||
netif_addr_lock_bh(dev);
|
||||
dev_uc_sync_multiple(port_dev, dev);
|
||||
dev_mc_sync_multiple(port_dev, dev);
|
||||
|
@ -1266,6 +1283,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
|
|||
|
||||
return 0;
|
||||
|
||||
err_set_slave_promisc:
|
||||
__team_option_inst_del_port(team, port);
|
||||
|
||||
err_option_port_add:
|
||||
team_upper_dev_unlink(team, port);
|
||||
|
||||
|
@ -1311,6 +1331,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
|
|||
|
||||
team_port_disable(team, port);
|
||||
list_del_rcu(&port->list);
|
||||
|
||||
if (dev->flags & IFF_PROMISC)
|
||||
dev_set_promiscuity(port_dev, -1);
|
||||
if (dev->flags & IFF_ALLMULTI)
|
||||
dev_set_allmulti(port_dev, -1);
|
||||
|
||||
team_upper_dev_unlink(team, port);
|
||||
netdev_rx_handler_unregister(port_dev);
|
||||
team_port_disable_netpoll(port);
|
||||
|
|
|
@ -672,7 +672,6 @@ enum rt2x00_state_flags {
|
|||
CONFIG_CHANNEL_HT40,
|
||||
CONFIG_POWERSAVING,
|
||||
CONFIG_HT_DISABLED,
|
||||
CONFIG_QOS_DISABLED,
|
||||
CONFIG_MONITORING,
|
||||
|
||||
/*
|
||||
|
|
|
@ -642,18 +642,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
|
|||
rt2x00dev->intf_associated--;
|
||||
|
||||
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
|
||||
|
||||
clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for access point which do not support 802.11e . We have to
|
||||
* generate data frames sequence number in S/W for such AP, because
|
||||
* of H/W bug.
|
||||
*/
|
||||
if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
|
||||
set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
|
||||
|
||||
/*
|
||||
* When the erp information has changed, we should perform
|
||||
* additional configuration steps. For all other changes we are done.
|
||||
|
|
|
@ -200,15 +200,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
|
|||
if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
|
||||
/*
|
||||
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
|
||||
* seqno on retransmited data (non-QOS) frames. To workaround
|
||||
* the problem let's generate seqno in software if QOS is
|
||||
* disabled.
|
||||
* seqno on retransmitted data (non-QOS) and management frames.
|
||||
* To workaround the problem let's generate seqno in software.
|
||||
* Except for beacons which are transmitted periodically by H/W
|
||||
* hence hardware has to assign seqno for them.
|
||||
*/
|
||||
if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
|
||||
__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
|
||||
else
|
||||
if (ieee80211_is_beacon(hdr->frame_control)) {
|
||||
__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
|
||||
/* H/W will generate sequence number */
|
||||
return;
|
||||
}
|
||||
|
||||
__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -2153,7 +2153,6 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
|
|||
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
|
||||
fc_rport_state(rdata));
|
||||
|
||||
rdata->flags &= ~FC_RP_STARTED;
|
||||
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
|
||||
mutex_unlock(&rdata->rp_mutex);
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
|
|
|
@ -2149,8 +2149,12 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
|
|||
ret = BLK_STS_DEV_RESOURCE;
|
||||
break;
|
||||
default:
|
||||
if (unlikely(!scsi_device_online(sdev)))
|
||||
scsi_req(req)->result = DID_NO_CONNECT << 16;
|
||||
else
|
||||
scsi_req(req)->result = DID_ERROR << 16;
|
||||
/*
|
||||
* Make sure to release all allocated ressources when
|
||||
* Make sure to release all allocated resources when
|
||||
* we hit an error, as we will never see this command
|
||||
* again.
|
||||
*/
|
||||
|
|
|
@ -463,10 +463,8 @@ static int ni6501_alloc_usb_buffers(struct comedi_device *dev)
|
|||
|
||||
size = usb_endpoint_maxp(devpriv->ep_tx);
|
||||
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
|
||||
if (!devpriv->usb_tx_buf) {
|
||||
kfree(devpriv->usb_rx_buf);
|
||||
if (!devpriv->usb_tx_buf)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -518,6 +516,9 @@ static int ni6501_auto_attach(struct comedi_device *dev,
|
|||
if (!devpriv)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&devpriv->mut);
|
||||
usb_set_intfdata(intf, devpriv);
|
||||
|
||||
ret = ni6501_find_endpoints(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -526,9 +527,6 @@ static int ni6501_auto_attach(struct comedi_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
mutex_init(&devpriv->mut);
|
||||
usb_set_intfdata(intf, devpriv);
|
||||
|
||||
ret = comedi_alloc_subdevices(dev, 2);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
|
|
@ -682,10 +682,8 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev)
|
|||
|
||||
size = usb_endpoint_maxp(devpriv->ep_tx);
|
||||
devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL);
|
||||
if (!devpriv->usb_tx_buf) {
|
||||
kfree(devpriv->usb_rx_buf);
|
||||
if (!devpriv->usb_tx_buf)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -800,6 +798,8 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
|
|||
|
||||
devpriv->model = board->model;
|
||||
|
||||
sema_init(&devpriv->limit_sem, 8);
|
||||
|
||||
ret = vmk80xx_find_usb_endpoints(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -808,8 +808,6 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
sema_init(&devpriv->limit_sem, 8);
|
||||
|
||||
usb_set_intfdata(intf, devpriv);
|
||||
|
||||
if (devpriv->model == VMK8055_MODEL)
|
||||
|
|
|
@@ -109,10 +109,10 @@
#define AD7192_CH_AIN3 BIT(6) /* AIN3 - AINCOM */
#define AD7192_CH_AIN4 BIT(7) /* AIN4 - AINCOM */

#define AD7193_CH_AIN1P_AIN2M 0x000 /* AIN1(+) - AIN2(-) */
#define AD7193_CH_AIN3P_AIN4M 0x001 /* AIN3(+) - AIN4(-) */
#define AD7193_CH_AIN5P_AIN6M 0x002 /* AIN5(+) - AIN6(-) */
#define AD7193_CH_AIN7P_AIN8M 0x004 /* AIN7(+) - AIN8(-) */
#define AD7193_CH_AIN1P_AIN2M 0x001 /* AIN1(+) - AIN2(-) */
#define AD7193_CH_AIN3P_AIN4M 0x002 /* AIN3(+) - AIN4(-) */
#define AD7193_CH_AIN5P_AIN6M 0x004 /* AIN5(+) - AIN6(-) */
#define AD7193_CH_AIN7P_AIN8M 0x008 /* AIN7(+) - AIN8(-) */
#define AD7193_CH_TEMP 0x100 /* Temp senseor */
#define AD7193_CH_AIN2P_AIN2M 0x200 /* AIN2(+) - AIN2(-) */
#define AD7193_CH_AIN1 0x401 /* AIN1 - AINCOM */
@@ -269,7 +269,7 @@ static IIO_DEV_ATTR_VPEAK(0644,
static IIO_DEV_ATTR_IPEAK(0644,
ade7854_read_32bit,
ade7854_write_32bit,
ADE7854_VPEAK);
ADE7854_IPEAK);
static IIO_DEV_ATTR_APHCAL(0644,
ade7854_read_16bit,
ade7854_write_16bit,
@@ -1412,7 +1412,7 @@ int most_register_interface(struct most_interface *iface)

INIT_LIST_HEAD(&iface->p->channel_list);
iface->p->dev_id = id;
snprintf(iface->p->name, STRING_SIZE, "mdev%d", id);
strcpy(iface->p->name, iface->description);
iface->dev.init_name = iface->p->name;
iface->dev.bus = &mc.bus;
iface->dev.parent = &mc.dev;
@ -2497,14 +2497,16 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
|
|||
* center of the last stop bit in sampling clocks.
|
||||
*/
|
||||
int last_stop = bits * 2 - 1;
|
||||
int deviation = min_err * srr * last_stop / 2 / baud;
|
||||
int deviation = DIV_ROUND_CLOSEST(min_err * last_stop *
|
||||
(int)(srr + 1),
|
||||
2 * (int)baud);
|
||||
|
||||
if (abs(deviation) >= 2) {
|
||||
/* At least two sampling clocks off at the
|
||||
* last stop bit; we can increase the error
|
||||
* margin by shifting the sampling point.
|
||||
*/
|
||||
int shift = min(-8, max(7, deviation / 2));
|
||||
int shift = clamp(deviation / 2, -8, 7);
|
||||
|
||||
hssrr |= (shift << HSCIF_SRHP_SHIFT) &
|
||||
HSCIF_SRHP_MASK;
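In the hunk above, the dropped expression min(-8, max(7, deviation / 2)) evaluates to -8 for every input (max(7, x) is at least 7, and min(-8, anything >= 7) is -8), which is why it is replaced with clamp(). A minimal user-space sketch of the difference, with min/max/clamp redefined locally (illustration only, not the serial driver code):

#include <stdio.h>

#define min(a, b)        ((a) < (b) ? (a) : (b))
#define max(a, b)        ((a) > (b) ? (a) : (b))
#define clamp(x, lo, hi) (min(max((x), (lo)), (hi)))

int main(void)
{
	for (int d = -12; d <= 12; d += 6) {
		int old_shift = min(-8, max(7, d)); /* always -8 */
		int new_shift = clamp(d, -8, 7);    /* bounded to [-8, 7] */
		printf("deviation/2=%3d old=%3d new=%3d\n", d, old_shift, new_shift);
	}
	return 0;
}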
@@ -1521,7 +1521,8 @@ static void csi_J(struct vc_data *vc, int vpar)
return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
update_region(vc, (unsigned long) start, count);
if (con_should_update(vc))
do_update_region(vc, (unsigned long) start, count);
vc->vc_need_wrap = 0;
}
@@ -911,8 +911,12 @@ static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
{
struct vhost_umem_node *tmp, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
struct vhost_umem_node *tmp, *node;

if (!size)
return -EFAULT;

node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
@ -1263,6 +1263,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
|
|||
}
|
||||
|
||||
struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
|
||||
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
|
||||
void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
|
||||
|
||||
#define CIFS_CACHE_READ_FLG 1
|
||||
|
@ -1763,6 +1764,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
|
|||
#endif /* CONFIG_CIFS_ACL */
|
||||
|
||||
void cifs_oplock_break(struct work_struct *work);
|
||||
void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
|
||||
|
||||
extern const struct slow_work_ops cifs_oplock_break_ops;
|
||||
extern struct workqueue_struct *cifsiod_wq;
|
||||
|
|
|
@ -358,12 +358,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
|
|||
return cifs_file;
|
||||
}
|
||||
|
||||
/*
|
||||
* Release a reference on the file private data. This may involve closing
|
||||
* the filehandle out on the server. Must be called without holding
|
||||
* tcon->open_file_lock and cifs_file->file_info_lock.
|
||||
/**
|
||||
* cifsFileInfo_put - release a reference of file priv data
|
||||
*
|
||||
* Always potentially wait for oplock handler. See _cifsFileInfo_put().
|
||||
*/
|
||||
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
||||
{
|
||||
_cifsFileInfo_put(cifs_file, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* _cifsFileInfo_put - release a reference of file priv data
|
||||
*
|
||||
* This may involve closing the filehandle @cifs_file out on the
|
||||
* server. Must be called without holding tcon->open_file_lock and
|
||||
* cifs_file->file_info_lock.
|
||||
*
|
||||
* If @wait_for_oplock_handler is true and we are releasing the last
|
||||
* reference, wait for any running oplock break handler of the file
|
||||
* and cancel any pending one. If calling this function from the
|
||||
* oplock break handler, you need to pass false.
|
||||
*
|
||||
*/
|
||||
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
|
||||
{
|
||||
struct inode *inode = d_inode(cifs_file->dentry);
|
||||
struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
|
||||
|
@ -411,7 +429,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
|
|||
|
||||
spin_unlock(&tcon->open_file_lock);
|
||||
|
||||
oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
|
||||
oplock_break_cancelled = wait_oplock_handler ?
|
||||
cancel_work_sync(&cifs_file->oplock_break) : false;
|
||||
|
||||
if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
|
||||
struct TCP_Server_Info *server = tcon->ses->server;
|
||||
|
@ -4170,6 +4189,7 @@ void cifs_oplock_break(struct work_struct *work)
|
|||
cinode);
|
||||
cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
|
||||
}
|
||||
_cifsFileInfo_put(cfile, false /* do not wait for ourself */);
|
||||
cifs_done_oplock_break(cinode);
|
||||
}
|
||||
|
||||
|
|
|
@ -490,8 +490,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
|
|||
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
|
||||
&pCifsInode->flags);
|
||||
|
||||
queue_work(cifsoplockd_wq,
|
||||
&netfile->oplock_break);
|
||||
cifs_queue_oplock_break(netfile);
|
||||
netfile->oplock_break_cancelled = false;
|
||||
|
||||
spin_unlock(&tcon->open_file_lock);
|
||||
|
@ -588,6 +587,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
|
|||
spin_unlock(&cinode->writers_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* cifs_queue_oplock_break - queue the oplock break handler for cfile
|
||||
*
|
||||
* This function is called from the demultiplex thread when it
|
||||
* receives an oplock break for @cfile.
|
||||
*
|
||||
* Assumes the tcon->open_file_lock is held.
|
||||
* Assumes cfile->file_info_lock is NOT held.
|
||||
*/
|
||||
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
|
||||
{
|
||||
/*
|
||||
* Bump the handle refcount now while we hold the
|
||||
* open_file_lock to enforce the validity of it for the oplock
|
||||
* break handler. The matching put is done at the end of the
|
||||
* handler.
|
||||
*/
|
||||
cifsFileInfo_get(cfile);
|
||||
|
||||
queue_work(cifsoplockd_wq, &cfile->oplock_break);
|
||||
}
|
||||
|
||||
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
|
||||
{
|
||||
clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
|
||||
|
|
|
@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
|
|||
clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
|
||||
&cinode->flags);
|
||||
|
||||
queue_work(cifsoplockd_wq, &cfile->oplock_break);
|
||||
cifs_queue_oplock_break(cfile);
|
||||
kfree(lw);
|
||||
return true;
|
||||
}
|
||||
|
@ -719,8 +719,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
|
|||
CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
|
||||
&cinode->flags);
|
||||
spin_unlock(&cfile->file_info_lock);
|
||||
queue_work(cifsoplockd_wq,
|
||||
&cfile->oplock_break);
|
||||
|
||||
cifs_queue_oplock_break(cfile);
|
||||
|
||||
spin_unlock(&tcon->open_file_lock);
|
||||
spin_unlock(&cifs_tcp_ses_lock);
|
||||
|
|
|
@ -1906,6 +1906,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
|
|||
|
||||
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov,
|
||||
&resp_buftype);
|
||||
if (!rc)
|
||||
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
|
||||
if (!rc || !err_iov.iov_base) {
|
||||
rc = -ENOENT;
|
||||
goto free_path;
|
||||
|
|
|
@ -3273,8 +3273,6 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|||
rqst.rq_nvec = 1;
|
||||
|
||||
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
|
||||
cifs_small_buf_release(req);
|
||||
|
||||
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
|
||||
|
||||
if (rc) {
|
||||
|
@ -3293,6 +3291,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|||
io_parms->tcon->tid, ses->Suid,
|
||||
io_parms->offset, io_parms->length);
|
||||
|
||||
cifs_small_buf_release(req);
|
||||
|
||||
*nbytes = le32_to_cpu(rsp->DataLength);
|
||||
if ((*nbytes > CIFS_MAX_MSGSIZE) ||
|
||||
(*nbytes > io_parms->length)) {
|
||||
|
@ -3591,7 +3591,6 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|||
|
||||
rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
|
||||
&resp_buftype, flags, &rsp_iov);
|
||||
cifs_small_buf_release(req);
|
||||
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
|
||||
|
||||
if (rc) {
|
||||
|
@ -3609,6 +3608,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
|
|||
io_parms->offset, *nbytes);
|
||||
}
|
||||
|
||||
cifs_small_buf_release(req);
|
||||
free_rsp_buf(resp_buftype, rsp);
|
||||
return rc;
|
||||
}
|
||||
|
|
|
@ -1200,6 +1200,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
|
|||
count = -EINTR;
|
||||
goto out_mm;
|
||||
}
|
||||
/*
|
||||
* Avoid to modify vma->vm_flags
|
||||
* without locked ops while the
|
||||
* coredump reads the vm_flags.
|
||||
*/
|
||||
if (!mmget_still_valid(mm)) {
|
||||
/*
|
||||
* Silently return "count"
|
||||
* like if get_task_mm()
|
||||
* failed. FIXME: should this
|
||||
* function have returned
|
||||
* -ESRCH if get_task_mm()
|
||||
* failed like if
|
||||
* get_proc_task() fails?
|
||||
*/
|
||||
up_write(&mm->mmap_sem);
|
||||
goto out_mm;
|
||||
}
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
||||
vma->vm_flags &= ~VM_SOFTDIRTY;
|
||||
vma_set_page_prot(vma);
|
||||
|
|
|
@ -630,6 +630,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
|
|||
|
||||
/* the various vma->vm_userfaultfd_ctx still points to it */
|
||||
down_write(&mm->mmap_sem);
|
||||
/* no task can run (and in turn coredump) yet */
|
||||
VM_WARN_ON(!mmget_still_valid(mm));
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next)
|
||||
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
|
@ -884,6 +886,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
|||
* taking the mmap_sem for writing.
|
||||
*/
|
||||
down_write(&mm->mmap_sem);
|
||||
if (!mmget_still_valid(mm))
|
||||
goto skip_mm;
|
||||
prev = NULL;
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
||||
cond_resched();
|
||||
|
@ -907,6 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
|||
vma->vm_flags = new_flags;
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
}
|
||||
skip_mm:
|
||||
up_write(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
wakeup:
|
||||
|
@ -1335,6 +1340,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
|
|||
goto out;
|
||||
|
||||
down_write(&mm->mmap_sem);
|
||||
if (!mmget_still_valid(mm))
|
||||
goto out_unlock;
|
||||
vma = find_vma_prev(mm, start, &prev);
|
||||
if (!vma)
|
||||
goto out_unlock;
|
||||
|
@ -1523,6 +1530,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
|||
goto out;
|
||||
|
||||
down_write(&mm->mmap_sem);
|
||||
if (!mmget_still_valid(mm))
|
||||
goto out_unlock;
|
||||
vma = find_vma_prev(mm, start, &prev);
|
||||
if (!vma)
|
||||
goto out_unlock;
|
||||
|
|
|
@ -173,6 +173,7 @@ struct kretprobe_instance {
|
|||
struct kretprobe *rp;
|
||||
kprobe_opcode_t *ret_addr;
|
||||
struct task_struct *task;
|
||||
void *fp;
|
||||
char data[0];
|
||||
};
|
||||
|
||||
|
|
|
@ -1456,6 +1456,7 @@ struct net_device_ops {
|
|||
* @IFF_FAILOVER: device is a failover master device
|
||||
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
|
||||
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
|
||||
* @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
|
||||
*/
|
||||
enum netdev_priv_flags {
|
||||
IFF_802_1Q_VLAN = 1<<0,
|
||||
|
@ -1488,6 +1489,7 @@ enum netdev_priv_flags {
|
|||
IFF_FAILOVER = 1<<27,
|
||||
IFF_FAILOVER_SLAVE = 1<<28,
|
||||
IFF_L3MDEV_RX_HANDLER = 1<<29,
|
||||
IFF_LIVE_RENAME_OK = 1<<30,
|
||||
};
|
||||
|
||||
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
|
||||
|
@ -1519,6 +1521,7 @@ enum netdev_priv_flags {
|
|||
#define IFF_FAILOVER IFF_FAILOVER
|
||||
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
|
||||
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
|
||||
#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
|
||||
|
||||
/**
|
||||
* struct net_device - The DEVICE structure.
|
||||
|
|
|
@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
|
|||
__mmdrop(mm);
|
||||
}
|
||||
|
||||
/*
|
||||
* This has to be called after a get_task_mm()/mmget_not_zero()
|
||||
* followed by taking the mmap_sem for writing before modifying the
|
||||
* vmas or anything the coredump pretends not to change from under it.
|
||||
*
|
||||
* NOTE: find_extend_vma() called from GUP context is the only place
|
||||
* that can modify the "mm" (notably the vm_start/end) under mmap_sem
|
||||
* for reading and outside the context of the process, so it is also
|
||||
* the only case that holds the mmap_sem for reading that must call
|
||||
* this function. Generally if the mmap_sem is hold for reading
|
||||
* there's no need of this check after get_task_mm()/mmget_not_zero().
|
||||
*
|
||||
* This function can be obsoleted and the check can be removed, after
|
||||
* the coredump code will hold the mmap_sem for writing before
|
||||
* invoking the ->core_dump methods.
|
||||
*/
|
||||
static inline bool mmget_still_valid(struct mm_struct *mm)
|
||||
{
|
||||
return likely(!mm->core_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmget() - Pin the address space associated with a &struct mm_struct.
|
||||
* @mm: The address space to pin.
|
||||
|
|
|
@ -77,8 +77,8 @@ struct inet_frag_queue {
|
|||
struct timer_list timer;
|
||||
spinlock_t lock;
|
||||
refcount_t refcnt;
|
||||
struct sk_buff *fragments; /* Used in IPv6. */
|
||||
struct rb_root rb_fragments; /* Used in IPv4. */
|
||||
struct sk_buff *fragments; /* used in 6lopwpan IPv6. */
|
||||
struct rb_root rb_fragments; /* Used in IPv4/IPv6. */
|
||||
struct sk_buff *fragments_tail;
|
||||
struct sk_buff *last_run_head;
|
||||
ktime_t stamp;
|
||||
|
@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
|
|||
|
||||
extern const u8 ip_frag_ecn_table[16];
|
||||
|
||||
/* Return values of inet_frag_queue_insert() */
|
||||
#define IPFRAG_OK 0
|
||||
#define IPFRAG_DUP 1
|
||||
#define IPFRAG_OVERLAP 2
|
||||
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
|
||||
int offset, int end);
|
||||
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
|
||||
struct sk_buff *parent);
|
||||
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
|
||||
void *reasm_data);
|
||||
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
|
|||
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
|
||||
|
||||
/* Don't send error if the first segment did not arrive. */
|
||||
head = fq->q.fragments;
|
||||
if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
|
||||
if (!(fq->q.flags & INET_FRAG_FIRST_IN))
|
||||
goto out;
|
||||
|
||||
/* sk_buff::dev and sk_buff::rbnode are unionized. So we
|
||||
* pull the head out of the tree in order to be able to
|
||||
* deal with head->dev.
|
||||
*/
|
||||
head = inet_frag_pull_head(&fq->q);
|
||||
if (!head)
|
||||
goto out;
|
||||
|
||||
head->dev = dev;
|
||||
|
|
|
@@ -317,7 +317,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
return sk_fullsock(sk) &
return sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
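The one-character fix above (& becomes &&) matters because the bitwise form evaluates both operands unconditionally, while the logical form short-circuits when sk_fullsock() is false. A tiny stand-alone illustration of that short-circuit behaviour (plain C, nothing kernel-specific):

#include <stdio.h>
#include <stdbool.h>

static bool right_side(void)
{
	puts("right side evaluated");
	return true;
}

int main(void)
{
	bool left = false;

	if (left & right_side())   /* right_side() still runs */
		puts("bitwise: taken");
	if (left && right_side())  /* right_side() is skipped */
		puts("logical: taken");
	return 0;
}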
@ -703,7 +703,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
|
|||
static int reuse_unused_kprobe(struct kprobe *ap)
|
||||
{
|
||||
struct optimized_kprobe *op;
|
||||
int ret;
|
||||
|
||||
BUG_ON(!kprobe_unused(ap));
|
||||
/*
|
||||
|
@ -715,9 +714,8 @@ static int reuse_unused_kprobe(struct kprobe *ap)
|
|||
/* Enable the probe again */
|
||||
ap->flags &= ~KPROBE_FLAG_DISABLED;
|
||||
/* Optimize it again (remove from op->list) */
|
||||
ret = kprobe_optready(ap);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!kprobe_optready(ap))
|
||||
return -EINVAL;
|
||||
|
||||
optimize_kprobe(ap);
|
||||
return 0;
|
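The hunk above replaces "ret = kprobe_optready(ap); if (ret) return ret;" with "if (!kprobe_optready(ap)) return -EINVAL;": kprobe_optready() is a readiness predicate, not an error code, so testing its return value as an error inverts the intent. A compact stand-alone sketch of that inverted-predicate pattern (resource_ready() and the callers are made-up names for illustration):

#include <stdio.h>
#include <stdbool.h>

static bool resource_ready(void) { return true; } /* success predicate */

static int use_resource_buggy(void)
{
	int ret = resource_ready();
	if (ret)            /* treats "ready" as an error... */
		return ret; /* ...so it fails exactly when things are fine */
	return 0;
}

static int use_resource_fixed(void)
{
	if (!resource_ready())
		return -22; /* -EINVAL: bail out only when not ready */
	return 0;
}

int main(void)
{
	printf("buggy=%d fixed=%d\n", use_resource_buggy(), use_resource_fixed());
	return 0;
}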
@@ -3567,9 +3567,6 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned int depth;
int i;

if (unlikely(!debug_locks))
return 0;

depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
@ -4894,12 +4894,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
|
|||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
extern const u64 max_cfs_quota_period;
|
||||
|
||||
static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
|
||||
{
|
||||
struct cfs_bandwidth *cfs_b =
|
||||
container_of(timer, struct cfs_bandwidth, period_timer);
|
||||
int overrun;
|
||||
int idle = 0;
|
||||
int count = 0;
|
||||
|
||||
raw_spin_lock(&cfs_b->lock);
|
||||
for (;;) {
|
||||
|
@ -4907,6 +4910,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
|
|||
if (!overrun)
|
||||
break;
|
||||
|
||||
if (++count > 3) {
|
||||
u64 new, old = ktime_to_ns(cfs_b->period);
|
||||
|
||||
new = (old * 147) / 128; /* ~115% */
|
||||
new = min(new, max_cfs_quota_period);
|
||||
|
||||
cfs_b->period = ns_to_ktime(new);
|
||||
|
||||
/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
|
||||
cfs_b->quota *= new;
|
||||
cfs_b->quota = div64_u64(cfs_b->quota, old);
|
||||
|
||||
pr_warn_ratelimited(
|
||||
"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
|
||||
smp_processor_id(),
|
||||
div_u64(new, NSEC_PER_USEC),
|
||||
div_u64(cfs_b->quota, NSEC_PER_USEC));
|
||||
|
||||
/* reset count so we don't come right back in here */
|
||||
count = 0;
|
||||
}
|
||||
|
||||
idle = do_sched_cfs_period_timer(cfs_b, overrun);
|
||||
}
|
||||
if (idle)
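The guard added above grows the period by 147/128 (roughly +15%) once the timer has looped more than three times, and rescales the quota by the same factor so the configured quota/period ratio is preserved (the real code also caps the period at max_cfs_quota_period). A small stand-alone check of that arithmetic using plain integer math (illustration only, not scheduler code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 100000; /* 100 us, in ns */
	uint64_t quota  =  50000; /*  50 us, in ns */

	/* Same scaling as the hunk: new = old * 147 / 128, quota follows. */
	uint64_t new_period = period * 147 / 128;
	uint64_t new_quota  = quota * new_period / period;

	printf("period %llu -> %llu ns, quota %llu -> %llu ns\n",
	       (unsigned long long)period, (unsigned long long)new_period,
	       (unsigned long long)quota, (unsigned long long)new_quota);
	return 0;
}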
@@ -126,6 +126,7 @@ static int zero;
static int __maybe_unused one = 1;
static int __maybe_unused two = 2;
static int __maybe_unused four = 4;
static unsigned long zero_ul;
static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
@@ -1719,7 +1720,7 @@ static struct ctl_table fs_table[] = {
.maxlen = sizeof(files_stat.max_files),
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
.extra1 = &zero,
.extra1 = &zero_ul,
.extra2 = &long_max,
},
{
@ -275,7 +275,7 @@ static u64 notrace suspended_sched_clock_read(void)
|
|||
return cd.read_data[seq & 1].epoch_cyc;
|
||||
}
|
||||
|
||||
static int sched_clock_suspend(void)
|
||||
int sched_clock_suspend(void)
|
||||
{
|
||||
struct clock_read_data *rd = &cd.read_data[0];
|
||||
|
||||
|
@ -286,7 +286,7 @@ static int sched_clock_suspend(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void sched_clock_resume(void)
|
||||
void sched_clock_resume(void)
|
||||
{
|
||||
struct clock_read_data *rd = &cd.read_data[0];
|
||||
|
||||
|
|
|
@ -491,6 +491,7 @@ void tick_freeze(void)
|
|||
trace_suspend_resume(TPS("timekeeping_freeze"),
|
||||
smp_processor_id(), true);
|
||||
system_state = SYSTEM_SUSPEND;
|
||||
sched_clock_suspend();
|
||||
timekeeping_suspend();
|
||||
} else {
|
||||
tick_suspend_local();
|
||||
|
@ -514,6 +515,7 @@ void tick_unfreeze(void)
|
|||
|
||||
if (tick_freeze_depth == num_online_cpus()) {
|
||||
timekeeping_resume();
|
||||
sched_clock_resume();
|
||||
system_state = SYSTEM_RUNNING;
|
||||
trace_suspend_resume(TPS("timekeeping_freeze"),
|
||||
smp_processor_id(), false);
|
||||
|
|
|
@ -14,6 +14,13 @@ extern u64 timekeeping_max_deferment(void);
|
|||
extern void timekeeping_warp_clock(void);
|
||||
extern int timekeeping_suspend(void);
|
||||
extern void timekeeping_resume(void);
|
||||
#ifdef CONFIG_GENERIC_SCHED_CLOCK
|
||||
extern int sched_clock_suspend(void);
|
||||
extern void sched_clock_resume(void);
|
||||
#else
|
||||
static inline int sched_clock_suspend(void) { return 0; }
|
||||
static inline void sched_clock_resume(void) { }
|
||||
#endif
|
||||
|
||||
extern void do_timer(unsigned long ticks);
|
||||
extern void update_wall_time(void);
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/kprobes.h>
|
||||
|
||||
#include <trace/events/sched.h>
|
||||
|
||||
|
@ -6250,7 +6251,7 @@ void ftrace_reset_array_ops(struct trace_array *tr)
|
|||
tr->ops->func = ftrace_stub;
|
||||
}
|
||||
|
||||
static inline void
|
||||
static nokprobe_inline void
|
||||
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
|
||||
struct ftrace_ops *ignored, struct pt_regs *regs)
|
||||
{
|
||||
|
@ -6310,11 +6311,13 @@ static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
|
|||
{
|
||||
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
|
||||
}
|
||||
NOKPROBE_SYMBOL(ftrace_ops_list_func);
|
||||
#else
|
||||
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
|
||||
{
|
||||
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
|
||||
}
|
||||
NOKPROBE_SYMBOL(ftrace_ops_no_ops);
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -6341,6 +6344,7 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
|
|||
preempt_enable_notrace();
|
||||
trace_clear_recursion(bit);
|
||||
}
|
||||
NOKPROBE_SYMBOL(ftrace_ops_assist_func);
|
||||
|
||||
/**
|
||||
* ftrace_ops_get_func - get the function a trampoline should call
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#include <linux/moduleparam.h>
|
||||
#include <linux/pkeys.h>
|
||||
#include <linux/oom.h>
|
||||
#include <linux/sched/mm.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -2500,7 +2501,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
|
|||
vma = find_vma_prev(mm, addr, &prev);
|
||||
if (vma && (vma->vm_start <= addr))
|
||||
return vma;
|
||||
if (!prev || expand_stack(prev, addr))
|
||||
/* don't alter vm_end if the coredump is running */
|
||||
if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
|
||||
return NULL;
|
||||
if (prev->vm_flags & VM_LOCKED)
|
||||
populate_vma_page_range(prev, addr, prev->vm_end, NULL);
|
||||
|
@ -2526,6 +2528,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
|
|||
return vma;
|
||||
if (!(vma->vm_flags & VM_GROWSDOWN))
|
||||
return NULL;
|
||||
/* don't alter vm_start if the coredump is running */
|
||||
if (!mmget_still_valid(mm))
|
||||
return NULL;
|
||||
start = vma->vm_start;
|
||||
if (expand_stack(vma, addr))
|
||||
return NULL;
|
||||
|
|
|
@@ -2529,8 +2529,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
ai->groups[group].base_offset = areas[group] - base;
}

pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
ai->dyn_size, ai->unit_size);

rc = pcpu_setup_first_chunk(ai, base);
@@ -2651,8 +2651,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
}

/* we're ready, commit */
pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
unit_pages, psize_str, vm.addr, ai->static_size,
pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
unit_pages, psize_str, ai->static_size,
ai->reserved_size, ai->dyn_size);

rc = pcpu_setup_first_chunk(ai, vm.addr);
@@ -1273,13 +1273,8 @@ const char * const vmstat_text[] = {
#endif
#endif /* CONFIG_MEMORY_BALLOON */
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
#else
"", /* nr_tlb_remote_flush */
"", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)

static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
{
if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
if (arg < 0 || arg >= MAX_LEC_ITF)
return -EINVAL;
arg = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[arg])
return -EINVAL;
vcc->proto_data = dev_lec[arg];
return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
i = arg;
if (arg >= MAX_LEC_ITF)
return -EINVAL;
i = array_index_nospec(arg, MAX_LEC_ITF);
if (!dev_lec[i]) {
int size;
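Both hunks above follow the same pattern: do the architectural bounds check first, then clamp the index with array_index_nospec() before it is used, so a mispredicted branch cannot speculatively index dev_lec[] out of range. A simplified stand-alone sketch of that pattern (index_nospec() here is a plain-C stand-in, not the kernel's arch-tuned helper):

#include <stdio.h>
#include <stddef.h>

#define TABLE_SIZE 8

/* Force the index to depend on the bounds check through data flow (a
 * mask) rather than control flow alone; illustration only. */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t in_range = (size_t)(idx < size); /* 1 or 0 */
	size_t mask = (size_t)0 - in_range;     /* all-ones or zero */
	return idx & mask;                      /* original index or 0 */
}

static int table[TABLE_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };

static int lookup(long arg)
{
	if (arg < 0 || arg >= TABLE_SIZE)
		return -1;                      /* architectural check */
	return table[index_nospec((size_t)arg, TABLE_SIZE)];
}

int main(void)
{
	printf("%d %d\n", lookup(3), lookup(100));
	return 0;
}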
@ -195,13 +195,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
|
|||
/* note: already called with rcu_read_lock */
|
||||
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
struct net_bridge_port *p = br_port_get_rcu(skb->dev);
|
||||
|
||||
__br_handle_local_finish(skb);
|
||||
|
||||
BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
|
||||
br_pass_frame_up(skb);
|
||||
return 0;
|
||||
/* return 1 to signal the okfn() was called so it's ok to use the skb */
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -278,10 +275,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
|
|||
goto forward;
|
||||
}
|
||||
|
||||
/* Deliver packet to local host only */
|
||||
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
|
||||
NULL, skb, skb->dev, NULL, br_handle_local_finish);
|
||||
return RX_HANDLER_CONSUMED;
|
||||
/* The else clause should be hit when nf_hook():
|
||||
* - returns < 0 (drop/error)
|
||||
* - returns = 0 (stolen/nf_queue)
|
||||
* Thus return 1 from the okfn() to signal the skb is ok to pass
|
||||
*/
|
||||
if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
|
||||
dev_net(skb->dev), NULL, skb, skb->dev, NULL,
|
||||
br_handle_local_finish) == 1) {
|
||||
return RX_HANDLER_PASS;
|
||||
} else {
|
||||
return RX_HANDLER_CONSUMED;
|
||||
}
|
||||
}
|
||||
|
||||
forward:
|
||||
|
|
|
@ -2152,7 +2152,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
|
|||
|
||||
__br_multicast_open(br, query);
|
||||
|
||||
list_for_each_entry(port, &br->port_list, list) {
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(port, &br->port_list, list) {
|
||||
if (port->state == BR_STATE_DISABLED ||
|
||||
port->state == BR_STATE_BLOCKING)
|
||||
continue;
|
||||
|
@ -2164,6 +2165,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
|
|||
br_multicast_enable(&port->ip6_own_query);
|
||||
#endif
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
|
||||
|
|
|
@ -1180,7 +1180,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
|
|||
BUG_ON(!dev_net(dev));
|
||||
|
||||
net = dev_net(dev);
|
||||
if (dev->flags & IFF_UP)
|
||||
|
||||
/* Some auto-enslaved devices e.g. failover slaves are
|
||||
* special, as userspace might rename the device after
|
||||
* the interface had been brought up and running since
|
||||
* the point kernel initiated auto-enslavement. Allow
|
||||
* live name change even when these slave devices are
|
||||
* up and running.
|
||||
*
|
||||
* Typically, users of these auto-enslaving devices
|
||||
* don't actually care about slave name change, as
|
||||
* they are supposed to operate on master interface
|
||||
* directly.
|
||||
*/
|
||||
if (dev->flags & IFF_UP &&
|
||||
likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
|
||||
return -EBUSY;
|
||||
|
||||
write_seqcount_begin(&devnet_rename_seq);
|
||||
|
|
|
@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
|
|||
goto err_upper_link;
|
||||
}
|
||||
|
||||
slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
|
||||
slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
|
||||
|
||||
if (fops && fops->slave_register &&
|
||||
!fops->slave_register(slave_dev, failover_dev))
|
||||
return NOTIFY_OK;
|
||||
|
||||
netdev_upper_dev_unlink(slave_dev, failover_dev);
|
||||
slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
|
||||
slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
|
||||
err_upper_link:
|
||||
netdev_rx_handler_unregister(slave_dev);
|
||||
done:
|
||||
|
@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
|
|||
|
||||
netdev_rx_handler_unregister(slave_dev);
|
||||
netdev_upper_dev_unlink(slave_dev, failover_dev);
|
||||
slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
|
||||
slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
|
||||
|
||||
if (fops && fops->slave_unregister &&
|
||||
!fops->slave_unregister(slave_dev, failover_dev))
|
||||
|
|
|
@ -5071,7 +5071,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
|
|||
|
||||
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
|
||||
{
|
||||
int mac_len;
|
||||
int mac_len, meta_len;
|
||||
void *meta;
|
||||
|
||||
if (skb_cow(skb, skb_headroom(skb)) < 0) {
|
||||
kfree_skb(skb);
|
||||
|
@ -5083,6 +5084,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
|
|||
memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
|
||||
mac_len - VLAN_HLEN - ETH_TLEN);
|
||||
}
|
||||
|
||||
meta_len = skb_metadata_len(skb);
|
||||
if (meta_len) {
|
||||
meta = skb_metadata_end(skb) - meta_len;
|
||||
memmove(meta + VLAN_HLEN, meta, meta_len);
|
||||
}
|
||||
|
||||
skb->mac_header += VLAN_HLEN;
|
||||
return skb;
|
||||
}
|
||||
|
|
|
@ -120,6 +120,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
|
|||
struct guehdr *guehdr;
|
||||
void *data;
|
||||
u16 doffset = 0;
|
||||
u8 proto_ctype;
|
||||
|
||||
if (!fou)
|
||||
return 1;
|
||||
|
@ -211,13 +212,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
|
|||
if (unlikely(guehdr->control))
|
||||
return gue_control_message(skb, guehdr);
|
||||
|
||||
proto_ctype = guehdr->proto_ctype;
|
||||
__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
|
||||
skb_reset_transport_header(skb);
|
||||
|
||||
if (iptunnel_pull_offloads(skb))
|
||||
goto drop;
|
||||
|
||||
return -guehdr->proto_ctype;
|
||||
return -proto_ctype;
|
||||
|
||||
drop:
|
||||
kfree_skb(skb);
|
||||
|
|
|
@ -25,6 +25,62 @@
|
|||
#include <net/sock.h>
|
||||
#include <net/inet_frag.h>
|
||||
#include <net/inet_ecn.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/ipv6.h>
|
||||
|
||||
/* Use skb->cb to track consecutive/adjacent fragments coming at
|
||||
* the end of the queue. Nodes in the rb-tree queue will
|
||||
* contain "runs" of one or more adjacent fragments.
|
||||
*
|
||||
* Invariants:
|
||||
* - next_frag is NULL at the tail of a "run";
|
||||
* - the head of a "run" has the sum of all fragment lengths in frag_run_len.
|
||||
*/
|
||||
struct ipfrag_skb_cb {
|
||||
union {
|
||||
struct inet_skb_parm h4;
|
||||
struct inet6_skb_parm h6;
|
||||
};
|
||||
struct sk_buff *next_frag;
|
||||
int frag_run_len;
|
||||
};
|
||||
|
||||
#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
|
||||
|
||||
static void fragcb_clear(struct sk_buff *skb)
|
||||
{
|
||||
RB_CLEAR_NODE(&skb->rbnode);
|
||||
FRAG_CB(skb)->next_frag = NULL;
|
||||
FRAG_CB(skb)->frag_run_len = skb->len;
|
||||
}
|
||||
|
||||
/* Append skb to the last "run". */
|
||||
static void fragrun_append_to_last(struct inet_frag_queue *q,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
fragcb_clear(skb);
|
||||
|
||||
FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
|
||||
FRAG_CB(q->fragments_tail)->next_frag = skb;
|
||||
q->fragments_tail = skb;
|
||||
}
|
||||
|
||||
/* Create a new "run" with the skb. */
|
||||
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
|
||||
{
|
||||
BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
|
||||
fragcb_clear(skb);
|
||||
|
||||
if (q->last_run_head)
|
||||
rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
|
||||
&q->last_run_head->rbnode.rb_right);
|
||||
else
|
||||
rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
|
||||
rb_insert_color(&skb->rbnode, &q->rb_fragments);
|
||||
|
||||
q->fragments_tail = skb;
|
||||
q->last_run_head = skb;
|
||||
}
|
||||
|
||||
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
|
||||
* Value : 0xff if frame should be dropped.
|
||||
|
@ -123,6 +179,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
|
|||
kmem_cache_free(f->frags_cachep, q);
|
||||
}
|
||||
|
||||
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
|
||||
{
|
||||
struct rb_node *p = rb_first(root);
|
||||
unsigned int sum = 0;
|
||||
|
||||
while (p) {
|
||||
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
|
||||
|
||||
p = rb_next(p);
|
||||
rb_erase(&skb->rbnode, root);
|
||||
while (skb) {
|
||||
struct sk_buff *next = FRAG_CB(skb)->next_frag;
|
||||
|
||||
sum += skb->truesize;
|
||||
kfree_skb(skb);
|
||||
skb = next;
|
||||
}
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
EXPORT_SYMBOL(inet_frag_rbtree_purge);
|
||||
|
||||
void inet_frag_destroy(struct inet_frag_queue *q)
|
||||
{
|
||||
struct sk_buff *fp;
|
||||
|
@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 * When reassembling an IPv6 datagram, if
	 * one or more its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (last->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < last->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == last->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = curr->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= curr->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= curr->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	skb->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	struct sk_buff **nextp;
	int delta;

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			return NULL;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(head->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return NULL;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->net, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			return NULL;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->net, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data)
{
	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			fp->sk = NULL;
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->net, head->truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head;

	if (q->fragments) {
		head = q->fragments;
		q->fragments = head->next;
	} else {
		struct sk_buff *skb;

		head = skb_rb_first(&q->rb_fragments);
		if (!head)
			return NULL;
		skb = FRAG_CB(head)->next_frag;
		if (skb)
			rb_replace_node(&head->rbnode, &skb->rbnode,
					&q->rb_fragments);
		else
			rb_erase(&head->rbnode, &q->rb_fragments);
		memset(&head->rbnode, 0, sizeof(head->rbnode));
		barrier();
	}
	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->net, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);
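The insert path above keys the rbtree by fragment offset and settles every incoming fragment against the existing runs in a single walk: fall entirely left or right of a run and keep searching, land fully inside a run and report a duplicate, anything else is an overlap. A rough stand-alone model of that three-way test, assuming plain user space with a sorted array and binary search in place of the kernel rbtree (the struct, function, and sample values here are invented for illustration):

#include <stdio.h>

/* Mirror of the return codes used by the fragment-insert helper. */
enum { IPFRAG_OK, IPFRAG_DUP, IPFRAG_OVERLAP };

struct run { int offset, len; };	/* one "run" of adjacent fragments */

/* Classify a new fragment [offset, end) against sorted, non-overlapping runs:
 *  - entirely left or right of a run: keep searching;
 *  - fully contained in a run: duplicate;
 *  - anything else: overlap.
 */
static int classify(const struct run *runs, int nruns, int offset, int end)
{
	int lo = 0, hi = nruns - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		int run_start = runs[mid].offset;
		int run_end = run_start + runs[mid].len;

		if (end <= run_start)
			hi = mid - 1;
		else if (offset >= run_end)
			lo = mid + 1;
		else if (offset >= run_start && end <= run_end)
			return IPFRAG_DUP;
		else
			return IPFRAG_OVERLAP;
	}
	return IPFRAG_OK;
}

int main(void)
{
	static const struct run runs[] = { { 0, 1200 }, { 2400, 1200 } };
	static const char *names[] = { "OK", "DUP", "OVERLAP" };

	printf("[1200,2400) -> %s\n", names[classify(runs, 2, 1200, 2400)]); /* fills the gap */
	printf("[0,1200)    -> %s\n", names[classify(runs, 2, 0, 1200)]);    /* exact duplicate */
	printf("[1000,1400) -> %s\n", names[classify(runs, 2, 1000, 1400)]); /* partial overlap */
	return 0;
}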
@@ -57,57 +57,6 @@
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	struct inet_skb_parm	h;
	struct sk_buff		*next_frag;
	int			frag_run_len;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

static void ip4_frag_init_run(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));

	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
					struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
{
	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	ip4_frag_init_run(skb);
	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;
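These helpers keep per-fragment state in skb->cb: the head of each run carries the run's total length in frag_run_len, next_frag chains the members of a run, and an adjacent fragment therefore extends the current run instead of adding another tree node. A minimal user-space sketch of that bookkeeping, assuming toy struct names that stand in for sk_buff and FRAG_CB() (they are not the kernel types):

#include <stdio.h>

/* Toy stand-in for FRAG_CB(): each fragment records its length, the next
 * fragment of the same run, and (on the run head only) the total run length.
 */
struct frag {
	int len;
	int frag_run_len;		/* meaningful on the head of a run */
	struct frag *next_frag;
};

struct queue {
	struct frag *last_run_head;
	struct frag *fragments_tail;
};

/* Analogue of ip4_frag_create_run(): the fragment starts a new run. */
static void create_run(struct queue *q, struct frag *f)
{
	f->next_frag = NULL;
	f->frag_run_len = f->len;
	q->last_run_head = f;
	q->fragments_tail = f;
}

/* Analogue of ip4_frag_append_to_last_run(): the fragment is adjacent to the
 * tail, so it extends the current run instead of creating a new one.
 */
static void append_to_last_run(struct queue *q, struct frag *f)
{
	f->next_frag = NULL;
	q->last_run_head->frag_run_len += f->len;
	q->fragments_tail->next_frag = f;
	q->fragments_tail = f;
}

int main(void)
{
	struct queue q = { 0 };
	struct frag a = { .len = 1200 }, b = { .len = 1200 }, c = { .len = 800 };

	create_run(&q, &a);		/* first fragment: run = [a]      */
	append_to_last_run(&q, &b);	/* adjacent:       run = [a, b]   */
	append_to_last_run(&q, &c);	/* adjacent:       run = [a, b, c] */

	printf("run length = %d\n", q.last_run_head->frag_run_len);	/* 3200 */
	return 0;
}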
@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	if (qp->q.fragments) {
		head = qp->q.fragments;
		qp->q.fragments = head->next;
	} else {
		head = skb_rb_first(&qp->q.rb_fragments);
		if (!head)
			goto out;
		if (FRAG_CB(head)->next_frag)
			rb_replace_node(&head->rbnode,
					&FRAG_CB(head)->next_frag->rbnode,
					&qp->q.rb_fragments);
		else
			rb_erase(&head->rbnode, &qp->q.rb_fragments);
		memset(&head->rbnode, 0, sizeof(head->rbnode));
		barrier();
	}
	if (head == qp->q.fragments_tail)
		qp->q.fragments_tail = NULL;

	sub_frag_mem_limit(qp->q.net, head->truesize);

	head = inet_frag_pull_head(&qp->q);
	if (!head)
		goto out;
	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;
@@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct rb_node **rbn, *parent;
	struct sk_buff *skb1, *prev_tail;
	int ihl, end, skb1_run_end;
	int ihl, end, flags, offset;
	struct sk_buff *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int err = -ENOENT;
	u8 ecn;

@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
|
|||
*/
|
||||
if (end < qp->q.len ||
|
||||
((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
|
||||
goto err;
|
||||
goto discard_qp;
|
||||
qp->q.flags |= INET_FRAG_LAST_IN;
|
||||
qp->q.len = end;
|
||||
} else {
|
||||
|
@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
|
|||
if (end > qp->q.len) {
|
||||
/* Some bits beyond end -> corruption. */
|
||||
if (qp->q.flags & INET_FRAG_LAST_IN)
|
||||
goto err;
|
||||
goto discard_qp;
|
||||
qp->q.len = end;
|
||||
}
|
||||
}
|
||||
if (end == offset)
|
||||
goto err;
|
||||
goto discard_qp;
|
||||
|
||||
err = -ENOMEM;
|
||||
if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
|
||||
goto err;
|
||||
goto discard_qp;
|
||||
|
||||
err = pskb_trim_rcsum(skb, end - offset);
|
||||
if (err)
|
||||
goto err;
|
||||
goto discard_qp;
|
||||
|
||||
/* Note : skb->rbnode and skb->dev share the same location. */
|
||||
dev = skb->dev;
|
||||
/* Makes sure compiler wont do silly aliasing games */
|
||||
barrier();
|
||||
|
||||
/* RFC5722, Section 4, amended by Errata ID : 3089
|
||||
* When reassembling an IPv6 datagram, if
|
||||
* one or more its constituent fragments is determined to be an
|
||||
* overlapping fragment, the entire datagram (and any constituent
|
||||
* fragments) MUST be silently discarded.
|
||||
*
|
||||
* We do the same here for IPv4 (and increment an snmp counter) but
|
||||
* we do not want to drop the whole queue in response to a duplicate
|
||||
* fragment.
|
||||
*/
|
||||
|
||||
err = -EINVAL;
|
||||
/* Find out where to put this fragment. */
|
||||
prev_tail = qp->q.fragments_tail;
|
||||
if (!prev_tail)
|
||||
ip4_frag_create_run(&qp->q, skb); /* First fragment. */
|
||||
else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
|
||||
/* This is the common case: skb goes to the end. */
|
||||
/* Detect and discard overlaps. */
|
||||
if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
|
||||
goto discard_qp;
|
||||
if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
|
||||
ip4_frag_append_to_last_run(&qp->q, skb);
|
||||
else
|
||||
ip4_frag_create_run(&qp->q, skb);
|
||||
} else {
|
||||
/* Binary search. Note that skb can become the first fragment,
|
||||
* but not the last (covered above).
|
||||
*/
|
||||
rbn = &qp->q.rb_fragments.rb_node;
|
||||
do {
|
||||
parent = *rbn;
|
||||
skb1 = rb_to_skb(parent);
|
||||
skb1_run_end = skb1->ip_defrag_offset +
|
||||
FRAG_CB(skb1)->frag_run_len;
|
||||
if (end <= skb1->ip_defrag_offset)
|
||||
rbn = &parent->rb_left;
|
||||
else if (offset >= skb1_run_end)
|
||||
rbn = &parent->rb_right;
|
||||
else if (offset >= skb1->ip_defrag_offset &&
|
||||
end <= skb1_run_end)
|
||||
goto err; /* No new data, potential duplicate */
|
||||
else
|
||||
goto discard_qp; /* Found an overlap */
|
||||
} while (*rbn);
|
||||
/* Here we have parent properly set, and rbn pointing to
|
||||
* one of its NULL left/right children. Insert skb.
|
||||
*/
|
||||
ip4_frag_init_run(skb);
|
||||
rb_link_node(&skb->rbnode, parent, rbn);
|
||||
rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
|
||||
}
|
||||
err = inet_frag_queue_insert(&qp->q, skb, offset, end);
|
||||
if (err)
|
||||
goto insert_error;
|
||||
|
||||
if (dev)
|
||||
qp->iif = dev->ifindex;
|
||||
skb->ip_defrag_offset = offset;
|
||||
|
||||
qp->q.stamp = skb->tstamp;
|
||||
qp->q.meat += skb->len;
|
||||
|
@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
|
|||
skb->_skb_refdst = 0UL;
|
||||
err = ip_frag_reasm(qp, skb, prev_tail, dev);
|
||||
skb->_skb_refdst = orefdst;
|
||||
if (err)
|
||||
inet_frag_kill(&qp->q);
|
||||
return err;
|
||||
}
|
||||
|
||||
skb_dst_drop(skb);
|
||||
return -EINPROGRESS;
|
||||
|
||||
insert_error:
|
||||
if (err == IPFRAG_DUP) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
}
|
||||
err = -EINVAL;
|
||||
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
|
||||
discard_qp:
|
||||
inet_frag_kill(&qp->q);
|
||||
__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
|
||||
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
|
||||
err:
|
||||
kfree_skb(skb);
|
||||
return err;
|
||||
|
@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
|
|||
{
|
||||
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
|
||||
struct iphdr *iph;
|
||||
struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
|
||||
struct sk_buff **nextp; /* To build frag_list. */
|
||||
struct rb_node *rbn;
|
||||
int len;
|
||||
int ihlen;
|
||||
int delta;
|
||||
int err;
|
||||
void *reasm_data;
|
||||
int len, err;
|
||||
u8 ecn;
|
||||
|
||||
ipq_kill(qp);
|
||||
|
@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
|
|||
err = -EINVAL;
|
||||
goto out_fail;
|
||||
}
|
||||
|
||||
/* Make the one we just received the head. */
|
||||
if (head != skb) {
|
||||
fp = skb_clone(skb, GFP_ATOMIC);
|
||||
if (!fp)
|
||||
goto out_nomem;
|
||||
FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
|
||||
if (RB_EMPTY_NODE(&skb->rbnode))
|
||||
FRAG_CB(prev_tail)->next_frag = fp;
|
||||
else
|
||||
rb_replace_node(&skb->rbnode, &fp->rbnode,
|
||||
&qp->q.rb_fragments);
|
||||
if (qp->q.fragments_tail == skb)
|
||||
qp->q.fragments_tail = fp;
|
||||
skb_morph(skb, head);
|
||||
FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
|
||||
rb_replace_node(&head->rbnode, &skb->rbnode,
|
||||
&qp->q.rb_fragments);
|
||||
consume_skb(head);
|
||||
head = skb;
|
||||
}
|
||||
|
||||
WARN_ON(head->ip_defrag_offset != 0);
|
||||
|
||||
/* Allocate a new buffer for the datagram. */
|
||||
ihlen = ip_hdrlen(head);
|
||||
len = ihlen + qp->q.len;
|
||||
reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
|
||||
if (!reasm_data)
|
||||
goto out_nomem;
|
||||
|
||||
len = ip_hdrlen(skb) + qp->q.len;
|
||||
err = -E2BIG;
|
||||
if (len > 65535)
|
||||
goto out_oversize;
|
||||
|
||||
delta = - head->truesize;
|
||||
inet_frag_reasm_finish(&qp->q, skb, reasm_data);
|
||||
|
||||
/* Head of list must not be cloned. */
|
||||
if (skb_unclone(head, GFP_ATOMIC))
|
||||
goto out_nomem;
|
||||
skb->dev = dev;
|
||||
IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
|
||||
|
||||
delta += head->truesize;
|
||||
if (delta)
|
||||
add_frag_mem_limit(qp->q.net, delta);
|
||||
|
||||
/* If the first fragment is fragmented itself, we split
|
||||
* it to two chunks: the first with data and paged part
|
||||
* and the second, holding only fragments. */
|
||||
if (skb_has_frag_list(head)) {
|
||||
struct sk_buff *clone;
|
||||
int i, plen = 0;
|
||||
|
||||
clone = alloc_skb(0, GFP_ATOMIC);
|
||||
if (!clone)
|
||||
goto out_nomem;
|
||||
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
|
||||
skb_frag_list_init(head);
|
||||
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
|
||||
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
|
||||
clone->len = clone->data_len = head->data_len - plen;
|
||||
head->truesize += clone->truesize;
|
||||
clone->csum = 0;
|
||||
clone->ip_summed = head->ip_summed;
|
||||
add_frag_mem_limit(qp->q.net, clone->truesize);
|
||||
skb_shinfo(head)->frag_list = clone;
|
||||
nextp = &clone->next;
|
||||
} else {
|
||||
nextp = &skb_shinfo(head)->frag_list;
|
||||
}
|
||||
|
||||
skb_push(head, head->data - skb_network_header(head));
|
||||
|
||||
/* Traverse the tree in order, to build frag_list. */
|
||||
fp = FRAG_CB(head)->next_frag;
|
||||
rbn = rb_next(&head->rbnode);
|
||||
rb_erase(&head->rbnode, &qp->q.rb_fragments);
|
||||
while (rbn || fp) {
|
||||
/* fp points to the next sk_buff in the current run;
|
||||
* rbn points to the next run.
|
||||
*/
|
||||
/* Go through the current run. */
|
||||
while (fp) {
|
||||
*nextp = fp;
|
||||
nextp = &fp->next;
|
||||
fp->prev = NULL;
|
||||
memset(&fp->rbnode, 0, sizeof(fp->rbnode));
|
||||
fp->sk = NULL;
|
||||
head->data_len += fp->len;
|
||||
head->len += fp->len;
|
||||
if (head->ip_summed != fp->ip_summed)
|
||||
head->ip_summed = CHECKSUM_NONE;
|
||||
else if (head->ip_summed == CHECKSUM_COMPLETE)
|
||||
head->csum = csum_add(head->csum, fp->csum);
|
||||
head->truesize += fp->truesize;
|
||||
fp = FRAG_CB(fp)->next_frag;
|
||||
}
|
||||
/* Move to the next run. */
|
||||
if (rbn) {
|
||||
struct rb_node *rbnext = rb_next(rbn);
|
||||
|
||||
fp = rb_to_skb(rbn);
|
||||
rb_erase(rbn, &qp->q.rb_fragments);
|
||||
rbn = rbnext;
|
||||
}
|
||||
}
|
||||
sub_frag_mem_limit(qp->q.net, head->truesize);
|
||||
|
||||
*nextp = NULL;
|
||||
head->next = NULL;
|
||||
head->prev = NULL;
|
||||
head->dev = dev;
|
||||
head->tstamp = qp->q.stamp;
|
||||
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
|
||||
|
||||
iph = ip_hdr(head);
|
||||
iph = ip_hdr(skb);
|
||||
iph->tot_len = htons(len);
|
||||
iph->tos |= ecn;
|
||||
|
||||
|
@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
|
|||
* from one very small df-fragment and one large non-df frag.
|
||||
*/
|
||||
if (qp->max_df_size == qp->q.max_size) {
|
||||
IPCB(head)->flags |= IPSKB_FRAG_PMTU;
|
||||
IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
|
||||
iph->frag_off = htons(IP_DF);
|
||||
} else {
|
||||
iph->frag_off = 0;
|
||||
|
@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
|
|||
}
|
||||
EXPORT_SYMBOL(ip_check_defrag);
|
||||
|
||||
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
|
||||
{
|
||||
struct rb_node *p = rb_first(root);
|
||||
unsigned int sum = 0;
|
||||
|
||||
while (p) {
|
||||
struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
|
||||
|
||||
p = rb_next(p);
|
||||
rb_erase(&skb->rbnode, root);
|
||||
while (skb) {
|
||||
struct sk_buff *next = FRAG_CB(skb)->next_frag;
|
||||
|
||||
sum += skb->truesize;
|
||||
kfree_skb(skb);
|
||||
skb = next;
|
||||
}
|
||||
}
|
||||
return sum;
|
||||
}
|
||||
EXPORT_SYMBOL(inet_frag_rbtree_purge);
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static int dist_min;
|
||||
|
||||
|
|
|
@@ -1187,9 +1187,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct ip_options opt;
	struct rtable *rt;
	int res;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	/* Recompile ip options since IPCB may not be valid anymore.
	 */
	memset(&opt, 0, sizeof(opt));
	opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);

	rcu_read_lock();
	res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
	rcu_read_unlock();

	if (res)
		return;

	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);

	rt = skb_rtable(skb);
	if (rt)
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int room;

	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;

	/* Check #1 */
	if (tp->rcv_ssthresh < tp->window_clamp &&
	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	    !tcp_under_memory_pressure(sk)) {
	if (room > 0 && !tcp_under_memory_pressure(sk)) {
		int incr;

		/* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)

		if (incr) {
			incr = max_t(int, incr, 2 * skb->len);
			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
					       tp->window_clamp);
			tp->rcv_ssthresh += min(room, incr);
			inet_csk(sk)->icsk_ack.quick |= 1;
		}
	}
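The rewritten tcp_grow_window() first computes the remaining room, min(window_clamp, tcp_space()) - rcv_ssthresh, and then grows rcv_ssthresh by at most that room, so the threshold can no longer be advanced past the space the socket can actually advertise. A toy model of that clamp, assuming plain integers instead of the kernel structures and made-up sample numbers:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* rcv_ssthresh may only grow by what is left between it and
 * min(window_clamp, space); a non-positive room means no growth at all.
 */
static int grow(int rcv_ssthresh, int window_clamp, int space, int incr)
{
	int room = min_int(window_clamp, space) - rcv_ssthresh;

	if (room > 0)
		rcv_ssthresh += min_int(room, incr);
	return rcv_ssthresh;
}

int main(void)
{
	/* window_clamp is large but only 48 KiB of receive space is left,
	 * so a 32 KiB increment is capped at 48 KiB - 40 KiB = 8 KiB of room.
	 */
	printf("new rcv_ssthresh = %d\n", grow(40960, 262144, 49152, 32768));
	return 0;
}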
@ -136,6 +136,9 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
|
|||
}
|
||||
#endif
|
||||
|
||||
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
|
||||
struct sk_buff *prev_tail, struct net_device *dev);
|
||||
|
||||
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
|
||||
{
|
||||
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
|
||||
|
@ -177,9 +180,10 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
|
|||
static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
|
||||
const struct frag_hdr *fhdr, int nhoff)
|
||||
{
|
||||
struct sk_buff *prev, *next;
|
||||
unsigned int payload_len;
|
||||
int offset, end;
|
||||
struct net_device *dev;
|
||||
struct sk_buff *prev;
|
||||
int offset, end, err;
|
||||
u8 ecn;
|
||||
|
||||
if (fq->q.flags & INET_FRAG_COMPLETE) {
|
||||
|
@ -254,55 +258,18 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
goto err;
|
||||
}
|
||||
|
||||
/* Find out which fragments are in front and at the back of us
|
||||
* in the chain of fragments so far. We must know where to put
|
||||
* this fragment, right?
|
||||
*/
|
||||
prev = fq->q.fragments_tail;
|
||||
if (!prev || prev->ip_defrag_offset < offset) {
|
||||
next = NULL;
|
||||
goto found;
|
||||
}
|
||||
prev = NULL;
|
||||
for (next = fq->q.fragments; next != NULL; next = next->next) {
|
||||
if (next->ip_defrag_offset >= offset)
|
||||
break; /* bingo! */
|
||||
prev = next;
|
||||
}
|
||||
|
||||
found:
|
||||
/* RFC5722, Section 4:
|
||||
* When reassembling an IPv6 datagram, if
|
||||
* one or more its constituent fragments is determined to be an
|
||||
* overlapping fragment, the entire datagram (and any constituent
|
||||
* fragments, including those not yet received) MUST be silently
|
||||
* discarded.
|
||||
*/
|
||||
|
||||
/* Check for overlap with preceding fragment. */
|
||||
if (prev &&
|
||||
(prev->ip_defrag_offset + prev->len) > offset)
|
||||
goto discard_fq;
|
||||
|
||||
/* Look for overlap with succeeding segment. */
|
||||
if (next && next->ip_defrag_offset < end)
|
||||
goto discard_fq;
|
||||
|
||||
/* Note : skb->ip_defrag_offset and skb->dev share the same location */
|
||||
if (skb->dev)
|
||||
fq->iif = skb->dev->ifindex;
|
||||
/* Note : skb->rbnode and skb->dev share the same location. */
|
||||
dev = skb->dev;
|
||||
/* Makes sure compiler wont do silly aliasing games */
|
||||
barrier();
|
||||
skb->ip_defrag_offset = offset;
|
||||
|
||||
/* Insert this fragment in the chain of fragments. */
|
||||
skb->next = next;
|
||||
if (!next)
|
||||
fq->q.fragments_tail = skb;
|
||||
if (prev)
|
||||
prev->next = skb;
|
||||
else
|
||||
fq->q.fragments = skb;
|
||||
prev = fq->q.fragments_tail;
|
||||
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
|
||||
if (err)
|
||||
goto insert_error;
|
||||
|
||||
if (dev)
|
||||
fq->iif = dev->ifindex;
|
||||
|
||||
fq->q.stamp = skb->tstamp;
|
||||
fq->q.meat += skb->len;
|
||||
|
@ -319,11 +286,25 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
fq->q.flags |= INET_FRAG_FIRST_IN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
|
||||
fq->q.meat == fq->q.len) {
|
||||
unsigned long orefdst = skb->_skb_refdst;
|
||||
|
||||
discard_fq:
|
||||
skb->_skb_refdst = 0UL;
|
||||
err = nf_ct_frag6_reasm(fq, skb, prev, dev);
|
||||
skb->_skb_refdst = orefdst;
|
||||
return err;
|
||||
}
|
||||
|
||||
skb_dst_drop(skb);
|
||||
return -EINPROGRESS;
|
||||
|
||||
insert_error:
|
||||
if (err == IPFRAG_DUP)
|
||||
goto err;
|
||||
inet_frag_kill(&fq->q);
|
||||
err:
|
||||
skb_dst_drop(skb);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -333,147 +314,67 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
* It is called with locked fq, and caller must check that
|
||||
* queue is eligible for reassembly i.e. it is not COMPLETE,
|
||||
* the last and the first frames arrived and all the bits are here.
|
||||
*
|
||||
* returns true if *prev skb has been transformed into the reassembled
|
||||
* skb, false otherwise.
|
||||
*/
|
||||
static bool
|
||||
nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
|
||||
static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
|
||||
struct sk_buff *prev_tail, struct net_device *dev)
|
||||
{
|
||||
struct sk_buff *fp, *head = fq->q.fragments;
|
||||
int payload_len, delta;
|
||||
void *reasm_data;
|
||||
int payload_len;
|
||||
u8 ecn;
|
||||
|
||||
inet_frag_kill(&fq->q);
|
||||
|
||||
WARN_ON(head == NULL);
|
||||
WARN_ON(head->ip_defrag_offset != 0);
|
||||
|
||||
ecn = ip_frag_ecn_table[fq->ecn];
|
||||
if (unlikely(ecn == 0xff))
|
||||
return false;
|
||||
goto err;
|
||||
|
||||
/* Unfragmented part is taken from the first segment. */
|
||||
payload_len = ((head->data - skb_network_header(head)) -
|
||||
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
|
||||
if (!reasm_data)
|
||||
goto err;
|
||||
|
||||
payload_len = ((skb->data - skb_network_header(skb)) -
|
||||
sizeof(struct ipv6hdr) + fq->q.len -
|
||||
sizeof(struct frag_hdr));
|
||||
if (payload_len > IPV6_MAXPLEN) {
|
||||
net_dbg_ratelimited("nf_ct_frag6_reasm: payload len = %d\n",
|
||||
payload_len);
|
||||
return false;
|
||||
}
|
||||
|
||||
delta = - head->truesize;
|
||||
|
||||
/* Head of list must not be cloned. */
|
||||
if (skb_unclone(head, GFP_ATOMIC))
|
||||
return false;
|
||||
|
||||
delta += head->truesize;
|
||||
if (delta)
|
||||
add_frag_mem_limit(fq->q.net, delta);
|
||||
|
||||
/* If the first fragment is fragmented itself, we split
|
||||
* it to two chunks: the first with data and paged part
|
||||
* and the second, holding only fragments. */
|
||||
if (skb_has_frag_list(head)) {
|
||||
struct sk_buff *clone;
|
||||
int i, plen = 0;
|
||||
|
||||
clone = alloc_skb(0, GFP_ATOMIC);
|
||||
if (clone == NULL)
|
||||
return false;
|
||||
|
||||
clone->next = head->next;
|
||||
head->next = clone;
|
||||
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
|
||||
skb_frag_list_init(head);
|
||||
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
|
||||
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
|
||||
clone->len = clone->data_len = head->data_len - plen;
|
||||
head->data_len -= clone->len;
|
||||
head->len -= clone->len;
|
||||
clone->csum = 0;
|
||||
clone->ip_summed = head->ip_summed;
|
||||
|
||||
add_frag_mem_limit(fq->q.net, clone->truesize);
|
||||
}
|
||||
|
||||
/* morph head into last received skb: prev.
|
||||
*
|
||||
* This allows callers of ipv6 conntrack defrag to continue
|
||||
* to use the last skb(frag) passed into the reasm engine.
|
||||
* The last skb frag 'silently' turns into the full reassembled skb.
|
||||
*
|
||||
* Since prev is also part of q->fragments we have to clone it first.
|
||||
*/
|
||||
if (head != prev) {
|
||||
struct sk_buff *iter;
|
||||
|
||||
fp = skb_clone(prev, GFP_ATOMIC);
|
||||
if (!fp)
|
||||
return false;
|
||||
|
||||
fp->next = prev->next;
|
||||
|
||||
iter = head;
|
||||
while (iter) {
|
||||
if (iter->next == prev) {
|
||||
iter->next = fp;
|
||||
break;
|
||||
}
|
||||
iter = iter->next;
|
||||
}
|
||||
|
||||
skb_morph(prev, head);
|
||||
prev->next = head->next;
|
||||
consume_skb(head);
|
||||
head = prev;
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* We have to remove fragment header from datagram and to relocate
|
||||
* header in order to calculate ICV correctly. */
|
||||
skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
|
||||
memmove(head->head + sizeof(struct frag_hdr), head->head,
|
||||
(head->data - head->head) - sizeof(struct frag_hdr));
|
||||
head->mac_header += sizeof(struct frag_hdr);
|
||||
head->network_header += sizeof(struct frag_hdr);
|
||||
skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
|
||||
memmove(skb->head + sizeof(struct frag_hdr), skb->head,
|
||||
(skb->data - skb->head) - sizeof(struct frag_hdr));
|
||||
skb->mac_header += sizeof(struct frag_hdr);
|
||||
skb->network_header += sizeof(struct frag_hdr);
|
||||
|
||||
skb_shinfo(head)->frag_list = head->next;
|
||||
skb_reset_transport_header(head);
|
||||
skb_push(head, head->data - skb_network_header(head));
|
||||
skb_reset_transport_header(skb);
|
||||
|
||||
for (fp = head->next; fp; fp = fp->next) {
|
||||
head->data_len += fp->len;
|
||||
head->len += fp->len;
|
||||
if (head->ip_summed != fp->ip_summed)
|
||||
head->ip_summed = CHECKSUM_NONE;
|
||||
else if (head->ip_summed == CHECKSUM_COMPLETE)
|
||||
head->csum = csum_add(head->csum, fp->csum);
|
||||
head->truesize += fp->truesize;
|
||||
fp->sk = NULL;
|
||||
}
|
||||
sub_frag_mem_limit(fq->q.net, head->truesize);
|
||||
inet_frag_reasm_finish(&fq->q, skb, reasm_data);
|
||||
|
||||
head->ignore_df = 1;
|
||||
head->next = NULL;
|
||||
head->dev = dev;
|
||||
head->tstamp = fq->q.stamp;
|
||||
ipv6_hdr(head)->payload_len = htons(payload_len);
|
||||
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
|
||||
IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
|
||||
skb->ignore_df = 1;
|
||||
skb->dev = dev;
|
||||
ipv6_hdr(skb)->payload_len = htons(payload_len);
|
||||
ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
|
||||
IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
|
||||
|
||||
/* Yes, and fold redundant checksum back. 8) */
|
||||
if (head->ip_summed == CHECKSUM_COMPLETE)
|
||||
head->csum = csum_partial(skb_network_header(head),
|
||||
skb_network_header_len(head),
|
||||
head->csum);
|
||||
if (skb->ip_summed == CHECKSUM_COMPLETE)
|
||||
skb->csum = csum_partial(skb_network_header(skb),
|
||||
skb_network_header_len(skb),
|
||||
skb->csum);
|
||||
|
||||
fq->q.fragments = NULL;
|
||||
fq->q.rb_fragments = RB_ROOT;
|
||||
fq->q.fragments_tail = NULL;
|
||||
fq->q.last_run_head = NULL;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
inet_frag_kill(&fq->q);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -542,7 +443,6 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
|
|||
int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
|
||||
{
|
||||
u16 savethdr = skb->transport_header;
|
||||
struct net_device *dev = skb->dev;
|
||||
int fhoff, nhoff, ret;
|
||||
struct frag_hdr *fhdr;
|
||||
struct frag_queue *fq;
|
||||
|
@ -565,10 +465,6 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
|
|||
hdr = ipv6_hdr(skb);
|
||||
fhdr = (struct frag_hdr *)skb_transport_header(skb);
|
||||
|
||||
if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
|
||||
fhdr->frag_off & htons(IP6_MF))
|
||||
return -EINVAL;
|
||||
|
||||
skb_orphan(skb);
|
||||
fq = fq_find(net, fhdr->identification, user, hdr,
|
||||
skb->dev ? skb->dev->ifindex : 0);
|
||||
|
@ -580,31 +476,17 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
|
|||
spin_lock_bh(&fq->q.lock);
|
||||
|
||||
ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
|
||||
if (ret < 0) {
|
||||
if (ret == -EPROTO) {
|
||||
skb->transport_header = savethdr;
|
||||
ret = 0;
|
||||
}
|
||||
goto out_unlock;
|
||||
if (ret == -EPROTO) {
|
||||
skb->transport_header = savethdr;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
/* after queue has assumed skb ownership, only 0 or -EINPROGRESS
|
||||
* must be returned.
|
||||
*/
|
||||
ret = -EINPROGRESS;
|
||||
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
|
||||
fq->q.meat == fq->q.len) {
|
||||
unsigned long orefdst = skb->_skb_refdst;
|
||||
if (ret)
|
||||
ret = -EINPROGRESS;
|
||||
|
||||
skb->_skb_refdst = 0UL;
|
||||
if (nf_ct_frag6_reasm(fq, skb, dev))
|
||||
ret = 0;
|
||||
skb->_skb_refdst = orefdst;
|
||||
} else {
|
||||
skb_dst_drop(skb);
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_bh(&fq->q.lock);
|
||||
inet_frag_put(&fq->q);
|
||||
return ret;
|
||||
|
|
|
@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
|
|||
|
||||
static struct inet_frags ip6_frags;
|
||||
|
||||
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
|
||||
struct net_device *dev);
|
||||
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
|
||||
struct sk_buff *prev_tail, struct net_device *dev);
|
||||
|
||||
static void ip6_frag_expire(struct timer_list *t)
|
||||
{
|
||||
|
@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
struct frag_hdr *fhdr, int nhoff,
|
||||
u32 *prob_offset)
|
||||
{
|
||||
struct sk_buff *prev, *next;
|
||||
struct net_device *dev;
|
||||
int offset, end, fragsize;
|
||||
struct net *net = dev_net(skb_dst(skb)->dev);
|
||||
int offset, end, fragsize;
|
||||
struct sk_buff *prev_tail;
|
||||
struct net_device *dev;
|
||||
int err = -ENOENT;
|
||||
u8 ecn;
|
||||
|
||||
if (fq->q.flags & INET_FRAG_COMPLETE)
|
||||
goto err;
|
||||
|
||||
err = -EINVAL;
|
||||
offset = ntohs(fhdr->frag_off) & ~0x7;
|
||||
end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
|
||||
((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
|
||||
|
||||
if ((unsigned int)end > IPV6_MAXPLEN) {
|
||||
*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
|
||||
/* note that if prob_offset is set, the skb is freed elsewhere,
|
||||
* we do not free it here.
|
||||
*/
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -145,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
*/
|
||||
if (end < fq->q.len ||
|
||||
((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
|
||||
goto err;
|
||||
goto discard_fq;
|
||||
fq->q.flags |= INET_FRAG_LAST_IN;
|
||||
fq->q.len = end;
|
||||
} else {
|
||||
|
@ -162,70 +167,35 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
if (end > fq->q.len) {
|
||||
/* Some bits beyond end -> corruption. */
|
||||
if (fq->q.flags & INET_FRAG_LAST_IN)
|
||||
goto err;
|
||||
goto discard_fq;
|
||||
fq->q.len = end;
|
||||
}
|
||||
}
|
||||
|
||||
if (end == offset)
|
||||
goto err;
|
||||
goto discard_fq;
|
||||
|
||||
err = -ENOMEM;
|
||||
/* Point into the IP datagram 'data' part. */
|
||||
if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
|
||||
goto err;
|
||||
|
||||
if (pskb_trim_rcsum(skb, end - offset))
|
||||
goto err;
|
||||
|
||||
/* Find out which fragments are in front and at the back of us
|
||||
* in the chain of fragments so far. We must know where to put
|
||||
* this fragment, right?
|
||||
*/
|
||||
prev = fq->q.fragments_tail;
|
||||
if (!prev || prev->ip_defrag_offset < offset) {
|
||||
next = NULL;
|
||||
goto found;
|
||||
}
|
||||
prev = NULL;
|
||||
for (next = fq->q.fragments; next != NULL; next = next->next) {
|
||||
if (next->ip_defrag_offset >= offset)
|
||||
break; /* bingo! */
|
||||
prev = next;
|
||||
}
|
||||
|
||||
found:
|
||||
/* RFC5722, Section 4, amended by Errata ID : 3089
|
||||
* When reassembling an IPv6 datagram, if
|
||||
* one or more its constituent fragments is determined to be an
|
||||
* overlapping fragment, the entire datagram (and any constituent
|
||||
* fragments) MUST be silently discarded.
|
||||
*/
|
||||
|
||||
/* Check for overlap with preceding fragment. */
|
||||
if (prev &&
|
||||
(prev->ip_defrag_offset + prev->len) > offset)
|
||||
goto discard_fq;
|
||||
|
||||
/* Look for overlap with succeeding segment. */
|
||||
if (next && next->ip_defrag_offset < end)
|
||||
err = pskb_trim_rcsum(skb, end - offset);
|
||||
if (err)
|
||||
goto discard_fq;
|
||||
|
||||
/* Note : skb->ip_defrag_offset and skb->dev share the same location */
|
||||
/* Note : skb->rbnode and skb->dev share the same location. */
|
||||
dev = skb->dev;
|
||||
if (dev)
|
||||
fq->iif = dev->ifindex;
|
||||
/* Makes sure compiler wont do silly aliasing games */
|
||||
barrier();
|
||||
skb->ip_defrag_offset = offset;
|
||||
|
||||
/* Insert this fragment in the chain of fragments. */
|
||||
skb->next = next;
|
||||
if (!next)
|
||||
fq->q.fragments_tail = skb;
|
||||
if (prev)
|
||||
prev->next = skb;
|
||||
else
|
||||
fq->q.fragments = skb;
|
||||
prev_tail = fq->q.fragments_tail;
|
||||
err = inet_frag_queue_insert(&fq->q, skb, offset, end);
|
||||
if (err)
|
||||
goto insert_error;
|
||||
|
||||
if (dev)
|
||||
fq->iif = dev->ifindex;
|
||||
|
||||
fq->q.stamp = skb->tstamp;
|
||||
fq->q.meat += skb->len;
|
||||
|
@ -246,44 +216,48 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
|
|||
|
||||
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
|
||||
fq->q.meat == fq->q.len) {
|
||||
int res;
|
||||
unsigned long orefdst = skb->_skb_refdst;
|
||||
|
||||
skb->_skb_refdst = 0UL;
|
||||
res = ip6_frag_reasm(fq, prev, dev);
|
||||
err = ip6_frag_reasm(fq, skb, prev_tail, dev);
|
||||
skb->_skb_refdst = orefdst;
|
||||
return res;
|
||||
return err;
|
||||
}
|
||||
|
||||
skb_dst_drop(skb);
|
||||
return -1;
|
||||
return -EINPROGRESS;
|
||||
|
||||
insert_error:
|
||||
if (err == IPFRAG_DUP) {
|
||||
kfree_skb(skb);
|
||||
return -EINVAL;
|
||||
}
|
||||
err = -EINVAL;
|
||||
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
|
||||
IPSTATS_MIB_REASM_OVERLAPS);
|
||||
discard_fq:
|
||||
inet_frag_kill(&fq->q);
|
||||
err:
|
||||
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
|
||||
IPSTATS_MIB_REASMFAILS);
|
||||
err:
|
||||
kfree_skb(skb);
|
||||
return -1;
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if this packet is complete.
|
||||
* Returns NULL on failure by any reason, and pointer
|
||||
* to current nexthdr field in reassembled frame.
|
||||
*
|
||||
* It is called with locked fq, and caller must check that
|
||||
* queue is eligible for reassembly i.e. it is not COMPLETE,
|
||||
* the last and the first frames arrived and all the bits are here.
|
||||
*/
|
||||
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
|
||||
struct net_device *dev)
|
||||
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
|
||||
struct sk_buff *prev_tail, struct net_device *dev)
|
||||
{
|
||||
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
|
||||
struct sk_buff *fp, *head = fq->q.fragments;
|
||||
int payload_len, delta;
|
||||
unsigned int nhoff;
|
||||
int sum_truesize;
|
||||
void *reasm_data;
|
||||
int payload_len;
|
||||
u8 ecn;
|
||||
|
||||
inet_frag_kill(&fq->q);
|
||||
|
@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
|
|||
if (unlikely(ecn == 0xff))
|
||||
goto out_fail;
|
||||
|
||||
/* Make the one we just received the head. */
|
||||
if (prev) {
|
||||
head = prev->next;
|
||||
fp = skb_clone(head, GFP_ATOMIC);
|
||||
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
|
||||
if (!reasm_data)
|
||||
goto out_oom;
|
||||
|
||||
if (!fp)
|
||||
goto out_oom;
|
||||
|
||||
fp->next = head->next;
|
||||
if (!fp->next)
|
||||
fq->q.fragments_tail = fp;
|
||||
prev->next = fp;
|
||||
|
||||
skb_morph(head, fq->q.fragments);
|
||||
head->next = fq->q.fragments->next;
|
||||
|
||||
consume_skb(fq->q.fragments);
|
||||
fq->q.fragments = head;
|
||||
}
|
||||
|
||||
WARN_ON(head == NULL);
|
||||
WARN_ON(head->ip_defrag_offset != 0);
|
||||
|
||||
/* Unfragmented part is taken from the first segment. */
|
||||
payload_len = ((head->data - skb_network_header(head)) -
|
||||
payload_len = ((skb->data - skb_network_header(skb)) -
|
||||
sizeof(struct ipv6hdr) + fq->q.len -
|
||||
sizeof(struct frag_hdr));
|
||||
if (payload_len > IPV6_MAXPLEN)
|
||||
goto out_oversize;
|
||||
|
||||
delta = - head->truesize;
|
||||
|
||||
/* Head of list must not be cloned. */
|
||||
if (skb_unclone(head, GFP_ATOMIC))
|
||||
goto out_oom;
|
||||
|
||||
delta += head->truesize;
|
||||
if (delta)
|
||||
add_frag_mem_limit(fq->q.net, delta);
|
||||
|
||||
/* If the first fragment is fragmented itself, we split
|
||||
* it to two chunks: the first with data and paged part
|
||||
* and the second, holding only fragments. */
|
||||
if (skb_has_frag_list(head)) {
|
||||
struct sk_buff *clone;
|
||||
int i, plen = 0;
|
||||
|
||||
clone = alloc_skb(0, GFP_ATOMIC);
|
||||
if (!clone)
|
||||
goto out_oom;
|
||||
clone->next = head->next;
|
||||
head->next = clone;
|
||||
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
|
||||
skb_frag_list_init(head);
|
||||
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
|
||||
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
|
||||
clone->len = clone->data_len = head->data_len - plen;
|
||||
head->data_len -= clone->len;
|
||||
head->len -= clone->len;
|
||||
clone->csum = 0;
|
||||
clone->ip_summed = head->ip_summed;
|
||||
add_frag_mem_limit(fq->q.net, clone->truesize);
|
||||
}
|
||||
|
||||
/* We have to remove fragment header from datagram and to relocate
|
||||
* header in order to calculate ICV correctly. */
|
||||
nhoff = fq->nhoffset;
|
||||
skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
|
||||
memmove(head->head + sizeof(struct frag_hdr), head->head,
|
||||
(head->data - head->head) - sizeof(struct frag_hdr));
|
||||
if (skb_mac_header_was_set(head))
|
||||
head->mac_header += sizeof(struct frag_hdr);
|
||||
head->network_header += sizeof(struct frag_hdr);
|
||||
skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
|
||||
memmove(skb->head + sizeof(struct frag_hdr), skb->head,
|
||||
(skb->data - skb->head) - sizeof(struct frag_hdr));
|
||||
if (skb_mac_header_was_set(skb))
|
||||
skb->mac_header += sizeof(struct frag_hdr);
|
||||
skb->network_header += sizeof(struct frag_hdr);
|
||||
|
||||
skb_reset_transport_header(head);
|
||||
skb_push(head, head->data - skb_network_header(head));
|
||||
skb_reset_transport_header(skb);
|
||||
|
||||
sum_truesize = head->truesize;
|
||||
for (fp = head->next; fp;) {
|
||||
bool headstolen;
|
||||
int delta;
|
||||
struct sk_buff *next = fp->next;
|
||||
inet_frag_reasm_finish(&fq->q, skb, reasm_data);
|
||||
|
||||
sum_truesize += fp->truesize;
|
||||
if (head->ip_summed != fp->ip_summed)
|
||||
head->ip_summed = CHECKSUM_NONE;
|
||||
else if (head->ip_summed == CHECKSUM_COMPLETE)
|
||||
head->csum = csum_add(head->csum, fp->csum);
|
||||
|
||||
if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
|
||||
kfree_skb_partial(fp, headstolen);
|
||||
} else {
|
||||
fp->sk = NULL;
|
||||
if (!skb_shinfo(head)->frag_list)
|
||||
skb_shinfo(head)->frag_list = fp;
|
||||
head->data_len += fp->len;
|
||||
head->len += fp->len;
|
||||
head->truesize += fp->truesize;
|
||||
}
|
||||
fp = next;
|
||||
}
|
||||
sub_frag_mem_limit(fq->q.net, sum_truesize);
|
||||
|
||||
head->next = NULL;
|
||||
head->dev = dev;
|
||||
head->tstamp = fq->q.stamp;
|
||||
ipv6_hdr(head)->payload_len = htons(payload_len);
|
||||
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
|
||||
IP6CB(head)->nhoff = nhoff;
|
||||
IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
|
||||
IP6CB(head)->frag_max_size = fq->q.max_size;
|
||||
skb->dev = dev;
|
||||
ipv6_hdr(skb)->payload_len = htons(payload_len);
|
||||
ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
|
||||
IP6CB(skb)->nhoff = nhoff;
|
||||
IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
|
||||
IP6CB(skb)->frag_max_size = fq->q.max_size;
|
||||
|
||||
/* Yes, and fold redundant checksum back. 8) */
|
||||
skb_postpush_rcsum(head, skb_network_header(head),
|
||||
skb_network_header_len(head));
|
||||
skb_postpush_rcsum(skb, skb_network_header(skb),
|
||||
skb_network_header_len(skb));
|
||||
|
||||
rcu_read_lock();
|
||||
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
|
||||
|
@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
|
|||
fq->q.fragments = NULL;
|
||||
fq->q.rb_fragments = RB_ROOT;
|
||||
fq->q.fragments_tail = NULL;
|
||||
fq->q.last_run_head = NULL;
|
||||
return 1;
|
||||
|
||||
out_oversize:
|
||||
|
@ -425,6 +319,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
|
|||
rcu_read_lock();
|
||||
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
|
||||
rcu_read_unlock();
|
||||
inet_frag_kill(&fq->q);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -463,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
|
|||
return 1;
|
||||
}
|
||||
|
||||
if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
|
||||
fhdr->frag_off & htons(IP6_MF))
|
||||
goto fail_hdr;
|
||||
|
||||
iif = skb->dev ? skb->dev->ifindex : 0;
|
||||
fq = fq_find(net, fhdr->identification, hdr, iif);
|
||||
if (fq) {
|
||||
|
@ -484,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
|
|||
if (prob_offset) {
|
||||
__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
|
||||
IPSTATS_MIB_INHDRERRORS);
|
||||
/* icmpv6_param_prob() calls kfree_skb(skb) */
|
||||
icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);
|
||||
}
|
||||
return ret;
|
||||
|
|
|
@@ -2367,6 +2367,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,

		rcu_read_lock();
		from = rcu_dereference(rt6->from);
		if (!from) {
			rcu_read_unlock();
			return;
		}
		nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
@@ -1166,6 +1166,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);

	if (local->in_reconfig)
		return;

	if (!check_sdata_in_driver(sdata))
		return;
@@ -1508,32 +1508,29 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
	return idx + (tin << 16);
}

static void cake_wash_diffserv(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
		break;
	default:
		break;
	}
}

static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
{
	int wlen = skb_network_offset(skb);
	u8 dscp;

	switch (skb->protocol) {
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		wlen += sizeof(struct iphdr);
		if (!pskb_may_pull(skb, wlen) ||
		    skb_try_make_writable(skb, wlen))
			return 0;

		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
		if (wash && dscp)
			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
		return dscp;

	case htons(ETH_P_IPV6):
		wlen += sizeof(struct ipv6hdr);
		if (!pskb_may_pull(skb, wlen) ||
		    skb_try_make_writable(skb, wlen))
			return 0;

		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
		if (wash && dscp)
			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
@@ -1553,25 +1550,27 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
	struct cake_sched_data *q = qdisc_priv(sch);
	u32 tin;
	u8 dscp;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->tin_cnt) {
	/* Tin selection: Default to diffserv-based selection, allow overriding
	 * using firewall marks or skb->priority.
	 */
	dscp = cake_handle_diffserv(skb,
				    q->rate_flags & CAKE_FLAG_WASH);

	if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
		tin = 0;

	else if (TC_H_MAJ(skb->priority) == sch->handle &&
		 TC_H_MIN(skb->priority) > 0 &&
		 TC_H_MIN(skb->priority) <= q->tin_cnt)
		tin = q->tin_order[TC_H_MIN(skb->priority) - 1];

		if (q->rate_flags & CAKE_FLAG_WASH)
			cake_wash_diffserv(skb);
	} else if (q->tin_mode != CAKE_DIFFSERV_BESTEFFORT) {
		/* extract the Diffserv Precedence field, if it exists */
		/* and clear DSCP bits if washing */
		tin = q->tin_index[cake_handle_diffserv(skb,
				q->rate_flags & CAKE_FLAG_WASH)];
	else {
		tin = q->tin_index[dscp];

		if (unlikely(tin >= q->tin_cnt))
			tin = 0;
	} else {
		tin = 0;
		if (q->rate_flags & CAKE_FLAG_WASH)
			cake_wash_diffserv(skb);
	}

	return &q->tins[tin];
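The reworked cake_select_tin() always parses (and, when washing, clears) the DSCP field first, then picks the tin in a fixed order: besteffort mode forces tin 0, an skb->priority that addresses this qdisc overrides, and otherwise the DSCP value indexes the tin lookup table with a bounds check. A hypothetical user-space sketch of that precedence, assuming invented mode constants, table contents, and helper names purely for illustration:

#include <stdio.h>

#define MODE_BESTEFFORT 0
#define MODE_DIFFSERV   1	/* stands in for any non-besteffort tin mode */

/* Pick a tin: besteffort forces 0, a valid priority override wins next,
 * otherwise the DSCP indexes the tin table (clamped to the tin count).
 */
static unsigned int select_tin(int tin_mode, int prio_valid, unsigned int prio_tin,
			       unsigned int dscp, const unsigned int *tin_index,
			       unsigned int tin_cnt)
{
	unsigned int tin;

	if (tin_mode == MODE_BESTEFFORT)
		tin = 0;
	else if (prio_valid)
		tin = prio_tin;
	else {
		tin = tin_index[dscp];
		if (tin >= tin_cnt)
			tin = 0;
	}
	return tin;
}

int main(void)
{
	/* hypothetical 64-entry DSCP -> tin table with only CS1 (dscp 8) mapped */
	unsigned int tin_index[64] = { [8] = 1 };

	printf("besteffort: %u\n", select_tin(MODE_BESTEFFORT, 0, 0, 8, tin_index, 3));
	printf("priority:   %u\n", select_tin(MODE_DIFFSERV, 1, 2, 8, tin_index, 3));
	printf("dscp CS1:   %u\n", select_tin(MODE_DIFFSERV, 0, 0, 8, tin_index, 3));
	return 0;
}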
@@ -908,7 +908,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
	for (; i < TIPC_NAMETBL_SIZE; i++) {
		head = &tn->nametbl->services[i];

		if (*last_type) {
		if (*last_type ||
		    (!i && *last_key && (*last_lower == *last_key))) {
			service = tipc_service_find(net, *last_type);
			if (!service)
				return -EPIPE;