Merge android-4.19.50 (be7c1cb) into msm-4.19
* refs/heads/tmp-be7c1cb:
  Linux 4.19.50
  ethtool: check the return value of get_regs_len
  ipv4: Define __ipv4_neigh_lookup_noref when CONFIG_INET is disabled
  TTY: serial_core, add ->install
  drm/i915/gvt: Initialize intel_gvt_gtt_entry in stack
  drm: don't block fb changes for async plane updates
  drm/i915: Maintain consistent documentation subsection ordering
  drm/i915/fbc: disable framebuffer compression on GeminiLake
  drm/i915: Fix I915_EXEC_RING_MASK
  drm/amdgpu: remove ATPX_DGPU_REQ_POWER_FOR_DISPLAYS check when hotplug-in
  drm/radeon: prefer lower reference dividers
  drm/amdgpu/psp: move psp version specific function pointers to early_init
  drm: add non-desktop quirks to Sensics and OSVR headsets.
  drm/nouveau: add kconfig option to turn off nouveau legacy contexts. (v3)
  drm: add non-desktop quirk for Valve HMDs
  drm/msm: fix fb references in async update
  drm/gma500/cdv: Check vbt config bits when detecting lvds panels
  test_firmware: Use correct snprintf() limit
  genwqe: Prevent an integer overflow in the ioctl
  Revert "MIPS: perf: ath79: Fix perfcount IRQ assignment"
  MIPS: pistachio: Build uImage.gz by default
  MIPS: Bounds check virt_addr_valid
  xen-blkfront: switch kcalloc to kvcalloc for large array allocation
  s390/mm: fix address space detection in exception handling
  i2c: xiic: Add max_read_len quirk
  x86/insn-eval: Fix use-after-free access to LDT entry
  x86/power: Fix 'nosmt' vs hibernation triple fault during resume
  pstore/ram: Run without kernel crash dump region
  pstore: Set tfm to NULL on free_buf_for_compression
  pstore: Convert buf_lock to semaphore
  pstore: Remove needless lock during console writes
  fuse: fallocate: fix return with locked inode
  NFSv4.1: Fix bug only first CB_NOTIFY_LOCK is handled
  NFSv4.1: Again fix a race where CB_NOTIFY_LOCK fails to wake a waiter
  parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
  rcu: locking and unlocking need to always be at least barriers
  mtd: spinand: macronix: Fix ECC Status Read
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  pktgen: do not sleep with the thread lock held.
  packet: unconditionally free po->rollover
  net/tls: replace the sleeping lock around RX resync with a bit lock
  net: sfp: read eeprom in maximum 16 byte increments
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  net: mvpp2: Use strscpy to handle stat strings
  net/mlx4_en: ethtool, Remove unsupported SFP EEPROM high pages query
  net: ethernet: ti: cpsw_ethtool: fix ethtool ring param set
  neighbor: Call __ipv4_neigh_lookup_noref in neigh_xmit
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  ipv4: not do cache for local delivery if bc_forwarding is enabled
  Fix memory leak in sctp_process_init
  ethtool: fix potential userspace buffer overflow

Change-Id: Ic49494d073fe049a92a42dd95a84315b64a13c3e
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
commit 4bcfb79fa8
65 changed files with 471 additions and 268 deletions

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 49
+SUBLEVEL = 50
 EXTRAVERSION =
 NAME = "People's Front"

@@ -211,6 +211,12 @@ const char *get_system_type(void)
 	return ath79_sys_type;
 }
 
+int get_c0_perfcount_int(void)
+{
+	return ATH79_MISC_IRQ(5);
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
 unsigned int get_c0_compare_int(void)
 {
 	return CP0_LEGACY_COMPARE_IRQ;

@@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
 int __virt_addr_valid(const volatile void *kaddr)
 {
+	unsigned long vaddr = (unsigned long)vaddr;
+
+	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
+		return 0;
+
 	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
 }
 EXPORT_SYMBOL_GPL(__virt_addr_valid);

@@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
 		-I$(srctree)/arch/mips/include/asm/mach-pistachio
 load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
 zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
+all-$(CONFIG_MACH_PISTACHIO) := uImage.gz

@@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
 	nvram_pstore_info.buf = oops_data;
 	nvram_pstore_info.bufsize = oops_data_sz;
 
-	spin_lock_init(&nvram_pstore_info.buf_lock);
-
 	rc = pstore_register(&nvram_pstore_info);
 	if (rc && (rc != -EPERM))
 		/* Print error only when pstore.backend == nvram */

@@ -107,7 +107,6 @@ void bust_spinlocks(int yes)
 
 /*
  * Find out which address space caused the exception.
- * Access register mode is impossible, ignore space == 3.
  */
 static inline enum fault_type get_fault_type(struct pt_regs *regs)
 {
@@ -132,6 +131,10 @@ static inline enum fault_type get_fault_type(struct pt_regs *regs)
 		}
 		return VDSO_FAULT;
 	}
+	if (trans_exc_code == 1) {
+		/* access register mode, not used in the kernel */
+		return USER_FAULT;
+	}
 	/* home space exception -> access via kernel ASCE */
 	return KERNEL_FAULT;
 }

@@ -555,7 +555,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
 }
 
 /**
- * get_desc() - Obtain pointer to a segment descriptor
+ * get_desc() - Obtain contents of a segment descriptor
+ * @out:	Segment descriptor contents on success
  * @sel:	Segment selector
  *
  * Given a segment selector, obtain a pointer to the segment descriptor.
@@ -563,18 +564,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
 *
 * Returns:
 *
- * Pointer to segment descriptor on success.
+ * True on success, false on failure.
 *
- * NULL on error.
 */
-static struct desc_struct *get_desc(unsigned short sel)
+static bool get_desc(struct desc_struct *out, unsigned short sel)
 {
 	struct desc_ptr gdt_desc = {0, 0};
 	unsigned long desc_base;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
-		struct desc_struct *desc = NULL;
+		bool success = false;
 		struct ldt_struct *ldt;
 
 		/* Bits [15:3] contain the index of the desired entry. */
@@ -582,12 +583,14 @@ static struct desc_struct *get_desc(unsigned short sel)
 
 		mutex_lock(&current->active_mm->context.lock);
 		ldt = current->active_mm->context.ldt;
-		if (ldt && sel < ldt->nr_entries)
-			desc = &ldt->entries[sel];
+		if (ldt && sel < ldt->nr_entries) {
+			*out = ldt->entries[sel];
+			success = true;
+		}
 
 		mutex_unlock(&current->active_mm->context.lock);
 
-		return desc;
+		return success;
 	}
 #endif
 	native_store_gdt(&gdt_desc);
@@ -602,9 +605,10 @@ static struct desc_struct *get_desc(unsigned short sel)
 	desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
 
 	if (desc_base > gdt_desc.size)
-		return NULL;
+		return false;
 
-	return (struct desc_struct *)(gdt_desc.address + desc_base);
+	*out = *(struct desc_struct *)(gdt_desc.address + desc_base);
+	return true;
 }
 
 /**
@@ -626,7 +630,7 @@ static struct desc_struct *get_desc(unsigned short sel)
 */
 unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
 {
-	struct desc_struct *desc;
+	struct desc_struct desc;
 	short sel;
 
 	sel = get_segment_selector(regs, seg_reg_idx);
@@ -664,11 +668,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
 	if (!sel)
 		return -1L;
 
-	desc = get_desc(sel);
-	if (!desc)
+	if (!get_desc(&desc, sel))
 		return -1L;
 
-	return get_desc_base(desc);
+	return get_desc_base(&desc);
 }
 
 /**
@@ -690,7 +693,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
 */
 static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
 {
-	struct desc_struct *desc;
+	struct desc_struct desc;
 	unsigned long limit;
 	short sel;
 
@@ -704,8 +707,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
 	if (!sel)
 		return 0;
 
-	desc = get_desc(sel);
-	if (!desc)
+	if (!get_desc(&desc, sel))
 		return 0;
 
 	/*
@@ -714,8 +716,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
 	 * not tested when checking the segment limits. In practice,
 	 * this means that the segment ends in (limit << 12) + 0xfff.
 	 */
-	limit = get_desc_limit(desc);
-	if (desc->g)
+	limit = get_desc_limit(&desc);
+	if (desc.g)
 		limit = (limit << 12) + 0xfff;
 
 	return limit;
@@ -739,7 +741,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
 */
 int insn_get_code_seg_params(struct pt_regs *regs)
 {
-	struct desc_struct *desc;
+	struct desc_struct desc;
 	short sel;
 
 	if (v8086_mode(regs))
@@ -750,8 +752,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
 	if (sel < 0)
 		return sel;
 
-	desc = get_desc(sel);
-	if (!desc)
+	if (!get_desc(&desc, sel))
 		return -EINVAL;
 
 	/*
@@ -759,10 +760,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
 	 * determines whether a segment contains data or code. If this is a data
 	 * segment, return error.
 	 */
-	if (!(desc->type & BIT(3)))
+	if (!(desc.type & BIT(3)))
 		return -EINVAL;
 
-	switch ((desc->l << 1) | desc->d) {
+	switch ((desc.l << 1) | desc.d) {
 	case 0: /*
 		 * Legacy mode. CS.L=0, CS.D=0. Address and operand size are
 		 * both 16-bit.

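A note on the use-after-free fix above: the old get_desc() returned a pointer into the LDT's entry array, which another thread could free or remap as soon as context.lock was dropped. The new signature copies the descriptor into a caller-owned buffer while the mutex is still held. A minimal sketch of that copy-out-under-lock idiom, using pthreads and simplified stand-in types rather than the kernel's:

#include <pthread.h>
#include <stdbool.h>

struct desc { unsigned long base, limit; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct desc *table;          /* may be freed by other threads */
static unsigned int table_entries;

/* Copy the entry out while the lock pins the table; never hand back a
 * pointer into memory that can go away after the unlock. */
static bool get_entry(struct desc *out, unsigned int idx)
{
	bool ok = false;

	pthread_mutex_lock(&table_lock);
	if (table && idx < table_entries) {
		*out = table[idx];      /* struct copy under the lock */
		ok = true;
	}
	pthread_mutex_unlock(&table_lock);

	return ok;
}
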
@@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
 	 * address in its instruction pointer may not be possible to resolve
 	 * any more at that point (the page tables used by it previously may
 	 * have been overwritten by hibernate image data).
+	 *
+	 * First, make sure that we wake up all the potentially disabled SMT
+	 * threads which have been initially brought up and then put into
+	 * mwait/cpuidle sleep.
+	 * Those will be put to proper (not interfering with hibernation
+	 * resume) sleep afterwards, and the resumed kernel will decide itself
+	 * what to do with them.
 	 */
+	ret = cpuhp_smt_enable();
+	if (ret)
+		return ret;
 	smp_ops.play_dead = resume_play_dead;
 	ret = disable_nonboot_cpus();
 	smp_ops.play_dead = play_dead;

@@ -13,6 +13,7 @@
 #include <linux/suspend.h>
 #include <linux/scatterlist.h>
 #include <linux/kdebug.h>
+#include <linux/cpu.h>
 
 #include <crypto/hash.h>
 
@@ -363,3 +364,35 @@ int arch_hibernation_header_restore(void *addr)
 
 	return 0;
 }
+
+int arch_resume_nosmt(void)
+{
+	int ret = 0;
+	/*
+	 * We reached this while coming out of hibernation. This means
+	 * that SMT siblings are sleeping in hlt, as mwait is not safe
+	 * against control transition during resume (see comment in
+	 * hibernate_resume_nonboot_cpu_disable()).
+	 *
+	 * If the resumed kernel has SMT disabled, we have to take all the
+	 * SMT siblings out of hlt, and offline them again so that they
+	 * end up in mwait proper.
+	 *
+	 * Called with hotplug disabled.
+	 */
+	cpu_hotplug_enable();
+	if (cpu_smt_control == CPU_SMT_DISABLED ||
+	    cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
+		enum cpuhp_smt_control old = cpu_smt_control;
+
+		ret = cpuhp_smt_enable();
+		if (ret)
+			goto out;
+		ret = cpuhp_smt_disable(old);
+		if (ret)
+			goto out;
+	}
+out:
+	cpu_hotplug_disable();
+	return ret;
+}

@@ -1176,7 +1176,6 @@ static int __init erst_init(void)
 		 "Error Record Serialization Table (ERST) support is initialized.\n");
 
 	buf = kmalloc(erst_erange.size, GFP_KERNEL);
-	spin_lock_init(&erst_info.buf_lock);
 	if (buf) {
 		erst_info.buf = buf + sizeof(struct cper_pstore_record);
 		erst_info.bufsize = erst_erange.size -

@@ -1310,11 +1310,11 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 	}
 
 free_shadow:
-		kfree(rinfo->shadow[i].grants_used);
+		kvfree(rinfo->shadow[i].grants_used);
 		rinfo->shadow[i].grants_used = NULL;
-		kfree(rinfo->shadow[i].indirect_grants);
+		kvfree(rinfo->shadow[i].indirect_grants);
 		rinfo->shadow[i].indirect_grants = NULL;
-		kfree(rinfo->shadow[i].sg);
+		kvfree(rinfo->shadow[i].sg);
 		rinfo->shadow[i].sg = NULL;
 	}
 
@@ -1353,7 +1353,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	for (i = 0; i < info->nr_rings; i++)
 		blkif_free_ring(&info->rinfo[i]);
 
-	kfree(info->rinfo);
+	kvfree(info->rinfo);
 	info->rinfo = NULL;
 	info->nr_rings = 0;
 }
@@ -1914,9 +1914,9 @@ static int negotiate_mq(struct blkfront_info *info)
 	if (!info->nr_rings)
 		info->nr_rings = 1;
 
-	info->rinfo = kcalloc(info->nr_rings,
-			      sizeof(struct blkfront_ring_info),
-			      GFP_KERNEL);
+	info->rinfo = kvcalloc(info->nr_rings,
+			       sizeof(struct blkfront_ring_info),
+			       GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
 		info->nr_rings = 0;
@@ -2232,17 +2232,17 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 
 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
 		rinfo->shadow[i].grants_used =
-			kcalloc(grants,
-				sizeof(rinfo->shadow[i].grants_used[0]),
-				GFP_NOIO);
-		rinfo->shadow[i].sg = kcalloc(psegs,
-					      sizeof(rinfo->shadow[i].sg[0]),
-					      GFP_NOIO);
+			kvcalloc(grants,
+				 sizeof(rinfo->shadow[i].grants_used[0]),
+				 GFP_NOIO);
+		rinfo->shadow[i].sg = kvcalloc(psegs,
+					       sizeof(rinfo->shadow[i].sg[0]),
+					       GFP_NOIO);
 		if (info->max_indirect_segments)
 			rinfo->shadow[i].indirect_grants =
-				kcalloc(INDIRECT_GREFS(grants),
-					sizeof(rinfo->shadow[i].indirect_grants[0]),
-					GFP_NOIO);
+				kvcalloc(INDIRECT_GREFS(grants),
+					 sizeof(rinfo->shadow[i].indirect_grants[0]),
+					 GFP_NOIO);
 		if ((rinfo->shadow[i].grants_used == NULL) ||
 		    (rinfo->shadow[i].sg == NULL) ||
 		    (info->max_indirect_segments &&
@@ -2256,11 +2256,11 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
 
 out_of_memory:
 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
-		kfree(rinfo->shadow[i].grants_used);
+		kvfree(rinfo->shadow[i].grants_used);
 		rinfo->shadow[i].grants_used = NULL;
-		kfree(rinfo->shadow[i].sg);
+		kvfree(rinfo->shadow[i].sg);
 		rinfo->shadow[i].sg = NULL;
-		kfree(rinfo->shadow[i].indirect_grants);
+		kvfree(rinfo->shadow[i].indirect_grants);
 		rinfo->shadow[i].indirect_grants = NULL;
 	}
 	if (!list_empty(&rinfo->indirect_pages)) {

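Background on the kcalloc-to-kvcalloc conversion above: the kv* allocators first attempt a physically contiguous kmalloc and fall back to vmalloc when the request is too large or memory is too fragmented, and kvfree() accepts either kind of pointer. That is why every kfree() on these arrays must be converted in the same patch: kfree() on a vmalloc'd pointer is a bug. A minimal sketch of the pairing:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: a possibly-large zeroed array. kvcalloc() may
 * return kmalloc or vmalloc memory, so only kvfree() is safe on it. */
static u32 *alloc_big_array(size_t n)
{
	return kvcalloc(n, sizeof(u32), GFP_KERNEL);
}

static void free_big_array(u32 *p)
{
	kvfree(p);      /* handles both kmalloc and vmalloc pointers */
}
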
@@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record)
 		efi_name[i] = name[i];
 
 	ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
-				    !pstore_cannot_block_path(record->reason),
-				    record->size, record->psi->buf);
+				    preemptible(), record->size, record->psi->buf);
 
 	if (record->reason == KMSG_DUMP_OOPS)
 		efivar_run_worker();
@@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void)
 		return -ENOMEM;
 
 	efi_pstore_info.bufsize = 1024;
-	spin_lock_init(&efi_pstore_info.buf_lock);
 
 	if (pstore_register(&efi_pstore_info)) {
 		kfree(efi_pstore_info.buf);

@@ -416,8 +416,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		}
 	}
 	if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
-		if ((adev->flags & AMD_IS_PX) &&
-		    amdgpu_atpx_dgpu_req_power_for_displays()) {
+		if (adev->flags & AMD_IS_PX) {
 			pm_runtime_get_sync(adev->ddev->dev);
 			/* Just fire off a uevent and let userspace tell us what to do */
 			drm_helper_hpd_irq_event(adev->ddev);

@@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
 static int psp_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
 
 	psp_set_funcs(adev);
 
-	return 0;
-}
-
-static int psp_sw_init(void *handle)
-{
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	struct psp_context *psp = &adev->psp;
-	int ret;
-
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
@@ -67,6 +59,15 @@ static int psp_sw_init(void *handle)
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 		return 0;
 
+	return 0;
+}
+
+static int psp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
+	int ret;
+
 	ret = psp_init_microcode(psp);
 	if (ret) {
 		DRM_ERROR("Failed to load psp firmware!\n");

@@ -1573,15 +1573,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
 	if (old_plane_state->fb != new_plane_state->fb)
 		return -EINVAL;
 
-	/*
-	 * FIXME: Since prepare_fb and cleanup_fb are always called on
-	 * the new_plane_state for async updates we need to block framebuffer
-	 * changes. This prevents use of a fb that's been cleaned up and
-	 * double cleanups from occuring.
-	 */
-	if (old_plane_state->fb != new_plane_state->fb)
-		return -EINVAL;
-
 	funcs = plane->helper_private;
 	if (!funcs->atomic_async_update)
 		return -EINVAL;
@@ -1612,6 +1603,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
 * drm_atomic_async_check() succeeds. Async commits are not supposed to swap
 * the states like normal sync commits, but just do in-place changes on the
 * current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
 */
 void drm_atomic_helper_async_commit(struct drm_device *dev,
 				    struct drm_atomic_state *state)
@@ -1622,6 +1615,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 	int i;
 
 	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		struct drm_framebuffer *new_fb = plane_state->fb;
+		struct drm_framebuffer *old_fb = plane->state->fb;
+
 		funcs = plane->helper_private;
 		funcs->atomic_async_update(plane, plane_state);
 
@@ -1630,11 +1626,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
 		 * plane->state in-place, make sure at least common
 		 * properties have been properly updated.
 		 */
-		WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+		WARN_ON_ONCE(plane->state->fb != new_fb);
 		WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
 		WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
 		WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
 		WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+		/*
+		 * Make sure the FBs have been swapped so that cleanups in the
+		 * new_state performs a cleanup in the old FB.
+		 */
+		WARN_ON_ONCE(plane_state->fb != old_fb);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_async_commit);

@@ -180,6 +180,25 @@ static const struct edid_quirk {
 	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
 	{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
 
+	/* Valve Index Headset */
+	{ "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
+	{ "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+
 	/* HTC Vive and Vive Pro VR Headsets */
 	{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
 	{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
@@ -201,6 +220,12 @@ static const struct edid_quirk {
 
 	/* Sony PlayStation VR Headset */
 	{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+
+	/* Sensics VR Headsets */
+	{ "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+
+	/* OSVR HDK and HDK2 VR Headsets */
+	{ "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
 };
 
 /*

@@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_device *dev,
 	int pipe;
 	u8 pin;
 
+	if (!dev_priv->lvds_enabled_in_vbt)
+		return;
+
 	pin = GMBUS_PORT_PANEL;
 	if (!lvds_is_present_in_vbt(dev, &pin)) {
 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");

@@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
 	if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
 		dev_priv->edp.support = 1;
 
+	dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
+	DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
+
 	/* This bit means to use 96Mhz for DPLL_A or not */
 	if (driver->primary_lfp_id)
 		dev_priv->dplla_96mhz = true;

@@ -538,6 +538,7 @@ struct drm_psb_private {
 	int lvds_ssc_freq;
 	bool is_lvds_on;
 	bool is_mipi_on;
+	bool lvds_enabled_in_vbt;
 	u32 mipi_ctrl_display;
 
 	unsigned int core_freq;

@@ -2161,7 +2161,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
 	unsigned long gma, gfn;
-	struct intel_gvt_gtt_entry e, m;
+	struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+	struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
 	dma_addr_t dma_addr;
 	int ret;
 
@@ -2237,7 +2238,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
-		m = e;
+		m.val64 = e.val64;
+		m.type = e.type;
 
 		/* one PTE update may be issued in multiple writes and the
 		 * first write may not construct a valid gfn

@@ -32,7 +32,7 @@
 * macros. Do **not** mass change existing definitions just to update the style.
 *
 * Layout
- * ''''''
+ * ~~~~~~
 *
 * Keep helper macros near the top. For example, _PIPE() and friends.
 *
@@ -78,7 +78,7 @@
 * style. Use lower case in hexadecimal values.
 *
 * Naming
- * ''''''
+ * ~~~~~~
 *
 * Try to name registers according to the specs. If the register name changes in
 * the specs from platform to another, stick to the original name.
@@ -96,7 +96,7 @@
 * suffix to the name. For example, ``_SKL`` or ``_GEN8``.
 *
 * Examples
- * ''''''''
+ * ~~~~~~~~
 *
 * (Note that the values in the example are indented using spaces instead of
 * TABs to avoid misalignment in generated documentation. Use TABs in the

@@ -1267,6 +1267,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
 	if (!HAS_FBC(dev_priv))
 		return 0;
 
+	/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
+	if (IS_GEMINILAKE(dev_priv))
+		return 0;
+
 	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
 		return 1;
 

@@ -37,7 +37,7 @@
 * costly and simplifies things. We can revisit this in the future.
 *
 * Layout
- * ''''''
+ * ~~~~~~
 *
 * Keep things in this file ordered by WA type, as per the above (context, GT,
 * display, register whitelist, batchbuffer). Then, inside each type, keep the

@@ -503,6 +503,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
 static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 					   struct drm_plane_state *new_state)
 {
+	struct drm_framebuffer *old_fb = plane->state->fb;
+
 	plane->state->src_x = new_state->src_x;
 	plane->state->src_y = new_state->src_y;
 	plane->state->crtc_x = new_state->crtc_x;
@@ -525,6 +527,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
 
 	*to_mdp5_plane_state(plane->state) =
 		*to_mdp5_plane_state(new_state);
+
+	new_state->fb = old_fb;
 }
 
 static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {

@@ -16,10 +16,21 @@ config DRM_NOUVEAU
 	select INPUT if ACPI && X86
 	select THERMAL if ACPI && X86
 	select ACPI_VIDEO if ACPI && X86
-	select DRM_VM
 	help
 	  Choose this option for open-source NVIDIA support.
 
+config NOUVEAU_LEGACY_CTX_SUPPORT
+	bool "Nouveau legacy context support"
+	depends on DRM_NOUVEAU
+	select DRM_VM
+	default y
+	help
+	  There was a version of the nouveau DDX that relied on legacy
+	  ctx ioctls not erroring out. But that was back in time a long
+	  ways, so offer a way to disable it now. For uapi compat with
+	  old nouveau ddx this should be on by default, but modern distros
+	  should consider turning it off.
+
 config NOUVEAU_PLATFORM_DRIVER
 	bool "Nouveau (NVIDIA) SoC GPUs"
 	depends on DRM_NOUVEAU && ARCH_TEGRA

@@ -1015,8 +1015,11 @@ nouveau_driver_fops = {
 static struct drm_driver
 driver_stub = {
 	.driver_features =
-		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
-		DRIVER_KMS_LEGACY_CONTEXT,
+		DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+		| DRIVER_KMS_LEGACY_CONTEXT
+#endif
+		,
 
 	.load = nouveau_drm_load,
 	.unload = nouveau_drm_unload,

@@ -921,12 +921,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
 	ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
 
 	/* get matching reference and feedback divider */
-	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+	*ref_div = min(max(den/post_div, 1u), ref_div_max);
 	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
 
 	/* limit fb divider to its maximum */
 	if (*fb_div > fb_div_max) {
-		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
+		*ref_div = (*ref_div * fb_div_max)/(*fb_div);
 		*fb_div = fb_div_max;
 	}
 }

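The radeon hunk above deliberately swaps DIV_ROUND_CLOSEST() for plain integer division: flooring biases the search toward lower reference dividers, which this fix prefers. The two roundings differ by at most one; a small user-space illustration with the macro inlined:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned int den = 7, post_div = 2;

	/* floor gives 3, round-to-closest gives 4: the fixed code
	 * now picks the lower of the two candidate dividers. */
	printf("floor=%u closest=%u\n",
	       den / post_div, DIV_ROUND_CLOSEST(den, post_div));
	return 0;
}
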
@@ -718,11 +718,16 @@ static const struct i2c_algorithm xiic_algorithm = {
 	.functionality = xiic_func,
 };
 
+static const struct i2c_adapter_quirks xiic_quirks = {
+	.max_read_len = 255,
+};
+
 static const struct i2c_adapter xiic_adapter = {
 	.owner = THIS_MODULE,
 	.name = DRIVER_NAME,
 	.class = I2C_CLASS_DEPRECATED,
 	.algo = &xiic_algorithm,
+	.quirks = &xiic_quirks,
 };
 

@@ -22,15 +22,6 @@
 #define AR71XX_RESET_REG_MISC_INT_ENABLE	4
 
 #define ATH79_MISC_IRQ_COUNT			32
-#define ATH79_MISC_PERF_IRQ			5
-
-static int ath79_perfcount_irq;
-
-int get_c0_perfcount_int(void)
-{
-	return ath79_perfcount_irq;
-}
-EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
 
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
@@ -122,8 +113,6 @@ static void __init ath79_misc_intc_domain_init(
 {
 	void __iomem *base = domain->host_data;
 
-	ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
-
 	/* Disable and clear all interrupts */
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
 	__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);

@@ -780,6 +780,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
 
 	if ((m->addr == 0x0) || (m->size == 0))
 		return -EINVAL;
+	if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
+		return -EINVAL;
 
 	map_addr = (m->addr & PAGE_MASK);
 	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

@@ -587,6 +587,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
 	/* determine space needed for page_list. */
 	data = (unsigned long)uaddr;
 	offs = offset_in_page(data);
+	if (size > ULONG_MAX - PAGE_SIZE - offs) {
+		m->size = 0; /* mark unused and not added */
+		return -EINVAL;
+	}
 	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
 
 	m->page_list = kcalloc(m->nr_pages,

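Both genwqe checks above guard the page-rounding arithmetic that follows them: the user-supplied size is later added to the in-page offset and rounded up to PAGE_SIZE, so a huge value could wrap around ULONG_MAX and yield a tiny mapping. The test keeps the subtraction on the right-hand side precisely so the check itself cannot overflow. A hedged stand-alone sketch with illustrative constants:

#include <limits.h>
#include <stdbool.h>

#define MY_PAGE_SIZE 4096UL
#define MY_PAGE_MASK (~(MY_PAGE_SIZE - 1))

/* Reject (addr, size) pairs whose page-rounded extent would wrap.
 * "size > ULONG_MAX - slack" avoids ever computing "size + slack",
 * which is exactly the expression that could overflow. */
static bool extent_would_overflow(unsigned long addr, unsigned long size)
{
	unsigned long offs = addr & ~MY_PAGE_MASK;  /* offset in page */

	return size > ULONG_MAX - MY_PAGE_SIZE - offs;
}
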
@@ -10,6 +10,7 @@
 #include <linux/mtd/spinand.h>
 
 #define SPINAND_MFR_MACRONIX		0xC2
+#define MACRONIX_ECCSR_MASK		0x0F
 
 static SPINAND_OP_VARIANTS(read_cache_variants,
 		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -55,7 +56,12 @@ static int mx35lf1ge4ab_get_eccsr(struct spinand_device *spinand, u8 *eccsr)
 					  SPI_MEM_OP_DUMMY(1, 1),
 					  SPI_MEM_OP_DATA_IN(1, eccsr, 1));
 
-	return spi_mem_exec_op(spinand->spimem, &op);
+	int ret = spi_mem_exec_op(spinand->spimem, &op);
+	if (ret)
+		return ret;
+
+	*eccsr &= MACRONIX_ECCSR_MASK;
+	return 0;
 }
 
 static int mx35lf1ge4ab_ecc_get_status(struct spinand_device *spinand,

@@ -1310,8 +1310,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
 		int i;
 
 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
-			memcpy(data + i * ETH_GSTRING_LEN,
-			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+			strscpy(data + i * ETH_GSTRING_LEN,
+				mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
 	}
 }
 

@@ -2010,6 +2010,8 @@ static int mlx4_en_set_tunable(struct net_device *dev,
 	return ret;
 }
 
+#define MLX4_EEPROM_PAGE_LEN 256
+
 static int mlx4_en_get_module_info(struct net_device *dev,
 				   struct ethtool_modinfo *modinfo)
 {
@@ -2044,7 +2046,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
 		break;
 	case MLX4_MODULE_ID_SFP:
 		modinfo->type = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
 		break;
 	default:
 		return -EINVAL;

@@ -2077,11 +2077,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
 		size -= offset + size - I2C_PAGE_SIZE;
 
 	i2c_addr = I2C_ADDR_LOW;
-	if (offset >= I2C_PAGE_SIZE) {
-		/* Reset offset to high page */
-		i2c_addr = I2C_ADDR_HIGH;
-		offset -= I2C_PAGE_SIZE;
-	}
 
 	cable_info = (struct mlx4_cable_info *)inmad->data;
 	cable_info->dev_mem_address = cpu_to_be16(offset);

@@ -2978,7 +2978,7 @@ static void cpsw_get_ringparam(struct net_device *ndev,
 	struct cpsw_common *cpsw = priv->cpsw;
 
 	/* not supported */
-	ering->tx_max_pending = 0;
+	ering->tx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
 	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
 	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
 	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);

@@ -280,6 +280,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
 {
 	struct i2c_msg msgs[2];
 	u8 bus_addr = a2 ? 0x51 : 0x50;
+	size_t this_len;
 	int ret;
 
 	msgs[0].addr = bus_addr;
@@ -291,11 +292,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
 	msgs[1].len = len;
 	msgs[1].buf = buf;
 
-	ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
-	if (ret < 0)
-		return ret;
+	while (len) {
+		this_len = len;
+		if (this_len > 16)
+			this_len = 16;
 
-	return ret == ARRAY_SIZE(msgs) ? len : 0;
+		msgs[1].len = this_len;
+
+		ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
+		if (ret < 0)
+			return ret;
+
+		if (ret != ARRAY_SIZE(msgs))
+			break;
+
+		msgs[1].buf += this_len;
+		dev_addr += this_len;
+		len -= this_len;
+	}
+
+	return msgs[1].buf - (u8 *)buf;
 }
 
 static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,

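The sfp change caps each I2C transfer at 16 bytes because some SFP module EEPROMs misbehave on longer reads; the loop advances the device address between chunks, and the function now returns the number of bytes actually read instead of all-or-nothing. The chunking skeleton, stripped of the i2c_msg plumbing (the xfer callback is an illustrative stand-in):

#include <stddef.h>

/* Read in at most 16-byte chunks; stop at the first failed transfer
 * and report how many bytes actually arrived. */
static int read_chunked(int (*xfer)(unsigned int addr, void *dst, size_t n),
			unsigned int addr, void *buf, size_t len)
{
	unsigned char *p = buf;

	while (len) {
		size_t this_len = len > 16 ? 16 : len;

		if (xfer(addr, p, this_len) < 0)
			break;

		p += this_len;
		addr += this_len;
		len -= this_len;
	}

	return (int)(p - (unsigned char *)buf);
}
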
@@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	/* We currently only support kernel addresses */
 	BUG_ON(sid != KERNEL_SPACE);
 
-	mtsp(sid,1);
-
 	/*
 	** WORD 1 - low order word
 	** "hints" parm includes the VALID bit!
@@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	** Grab virtual index [0:11]
 	** Deposit virt_idx bits into I/O PDIR word
 	*/
-	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+	asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
 	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
 	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

@@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
 	pa = virt_to_phys(vba);
 	pa &= IOVP_MASK;
 
-	mtsp(sid,1);
-	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
 	pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
 
 	pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */

@@ -130,9 +130,6 @@ static void uart_start(struct tty_struct *tty)
 	struct uart_port *port;
 	unsigned long flags;
 
-	if (!state)
-		return;
-
 	port = uart_port_lock(state, flags);
 	__uart_start(tty);
 	uart_port_unlock(port, flags);
@@ -730,9 +727,6 @@ static void uart_unthrottle(struct tty_struct *tty)
 	upstat_t mask = UPSTAT_SYNC_FIFO;
 	struct uart_port *port;
 
-	if (!state)
-		return;
-
 	port = uart_port_ref(state);
 	if (!port)
 		return;
@@ -1708,6 +1702,16 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
 		uart_port_deref(uport);
 }
 
+static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+	struct uart_driver *drv = driver->driver_state;
+	struct uart_state *state = drv->state + tty->index;
+
+	tty->driver_data = state;
+
+	return tty_standard_install(driver, tty);
+}
+
 /*
 * Calls to uart_open are serialised by the tty_lock in
 *   drivers/tty/tty_io.c:tty_open()
@@ -1720,11 +1724,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
 */
 static int uart_open(struct tty_struct *tty, struct file *filp)
 {
-	struct uart_driver *drv = tty->driver->driver_state;
-	int retval, line = tty->index;
-	struct uart_state *state = drv->state + line;
-
-	tty->driver_data = state;
+	struct uart_state *state = tty->driver_data;
+	int retval;
 
 	retval = tty_port_open(&state->port, tty, filp);
 	if (retval > 0)
@@ -2409,6 +2410,7 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
 #endif
 
 static const struct tty_operations uart_ops = {
+	.install	= uart_install,
 	.open		= uart_open,
 	.close		= uart_close,
 	.write		= uart_write,

@@ -2981,7 +2981,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 	    offset + length > i_size_read(inode)) {
 		err = inode_newsize_ok(inode, offset + length);
 		if (err)
-			return err;
+			goto out;
 	}
 
 	if (!(mode & FALLOC_FL_KEEP_SIZE))

|
@ -6850,7 +6850,6 @@ struct nfs4_lock_waiter {
|
|||
struct task_struct *task;
|
||||
struct inode *inode;
|
||||
struct nfs_lowner *owner;
|
||||
bool notified;
|
||||
};
|
||||
|
||||
static int
|
||||
|
@ -6872,13 +6871,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, vo
|
|||
/* Make sure it's for the right inode */
|
||||
if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
|
||||
return 0;
|
||||
|
||||
waiter->notified = true;
|
||||
}
|
||||
|
||||
/* override "private" so we can use default_wake_function */
|
||||
wait->private = waiter->task;
|
||||
ret = autoremove_wake_function(wait, mode, flags, key);
|
||||
ret = woken_wake_function(wait, mode, flags, key);
|
||||
if (ret)
|
||||
list_del_init(&wait->entry);
|
||||
wait->private = waiter;
|
||||
return ret;
|
||||
}
|
||||
|
@ -6887,7 +6886,6 @@ static int
|
|||
nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
||||
{
|
||||
int status = -ERESTARTSYS;
|
||||
unsigned long flags;
|
||||
struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
|
||||
struct nfs_server *server = NFS_SERVER(state->inode);
|
||||
struct nfs_client *clp = server->nfs_client;
|
||||
|
@ -6897,8 +6895,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
|||
.s_dev = server->s_dev };
|
||||
struct nfs4_lock_waiter waiter = { .task = current,
|
||||
.inode = state->inode,
|
||||
.owner = &owner,
|
||||
.notified = false };
|
||||
.owner = &owner};
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
/* Don't bother with waitqueue if we don't expect a callback */
|
||||
|
@ -6908,27 +6905,22 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
|
|||
init_wait(&wait);
|
||||
wait.private = &waiter;
|
||||
wait.func = nfs4_wake_lock_waiter;
|
||||
add_wait_queue(q, &wait);
|
||||
|
||||
while(!signalled()) {
|
||||
waiter.notified = false;
|
||||
add_wait_queue(q, &wait);
|
||||
status = nfs4_proc_setlk(state, cmd, request);
|
||||
if ((status != -EAGAIN) || IS_SETLK(cmd))
|
||||
if ((status != -EAGAIN) || IS_SETLK(cmd)) {
|
||||
finish_wait(q, &wait);
|
||||
break;
|
||||
}
|
||||
|
||||
status = -ERESTARTSYS;
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
if (waiter.notified) {
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
|
||||
freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
|
||||
freezer_do_not_count();
|
||||
wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
|
||||
freezer_count();
|
||||
finish_wait(q, &wait);
|
||||
}
|
||||
|
||||
finish_wait(q, &wait);
|
||||
return status;
|
||||
}
|
||||
#else /* !CONFIG_NFS_V4_1 */
|
||||
|
|
|
@@ -124,26 +124,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
 	}
 }
 
-bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
+/*
+ * Should pstore_dump() wait for a concurrent pstore_dump()? If
+ * not, the current pstore_dump() will report a failure to dump
+ * and return.
+ */
+static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
 {
-	/*
-	 * In case of NMI path, pstore shouldn't be blocked
-	 * regardless of reason.
-	 */
+	/* In NMI path, pstore shouldn't block regardless of reason. */
 	if (in_nmi())
 		return true;
 
 	switch (reason) {
 	/* In panic case, other cpus are stopped by smp_send_stop(). */
 	case KMSG_DUMP_PANIC:
-	/* Emergency restart shouldn't be blocked by spin lock. */
+	/* Emergency restart shouldn't be blocked. */
 	case KMSG_DUMP_EMERG:
 		return true;
 	default:
 		return false;
 	}
 }
-EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
 
 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
 static int zbufsize_deflate(size_t size)
@@ -323,8 +324,10 @@ static void allocate_buf_for_compression(void)
 
 static void free_buf_for_compression(void)
 {
-	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
+	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
 		crypto_free_comp(tfm);
+		tfm = NULL;
+	}
 	kfree(big_oops_buf);
 	big_oops_buf = NULL;
 	big_oops_buf_sz = 0;
@@ -378,23 +381,23 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 	unsigned long	total = 0;
 	const char	*why;
 	unsigned int	part = 1;
-	unsigned long	flags = 0;
-	int		is_locked;
 	int		ret;
 
 	why = get_reason_str(reason);
 
-	if (pstore_cannot_block_path(reason)) {
-		is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
-		if (!is_locked) {
-			pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
-				       , in_nmi() ? "NMI" : why);
+	if (down_trylock(&psinfo->buf_lock)) {
+		/* Failed to acquire lock: give up if we cannot wait. */
+		if (pstore_cannot_wait(reason)) {
+			pr_err("dump skipped in %s path: may corrupt error record\n",
+			       in_nmi() ? "NMI" : why);
+			return;
+		}
+		if (down_interruptible(&psinfo->buf_lock)) {
+			pr_err("could not grab semaphore?!\n");
 			return;
 		}
-	} else {
-		spin_lock_irqsave(&psinfo->buf_lock, flags);
-		is_locked = 1;
 	}
+
 	oopscount++;
 	while (total < kmsg_bytes) {
 		char *dst;
@@ -411,7 +414,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 		record.part = part;
 		record.buf = psinfo->buf;
 
-		if (big_oops_buf && is_locked) {
+		if (big_oops_buf) {
 			dst = big_oops_buf;
 			dst_size = big_oops_buf_sz;
 		} else {
@@ -429,7 +432,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 					  dst_size, &dump_size))
 			break;
 
-		if (big_oops_buf && is_locked) {
+		if (big_oops_buf) {
 			zipped_len = pstore_compress(dst, psinfo->buf,
 						header_size + dump_size,
 						psinfo->bufsize);
@@ -452,8 +455,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
 		total += record.size;
 		part++;
 	}
-	if (is_locked)
-		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+
+	up(&psinfo->buf_lock);
 }
 
 static struct kmsg_dumper pstore_dumper = {
@@ -476,31 +479,14 @@ static void pstore_unregister_kmsg(void)
 #ifdef CONFIG_PSTORE_CONSOLE
 static void pstore_console_write(struct console *con, const char *s, unsigned c)
 {
-	const char *e = s + c;
+	struct pstore_record record;
 
-	while (s < e) {
-		struct pstore_record record;
-		unsigned long flags;
+	pstore_record_init(&record, psinfo);
+	record.type = PSTORE_TYPE_CONSOLE;
 
-		pstore_record_init(&record, psinfo);
-		record.type = PSTORE_TYPE_CONSOLE;
-
-		if (c > psinfo->bufsize)
-			c = psinfo->bufsize;
-
-		if (oops_in_progress) {
-			if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
-				break;
-		} else {
-			spin_lock_irqsave(&psinfo->buf_lock, flags);
-		}
-		record.buf = (char *)s;
-		record.size = c;
-		psinfo->write(&record);
-		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
-		s += c;
-		c = e - s;
-	}
+	record.buf = (char *)s;
+	record.size = c;
+	psinfo->write(&record);
 }
 
 static struct console pstore_console = {
@@ -589,6 +575,7 @@ int pstore_register(struct pstore_info *psi)
 		psi->write_user = pstore_write_user_compat;
 	psinfo = psi;
 	mutex_init(&psinfo->read_mutex);
+	sema_init(&psinfo->buf_lock, 1);
 	spin_unlock(&pstore_lock);
 
 	if (owner && !try_module_get(owner)) {
@@ -596,7 +583,8 @@ int pstore_register(struct pstore_info *psi)
 		return -EINVAL;
 	}
 
-	allocate_buf_for_compression();
+	if (psi->flags & PSTORE_FLAGS_DMESG)
+		allocate_buf_for_compression();
 
 	if (pstore_is_mounted())
 		pstore_get_records(0);

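The shape of the pstore locking change above: a semaphore, unlike a spinlock, may be taken with a blocking sleep when the caller is allowed to wait, so only the contexts that truly cannot sleep (NMI, panic, emergency restart) drop the dump. The acquire path condenses to a try-then-wait pattern; this fragment restates the diff's logic in isolation:

	if (down_trylock(&psinfo->buf_lock)) {
		/* Contended: NMI/panic/emerg paths must not wait. */
		if (pstore_cannot_wait(reason))
			return;
		/* Everyone else may sleep until the buffer is free. */
		if (down_interruptible(&psinfo->buf_lock))
			return;
	}
	/* ... fill psinfo->buf and call psinfo->write() ... */
	up(&psinfo->buf_lock);
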
@@ -802,27 +802,36 @@ static int ramoops_probe(struct platform_device *pdev)
 		goto fail_init_mprz;
 
 	cxt->pstore.data = cxt;
 	/*
-	 * Since bufsize is only used for dmesg crash dumps, it
-	 * must match the size of the dprz record (after PRZ header
-	 * and ECC bytes have been accounted for).
+	 * Prepare frontend flags based on which areas are initialized.
+	 * For ramoops_init_przs() cases, the "max count" variable tells
+	 * if there are regions present. For ramoops_init_prz() cases,
+	 * the single region size is how to check.
 	 */
-	cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
-	cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
-	if (!cxt->pstore.buf) {
-		pr_err("cannot allocate pstore crash dump buffer\n");
-		err = -ENOMEM;
-		goto fail_clear;
-	}
-	spin_lock_init(&cxt->pstore.buf_lock);
-
-	cxt->pstore.flags = PSTORE_FLAGS_DMESG;
+	cxt->pstore.flags = 0;
+	if (cxt->max_dump_cnt)
+		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
 	if (cxt->console_size)
 		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
-	if (cxt->ftrace_size)
+	if (cxt->max_ftrace_cnt)
 		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
 	if (cxt->pmsg_size)
 		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
 
+	/*
+	 * Since bufsize is only used for dmesg crash dumps, it
+	 * must match the size of the dprz record (after PRZ header
+	 * and ECC bytes have been accounted for).
+	 */
+	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
+		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+		if (!cxt->pstore.buf) {
+			pr_err("cannot allocate pstore crash dump buffer\n");
+			err = -ENOMEM;
+			goto fail_clear;
+		}
+	}
+
 	err = pstore_register(&cxt->pstore);
 	if (err) {

@@ -1174,6 +1174,14 @@ struct drm_plane_helper_funcs {
 	 * current one with the new plane configurations in the new
 	 * plane_state.
 	 *
+	 * Drivers should also swap the framebuffers between current plane
+	 * state (&drm_plane.state) and new_state.
+	 * This is required since cleanup for async commits is performed on
+	 * the new state, rather than old state like for traditional commits.
+	 * Since we want to give up the reference on the current (old) fb
+	 * instead of our brand new one, swap them in the driver during the
+	 * async commit.
+	 *
 	 * FIXME:
 	 *  - It only works for single plane updates
 	 *  - Async Pageflips are not supported yet

@@ -183,10 +183,14 @@ enum cpuhp_smt_control {
 extern enum cpuhp_smt_control cpu_smt_control;
 extern void cpu_smt_disable(bool force);
 extern void cpu_smt_check_topology(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
 #else
 # define cpu_smt_control		(CPU_SMT_ENABLED)
 static inline void cpu_smt_disable(bool force) { }
 static inline void cpu_smt_check_topology(void) { }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
 #endif
 
 #define IDLE_START 1

@@ -26,7 +26,7 @@
 #include <linux/errno.h>
 #include <linux/kmsg_dump.h>
 #include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/semaphore.h>
 #include <linux/time.h>
 #include <linux/types.h>
 
@@ -88,7 +88,7 @@ struct pstore_record {
 * @owner:	module which is repsonsible for this backend driver
 * @name:	name of the backend driver
 *
- * @buf_lock:	spinlock to serialize access to @buf
+ * @buf_lock:	semaphore to serialize access to @buf
 * @buf:	preallocated crash dump buffer
 * @bufsize:	size of @buf available for crash dump bytes (must match
 *		smallest number of bytes available for writing to a
@@ -173,7 +173,7 @@ struct pstore_info {
 	struct module	*owner;
 	char		*name;
 
-	spinlock_t	buf_lock;
+	struct semaphore buf_lock;
 	char		*buf;
 	size_t		bufsize;
 
@@ -199,7 +199,6 @@ struct pstore_info {
 
 extern int	pstore_register(struct pstore_info *);
 extern void	pstore_unregister(struct pstore_info *);
-extern bool	pstore_cannot_block_path(enum kmsg_dump_reason reason);
 
 struct pstore_ftrace_record {
 	unsigned long ip;

@@ -78,14 +78,12 @@ void synchronize_rcu(void);
 
 static inline void __rcu_read_lock(void)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
-		preempt_disable();
+	preempt_disable();
 }
 
 static inline void __rcu_read_unlock(void)
 {
-	if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
-		preempt_enable();
+	preempt_enable();
 }
 
 static inline void synchronize_rcu(void)

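Why the IS_ENABLED(CONFIG_PREEMPT_COUNT) guards above had to go: on kernels built without preempt counting, preempt_disable()/preempt_enable() still expand to barrier(), and Tiny RCU relies on at least that compiler barrier so the optimizer cannot move memory accesses across the boundaries of a read-side critical section. Roughly what the !PREEMPT_COUNT definitions reduce to (an illustrative model, not the kernel headers):

/* With !CONFIG_PREEMPT_COUNT the kernel's definitions boil down to: */
#define my_barrier()		__asm__ __volatile__("" : : : "memory")
#define my_preempt_disable()	my_barrier()
#define my_preempt_enable()	my_barrier()

static inline void my_rcu_read_lock(void)
{
	my_preempt_disable();	/* at minimum: block compiler code motion */
}

static inline void my_rcu_read_unlock(void)
{
	my_preempt_enable();
}
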
@@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
 	return val * hash_rnd[0];
 }
 
+#ifdef CONFIG_INET
 static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
 {
 	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
@@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
 
 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
 }
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+	return NULL;
+}
+#endif
 
 static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
 {

@@ -260,8 +260,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 	rcu_read_lock();
 
 	from = rcu_dereference(rt->from);
-	if (from && (rt->rt6i_flags & RTF_PCPU ||
-	    unlikely(!list_empty(&rt->rt6i_uncached))))
+	if (from)
 		fib6_get_cookie_safe(from, &cookie);
 
 	rcu_read_unlock();

@@ -161,6 +161,10 @@ enum {
 	TLS_PENDING_CLOSED_RECORD
 };
 
+enum tls_context_flags {
+	TLS_RX_SYNC_RUNNING = 0,
+};
+
 struct cipher_context {
 	u16 prepend_size;
 	u16 tag_size;

@@ -942,7 +942,7 @@ struct drm_i915_gem_execbuffer2 {
 	 * struct drm_i915_gem_exec_fence *fences.
 	 */
 	__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK              (7<<0)
+#define I915_EXEC_RING_MASK              (0x3f)
 #define I915_EXEC_DEFAULT                (0<<0)
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)

@@ -2118,7 +2118,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
 	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 }
 
-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
 	int cpu, ret = 0;
 
@@ -2152,7 +2152,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 	return ret;
 }
 
-static int cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
 {
 	int cpu, ret = 0;
 

@@ -258,6 +258,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
 		(kps % 1000) / 10);
 }
 
+__weak int arch_resume_nosmt(void)
+{
+	return 0;
+}
+
 /**
 * create_image - Create a hibernation image.
 * @platform_mode: Whether or not to use the platform driver.
@@ -325,6 +330,10 @@ static int create_image(int platform_mode)
 Enable_cpus:
 	enable_nonboot_cpus();
 
+	/* Allow architectures to do nosmt-specific post-resume dances */
+	if (!in_suspend)
+		error = arch_resume_nosmt();
+
 Platform_finish:
 	platform_finish(platform_mode);
 

@@ -223,30 +223,30 @@ static ssize_t config_show(struct device *dev,
 
 	mutex_lock(&test_fw_mutex);
 
-	len += snprintf(buf, PAGE_SIZE,
+	len += scnprintf(buf, PAGE_SIZE - len,
 			"Custom trigger configuration for: %s\n",
 			dev_name(dev));
 
 	if (test_fw_config->name)
-		len += snprintf(buf+len, PAGE_SIZE,
+		len += scnprintf(buf+len, PAGE_SIZE - len,
 				"name:\t%s\n",
 				test_fw_config->name);
 	else
-		len += snprintf(buf+len, PAGE_SIZE,
+		len += scnprintf(buf+len, PAGE_SIZE - len,
 				"name:\tEMTPY\n");
 
-	len += snprintf(buf+len, PAGE_SIZE,
+	len += scnprintf(buf+len, PAGE_SIZE - len,
 			"num_requests:\t%u\n", test_fw_config->num_requests);
 
-	len += snprintf(buf+len, PAGE_SIZE,
+	len += scnprintf(buf+len, PAGE_SIZE - len,
 			"send_uevent:\t\t%s\n",
 			test_fw_config->send_uevent ?
 			"FW_ACTION_HOTPLUG" :
 			"FW_ACTION_NOHOTPLUG");
-	len += snprintf(buf+len, PAGE_SIZE,
+	len += scnprintf(buf+len, PAGE_SIZE - len,
 			"sync_direct:\t\t%s\n",
 			test_fw_config->sync_direct ? "true" : "false");
-	len += snprintf(buf+len, PAGE_SIZE,
+	len += scnprintf(buf+len, PAGE_SIZE - len,
 			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
 
 	mutex_unlock(&test_fw_mutex);

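The snprintf-to-scnprintf swap above fixes two related mistakes: snprintf() returns the length the output would have had, so an accumulated len can exceed PAGE_SIZE and push the next buf+len write past the buffer, and the old calls also passed PAGE_SIZE instead of the remaining space. scnprintf() returns the number of characters actually stored. A user-space model of the distinction:

#include <stdarg.h>
#include <stdio.h>

/* Behaves like the kernel's scnprintf(): never reports more than
 * what actually fit in the buffer (excluding the trailing NUL). */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int would_be;

	if (size == 0)
		return 0;

	va_start(args, fmt);
	would_be = vsnprintf(buf, size, fmt, args);	/* may exceed size */
	va_end(args);

	return would_be >= (int)size ? (int)size - 1 : would_be;
}
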
@@ -880,8 +880,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
 		if (rc >= 0)
 			info.n_priv_flags = rc;
 	}
-	if (ops->get_regs_len)
-		info.regdump_len = ops->get_regs_len(dev);
+	if (ops->get_regs_len) {
+		int ret = ops->get_regs_len(dev);
+
+		if (ret > 0)
+			info.regdump_len = ret;
+	}
 
 	if (ops->get_eeprom_len)
 		info.eedump_len = ops->get_eeprom_len(dev);
 
@@ -1424,6 +1429,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 		return -EFAULT;
 
 	reglen = ops->get_regs_len(dev);
+	if (reglen <= 0)
+		return reglen;
 
 	if (regs.len > reglen)
 		regs.len = reglen;
 
@@ -1434,13 +1442,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 			return -ENOMEM;
 	}
 
+	if (regs.len < reglen)
+		reglen = regs.len;
+
 	ops->get_regs(dev, &regs, regbuf);
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &regs, sizeof(regs)))
 		goto out;
 	useraddr += offsetof(struct ethtool_regs, data);
-	if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
+	if (copy_to_user(useraddr, regbuf, reglen))
 		goto out;
 	ret = 0;
 

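Both ethtool fixes above come down to not trusting a length twice: get_regs_len() may now legitimately return 0 or a negative errno, and the user-supplied regs.len is clamped against the driver-reported reglen before copy_to_user(), so a driver whose register dump shrinks between the two calls cannot push the copy past the user buffer. The clamp is the usual take-the-minimum pattern, restated from the diff:

	/* Sketch: copy no more than both sides can hold.
	 * reglen   - what the driver reports it produced,
	 * regs.len - what userspace said its buffer holds. */
	if (regs.len < reglen)
		reglen = regs.len;

	if (copy_to_user(useraddr, regbuf, reglen))
		return -EFAULT;
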
@@ -30,6 +30,7 @@
 #include <linux/times.h>
 #include <net/net_namespace.h>
 #include <net/neighbour.h>
+#include <net/arp.h>
 #include <net/dst.h>
 #include <net/sock.h>
 #include <net/netevent.h>
@@ -2536,7 +2537,13 @@ int neigh_xmit(int index, struct net_device *dev,
 		if (!tbl)
 			goto out;
 		rcu_read_lock_bh();
-		neigh = __neigh_lookup_noref(tbl, addr, dev);
+		if (index == NEIGH_ARP_TABLE) {
+			u32 key = *((u32 *)addr);
+
+			neigh = __ipv4_neigh_lookup_noref(dev, key);
+		} else {
+			neigh = __neigh_lookup_noref(tbl, addr, dev);
+		}
 		if (!neigh)
 			neigh = __neigh_create(tbl, addr, dev, false);
 		err = PTR_ERR(neigh);

--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3065,7 +3065,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
 {
     while (thread_is_running(t)) {
 
+        /* note: 't' will still be around even after the unlock/lock
+         * cycle because pktgen_thread threads are only cleared at
+         * net exit
+         */
+        mutex_unlock(&pktgen_thread_lock);
         msleep_interruptible(100);
+        mutex_lock(&pktgen_thread_lock);
 
         if (signal_pending(current))
             goto signal;
@@ -3080,6 +3086,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
     struct pktgen_thread *t;
     int sig = 1;
 
+    /* prevent from racing with rmmod */
+    if (!try_module_get(THIS_MODULE))
+        return sig;
+
     mutex_lock(&pktgen_thread_lock);
 
     list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3093,6 +3103,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
         t->control |= (T_STOP);
 
     mutex_unlock(&pktgen_thread_lock);
+    module_put(THIS_MODULE);
     return sig;
 }

--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1960,7 +1960,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
     u32 itag = 0;
     struct rtable *rth;
     struct flowi4 fl4;
-    bool do_cache;
+    bool do_cache = true;
 
     /* IP on this device is disabled. */
 
@@ -2037,6 +2037,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
     if (res->type == RTN_BROADCAST) {
         if (IN_DEV_BFORWARD(in_dev))
             goto make_route;
+        /* not do cache if bc_forwarding is enabled */
+        if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+            do_cache = false;
         goto brd_input;
     }
 
@@ -2074,16 +2077,13 @@ out: return err;
     RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-    do_cache = false;
-    if (res->fi) {
-        if (!itag) {
-            rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
-            if (rt_cache_valid(rth)) {
-                skb_dst_set_noref(skb, &rth->dst);
-                err = 0;
-                goto out;
-            }
-            do_cache = true;
-        }
-    }
+    do_cache &= res->fi && !itag;
+    if (do_cache) {
+        rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+        if (rt_cache_valid(rth)) {
+            skb_dst_set_noref(skb, &rth->dst);
+            err = 0;
+            goto out;
+        }
+    }

--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -782,6 +782,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
     struct flowi6 fl6;
     struct ipcm6_cookie ipc6;
     int addr_len = msg->msg_namelen;
+    int hdrincl;
     u16 proto;
     int err;
 
@@ -795,6 +796,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
     if (msg->msg_flags & MSG_OOB)
         return -EOPNOTSUPP;
 
+    /* hdrincl should be READ_ONCE(inet->hdrincl)
+     * but READ_ONCE() doesn't work with bit fields.
+     * Doing this indirectly yields the same result.
+     */
+    hdrincl = inet->hdrincl;
+    hdrincl = READ_ONCE(hdrincl);
+
     /*
      *  Get and verify the address.
      */
@@ -886,11 +894,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
     opt = ipv6_fixup_options(&opt_space, opt);
 
     fl6.flowi6_proto = proto;
-    rfv.msg = msg;
-    rfv.hlen = 0;
-    err = rawv6_probe_proto_opt(&rfv, &fl6);
-    if (err)
-        goto out;
+
+    if (!hdrincl) {
+        rfv.msg = msg;
+        rfv.hlen = 0;
+        err = rawv6_probe_proto_opt(&rfv, &fl6);
+        if (err)
+            goto out;
+    }
 
     if (!ipv6_addr_any(daddr))
         fl6.daddr = *daddr;
@@ -907,7 +918,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
         fl6.flowi6_oif = np->ucast_oif;
     security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
-    if (inet->hdrincl)
+    if (hdrincl)
         fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
 
     if (ipc6.tclass < 0)
@@ -930,7 +941,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
         goto do_confirm;
 
 back_from_confirm:
-    if (inet->hdrincl)
+    if (hdrincl)
         err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
                     msg->msg_flags, &ipc6.sockc);
     else {

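A sketch of the bit-field workaround used in the raw.c hunk above, with a hypothetical struct standing in for inet: READ_ONCE() needs an addressable lvalue, which a C bit-field is not, so the value is first copied into a plain int and the single stable (volatile) load is done on that copy.

/* Hypothetical struct; the real field is inet->hdrincl. */
struct opts {
    unsigned int hdrincl:1;
};

static int snapshot_hdrincl(const struct opts *o)
{
    int hdrincl = o->hdrincl;           /* copy the bit-field out */

    /* READ_ONCE() boils down to roughly this volatile access,
     * preventing the compiler from re-reading o->hdrincl later
     * and observing a different value.
     */
    return *(volatile int *)&hdrincl;
}
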
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3017,8 +3017,8 @@ static int packet_release(struct socket *sock)
 
     synchronize_net();
 
+    kfree(po->rollover);
     if (f) {
-        kfree(po->rollover);
         fanout_release_data(f);
         kfree(f);
     }

--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
     wait_clean_list_grace();
 
     list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
-    if (ibmr_ret)
+    if (ibmr_ret) {
         *ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+        clean_nodes = clean_nodes->next;
+    }
     /* more than one entry in llist nodes */
-    if (clean_nodes->next)
-        llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
+    if (clean_nodes)
+        llist_add_batch(clean_nodes, clean_tail,
+                &pool->clean_list);
 
 }

--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2329,7 +2329,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
     union sctp_addr addr;
     struct sctp_af *af;
     int src_match = 0;
-    char *cookie;
 
     /* We must include the address that the INIT packet came from.
      * This is the only address that matters for an INIT packet.
@@ -2433,14 +2432,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
     /* Peer Rwnd : Current calculated value of the peer's rwnd. */
     asoc->peer.rwnd = asoc->peer.i.a_rwnd;
 
-    /* Copy cookie in case we need to resend COOKIE-ECHO. */
-    cookie = asoc->peer.cookie;
-    if (cookie) {
-        asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
-        if (!asoc->peer.cookie)
-            goto clean_up;
-    }
-
     /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
      * high (for example, implementations MAY use the size of the receiver
      * advertised window).
@@ -2609,7 +2600,9 @@ static int sctp_process_param(struct sctp_association *asoc,
     case SCTP_PARAM_STATE_COOKIE:
         asoc->peer.cookie_len =
             ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
-        asoc->peer.cookie = param.cookie->body;
+        asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+        if (!asoc->peer.cookie)
+            retval = 0;
         break;
 
     case SCTP_PARAM_HEARTBEAT_INFO:

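The sm_make_chunk.c change replaces a pointer aliasing the received chunk's body (which is freed soon after) with an owned copy taken at parse time. A minimal userspace analogue of that ownership fix, with hypothetical names; kmemdup() in the patch corresponds to this malloc+memcpy:

#include <stdlib.h>
#include <string.h>

struct peer {
    char *cookie;       /* owned copy, freed by us, not by the chunk */
    size_t cookie_len;
};

static int set_cookie(struct peer *p, const char *body, size_t len)
{
    p->cookie = malloc(len);
    if (!p->cookie)
        return -1;
    memcpy(p->cookie, body, len);   /* survives freeing the chunk */
    p->cookie_len = len;
    return 0;
}
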
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -898,6 +898,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
                     asoc->rto_initial;
     }
 
+    if (sctp_state(asoc, ESTABLISHED)) {
+        kfree(asoc->peer.cookie);
+        asoc->peer.cookie = NULL;
+    }
+
     if (sctp_state(asoc, ESTABLISHED) ||
         sctp_state(asoc, CLOSED) ||
         sctp_state(asoc, SHUTDOWN_RECEIVED)) {

--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -545,10 +545,22 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
     return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+                 struct sock *sk, u32 seq, u64 rcd_sn)
+{
+    struct net_device *netdev;
+
+    if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+        return;
+    netdev = READ_ONCE(tls_ctx->netdev);
+    if (netdev)
+        netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+    clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
     struct tls_context *tls_ctx = tls_get_ctx(sk);
-    struct net_device *netdev = tls_ctx->netdev;
     struct tls_offload_context_rx *rx_ctx;
     u32 is_req_pending;
     s64 resync_req;
@@ -563,10 +575,10 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
     is_req_pending = resync_req;
 
     if (unlikely(is_req_pending) && req_seq == seq &&
-        atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
-        netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk,
-                              seq + TLS_HEADER_SIZE - 1,
-                              rcd_sn);
+        atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
+        seq += TLS_HEADER_SIZE - 1;
+        tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
+    }
 }
 
 static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
@@ -954,7 +966,10 @@ static int tls_device_down(struct net_device *netdev)
         if (ctx->rx_conf == TLS_HW)
             netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                             TLS_OFFLOAD_CTX_DIR_RX);
-        ctx->netdev = NULL;
+        WRITE_ONCE(ctx->netdev, NULL);
+        smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+        while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+            usleep_range(10, 200);
         dev_put(netdev);
         list_del_init(&ctx->list);

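The tls_device.c hunks replace a sleeping lock with a flag bit taken around the resync and a busy-wait on teardown. A rough userspace sketch of the same idiom, using C11 atomics in place of the kernel's test_and_set_bit()/clear_bit_unlock() (names hypothetical, illustration only):

#include <stdatomic.h>

#define RX_SYNC_RUNNING 0

static atomic_ulong flags;

static void resync(void)
{
    unsigned long bit = 1UL << RX_SYNC_RUNNING;

    /* try to take the bit; if someone else holds it, skip this resync */
    if (atomic_fetch_or(&flags, bit) & bit)
        return;
    /* ... safe to touch the device while the bit is held ... */
    atomic_fetch_and(&flags, ~bit);     /* clear_bit_unlock() analogue */
}

static void teardown(void)
{
    unsigned long bit = 1UL << RX_SYNC_RUNNING;

    /* after publishing "device gone", wait out any in-flight resync */
    while (atomic_load(&flags) & bit)
        ;   /* the kernel sleeps here instead (usleep_range) */
}
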