Merge android-4.19.63 (75ff56e) into msm-4.19

* refs/heads/tmp-75ff56e:
  Linux 4.19.63
  access: avoid the RCU grace period for the temporary subjective credentials
  libnvdimm/bus: Stop holding nvdimm_bus_list_mutex over __nd_ioctl()
  powerpc/tm: Fix oops on sigreturn on systems without TM
  powerpc/xive: Fix loop exit-condition in xive_find_target_in_mask()
  ALSA: hda - Add a conexant codec entry to let mute led work
  ALSA: line6: Fix wrong altsetting for LINE6_PODHD500_1
  ALSA: ac97: Fix double free of ac97_codec_device
  hpet: Fix division by zero in hpet_time_div()
  mei: me: add mule creek canyon (EHL) device ids
  fpga-manager: altera-ps-spi: Fix build error
  binder: prevent transactions to context manager from its own process.
  x86/speculation/mds: Apply more accurate check on hypervisor platform
  x86/sysfb_efi: Add quirks for some devices with swapped width and height
  btrfs: inode: Don't compress if NODATASUM or NODATACOW set
  usb: pci-quirks: Correct AMD PLL quirk detection
  usb: wusbcore: fix unbalanced get/put cluster_id
  locking/lockdep: Hide unused 'class' variable
  mm: use down_read_killable for locking mmap_sem in access_remote_vm
  locking/lockdep: Fix lock used or unused stats error
  proc: use down_read_killable mmap_sem for /proc/pid/maps
  cxgb4: reduce kernel stack usage in cudbg_collect_mem_region()
  proc: use down_read_killable mmap_sem for /proc/pid/map_files
  proc: use down_read_killable mmap_sem for /proc/pid/clear_refs
  proc: use down_read_killable mmap_sem for /proc/pid/pagemap
  proc: use down_read_killable mmap_sem for /proc/pid/smaps_rollup
  mm/mmu_notifier: use hlist_add_head_rcu()
  memcg, fsnotify: no oom-kill for remote memcg charging
  mm/gup.c: remove some BUG_ONs from get_gate_page()
  mm/gup.c: mark undo_dev_pagemap as __maybe_unused
  9p: pass the correct prototype to read_cache_page
  mm/kmemleak.c: fix check for softirq context
  sh: prevent warnings when using iounmap
  block/bio-integrity: fix a memory leak bug
  powerpc/eeh: Handle hugepages in ioremap space
  dlm: check if workqueues are NULL before flushing/destroying
  mailbox: handle failed named mailbox channel request
  f2fs: avoid out-of-range memory access
  block: init flush rq ref count to 1
  powerpc/boot: add {get, put}_unaligned_be32 to xz_config.h
  PCI: dwc: pci-dra7xx: Fix compilation when !CONFIG_GPIOLIB
  RDMA/rxe: Fill in wc byte_len with IB_WC_RECV_RDMA_WITH_IMM
  perf hists browser: Fix potential NULL pointer dereference found by the smatch tool
  perf annotate: Fix dereferencing freed memory found by the smatch tool
  perf session: Fix potential NULL pointer dereference found by the smatch tool
  perf top: Fix potential NULL pointer dereference detected by the smatch tool
  perf stat: Fix use-after-freed pointer detected by the smatch tool
  perf test mmap-thread-lookup: Initialize variable to suppress memory sanitizer warning
  PCI: mobiveil: Use the 1st inbound window for MEM inbound transactions
  PCI: mobiveil: Initialize Primary/Secondary/Subordinate bus numbers
  kallsyms: exclude kasan local symbols on s390
  PCI: mobiveil: Fix the Class Code field
  PCI: mobiveil: Fix PCI base address in MEM/IO outbound windows
  arm64: assembler: Switch ESB-instruction with a vanilla nop if !ARM64_HAS_RAS
  IB/ipoib: Add child to parent list only if device initialized
  powerpc/mm: Handle page table allocation failures
  IB/mlx5: Fixed reporting counters on 2nd port for Dual port RoCE
  serial: sh-sci: Fix TX DMA buffer flushing and workqueue races
  serial: sh-sci: Terminate TX DMA during buffer flushing
  RDMA/i40iw: Set queue pair state when being queried
  powerpc/4xx/uic: clear pending interrupt after irq type/pol change
  um: Silence lockdep complaint about mmap_sem
  mm/swap: fix release_pages() when releasing devmap pages
  mfd: hi655x-pmic: Fix missing return value check for devm_regmap_init_mmio_clk
  mfd: arizona: Fix undefined behavior
  mfd: core: Set fwnode for created devices
  mfd: madera: Add missing of table registration
  recordmcount: Fix spurious mcount entries on powerpc
  powerpc/xmon: Fix disabling tracing while in xmon
  powerpc/cacheflush: fix variable set but not used
  iio: iio-utils: Fix possible incorrect mask calculation
  PCI: xilinx-nwl: Fix Multi MSI data programming
  genksyms: Teach parser about 128-bit built-in types
  kbuild: Add -Werror=unknown-warning-option to CLANG_FLAGS
  i2c: stm32f7: fix the get_irq error cases
  PCI: sysfs: Ignore lockdep for remove attribute
  serial: mctrl_gpio: Check if GPIO property exisits before requesting it
  drm/msm: Depopulate platform on probe failure
  powerpc/pci/of: Fix OF flags parsing for 64bit BARs
  mmc: sdhci: sdhci-pci-o2micro: Check if controller supports 8-bit width
  usb: gadget: Zero ffs_io_data
  tty: serial_core: Set port active bit in uart_port_activate
  serial: imx: fix locking in set_termios()
  drm/rockchip: Properly adjust to a true clock in adjusted_mode
  powerpc/pseries/mobility: prevent cpu hotplug during DT update
  drm/amd/display: fix compilation error
  phy: renesas: rcar-gen2: Fix memory leak at error paths
  drm/virtio: Add memory barriers for capset cache.
  drm/amd/display: Always allocate initial connector state state
  serial: 8250: Fix TX interrupt handling condition
  tty: serial: msm_serial: avoid system lockup condition
  tty/serial: digicolor: Fix digicolor-usart already registered warning
  memstick: Fix error cleanup path of memstick_init
  drm/crc-debugfs: Also sprinkle irqrestore over early exits
  drm/crc-debugfs: User irqsafe spinlock in drm_crtc_add_crc_entry
  gpu: host1x: Increase maximum DMA segment size
  drm/bridge: sii902x: pixel clock unit is 10kHz instead of 1kHz
  drm/bridge: tc358767: read display_props in get_modes()
  PCI: Return error if cannot probe VF
  drm/edid: Fix a missing-check bug in drm_load_edid_firmware()
  drm/amdkfd: Fix sdma queue map issue
  drm/amdkfd: Fix a potential memory leak
  drm/amd/display: Disable ABM before destroy ABM struct
  drm/amdgpu/sriov: Need to initialize the HDP_NONSURFACE_BAStE
  drm/amd/display: Fill prescale_params->scale for RGB565
  tty: serial: cpm_uart - fix init when SMC is relocated
  pinctrl: rockchip: fix leaked of_node references
  tty: max310x: Fix invalid baudrate divisors calculator
  usb: core: hub: Disable hub-initiated U1/U2
  staging: vt6656: use meaningful error code during buffer allocation
  iio: adc: stm32-dfsdm: missing error case during probe
  iio: adc: stm32-dfsdm: manage the get_irq error case
  drm/panel: simple: Fix panel_simple_dsi_probe
  hvsock: fix epollout hang from race condition

Change-Id: I4c37256db5ec08367a22a1c50bb97db267c822da
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
Commit 5996b2fe7b by Ivaylo Georgiev, 2019-08-13 01:20:37 -07:00
114 changed files with 853 additions and 358 deletions

View file

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 62
+SUBLEVEL = 63
 EXTRAVERSION =
 NAME = "People's Front"
@@ -499,6 +499,7 @@ ifneq ($(GCC_TOOLCHAIN),)
 CLANG_FLAGS	+= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 CLANG_FLAGS	+= -no-integrated-as
+CLANG_FLAGS	+= -Werror=unknown-warning-option
 KBUILD_CFLAGS	+= $(CLANG_FLAGS)
 KBUILD_AFLAGS	+= $(CLANG_FLAGS)
 export CLANG_FLAGS

View file

@@ -124,7 +124,11 @@
  * RAS Error Synchronization barrier
  */
 	.macro esb
+#ifdef CONFIG_ARM64_RAS_EXTN
 	hint	#16
+#else
 	nop
+#endif
 	.endm
 
 /*

View file

@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
 
 #ifdef __LITTLE_ENDIAN__
 #define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+	return swab32p((u32 *)p);
+}
 #else
 #define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+	return *p;
+}
 #endif
 
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((u32 *)p) = cpu_to_be32(val);
+}
+
 #define memeq(a, b, size) (memcmp(a, b, size) == 0)
 #define memzero(buf, size) memset(buf, 0, size)
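Aside: the helpers added here mirror the kernel's generic {get,put}_unaligned_be32() contract, which the XZ decompressor expects. A minimal userspace sketch of that contract (the demo_* names are illustrative, not part of the patch; byte-wise access is one portable way to get the same observable result on either endianness):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the helpers the patch adds: byte-wise
 * access works at any alignment and any host endianness. */
static uint32_t demo_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

static void demo_put_unaligned_be32(uint32_t val, void *p)
{
	uint8_t *b = p;

	b[0] = val >> 24;
	b[1] = val >> 16;
	b[2] = val >> 8;
	b[3] = val;
}

int main(void)
{
	uint8_t buf[8];

	/* Round-trip through a deliberately odd offset. */
	demo_put_unaligned_be32(0xDEADBEEFu, buf + 1);
	printf("0x%08X\n", (unsigned int)demo_get_unaligned_be32(buf + 1));
	return 0;
}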

View file

@@ -32,9 +32,12 @@
  * not expect this type of fault. flush_cache_vmap is not exactly the right
  * place to put this, but it seems to work well enough.
  */
-#define flush_cache_vmap(start, end)	do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	asm volatile("ptesync" ::: "memory");
+}
 #else
-#define flush_cache_vmap(start, end)	do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
 #endif
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1

View file

@@ -360,10 +360,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
 	ptep = find_init_mm_pte(token, &hugepage_shift);
 	if (!ptep)
 		return token;
-	WARN_ON(hugepage_shift);
-	pa = pte_pfn(*ptep) << PAGE_SHIFT;
 
-	return pa | (token & (PAGE_SIZE-1));
+	pa = pte_pfn(*ptep);
+
+	/* On radix we can do hugepage mappings for io, so handle that */
+	if (hugepage_shift) {
+		pa <<= hugepage_shift;
+		pa |= token & ((1ul << hugepage_shift) - 1);
+	} else {
+		pa <<= PAGE_SHIFT;
+		pa |= token & (PAGE_SIZE - 1);
+	}
+	return pa;
 }
 
 /*
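Aside: the reworked eeh_token_to_phys() keeps the offset bits below the mapping's shift instead of assuming a 4K page. A hedged sketch of that offset arithmetic with made-up numbers (pure illustration; real frame numbers and shifts come from the page table):

#include <stdio.h>

int main(void)
{
	/* Illustrative values for a 16MB hugepage mapping (shift 24). */
	unsigned long pfn = 0x3;            /* stand-in for the PTE's frame number */
	unsigned long hugepage_shift = 24;
	unsigned long token = 0xd0001234UL; /* virtual address inside the mapping */
	unsigned long pa;

	/* Same shape as the patched code: shift the frame up, keep the
	 * low hugepage_shift bits of the token as the offset. */
	pa = (pfn << hugepage_shift) | (token & ((1UL << hugepage_shift) - 1));
	printf("pa = 0x%lx\n", pa); /* prints pa = 0x3001234 */
	return 0;
}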

View file

@@ -45,6 +45,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
 	if (addr0 & 0x02000000) {
 		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
 		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+		if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+			flags |= IORESOURCE_MEM_64;
 		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
 		if (addr0 & 0x40000000)
 			flags |= IORESOURCE_PREFETCH

View file

@@ -1202,6 +1202,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
 			goto bad;
 
 		if (MSR_TM_ACTIVE(msr_hi<<32)) {
+			/* Trying to start TM on non TM system */
+			if (!cpu_has_feature(CPU_FTR_TM))
+				goto bad;
 			/* We only recheckpoint on return if we're
 			 * transaction.
 			 */

View file

@@ -750,6 +750,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	if (MSR_TM_ACTIVE(msr)) {
 		/* We recheckpoint on return. */
 		struct ucontext __user *uc_transact;
+
+		/* Trying to start TM on non TM system */
+		if (!cpu_has_feature(CPU_FTR_TM))
+			goto badframe;
+
 		if (__get_user(uc_transact, &uc->uc_link))
 			goto badframe;
 		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,

View file

@@ -150,6 +150,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
+		if (!pu)
+			return NULL;
 		if (pshift == PUD_SHIFT)
 			return (pte_t *)pu;
 		else if (pshift > PMD_SHIFT) {
@@ -158,6 +160,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 		} else {
 			pdshift = PMD_SHIFT;
 			pm = pmd_alloc(mm, pu, addr);
+			if (!pm)
+				return NULL;
 			if (pshift == PMD_SHIFT)
 				/* 16MB hugepage */
 				return (pte_t *)pm;
@@ -174,12 +178,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
 	} else {
 		pdshift = PUD_SHIFT;
 		pu = pud_alloc(mm, pg, addr);
+		if (!pu)
+			return NULL;
 		if (pshift >= PUD_SHIFT) {
 			ptl = pud_lockptr(mm, pu);
 			hpdp = (hugepd_t *)pu;
 		} else {
 			pdshift = PMD_SHIFT;
 			pm = pmd_alloc(mm, pu, addr);
+			if (!pm)
+				return NULL;
 			ptl = pmd_lockptr(mm, pm);
 			hpdp = (hugepd_t *)pm;
 		}

View file

@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
 
 	mtdcr(uic->dcrbase + UIC_PR, pr);
 	mtdcr(uic->dcrbase + UIC_TR, tr);
+	mtdcr(uic->dcrbase + UIC_SR, ~mask);
 
 	raw_spin_unlock_irqrestore(&uic->lock, flags);

View file

@@ -9,6 +9,7 @@
  * 2 as published by the Free Software Foundation.
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
 #include <linux/smp.h>
@@ -344,11 +345,19 @@ void post_mobility_fixup(void)
 	if (rc)
 		printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
 
+	/*
+	 * We don't want CPUs to go online/offline while the device
+	 * tree is being updated.
+	 */
+	cpus_read_lock();
+
 	rc = pseries_devicetree_update(MIGRATION_SCOPE);
 	if (rc)
 		printk(KERN_ERR "Post-mobility device tree update "
 				"failed: %d\n", rc);
 
+	cpus_read_unlock();
+
 	/* Possibly switch to a new RFI flush type */
 	pseries_setup_rfi_flush();

View file

@@ -483,7 +483,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 	 * Now go through the entire mask until we find a valid
 	 * target.
 	 */
-	for (;;) {
+	do {
 		/*
 		 * We re-check online as the fallback case passes us
 		 * an untested affinity mask
@@ -491,12 +491,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
 		if (cpu_online(cpu) && xive_try_pick_target(cpu))
 			return cpu;
 		cpu = cpumask_next(cpu, mask);
-		if (cpu == first)
-			break;
 		/* Wrap around */
 		if (cpu >= nr_cpu_ids)
 			cpu = cpumask_first(mask);
-	}
+	} while (cpu != first);
+
 	return -1;
 }

View file

@@ -466,8 +466,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
 	local_irq_save(flags);
 	hard_irq_disable();
 
-	tracing_enabled = tracing_is_on();
-	tracing_off();
+	if (!fromipi) {
+		tracing_enabled = tracing_is_on();
+		tracing_off();
+	}
 
 	bp = in_breakpoint_table(regs->nip, &offset);
 	if (bp != NULL) {

View file

@@ -371,7 +371,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
 
 #define ioremap_nocache	ioremap
 #define ioremap_uc	ioremap
-#define iounmap		__iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+	__iounmap(addr);
+}
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem

View file

@@ -53,7 +53,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write(&new->mmap_sem);
+	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
 	up_write(&new->mmap_sem);
 }

View file

@@ -1196,7 +1196,7 @@ static ssize_t l1tf_show_state(char *buf)
 
 static ssize_t mds_show_state(char *buf)
 {
-	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
 		return sprintf(buf, "%s; SMT Host state unknown\n",
 			       mds_strings[mds_mitigation]);
 	}

View file

@@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
 	{},
 };
 
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+	{
+		/*
+		 * Lenovo MIIX310-10ICR, only some batches have the troublesome
+		 * 800x1280 portrait screen. Luckily the portrait version has
+		 * its own BIOS version, so we match on that.
+		 */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+			DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+		},
+	},
+	{
+		/* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo MIIX 320-10ICR"),
+		},
+	},
+	{
+		/* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+					"Lenovo ideapad D330-10IGM"),
+		},
+	},
+	{},
+};
+
 __init void sysfb_apply_efi_quirks(void)
 {
 	if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
 	    !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
 		dmi_check_system(efifb_dmi_system_table);
+
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+	    dmi_check_system(efifb_dmi_swap_width_height)) {
+		u16 temp = screen_info.lfb_width;
+
+		screen_info.lfb_width = screen_info.lfb_height;
+		screen_info.lfb_height = temp;
+		screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+	}
 }

View file

@@ -291,8 +291,12 @@ bool bio_integrity_prep(struct bio *bio)
 		ret = bio_integrity_add_page(bio, virt_to_page(buf),
 					     bytes, offset);
 
-		if (ret == 0)
-			return false;
+		if (ret == 0) {
+			printk(KERN_ERR "could not attach integrity payload\n");
+			kfree(buf);
+			status = BLK_STS_RESOURCE;
+			goto err_end_io;
+		}
 
 		if (ret < bytes)
 			break;

View file

@@ -198,6 +198,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->internal_tag = -1;
 	rq->start_time_ns = ktime_get_ns();
 	rq->part = NULL;
+	refcount_set(&rq->ref, 1);
 }
 EXPORT_SYMBOL(blk_rq_init);

View file

@@ -3104,7 +3104,7 @@ static void binder_transaction(struct binder_proc *proc,
 		else
 			return_error = BR_DEAD_REPLY;
 		mutex_unlock(&context->context_mgr_node_lock);
-		if (target_node && target_proc == proc) {
+		if (target_node && target_proc->pid == proc->pid) {
 			binder_user_error("%d:%d got transaction to context manager from process owning it\n",
 					  proc->pid, thread->pid);
 			return_error = BR_FAILED_REPLY;

View file

@@ -570,8 +570,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
 	unsigned long long m;
 
 	m = hpets->hp_tick_freq + (dis >> 1);
-	do_div(m, dis);
-	return (unsigned long)m;
+	return div64_ul(m, dis);
 }
 
 static int
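Aside on why this one-liner is a divide-by-zero fix rather than a cleanup: do_div() treats its divisor as 32-bit, so a 64-bit dis whose low word happens to be zero truncates to 0 before the division, while div64_ul() performs a full 64/64 division. A hedged userspace illustration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t m = 1000000000000ULL;
	uint64_t dis = 0x100000000ULL;   /* only bit 32 set */
	uint32_t low = (uint32_t)dis;    /* what a 32-bit divisor sees: 0 */

	printf("truncated divisor = %u\n", (unsigned int)low);
	/* m / low would divide by zero; full-width division is fine: */
	printf("64-bit quotient = %llu\n", (unsigned long long)(m / dis));
	return 0;
}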

View file

@@ -39,6 +39,7 @@ config ALTERA_PR_IP_CORE_PLAT
 config FPGA_MGR_ALTERA_PS_SPI
 	tristate "Altera FPGA Passive Serial over SPI"
 	depends on SPI
+	select BITREVERSE
 	help
 	  FPGA manager driver support for Altera Arria/Cyclone/Stratix
 	  using the passive serial interface over SPI.

View file

@@ -1037,6 +1037,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
 	/* After HDP is initialized, flush HDP.*/
 	adev->nbio_funcs->hdp_flush(adev, NULL);

View file

@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
 	return 0;
 }
 
-static int unmap_sdma_queues(struct device_queue_manager *dqm,
-				unsigned int sdma_engine)
+static int unmap_sdma_queues(struct device_queue_manager *dqm)
 {
-	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
-			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
-			sdma_engine);
+	int i, retval = 0;
+
+	for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) {
+		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
+		if (retval)
+			return retval;
+	}
+
+	return retval;
 }
 
 /* dqm->lock mutex has to be locked before calling this function */
@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	pr_debug("Before destroying queues, sdma queue count is : %u\n",
 		dqm->sdma_queue_count);
 
-	if (dqm->sdma_queue_count > 0) {
-		unmap_sdma_queues(dqm, 0);
-		unmap_sdma_queues(dqm, 1);
-	}
+	if (dqm->sdma_queue_count > 0)
+		unmap_sdma_queues(dqm);
 
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			filter, filter_param, false, 0);

View file

@@ -75,6 +75,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	struct v9_mqd *m;
 	struct kfd_dev *kfd = mm->dev;
 
+	*mqd_mem_obj = NULL;
 	/* From V9, for CWSR, the control stack is located on the next page
 	 * boundary after the mqd, we will use the gtt allocation function
 	 * instead of sub-allocation function.
@@ -92,8 +93,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
 	} else
 		retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
 				mqd_mem_obj);
-	if (retval != 0)
+	if (retval) {
+		kfree(*mqd_mem_obj);
 		return -ENOMEM;
+	}
 
 	m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
 	addr = (*mqd_mem_obj)->gpu_addr;

View file

@@ -3644,6 +3644,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 {
 	struct amdgpu_device *adev = dm->ddev->dev_private;
 
+	/*
+	 * Some of the properties below require access to state, like bpc.
+	 * Allocate some default initial connector state with our reset helper.
+	 */
+	if (aconnector->base.funcs->reset)
+		aconnector->base.funcs->reset(&aconnector->base);
+
 	aconnector->connector_id = link_index;
 	aconnector->dc_link = link;
 	aconnector->base.interlace_allowed = false;
@@ -3811,9 +3818,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
 			&aconnector->base,
 			&amdgpu_dm_connector_helper_funcs);
 
-	if (aconnector->base.funcs->reset)
-		aconnector->base.funcs->reset(&aconnector->base);
-
 	amdgpu_dm_connector_init_helper(
 		dm,
 		aconnector,

View file

@@ -474,6 +474,8 @@ void dce_abm_destroy(struct abm **abm)
 {
 	struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
 
+	abm_dce->base.funcs->set_abm_immediate_disable(*abm);
+
 	kfree(abm_dce);
 	*abm = NULL;
 }

View file

@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
 	prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
 
 	switch (plane_state->format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		prescale_params->scale = 0x2082;
+		break;
 	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
 	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
 		prescale_params->scale = 0x2020;

View file

@@ -23,6 +23,7 @@
  *
  */
 
+#include <linux/delay.h>
 #include "dm_services.h"
 #include "core_types.h"
 #include "resource.h"

View file

@@ -261,10 +261,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
 	struct regmap *regmap = sii902x->regmap;
 	u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
 	struct hdmi_avi_infoframe frame;
+	u16 pixel_clock_10kHz = adj->clock / 10;
 	int ret;
 
-	buf[0] = adj->clock;
-	buf[1] = adj->clock >> 8;
+	buf[0] = pixel_clock_10kHz & 0xff;
+	buf[1] = pixel_clock_10kHz >> 8;
 	buf[2] = adj->vrefresh;
 	buf[3] = 0x00;
 	buf[4] = adj->hdisplay;

View file

@@ -1149,6 +1149,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
 	struct tc_data *tc = connector_to_tc(connector);
 	struct edid *edid;
 	unsigned int count;
+	int ret;
+
+	ret = tc_get_display_props(tc);
+	if (ret < 0) {
+		dev_err(tc->dev, "failed to read display props: %d\n", ret);
+		return 0;
+	}
 
 	if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
 		count = tc->panel->funcs->get_modes(tc->panel);

View file

@@ -379,12 +379,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 	struct drm_crtc_crc *crc = &crtc->crc;
 	struct drm_crtc_crc_entry *entry;
 	int head, tail;
+	unsigned long flags;
 
-	spin_lock(&crc->lock);
+	spin_lock_irqsave(&crc->lock, flags);
 
 	/* Caller may not have noticed yet that userspace has stopped reading */
 	if (!crc->entries) {
-		spin_unlock(&crc->lock);
+		spin_unlock_irqrestore(&crc->lock, flags);
 		return -EINVAL;
 	}
@@ -395,7 +396,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 		bool was_overflow = crc->overflow;
 
 		crc->overflow = true;
-		spin_unlock(&crc->lock);
+		spin_unlock_irqrestore(&crc->lock, flags);
 
 		if (!was_overflow)
 			DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
@@ -411,7 +412,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
 	head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
 	crc->head = head;
 
-	spin_unlock(&crc->lock);
+	spin_unlock_irqrestore(&crc->lock, flags);
 
 	wake_up_interruptible(&crc->wq);

View file

@@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
 	 * the last one found one as a fallback.
 	 */
 	fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+	if (!fwstr)
+		return ERR_PTR(-ENOMEM);
 	edidstr = fwstr;
 
 	while ((edidname = strsep(&edidstr, ","))) {

View file

@@ -1336,16 +1336,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
 
 	ret = add_gpu_components(&pdev->dev, &match);
 	if (ret)
-		return ret;
+		goto fail;
 
 	/* on all devices that I am aware of, iommu's which can map
 	 * any address the cpu can see are used:
 	 */
 	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
 	if (ret)
-		return ret;
+		goto fail;
 
-	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+	ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+	if (ret)
+		goto fail;
+
+	return 0;
+
+fail:
+	of_platform_depopulate(&pdev->dev);
+	return ret;
 }
 
 static int msm_pdev_remove(struct platform_device *pdev)

View file

@@ -2803,7 +2803,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
 	dsi->format = desc->format;
 	dsi->lanes = desc->lanes;
 
-	return mipi_dsi_attach(dsi);
+	err = mipi_dsi_attach(dsi);
+	if (err) {
+		struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+		drm_panel_remove(&panel->base);
+	}
+
+	return err;
 }
 
 static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)

View file

@@ -880,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
 	struct vop *vop = to_vop(crtc);
 
 	adjusted_mode->clock =
-		clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+		DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+			     1000);
 
 	return true;
 }

View file

@@ -528,6 +528,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	if (!ret)
 		return -EBUSY;
 
+	/* is_valid check must proceed before copy of the cache entry. */
+	smp_rmb();
+
 	ptr = cache_ent->caps_cache;
 
 copy_exit:

View file

@@ -588,6 +588,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
 		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
 			memcpy(cache_ent->caps_cache, resp->capset_data,
 			       cache_ent->size);
+			/* Copy must occur before is_valid is signalled. */
+			smp_wmb();
 			atomic_set(&cache_ent->is_valid, 1);
 			break;
 		}
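Aside: this smp_wmb() pairs with the smp_rmb() added in the ioctl path above — the payload copy must be visible before is_valid reads as set. A rough C11 equivalent of the same publish/consume pattern (demo_* names are illustrative; the driver itself uses kernel barriers and atomic_t):

#include <stdatomic.h>
#include <string.h>

struct demo_cache_entry {
	char data[64];
	atomic_int is_valid;
};

/* Writer: publish the data, then signal validity with release semantics. */
static void demo_publish(struct demo_cache_entry *ent, const char *payload)
{
	strncpy(ent->data, payload, sizeof(ent->data) - 1);
	atomic_store_explicit(&ent->is_valid, 1, memory_order_release);
}

/* Reader: observe validity with acquire semantics before touching data. */
static int demo_consume(struct demo_cache_entry *ent, char *out, size_t len)
{
	if (!atomic_load_explicit(&ent->is_valid, memory_order_acquire))
		return 0;
	memcpy(out, ent->data, len < sizeof(ent->data) ? len : sizeof(ent->data));
	return 1;
}

int main(void)
{
	static struct demo_cache_entry ent; /* zero-initialized */
	char out[64];

	demo_publish(&ent, "capset payload");
	return demo_consume(&ent, out, sizeof(out)) ? 0 : 1;
}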

View file

@@ -423,6 +423,9 @@ static int host1x_device_add(struct host1x *host1x,
 
 	of_dma_configure(&device->dev, host1x->dev->of_node, true);
 
+	device->dev.dma_parms = &device->dma_parms;
+	dma_set_max_seg_size(&device->dev, SZ_4M);
+
 	err = host1x_device_parse_dt(device, driver);
 	if (err < 0) {
 		kfree(device);

View file

@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
@@ -1782,15 +1781,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = {
 
 static int stm32f7_i2c_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct stm32f7_i2c_dev *i2c_dev;
 	const struct stm32f7_i2c_setup *setup;
 	struct resource *res;
-	u32 irq_error, irq_event, clk_rate, rise_time, fall_time;
+	u32 clk_rate, rise_time, fall_time;
 	struct i2c_adapter *adap;
 	struct reset_control *rst;
 	dma_addr_t phy_addr;
-	int ret;
+	int irq_error, irq_event, ret;
 
 	i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
 	if (!i2c_dev)
@@ -1802,16 +1800,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
 		return PTR_ERR(i2c_dev->base);
 	phy_addr = (dma_addr_t)res->start;
 
-	irq_event = irq_of_parse_and_map(np, 0);
-	if (!irq_event) {
-		dev_err(&pdev->dev, "IRQ event missing or invalid\n");
-		return -EINVAL;
+	irq_event = platform_get_irq(pdev, 0);
+	if (irq_event <= 0) {
+		if (irq_event != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
+				irq_event);
+		return irq_event ? : -ENOENT;
 	}
 
-	irq_error = irq_of_parse_and_map(np, 1);
-	if (!irq_error) {
-		dev_err(&pdev->dev, "IRQ error missing or invalid\n");
-		return -EINVAL;
+	irq_error = platform_get_irq(pdev, 1);
+	if (irq_error <= 0) {
+		if (irq_error != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
				irq_error);
+		return irq_error ? : -ENOENT;
 	}
 
 	i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);

View file

@@ -1144,6 +1144,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
 	 * So IRQ associated to filter instance 0 is dedicated to the Filter 0.
 	 */
 	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		if (irq != -EPROBE_DEFER)
+			dev_err(dev, "Failed to get IRQ: %d\n", irq);
+		return irq;
+	}
+
 	ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
 			       0, pdev->name, adc);
 	if (ret < 0) {

View file

@@ -213,6 +213,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
 	}
 	priv->dfsdm.phys_base = res->start;
 	priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->dfsdm.base))
+		return PTR_ERR(priv->dfsdm.base);
 
 	/*
 	 * "dfsdm" clock is mandatory for DFSDM peripheral clocking.
@@ -222,8 +224,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
 	 */
 	priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
 	if (IS_ERR(priv->clk)) {
-		dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
-		return -EINVAL;
+		ret = PTR_ERR(priv->clk);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
+		return ret;
 	}
 
 	priv->aclk = devm_clk_get(&pdev->dev, "audio");

View file

@@ -806,6 +806,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
 	struct i40iw_qp *iwqp = to_iwqp(ibqp);
 	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
 
+	attr->qp_state = iwqp->ibqp_state;
+	attr->cur_qp_state = attr->qp_state;
 	attr->qp_access_flags = 0;
 	attr->cap.max_send_wr = qp->qp_uk.sq_size;
 	attr->cap.max_recv_wr = qp->qp_uk.rq_size;

View file

@@ -197,19 +197,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
 			     vl_15_dropped);
 }
 
-static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-	int err;
+	struct mlx5_core_dev *mdev;
+	bool native_port = true;
+	u8 mdev_port_num;
 	void *out_cnt;
+	int err;
+
+	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+	if (!mdev) {
+		/* Fail to get the native port, likely due to 2nd port is still
+		 * unaffiliated. In such case default to 1st port and attached
+		 * PF device.
+		 */
+		native_port = false;
+		mdev = dev->mdev;
+		mdev_port_num = 1;
+	}
 
 	/* Declaring support of extended counters */
 	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
 		struct ib_class_port_info cpi = {};
 
 		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
 		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
-		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+		err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+		goto done;
 	}
 
 	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
@@ -218,11 +232,13 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 
 		out_cnt = kvzalloc(sz, GFP_KERNEL);
-		if (!out_cnt)
-			return IB_MAD_RESULT_FAILURE;
+		if (!out_cnt) {
+			err = IB_MAD_RESULT_FAILURE;
+			goto done;
+		}
 
 		err = mlx5_core_query_vport_counter(mdev, 0, 0,
-						    port_num, out_cnt, sz);
+						    mdev_port_num, out_cnt, sz);
 		if (!err)
 			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
 	} else {
@@ -231,20 +247,23 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
 		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
 		out_cnt = kvzalloc(sz, GFP_KERNEL);
-		if (!out_cnt)
-			return IB_MAD_RESULT_FAILURE;
+		if (!out_cnt) {
+			err = IB_MAD_RESULT_FAILURE;
+			goto done;
+		}
 
-		err = mlx5_core_query_ib_ppcnt(mdev, port_num,
+		err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
 					       out_cnt, sz);
 		if (!err)
 			pma_cnt_assign(pma_cnt, out_cnt);
 	}
 	kvfree(out_cnt);
-	if (err)
-		return IB_MAD_RESULT_FAILURE;
-
-	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+	err = err ? IB_MAD_RESULT_FAILURE :
+		    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+done:
+	if (native_port)
+		mlx5_ib_put_native_port_mdev(dev, port_num);
+	return err;
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -256,8 +275,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	const struct ib_mad *in_mad = (const struct ib_mad *)in;
 	struct ib_mad *out_mad = (struct ib_mad *)out;
-	struct mlx5_core_dev *mdev;
-	u8 mdev_port_num;
 	int ret;
 
 	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
@@ -266,19 +283,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 
 	memset(out_mad->data, 0, sizeof(out_mad->data));
 
-	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
-	if (!mdev)
-		return IB_MAD_RESULT_FAILURE;
-
-	if (MLX5_CAP_GEN(mdev, vport_counters) &&
+	if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
 	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
 	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
-		ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
+		ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
 	} else {
 		ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
 				  in_mad, out_mad);
 	}
 
-	mlx5_ib_put_native_port_mdev(dev, port_num);
-
 	return ret;
 }

View file

@@ -435,6 +435,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
 		qp->resp.va = reth_va(pkt);
 		qp->resp.rkey = reth_rkey(pkt);
 		qp->resp.resid = reth_len(pkt);
+		qp->resp.length = reth_len(pkt);
 	}
 	access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
 					     : IB_ACCESS_REMOTE_WRITE;
@@ -859,7 +860,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
 				pkt->mask & RXE_WRITE_MASK) ?
 					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
 		wc->vendor_err = 0;
-		wc->byte_len = wqe->dma.length - wqe->dma.resid;
+		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
+				pkt->mask & RXE_WRITE_MASK) ?
+					qp->resp.length : wqe->dma.length - wqe->dma.resid;
 
 		/* fields after byte_len are different between kernel and user
 		 * space

View file

@@ -212,6 +212,7 @@ struct rxe_resp_info {
 	struct rxe_mem		*mr;
 	u32			resid;
 	u32			rkey;
+	u32			length;
 	u64			atomic_orig;
 
 	/* SRQ only */

View file

@@ -1892,12 +1892,6 @@ static void ipoib_child_init(struct net_device *ndev)
 	struct ipoib_dev_priv *priv = ipoib_priv(ndev);
 	struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
 
-	dev_hold(priv->parent);
-
-	down_write(&ppriv->vlan_rwsem);
-	list_add_tail(&priv->list, &ppriv->child_intfs);
-	up_write(&ppriv->vlan_rwsem);
-
 	priv->max_ib_mtu = ppriv->max_ib_mtu;
 	set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 	memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
@@ -1940,6 +1934,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
 	if (rc) {
 		pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
 			priv->ca->name, priv->dev->name, priv->port, rc);
+		return rc;
+	}
+
+	if (priv->parent) {
+		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+		dev_hold(priv->parent);
+		down_write(&ppriv->vlan_rwsem);
+		list_add_tail(&priv->list, &ppriv->child_intfs);
+		up_write(&ppriv->vlan_rwsem);
 	}
 
 	return 0;
@@ -1957,6 +1962,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
 	 */
 	WARN_ON(!list_empty(&priv->child_intfs));
 
+	if (priv->parent) {
+		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+		down_write(&ppriv->vlan_rwsem);
+		list_del(&priv->list);
+		up_write(&ppriv->vlan_rwsem);
+	}
+
 	ipoib_neigh_hash_uninit(dev);
 
 	ipoib_ib_dev_cleanup(dev);
@@ -1968,15 +1981,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
 		priv->wq = NULL;
 	}
 
-	if (priv->parent) {
-		struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
-
-		down_write(&ppriv->vlan_rwsem);
-		list_del(&priv->list);
-		up_write(&ppriv->vlan_rwsem);
-
+	if (priv->parent)
 		dev_put(priv->parent);
-	}
 }
 
 static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)

View file

@@ -409,11 +409,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
 
 	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
 		if (!strncmp(name, mbox_name, strlen(name)))
-			break;
+			return mbox_request_channel(cl, index);
 		index++;
 	}
 
-	return mbox_request_channel(cl, index);
+	dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+		__func__, name);
+	return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

View file

@@ -629,13 +629,18 @@ static int __init memstick_init(void)
 		return -ENOMEM;
 
 	rc = bus_register(&memstick_bus_type);
-	if (!rc)
-		rc = class_register(&memstick_host_class);
+	if (rc)
+		goto error_destroy_workqueue;
 
-	if (!rc)
-		return 0;
+	rc = class_register(&memstick_host_class);
+	if (rc)
+		goto error_bus_unregister;
 
+	return 0;
+
+error_bus_unregister:
 	bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
 	destroy_workqueue(workqueue);
 
 	return rc;

View file

@@ -996,7 +996,7 @@ int arizona_dev_init(struct arizona *arizona)
 	unsigned int reg, val;
 	int (*apply_patch)(struct arizona *) = NULL;
 	const struct mfd_cell *subdevs = NULL;
-	int n_subdevs, ret, i;
+	int n_subdevs = 0, ret, i;
 
 	dev_set_drvdata(arizona->dev, arizona);
 	mutex_init(&arizona->clk_lock);

View file

@@ -112,6 +112,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
 	pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
 						 &hi655x_regmap_config);
+	if (IS_ERR(pmic->regmap))
+		return PTR_ERR(pmic->regmap);
 
 	regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
 	if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {

View file

@@ -278,6 +278,7 @@ const struct of_device_id madera_of_match[] = {
 	{ .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
 	{}
 };
+MODULE_DEVICE_TABLE(of, madera_of_match);
 EXPORT_SYMBOL_GPL(madera_of_match);
 
 static int madera_get_reset_gpio(struct madera *madera)

View file

@@ -179,6 +179,7 @@ static int mfd_add_device(struct device *parent, int id,
 		for_each_child_of_node(parent->of_node, np) {
 			if (of_device_is_compatible(np, cell->of_compatible)) {
 				pdev->dev.of_node = np;
+				pdev->dev.fwnode = &np->fwnode;
 				break;
 			}
 		}

View file

@@ -141,6 +141,9 @@
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
+#define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
+#define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
+
 /*
  * MEI HW Section
  */

View file

@@ -107,6 +107,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
+	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
 	/* required last entry */
 	{0, }
 };

View file

@@ -290,11 +290,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 {
 	struct sdhci_pci_chip *chip;
 	struct sdhci_host *host;
-	u32 reg;
+	u32 reg, caps;
 	int ret;
 
 	chip = slot->chip;
 	host = slot->host;
+
+	caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+
+	/*
+	 * mmc_select_bus_width() will test the bus to determine the actual bus
+	 * width.
+	 */
+	if (caps & SDHCI_CAN_DO_8BIT)
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
 	switch (chip->pdev->device) {
 	case PCI_DEVICE_ID_O2_SDS0:
 	case PCI_DEVICE_ID_O2_SEABIRD0:

View file

@@ -1065,14 +1065,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 	}
 }
 
-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
-				    struct cudbg_buffer *dbg_buff,
-				    struct cudbg_error *cudbg_err,
-				    u8 mem_type)
+static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+					   struct cudbg_error *cudbg_err,
+					   u8 mem_type)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_meminfo mem_info;
-	unsigned long size;
 	u8 mc_idx;
 	int rc;
 
@@ -1086,7 +1084,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 	if (rc)
 		return rc;
 
-	size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+}
+
+static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+				    struct cudbg_buffer *dbg_buff,
+				    struct cudbg_error *cudbg_err,
+				    u8 mem_type)
+{
+	unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+
 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
 				 cudbg_err);
 }

View file

@@ -86,7 +86,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
 {
 	nvdimm_bus_lock(&nvdimm_bus->dev);
 	if (--nvdimm_bus->probe_active == 0)
-		wake_up(&nvdimm_bus->probe_wait);
+		wake_up(&nvdimm_bus->wait);
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
 }
@@ -348,7 +348,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
 		return NULL;
 	INIT_LIST_HEAD(&nvdimm_bus->list);
 	INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
-	init_waitqueue_head(&nvdimm_bus->probe_wait);
+	init_waitqueue_head(&nvdimm_bus->wait);
 	nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
 	mutex_init(&nvdimm_bus->reconfig_mutex);
 	badrange_init(&nvdimm_bus->badrange);
@@ -418,6 +418,9 @@ static int nd_bus_remove(struct device *dev)
 	list_del_init(&nvdimm_bus->list);
 	mutex_unlock(&nvdimm_bus_list_mutex);
 
+	wait_event(nvdimm_bus->wait,
+			atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
 	nd_synchronize();
 	device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -838,7 +841,7 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
 		if (nvdimm_bus->probe_active == 0)
 			break;
 		nvdimm_bus_unlock(&nvdimm_bus->dev);
-		wait_event(nvdimm_bus->probe_wait,
+		wait_event(nvdimm_bus->wait,
 				nvdimm_bus->probe_active == 0);
 		nvdimm_bus_lock(&nvdimm_bus->dev);
 	} while (true);
@@ -1068,24 +1071,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	return rc;
 }
 
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	long id = (long) file->private_data;
-	int rc = -ENXIO, ro;
-	struct nvdimm_bus *nvdimm_bus;
-
-	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
-	mutex_lock(&nvdimm_bus_list_mutex);
-	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
-		if (nvdimm_bus->id == id) {
-			rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
-			break;
-		}
-	}
-	mutex_unlock(&nvdimm_bus_list_mutex);
-
-	return rc;
-}
+enum nd_ioctl_mode {
+	BUS_IOCTL,
+	DIMM_IOCTL,
+};
 
 static int match_dimm(struct device *dev, void *data)
 {
@@ -1100,31 +1089,62 @@ static int match_dimm(struct device *dev, void *data)
 	return 0;
 }
 
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+		enum nd_ioctl_mode mode)
 {
-	int rc = -ENXIO, ro;
-	struct nvdimm_bus *nvdimm_bus;
+	struct nvdimm_bus *nvdimm_bus, *found = NULL;
+	long id = (long) file->private_data;
+	struct nvdimm *nvdimm = NULL;
+	int rc, ro;
 
 	ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
 	mutex_lock(&nvdimm_bus_list_mutex);
 	list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
-		struct device *dev = device_find_child(&nvdimm_bus->dev,
-				file->private_data, match_dimm);
-		struct nvdimm *nvdimm;
+		if (mode == DIMM_IOCTL) {
+			struct device *dev;
 
-		if (!dev)
-			continue;
+			dev = device_find_child(&nvdimm_bus->dev,
+					file->private_data, match_dimm);
+			if (!dev)
+				continue;
+			nvdimm = to_nvdimm(dev);
+			found = nvdimm_bus;
+		} else if (nvdimm_bus->id == id) {
+			found = nvdimm_bus;
+		}
 
-		nvdimm = to_nvdimm(dev);
-		rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
-		put_device(dev);
-		break;
+		if (found) {
+			atomic_inc(&nvdimm_bus->ioctl_active);
+			break;
+		}
 	}
 	mutex_unlock(&nvdimm_bus_list_mutex);
 
+	if (!found)
+		return -ENXIO;
+
+	nvdimm_bus = found;
+	rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+	if (nvdimm)
+		put_device(&nvdimm->dev);
+	if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+		wake_up(&nvdimm_bus->wait);
+
 	return rc;
 }
 
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
 static int nd_open(struct inode *inode, struct file *file)
 {
 	long minor = iminor(inode);
@@ -1136,16 +1156,16 @@ static int nd_open(struct inode *inode, struct file *file)
 static const struct file_operations nvdimm_bus_fops = {
 	.owner = THIS_MODULE,
 	.open = nd_open,
-	.unlocked_ioctl = nd_ioctl,
-	.compat_ioctl = nd_ioctl,
+	.unlocked_ioctl = bus_ioctl,
+	.compat_ioctl = bus_ioctl,
 	.llseek = noop_llseek,
 };
 
 static const struct file_operations nvdimm_fops = {
 	.owner = THIS_MODULE,
 	.open = nd_open,
-	.unlocked_ioctl = nvdimm_ioctl,
-	.compat_ioctl = nvdimm_ioctl,
+	.unlocked_ioctl = dimm_ioctl,
+	.compat_ioctl = dimm_ioctl,
 	.llseek = noop_llseek,
 };

View file

@@ -25,10 +25,11 @@ extern int nvdimm_major;
 
 struct nvdimm_bus {
 	struct nvdimm_bus_descriptor *nd_desc;
-	wait_queue_head_t probe_wait;
+	wait_queue_head_t wait;
 	struct list_head list;
 	struct device dev;
 	int id, probe_active;
+	atomic_t ioctl_active;
 	struct list_head mapping_list;
 	struct mutex reconfig_mutex;
 	struct badrange badrange;

View file

@@ -26,6 +26,7 @@
 #include <linux/types.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
 
 #include "../../pci.h"
 #include "pcie-designware.h"


@@ -508,6 +508,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
         return err;
     }
 
+    /* setup bus numbers */
+    value = csr_readl(pcie, PCI_PRIMARY_BUS);
+    value &= 0xff000000;
+    value |= 0x00ff0100;
+    csr_writel(pcie, value, PCI_PRIMARY_BUS);
+
     /*
      * program Bus Master Enable Bit in Command Register in PAB Config
      * Space
@@ -547,7 +553,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
             resource_size(pcie->ob_io_res));
 
     /* memory inbound translation window */
-    program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+    program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
 
     /* Get the I/O and memory ranges from DT */
     resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
@@ -559,11 +565,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
         if (type) {
             /* configure outbound translation window */
             program_ob_windows(pcie, pcie->ob_wins_configured,
-                    win->res->start, 0, type,
-                    resource_size(win->res));
+                    win->res->start,
+                    win->res->start - win->offset,
+                    type, resource_size(win->res));
         }
     }
 
+    /* fixup for PCIe class register */
+    value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+    value &= 0xff;
+    value |= (PCI_CLASS_BRIDGE_PCI << 16);
+    csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
     /* setup MSI hardware registers */
     mobiveil_pcie_enable_msi(pcie);
 
@@ -804,9 +817,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
         goto error;
     }
 
-    /* fixup for PCIe class register */
-    csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
-
     /* initialize the IRQ domains */
     ret = mobiveil_pcie_init_irq_domain(pcie);
     if (ret) {


@@ -483,15 +483,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
     int i;
 
     mutex_lock(&msi->lock);
-    bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
-                     nr_irqs, 0);
-    if (bit >= INT_PCI_MSI_NR) {
+    bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+                      get_count_order(nr_irqs));
+    if (bit < 0) {
         mutex_unlock(&msi->lock);
         return -ENOSPC;
     }
 
-    bitmap_set(msi->bitmap, bit, nr_irqs);
-
     for (i = 0; i < nr_irqs; i++) {
         irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
                     domain->host_data, handle_simple_irq,
@@ -509,7 +507,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
     struct nwl_msi *msi = &pcie->msi;
 
     mutex_lock(&msi->lock);
-    bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+    bitmap_release_region(msi->bitmap, data->hwirq,
+                  get_count_order(nr_irqs));
     mutex_unlock(&msi->lock);
 }
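The switch from bitmap_find_next_zero_area()/bitmap_set() to bitmap_find_free_region() matters because the latter hands back a naturally aligned power-of-two block, which is what PCI Multi-MSI requires of the hwirq base; the old pair guaranteed contiguity but not alignment. An illustrative (hypothetical) allocation of four vectors using the same APIs:

    DECLARE_BITMAP(map, INT_PCI_MSI_NR);
    int bit;

    /* get_count_order(4) == 2, so this claims an aligned 4-bit region */
    bit = bitmap_find_free_region(map, INT_PCI_MSI_NR, get_count_order(4));
    if (bit < 0)
        return -ENOSPC;
    /* ... use hwirqs bit..bit+3 ... */
    bitmap_release_region(map, bit, get_count_order(4));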


@@ -414,6 +414,9 @@ static int pci_device_probe(struct device *dev)
     struct pci_dev *pci_dev = to_pci_dev(dev);
     struct pci_driver *drv = to_pci_driver(dev->driver);
 
+    if (!pci_device_can_probe(pci_dev))
+        return -ENODEV;
+
     pci_assign_irq(pci_dev);
 
     error = pcibios_alloc_irq(pci_dev);
@@ -421,12 +424,10 @@ static int pci_device_probe(struct device *dev)
         return error;
 
     pci_dev_get(pci_dev);
-    if (pci_device_can_probe(pci_dev)) {
-        error = __pci_device_probe(drv, pci_dev);
-        if (error) {
-            pcibios_free_irq(pci_dev);
-            pci_dev_put(pci_dev);
-        }
+    error = __pci_device_probe(drv, pci_dev);
+    if (error) {
+        pcibios_free_irq(pci_dev);
+        pci_dev_put(pci_dev);
     }
 
     return error;
View file

@@ -478,7 +478,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
     pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
     return count;
 }
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
                             (S_IWUSR|S_IWGRP),
                             NULL, remove_store);


@@ -288,6 +288,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
         error = of_property_read_u32(np, "reg", &channel_num);
         if (error || channel_num > 2) {
             dev_err(dev, "Invalid \"reg\" property\n");
+            of_node_put(np);
             return error;
         }
         channel->select_mask = select_mask[channel_num];
@@ -303,6 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
                            &rcar_gen2_phy_ops);
             if (IS_ERR(phy->phy)) {
                 dev_err(dev, "Failed to create PHY\n");
+                of_node_put(np);
                 return PTR_ERR(phy->phy);
             }
             phy_set_drvdata(phy->phy, phy);


@@ -3172,6 +3172,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
                                 base,
                                 &rockchip_regmap_config);
         }
+        of_node_put(node);
     }
 
     bank->irq = irq_of_parse_and_map(bank->of_node, 0);


@@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv)
     kfree(priv->int_buf.data_buf);
 }
 
-static bool vnt_alloc_bufs(struct vnt_private *priv)
+static int vnt_alloc_bufs(struct vnt_private *priv)
 {
+    int ret = 0;
     struct vnt_usb_send_context *tx_context;
     struct vnt_rcb *rcb;
     int ii;
 
     for (ii = 0; ii < priv->num_tx_context; ii++) {
         tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
-        if (!tx_context)
+        if (!tx_context) {
+            ret = -ENOMEM;
             goto free_tx;
+        }
 
         priv->tx_context[ii] = tx_context;
         tx_context->priv = priv;
@@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
         /* allocate URBs */
         tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
-        if (!tx_context->urb)
+        if (!tx_context->urb) {
+            ret = -ENOMEM;
             goto free_tx;
+        }
 
         tx_context->in_use = false;
     }
 
     for (ii = 0; ii < priv->num_rcb; ii++) {
         priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
-        if (!priv->rcb[ii])
+        if (!priv->rcb[ii]) {
+            ret = -ENOMEM;
             goto free_rx_tx;
+        }
 
         rcb = priv->rcb[ii];
@@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
         /* allocate URBs */
         rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
-        if (!rcb->urb)
+        if (!rcb->urb) {
+            ret = -ENOMEM;
             goto free_rx_tx;
+        }
 
         rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
-        if (!rcb->skb)
+        if (!rcb->skb) {
+            ret = -ENOMEM;
             goto free_rx_tx;
+        }
 
         rcb->in_use = false;
 
         /* submit rx urb */
-        if (vnt_submit_rx_urb(priv, rcb))
+        ret = vnt_submit_rx_urb(priv, rcb);
+        if (ret)
             goto free_rx_tx;
     }
 
     priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
-    if (!priv->interrupt_urb)
+    if (!priv->interrupt_urb) {
+        ret = -ENOMEM;
         goto free_rx_tx;
-
-    priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
-    if (!priv->int_buf.data_buf) {
-        usb_free_urb(priv->interrupt_urb);
-        goto free_rx_tx;
     }
 
-    return true;
+    priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
+    if (!priv->int_buf.data_buf) {
+        ret = -ENOMEM;
+        goto free_rx_tx_urb;
+    }
+
+    return 0;
 
+free_rx_tx_urb:
+    usb_free_urb(priv->interrupt_urb);
 free_rx_tx:
     vnt_free_rx_bufs(priv);
 
 free_tx:
     vnt_free_tx_bufs(priv);
 
-    return false;
+    return ret;
 }
 
 static void vnt_tx_80211(struct ieee80211_hw *hw,


@@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
         status = serial8250_rx_chars(up, status);
     }
     serial8250_modem_status(up);
-    if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
+    if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
+        (up->ier & UART_IER_THRI))
         serial8250_tx_chars(up);
 
     spin_unlock_irqrestore(&port->lock, flags);


@@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port)
             clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
         }
         cpm_uart_initbd(pinfo);
-        cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+        if (IS_SMC(pinfo)) {
+            out_be32(&pinfo->smcup->smc_rstate, 0);
+            out_be32(&pinfo->smcup->smc_tstate, 0);
+            out_be16(&pinfo->smcup->smc_rbptr,
+                 in_be16(&pinfo->smcup->smc_rbase));
+            out_be16(&pinfo->smcup->smc_tbptr,
+                 in_be16(&pinfo->smcup->smc_tbase));
+        } else {
+            cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+        }
     }
 
     /* Install interrupt handler. */
     retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
         (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
 
     /*
-     * In case SMC1 is being relocated...
+     * In case SMC is being relocated...
      */
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
     out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
     out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
     out_be32(&up->smc_rstate, 0);
     out_be32(&up->smc_tstate, 0);
     out_be16(&up->smc_brkcr, 1);    /* number of break chars */
     out_be16(&up->smc_brkec, 0);
-#endif
 
     /* Set up the uart parameters in the
      * parameter ram.
@@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
     out_be16(&up->smc_brkec, 0);
     out_be16(&up->smc_brkcr, 1);
 
-    cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
     /* Set UART mode, 8 bit, no parity, one stop.
      * Enable receive and transmit.
      */


@@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void)
     if (ret)
         return ret;
 
-    return platform_driver_register(&digicolor_uart_platform);
+    ret = platform_driver_register(&digicolor_uart_platform);
+    if (ret)
+        uart_unregister_driver(&digicolor_uart);
+
+    return ret;
 }
 module_init(digicolor_uart_init);


@@ -382,6 +382,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport,
 }
 #endif
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
 {
     *ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
@@ -390,6 +391,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
     mctrl_gpio_set(sport->gpios, sport->port.mctrl);
 }
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
 {
     *ucr2 &= ~UCR2_CTSC;
@@ -399,6 +401,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
     mctrl_gpio_set(sport->gpios, sport->port.mctrl);
 }
 
+/* called with port.lock taken and irqs caller dependent */
 static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2)
 {
     *ucr2 |= UCR2_CTSC;
@@ -1554,6 +1557,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
         old_csize = CS8;
     }
 
+    del_timer_sync(&sport->timer);
+
+    /*
+     * Ask the core to calculate the divisor for us.
+     */
+    baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+    quot = uart_get_divisor(port, baud);
+
+    spin_lock_irqsave(&sport->port.lock, flags);
+
     if ((termios->c_cflag & CSIZE) == CS8)
         ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
     else
@@ -1597,16 +1610,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
             ucr2 |= UCR2_PROE;
     }
 
-    del_timer_sync(&sport->timer);
-
-    /*
-     * Ask the core to calculate the divisor for us.
-     */
-    baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
-    quot = uart_get_divisor(port, baud);
-
-    spin_lock_irqsave(&sport->port.lock, flags);
-
     sport->port.read_status_mask = 0;
     if (termios->c_iflag & INPCK)
         sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);


@@ -491,37 +491,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
 
 static int max310x_set_baud(struct uart_port *port, int baud)
 {
-    unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
+    unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
 
-    /* Check for minimal value for divider */
-    if (div < 16)
-        div = 16;
-
-    if (clk % baud && (div / 16) < 0x8000) {
+    /*
+     * Calculate the integer divisor first. Select a proper mode
+     * in case if the requested baud is too high for the pre-defined
+     * clocks frequency.
+     */
+    div = port->uartclk / baud;
+    if (div < 8) {
+        /* Mode x4 */
+        c = 4;
+        mode = MAX310X_BRGCFG_4XMODE_BIT;
+    } else if (div < 16) {
         /* Mode x2 */
+        c = 8;
         mode = MAX310X_BRGCFG_2XMODE_BIT;
-        clk = port->uartclk * 2;
-        div = clk / baud;
-
-        if (clk % baud && (div / 16) < 0x8000) {
-            /* Mode x4 */
-            mode = MAX310X_BRGCFG_4XMODE_BIT;
-            clk = port->uartclk * 4;
-            div = clk / baud;
-        }
+    } else {
+        c = 16;
     }
 
-    max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
-    max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
-    max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+    /* Calculate the divisor in accordance with the fraction coefficient */
+    div /= c;
+    F = c*baud;
 
-    return DIV_ROUND_CLOSEST(clk, div);
+    /* Calculate the baud rate fraction */
+    if (div > 0)
+        frac = (16*(port->uartclk % F)) / F;
+    else
+        div = 1;
+
+    max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
+    max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
+    max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
+
+    /* Return the actual baud rate we just programmed */
+    return (16*port->uartclk) / (c*(16*div + frac));
 }
 
 static int max310x_update_best_err(unsigned long f, long *besterr)
 {
     /* Use baudrate 115200 for calculate error */
-    long err = f % (115200 * 16);
+    long err = f % (460800 * 16);
 
     if ((*besterr < 0) || (*besterr > err)) {
         *besterr = err;
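To see the new arithmetic with a hypothetical 1.8432 MHz uartclk: requesting 115200 baud gives div = 16, so c = 16, an integer divisor of 1 and frac = 0, and the function reports exactly 115200. Requesting 250000 baud gives div = 7, which selects x4 mode (c = 4): the integer divisor becomes 1, F = 1000000, frac = (16 * 843200) / 1000000 = 13, and the reported rate is (16 * 1843200) / (4 * (16 + 13)) = 254234, so callers see the real programmed rate instead of an optimistic rounding.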


@@ -383,10 +383,14 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
 
 static inline void msm_wait_for_xmitr(struct uart_port *port)
 {
+    unsigned int timeout = 500000;
+
     while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
         if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
             break;
         udelay(1);
+        if (!timeout--)
+            break;
     }
     msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
 }


@@ -1738,6 +1738,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
 {
     struct uart_state *state = container_of(port, struct uart_state, port);
     struct uart_port *uport;
+    int ret;
 
     uport = uart_port_check(state);
     if (!uport || uport->flags & UPF_DEAD)
@@ -1748,7 +1749,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
     /*
      * Start up the serial port.
      */
-    return uart_startup(tty, state, 0);
+    ret = uart_startup(tty, state, 0);
+    if (ret > 0)
+        tty_port_set_active(port, 1);
+
+    return ret;
 }
 
 static const char *uart_type(struct uart_port *port)


@@ -12,6 +12,7 @@
 #include <linux/termios.h>
 #include <linux/serial_core.h>
 #include <linux/module.h>
+#include <linux/property.h>
 
 #include "serial_mctrl_gpio.h"
 
@@ -115,6 +116,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
 
     for (i = 0; i < UART_GPIO_MAX; i++) {
         enum gpiod_flags flags;
+        char *gpio_str;
+        bool present;
+
+        /* Check if GPIO property exists and continue if not */
+        gpio_str = kasprintf(GFP_KERNEL, "%s-gpios",
+                     mctrl_gpios_desc[i].name);
+        if (!gpio_str)
+            continue;
+
+        present = device_property_present(dev, gpio_str);
+        kfree(gpio_str);
+        if (!present)
+            continue;
 
         if (mctrl_gpios_desc[i].dir_out)
             flags = GPIOD_OUT_LOW;


@@ -1376,6 +1376,7 @@ static void work_fn_tx(struct work_struct *work)
     struct circ_buf *xmit = &port->state->xmit;
     unsigned long flags;
     dma_addr_t buf;
+    int head, tail;
 
     /*
      * DMA is idle now.
@@ -1385,16 +1386,23 @@ static void work_fn_tx(struct work_struct *work)
      * consistent xmit buffer state.
      */
     spin_lock_irq(&port->lock);
-    buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+    head = xmit->head;
+    tail = xmit->tail;
+    buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
     s->tx_dma_len = min_t(unsigned int,
-        CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
-        CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-    spin_unlock_irq(&port->lock);
+        CIRC_CNT(head, tail, UART_XMIT_SIZE),
+        CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+    if (!s->tx_dma_len) {
+        /* Transmit buffer has been flushed */
+        spin_unlock_irq(&port->lock);
+        return;
+    }
 
     desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
                        DMA_MEM_TO_DEV,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
     if (!desc) {
+        spin_unlock_irq(&port->lock);
         dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
         goto switch_to_pio;
     }
@@ -1402,18 +1410,18 @@ static void work_fn_tx(struct work_struct *work)
     dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
                    DMA_TO_DEVICE);
 
-    spin_lock_irq(&port->lock);
     desc->callback = sci_dma_tx_complete;
     desc->callback_param = s;
-    spin_unlock_irq(&port->lock);
     s->cookie_tx = dmaengine_submit(desc);
     if (dma_submit_error(s->cookie_tx)) {
+        spin_unlock_irq(&port->lock);
         dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
         goto switch_to_pio;
     }
 
+    spin_unlock_irq(&port->lock);
     dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
-        __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+        __func__, xmit->buf, tail, head, s->cookie_tx);
 
     dma_async_issue_pending(chan);
     return;
@@ -1633,11 +1641,18 @@ static void sci_free_dma(struct uart_port *port)
 
 static void sci_flush_buffer(struct uart_port *port)
 {
+    struct sci_port *s = to_sci_port(port);
+
     /*
      * In uart_flush_buffer(), the xmit circular buffer has just been
-     * cleared, so we have to reset tx_dma_len accordingly.
+     * cleared, so we have to reset tx_dma_len accordingly, and stop any
+     * pending transfers
      */
-    to_sci_port(port)->tx_dma_len = 0;
+    s->tx_dma_len = 0;
+    if (s->chan_tx) {
+        dmaengine_terminate_async(s->chan_tx);
+        s->cookie_tx = -EINVAL;
+    }
 }
 #else /* !CONFIG_SERIAL_SH_SCI_DMA */
 static inline void sci_request_dma(struct uart_port *port)


@@ -3975,6 +3975,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
  * control transfers to set the hub timeout or enable device-initiated U1/U2
  * will be successful.
  *
+ * If the control transfer to enable device-initiated U1/U2 entry fails, then
+ * hub-initiated U1/U2 will be disabled.
+ *
  * If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
  * driver know about it. If that call fails, it should be harmless, and just
  * take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
@@ -4029,23 +4032,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
          * host know that this link state won't be enabled.
          */
         hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
-    } else {
-        /* Only a configured device will accept the Set Feature
-         * U1/U2_ENABLE
-         */
-        if (udev->actconfig)
-            usb_set_device_initiated_lpm(udev, state, true);
-
-        /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
-         * hub-initiated LPM is enabled. Thus, LPM is enabled no
-         * matter the result of usb_set_device_initiated_lpm().
-         * The only difference is whether device is able to initiate
-         * LPM.
-         */
+        return;
+    }
+
+    /* Only a configured device will accept the Set Feature
+     * U1/U2_ENABLE
+     */
+    if (udev->actconfig &&
+        usb_set_device_initiated_lpm(udev, state, true) == 0) {
         if (state == USB3_LPM_U1)
             udev->usb3_lpm_u1_enabled = 1;
         else if (state == USB3_LPM_U2)
             udev->usb3_lpm_u2_enabled = 1;
+    } else {
+        /* Don't request U1/U2 entry if the device
+         * cannot transition to U1/U2.
+         */
+        usb_set_lpm_timeout(udev, state, 0);
+        hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
     }
 }


@@ -1202,11 +1202,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
     ffs_log("enter");
 
     if (!is_sync_kiocb(kiocb)) {
-        p = kmalloc(sizeof(io_data), GFP_KERNEL);
+        p = kzalloc(sizeof(io_data), GFP_KERNEL);
         if (unlikely(!p))
             return -ENOMEM;
         p->aio = true;
     } else {
+        memset(p, 0, sizeof(*p));
         p->aio = false;
     }
 
@@ -1245,11 +1246,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
     ffs_log("enter");
 
     if (!is_sync_kiocb(kiocb)) {
-        p = kmalloc(sizeof(io_data), GFP_KERNEL);
+        p = kzalloc(sizeof(io_data), GFP_KERNEL);
         if (unlikely(!p))
             return -ENOMEM;
         p->aio = true;
     } else {
+        memset(p, 0, sizeof(*p));
         p->aio = false;
     }


@@ -159,7 +159,7 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
     return result;
 
 error_set_cluster_id:
-    wusb_cluster_id_put(wusbhc->cluster_id);
+    wusb_cluster_id_put(addr);
 error_cluster_id_get:
     goto out;


@@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void)
 {
     unsigned long flags;
     struct amd_chipset_info info;
-    int ret;
+    int need_pll_quirk = 0;
 
     spin_lock_irqsave(&amd_lock, flags);
 
@@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void)
     spin_unlock_irqrestore(&amd_lock, flags);
 
     if (!amd_chipset_sb_type_init(&info)) {
-        ret = 0;
         goto commit;
     }
 
-    /* Below chipset generations needn't enable AMD PLL quirk */
-    if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
-            info.sb_type.gen == AMD_CHIPSET_SB600 ||
-            info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
-            (info.sb_type.gen == AMD_CHIPSET_SB700 &&
-            info.sb_type.rev > 0x3b)) {
+    switch (info.sb_type.gen) {
+    case AMD_CHIPSET_SB700:
+        need_pll_quirk = info.sb_type.rev <= 0x3B;
+        break;
+    case AMD_CHIPSET_SB800:
+    case AMD_CHIPSET_HUDSON2:
+    case AMD_CHIPSET_BOLTON:
+        need_pll_quirk = 1;
+        break;
+    default:
+        need_pll_quirk = 0;
+        break;
+    }
+
+    if (!need_pll_quirk) {
         if (info.smbus_dev) {
             pci_dev_put(info.smbus_dev);
             info.smbus_dev = NULL;
         }
-        ret = 0;
         goto commit;
     }
 
@@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void)
         }
     }
 
-    ret = info.probe_result = 1;
+    need_pll_quirk = info.probe_result = 1;
     printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
 
 commit:
@@ -263,7 +270,7 @@ int usb_amd_find_chipset_info(void)
         /* Mark that we where here */
         amd_chipset.probe_count++;
-        ret = amd_chipset.probe_result;
+        need_pll_quirk = amd_chipset.probe_result;
 
         spin_unlock_irqrestore(&amd_lock, flags);
 
@@ -277,7 +284,7 @@ int usb_amd_find_chipset_info(void)
         spin_unlock_irqrestore(&amd_lock, flags);
     }
 
-    return ret;
+    return need_pll_quirk;
 }
 EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);


@@ -388,10 +388,31 @@ static noinline int add_async_extent(struct async_cow *cow,
     return 0;
 }
 
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+    if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+        BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+        return false;
+    return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
 static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
 {
     struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 
+    if (!inode_can_compress(inode)) {
+        WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+            KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+            btrfs_ino(BTRFS_I(inode)));
+        return 0;
+    }
+
     /* force compress */
     if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
         return 1;
@@ -1596,7 +1617,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
     } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
         ret = run_delalloc_nocow(inode, locked_page, start, end,
                      page_started, 0, nr_written);
-    } else if (!inode_need_compress(inode, start, end)) {
+    } else if (!inode_can_compress(inode) ||
+           !inode_need_compress(inode, start, end)) {
         ret = cow_file_range(inode, locked_page, start, end, end,
                       page_started, nr_written, 1, NULL);
     } else {
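Note the double guard in run_delalloc_range(): inode_can_compress() is the hard, flag-based exclusion (NODATACOW/NODATASUM), while inode_need_compress() keeps the policy checks (force-compress, defrag, properties, heuristics) and now only WARNs if it is ever reached with an incompatible inode. Condensed, the resulting decision ladder looks like this (a sketch, not a verbatim hunk; in this kernel the final branch is the async-compression path):

    if (/* nocow or prealloc path applies */)
        ret = run_delalloc_nocow(...);
    else if (!inode_can_compress(inode) ||
             !inode_need_compress(inode, start, end))
        ret = cow_file_range(...);       /* plain COW, never compressed */
    else
        ret = cow_file_range_async(...); /* compress in the background */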


@@ -1630,8 +1630,10 @@ static void clean_writequeues(void)
 
 static void work_stop(void)
 {
-    destroy_workqueue(recv_workqueue);
-    destroy_workqueue(send_workqueue);
+    if (recv_workqueue)
+        destroy_workqueue(recv_workqueue);
+    if (send_workqueue)
+        destroy_workqueue(send_workqueue);
 }
 
 static int work_start(void)
@@ -1691,13 +1693,17 @@ static void work_flush(void)
     struct hlist_node *n;
     struct connection *con;
 
-    flush_workqueue(recv_workqueue);
-    flush_workqueue(send_workqueue);
+    if (recv_workqueue)
+        flush_workqueue(recv_workqueue);
+    if (send_workqueue)
+        flush_workqueue(send_workqueue);
     do {
         ok = 1;
         foreach_conn(stop_conn);
-        flush_workqueue(recv_workqueue);
-        flush_workqueue(send_workqueue);
+        if (recv_workqueue)
+            flush_workqueue(recv_workqueue);
+        if (send_workqueue)
+            flush_workqueue(send_workqueue);
         for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
             hlist_for_each_entry_safe(con, n,
                           &connection_hash[i], list) {


@@ -3393,6 +3393,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
         seg_i = CURSEG_I(sbi, i);
         segno = le32_to_cpu(ckpt->cur_data_segno[i]);
         blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
+        if (blk_off > ENTRIES_IN_SUM) {
+            f2fs_bug_on(sbi, 1);
+            f2fs_put_page(page, 1);
+            return -EFAULT;
+        }
         seg_i->next_segno = segno;
         reset_curseg(sbi, i, 0);
         seg_i->alloc_type = ckpt->alloc_type[i];


@@ -148,10 +148,13 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
     /*
      * For queues with unlimited length lost events are not expected and
      * can possibly have security implications. Avoid losing events when
-     * memory is short.
+     * memory is short. For the limited size queues, avoid OOM killer in the
+     * target monitoring memcg as it may have security repercussion.
      */
     if (group->max_events == UINT_MAX)
         gfp |= __GFP_NOFAIL;
+    else
+        gfp |= __GFP_RETRY_MAYFAIL;
 
     /* Whoever is interested in the event, pays for the allocation. */
     memalloc_use_memcg(group->memcg);


@@ -99,9 +99,13 @@ int inotify_handle_event(struct fsnotify_group *group,
     i_mark = container_of(inode_mark, struct inotify_inode_mark,
                   fsn_mark);
 
-    /* Whoever is interested in the event, pays for the allocation. */
+    /*
+     * Whoever is interested in the event, pays for the allocation. Do not
+     * trigger OOM killer in the target monitoring memcg as it may have
+     * security repercussion.
+     */
     memalloc_use_memcg(group->memcg);
-    event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
+    event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
     memalloc_unuse_memcg();
 
     if (unlikely(!event)) {


@@ -383,6 +383,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
                 override_cred->cap_permitted;
     }
 
+    /*
+     * The new set of credentials can *only* be used in
+     * task-synchronous circumstances, and does not need
+     * RCU freeing, unless somebody then takes a separate
+     * reference to it.
+     *
+     * NOTE! This is _only_ true because this credential
+     * is used purely for override_creds() that installs
+     * it as the subjective cred. Other threads will be
+     * accessing ->real_cred, not the subjective cred.
+     *
+     * If somebody _does_ make a copy of this (using the
+     * 'get_current_cred()' function), that will clear the
+     * non_rcu field, because now that other user may be
+     * expecting RCU freeing. But normal thread-synchronous
+     * cred accesses will keep things non-RCY.
+     */
+    override_cred->non_rcu = 1;
+
     old_cred = override_creds(override_cred);
 retry:
     res = user_path_at(dfd, filename, lookup_flags, &path);


@@ -2159,9 +2159,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
         goto out;
 
     if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
-        down_read(&mm->mmap_sem);
-        exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
-        up_read(&mm->mmap_sem);
+        status = down_read_killable(&mm->mmap_sem);
+        if (!status) {
+            exact_vma_exists = !!find_exact_vma(mm, vm_start,
+                                vm_end);
+            up_read(&mm->mmap_sem);
+        }
     }
 
     mmput(mm);
@@ -2207,8 +2210,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
     if (rc)
         goto out_mmput;
 
+    rc = down_read_killable(&mm->mmap_sem);
+    if (rc)
+        goto out_mmput;
+
     rc = -ENOENT;
-    down_read(&mm->mmap_sem);
     vma = find_exact_vma(mm, vm_start, vm_end);
     if (vma && vma->vm_file) {
         *path = vma->vm_file->f_path;
@@ -2304,7 +2310,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
     if (!mm)
         goto out_put_task;
 
-    down_read(&mm->mmap_sem);
+    result = ERR_PTR(-EINTR);
+    if (down_read_killable(&mm->mmap_sem))
+        goto out_put_mm;
+
+    result = ERR_PTR(-ENOENT);
     vma = find_exact_vma(mm, vm_start, vm_end);
     if (!vma)
         goto out_no_vma;
@@ -2315,6 +2325,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
 
 out_no_vma:
     up_read(&mm->mmap_sem);
+out_put_mm:
     mmput(mm);
 out_put_task:
     put_task_struct(task);
@@ -2356,7 +2367,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
     mm = get_task_mm(task);
     if (!mm)
         goto out_put_task;
-    down_read(&mm->mmap_sem);
+
+    ret = down_read_killable(&mm->mmap_sem);
+    if (ret) {
+        mmput(mm);
+        goto out_put_task;
+    }
 
     nr_files = 0;


@@ -218,7 +218,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
     if (!mm || !mmget_not_zero(mm))
         return NULL;
 
-    down_read(&mm->mmap_sem);
+    if (down_read_killable(&mm->mmap_sem)) {
+        mmput(mm);
+        return ERR_PTR(-EINTR);
+    }
+
     hold_task_mempolicy(priv);
     priv->tail_vma = get_gate_vma(mm);
 
@@ -890,7 +894,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
 
     memset(&mss, 0, sizeof(mss));
 
-    down_read(&mm->mmap_sem);
+    ret = down_read_killable(&mm->mmap_sem);
+    if (ret)
+        goto out_put_mm;
+
     hold_task_mempolicy(priv);
 
     for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
@@ -907,8 +914,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
     release_task_mempolicy(priv);
     up_read(&mm->mmap_sem);
-    mmput(mm);
 
+out_put_mm:
+    mmput(mm);
 out_put_task:
     put_task_struct(priv->task);
     priv->task = NULL;
@@ -1191,7 +1199,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
             goto out_mm;
         }
 
-        down_read(&mm->mmap_sem);
+        if (down_read_killable(&mm->mmap_sem)) {
+            count = -EINTR;
+            goto out_mm;
+        }
         tlb_gather_mmu(&tlb, mm, 0, -1);
         if (type == CLEAR_REFS_SOFT_DIRTY) {
             for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -1598,7 +1609,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
         /* overflow ? */
         if (end < start_vaddr || end > end_vaddr)
             end = end_vaddr;
-        down_read(&mm->mmap_sem);
+        ret = down_read_killable(&mm->mmap_sem);
+        if (ret)
+            goto out_free;
         ret = walk_page_range(start_vaddr, end, &pagemap_walk);
         up_read(&mm->mmap_sem);
         start_vaddr = end;


@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
     if (!mm || !mmget_not_zero(mm))
         return NULL;
 
-    down_read(&mm->mmap_sem);
+    if (down_read_killable(&mm->mmap_sem)) {
+        mmput(mm);
+        return ERR_PTR(-EINTR);
+    }
+
     /* start from the Nth VMA */
     for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
         if (n-- == 0)
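All of the proc changes above share one shape: mmap_sem is taken killably so a reader stuck behind a long-held writer can still be torn down by a fatal signal, and the mm reference already obtained via get_task_mm()/mmget_not_zero() is dropped on the failure path. Reduced to a sketch:

    if (down_read_killable(&mm->mmap_sem)) {
        mmput(mm);      /* undo the reference we already hold */
        return ERR_PTR(-EINTR);
    }
    /* ... walk the VMAs ... */
    up_read(&mm->mmap_sem);
    mmput(mm);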


@@ -150,7 +150,11 @@ struct cred {
     struct user_struct *user;    /* real user ID subscription */
     struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
     struct group_info *group_info;    /* supplementary groups for euid/fsgid */
-    struct rcu_head    rcu;        /* RCU deletion hook */
+    /* RCU deletion */
+    union {
+        int non_rcu;            /* Can we skip RCU deletion? */
+        struct rcu_head    rcu;    /* RCU deletion hook */
+    };
 } __randomize_layout;
 
 extern void __put_cred(struct cred *);
@@ -248,6 +252,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
 {
     struct cred *nonconst_cred = (struct cred *) cred;
     validate_creds(cred);
+    nonconst_cred->non_rcu = 0;
     return get_new_cred(nonconst_cred);
 }


@@ -310,6 +310,8 @@ struct host1x_device {
     struct list_head clients;
 
     bool registered;
+
+    struct device_dma_parameters dma_parms;
 };
 
 static inline struct host1x_device *to_host1x_device(struct device *dev)


@@ -147,7 +147,10 @@ void __put_cred(struct cred *cred)
     BUG_ON(cred == current->cred);
     BUG_ON(cred == current->real_cred);
 
-    call_rcu(&cred->rcu, put_cred_rcu);
+    if (cred->non_rcu)
+        put_cred_rcu(&cred->rcu);
+    else
+        call_rcu(&cred->rcu, put_cred_rcu);
 }
 EXPORT_SYMBOL(__put_cred);
 
@@ -258,6 +261,7 @@ struct cred *prepare_creds(void)
     old = task->cred;
     memcpy(new, old, sizeof(struct cred));
 
+    new->non_rcu = 0;
     atomic_set(&new->usage, 1);
     set_cred_subscribers(new, 0);
     get_group_info(new->group_info);
@@ -537,7 +541,19 @@ const struct cred *override_creds(const struct cred *new)
     validate_creds(old);
     validate_creds(new);
-    get_cred(new);
+
+    /*
+     * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+     *
+     * That means that we do not clear the 'non_rcu' flag, since
+     * we are only installing the cred into the thread-synchronous
+     * '->cred' pointer, not the '->real_cred' pointer that is
+     * visible to other threads under RCU.
+     *
+     * Also note that we did validate_creds() manually, not depending
+     * on the validation in 'get_cred()'.
+     */
+    get_new_cred((struct cred *)new);
     alter_cred_subscribers(new, 1);
     rcu_assign_pointer(current->cred, new);
     alter_cred_subscribers(old, -1);
@@ -620,6 +636,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
     validate_creds(old);
 
     *new = *old;
+    new->non_rcu = 0;
     atomic_set(&new->usage, 1);
     set_cred_subscribers(new, 0);
     get_uid(new->user);
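Taken together with the cred.h hunk, the faccessat() fast path now looks like this (a sketch using the real helpers, error handling omitted):

    override_cred = prepare_creds();          /* non_rcu starts out 0 */
    override_cred->non_rcu = 1;               /* subjective-cred use only */
    old_cred = override_creds(override_cred); /* ref taken, non_rcu kept */
    /* ... user_path_at() and the access check ... */
    revert_creds(old_cred);
    put_cred(override_cred);   /* last ref: freed synchronously,
                                * no RCU grace period */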


@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-    struct lock_class *class;
     unsigned long nr_unused = 0, nr_uncategorized = 0,
               nr_irq_safe = 0, nr_irq_unsafe = 0,
               nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -210,6 +209,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
               nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
               sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
+    struct lock_class *class;
+
     list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
         if (class->usage_mask == 0)
@@ -241,12 +243,12 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
         if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
             nr_hardirq_read_unsafe++;
 
-#ifdef CONFIG_PROVE_LOCKING
         sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
     }
 
 #ifdef CONFIG_DEBUG_LOCKDEP
     DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
+#endif
 #endif
     seq_printf(m, " lock-classes:          %11lu [max: %lu]\n",
             nr_lock_classes, MAX_LOCKDEP_KEYS);


@@ -458,11 +458,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
         pgd = pgd_offset_k(address);
     else
         pgd = pgd_offset_gate(mm, address);
-    BUG_ON(pgd_none(*pgd));
+    if (pgd_none(*pgd))
+        return -EFAULT;
     p4d = p4d_offset(pgd, address);
-    BUG_ON(p4d_none(*p4d));
+    if (p4d_none(*p4d))
+        return -EFAULT;
     pud = pud_offset(p4d, address);
-    BUG_ON(pud_none(*pud));
+    if (pud_none(*pud))
+        return -EFAULT;
     pmd = pmd_offset(pud, address);
     if (!pmd_present(*pmd))
         return -EFAULT;
@@ -1367,7 +1370,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
 }
 #endif
 
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+                        struct page **pages)
 {
     while ((*nr) - nr_start) {
         struct page *page = pages[--(*nr)];


@@ -588,7 +588,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
     if (in_irq()) {
         object->pid = 0;
         strncpy(object->comm, "hardirq", sizeof(object->comm));
-    } else if (in_softirq()) {
+    } else if (in_serving_softirq()) {
         object->pid = 0;
         strncpy(object->comm, "softirq", sizeof(object->comm));
     } else {


@@ -4982,7 +4982,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
     void *old_buf = buf;
     int write = gup_flags & FOLL_WRITE;
 
-    down_read(&mm->mmap_sem);
+    if (down_read_killable(&mm->mmap_sem))
+        return 0;
+
     /* ignore errors, just check how much was successfully transferred */
     while (len) {
         int bytes, ret, offset;


@@ -316,7 +316,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
      * thanks to mm_take_all_locks().
      */
     spin_lock(&mm->mmu_notifier_mm->lock);
-    hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+    hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
     spin_unlock(&mm->mmu_notifier_mm->lock);
 
     mm_drop_all_locks(mm);


@@ -1779,7 +1779,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
     struct vm_area_struct *vma;
     int write = gup_flags & FOLL_WRITE;
 
-    down_read(&mm->mmap_sem);
+    if (down_read_killable(&mm->mmap_sem))
+        return 0;
 
     /* the access must start within one of the target process's mappings */
     vma = find_vma(mm, addr);


@@ -740,15 +740,20 @@ void release_pages(struct page **pages, int nr)
         if (is_huge_zero_page(page))
             continue;
 
-        /* Device public page can not be huge page */
-        if (is_device_public_page(page)) {
+        if (is_zone_device_page(page)) {
             if (locked_pgdat) {
                 spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                flags);
                 locked_pgdat = NULL;
             }
-            put_devmap_managed_page(page);
-            continue;
+            /*
+             * ZONE_DEVICE pages that return 'false' from
+             * put_devmap_managed_page() do not require special
+             * processing, and instead, expect a call to
+             * put_page_testzero().
+             */
+            if (put_devmap_managed_page(page))
+                continue;
         }
 
         page = compound_head(page);


@@ -217,18 +217,6 @@ static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
     set_channel_pending_send_size(chan,
                       HVS_PKT_LEN(HVS_SEND_BUF_SIZE));
 
-    /* See hvs_stream_has_space(): we must make sure the host has seen
-     * the new pending send size, before we can re-check the writable
-     * bytes.
-     */
-    virt_mb();
-}
-
-static void hvs_clear_channel_pending_send_size(struct vmbus_channel *chan)
-{
-    set_channel_pending_send_size(chan, 0);
-
-    /* Ditto */
     virt_mb();
 }
 
@@ -298,9 +286,6 @@ static void hvs_channel_cb(void *ctx)
     if (hvs_channel_readable(chan))
         sk->sk_data_ready(sk);
 
-    /* See hvs_stream_has_space(): when we reach here, the writable bytes
-     * may be already less than HVS_PKT_LEN(HVS_SEND_BUF_SIZE).
-     */
     if (hv_get_bytes_to_write(&chan->outbound) > 0)
         sk->sk_write_space(sk);
 }
@@ -328,8 +313,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
 
     struct sockaddr_vm addr;
     struct sock *sk, *new = NULL;
-    struct vsock_sock *vnew;
-    struct hvsock *hvs, *hvs_new;
+    struct vsock_sock *vnew = NULL;
+    struct hvsock *hvs = NULL;
+    struct hvsock *hvs_new = NULL;
     int ret;
 
     if_type = &chan->offermsg.offer.if_type;
@@ -388,6 +374,13 @@ static void hvs_open_connection(struct vmbus_channel *chan)
     set_per_channel_state(chan, conn_from_host ? new : sk);
     vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
 
+    /* Set the pending send size to max packet size to always get
+     * notifications from the host when there is enough writable space.
+     * The host is optimized to send notifications only when the pending
+     * size boundary is crossed, and not always.
+     */
+    hvs_set_channel_pending_send_size(chan);
+
     if (conn_from_host) {
         new->sk_state = TCP_ESTABLISHED;
         sk->sk_ack_backlog++;
@@ -651,23 +644,8 @@ static s64 hvs_stream_has_data(struct vsock_sock *vsk)
 static s64 hvs_stream_has_space(struct vsock_sock *vsk)
 {
     struct hvsock *hvs = vsk->trans;
-    struct vmbus_channel *chan = hvs->chan;
-    s64 ret;
 
-    ret = hvs_channel_writable_bytes(chan);
-    if (ret > 0) {
-        hvs_clear_channel_pending_send_size(chan);
-    } else {
-        /* See hvs_channel_cb() */
-        hvs_set_channel_pending_send_size(chan);
-
-        /* Re-check the writable bytes to avoid race */
-        ret = hvs_channel_writable_bytes(chan);
-        if (ret > 0)
-            hvs_clear_channel_pending_send_size(chan);
-    }
-
-    return ret;
+    return hvs_channel_writable_bytes(hvs->chan);
 }
 
 static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
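The simplification holds because the pending send size is now latched once per connection rather than toggled on every space check. The resulting flow, as a sketch:

    /*
     *   hvs_open_connection()
     *     -> hvs_set_channel_pending_send_size(chan)    (once, at connect)
     *   host: raises the channel callback when writable bytes cross
     *         HVS_PKT_LEN(HVS_SEND_BUF_SIZE)
     *   hvs_channel_cb() -> sk->sk_write_space(sk)
     *   hvs_stream_has_space() -> a plain hvs_channel_writable_bytes() query,
     *   with no clear/set/re-check dance left to race against
     */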
