This is the 4.19.78 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl2bbnkACgkQONu9yGCS
aT6bng/+Jvj4gXLq2w+KmeN1SRbNu2ee+GjQsgQR6JZ3/dY5+rzPhuL37Op0fQd6
UwnLhY4TL3PUiRCE8pNaVYI8nDfpxRkohYP+SMtGyoQKmoiy3W/SWe3CgEwniLwg
k9TsuqxUsIUeEdSr6Bjbry0IU4VoZ3MP0cmMc1SrFUqJzFoGoUMHsHmQJvDPiy1f
l7oZUgYrXArcnPhCda6peD9AJUfuIRKAM4BW47WN6Z9moqAAAa60eXN/u/hHp+Qc
w55AZTSxel7CMbLDMnZ6/xWDgY/FHTLjkmhdIl9H6Qi8SbrJwq23zXnBO5xVameQ
4MaLIgrp7M5sohAVFdqAVZYyZrkX91ssVujYwc+I6O1TYdja1Usj2TzdyL/MPqzY
FLM9s3P0C3xKLdlg5gq9BxxnohIIhNmBy069NGsmfFd9jP2o6vFUEjiVxSY7Hp3r
GpZP1ETAfMNHCG3jN3o5EkwyLoQHegFWfLpCIEF++k9gjo2CP00+Lf16RVrSsG47
ILobFW5Wy4RXFyd7M8PrjSyuAtuzkUTzxg6P+6zuEtPwwvZPtadd97dGbA90pKvv
eB1UPHu8/emMhW/8fwDBbpeQbIh0pHtX2yQq/LTItEHGr+YjRTIyH3Z/fST5itre
ZDofsls4A+70TQ5/XOgjlDjco93iUs8KULDzQqvTFIuUIlvk3hQ=
=7fm2
-----END PGP SIGNATURE-----

Merge 4.19.78 into android-4.19-q

Changes in 4.19.78
    tpm: use tpm_try_get_ops() in tpm-sysfs.c.
    tpm: Fix TPM 1.2 Shutdown sequence to prevent future TPM operations
    drm/bridge: tc358767: Increase AUX transfer length limit
    drm/panel: simple: fix AUO g185han01 horizontal blanking
    video: ssd1307fb: Start page range at page_offset
    drm/stm: attach gem fence to atomic state
    drm/panel: check failure cases in the probe func
    drm/rockchip: Check for fast link training before enabling psr
    drm/radeon: Fix EEH during kexec
    gpu: drm: radeon: Fix a possible null-pointer dereference in radeon_connector_set_property()
    PCI: rpaphp: Avoid a sometimes-uninitialized warning
    ipmi_si: Only schedule continuously in the thread in maintenance mode
    clk: qoriq: Fix -Wunused-const-variable
    clk: sunxi-ng: v3s: add missing clock slices for MMC2 module clocks
    drm/amd/display: fix issue where 252-255 values are clipped
    drm/amd/display: reprogram VM config when system resume
    powerpc/powernv/ioda2: Allocate TCE table levels on demand for default DMA window
    clk: actions: Don't reference clk_init_data after registration
    clk: sirf: Don't reference clk_init_data after registration
    clk: sprd: Don't reference clk_init_data after registration
    clk: zx296718: Don't reference clk_init_data after registration
    powerpc/xmon: Check for HV mode when dumping XIVE info from OPAL
    powerpc/rtas: use device model APIs and serialization during LPM
    powerpc/futex: Fix warning: 'oldval' may be used uninitialized in this function
    powerpc/pseries/mobility: use cond_resched when updating device tree
    pinctrl: tegra: Fix write barrier placement in pmx_writel
    powerpc/eeh: Clear stale EEH_DEV_NO_HANDLER flag
    vfio_pci: Restore original state on release
    drm/nouveau/volt: Fix for some cards having 0 maximum voltage
    pinctrl: amd: disable spurious-firing GPIO IRQs
    clk: renesas: mstp: Set GENPD_FLAG_ALWAYS_ON for clock domain
    clk: renesas: cpg-mssr: Set GENPD_FLAG_ALWAYS_ON for clock domain
    drm/amd/display: support spdif
    drm/amdgpu/si: fix ASIC tests
    powerpc/64s/exception: machine check use correct cfar for late handler
    pstore: fs superblock limits
    clk: qcom: gcc-sdm845: Use floor ops for sdcc clks
    powerpc/pseries: correctly track irq state in default idle
    pinctrl: meson-gxbb: Fix wrong pinning definition for uart_c
    arm64: fix unreachable code issue with cmpxchg
    clk: at91: select parent if main oscillator or bypass is enabled
    powerpc: dump kernel log before carrying out fadump or kdump
    mbox: qcom: add APCS child device for QCS404
    clk: sprd: add missing kfree
    scsi: core: Reduce memory required for SCSI logging
    dma-buf/sw_sync: Synchronize signal vs syncpt free
    ext4: fix potential use after free after remounting with noblock_validity
    MIPS: Ingenic: Disable broken BTB lookup optimization.
    MIPS: tlbex: Explicitly cast _PAGE_NO_EXEC to a boolean
    i2c-cht-wc: Fix lockdep warning
    mfd: intel-lpss: Remove D3cold delay
    PCI: tegra: Fix OF node reference leak
    HID: wacom: Fix several minor compiler warnings
    livepatch: Nullify obj->mod in klp_module_coming()'s error path
    ARM: 8898/1: mm: Don't treat faults reported from cache maintenance as writes
    soundwire: intel: fix channel number reported by hardware
    ARM: 8875/1: Kconfig: default to AEABI w/ Clang
    rtc: snvs: fix possible race condition
    rtc: pcf85363/pcf85263: fix regmap error in set_time
    HID: apple: Fix stuck function keys when using FN
    PCI: rockchip: Propagate errors for optional regulators
    PCI: histb: Propagate errors for optional regulators
    PCI: imx6: Propagate errors for optional regulators
    PCI: exynos: Propagate errors for optional PHYs
    security: smack: Fix possible null-pointer dereferences in smack_socket_sock_rcv_skb()
    ARM: 8903/1: ensure that usable memory in bank 0 starts from a PMD-aligned address
    fat: work around race with userspace's read via blockdev while mounting
    pktcdvd: remove warning on attempting to register non-passthrough dev
    hypfs: Fix error number left in struct pointer member
    crypto: hisilicon - Fix double free in sec_free_hw_sgl()
    kbuild: clean compressed initramfs image
    ocfs2: wait for recovering done after direct unlock request
    kmemleak: increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE default to 16K
    arm64: consider stack randomization for mmap base only when necessary
    mips: properly account for stack randomization and stack guard gap
    arm: properly account for stack randomization and stack guard gap
    arm: use STACK_TOP when computing mmap base address
    block: mq-deadline: Fix queue restart handling
    bpf: fix use after free in prog symbol exposure
    cxgb4: Fix out-of-bounds MSI-X info array access
    erspan: remove the incorrect mtu limit for erspan
    hso: fix NULL-deref on tty open
    ipv6: drop incoming packets having a v4mapped source address
    ipv6: Handle missing host route in __ipv6_ifa_notify
    net: ipv4: avoid mixed n_redirects and rate_tokens usage
    net: qlogic: Fix memory leak in ql_alloc_large_buffers
    net: Unpublish sk from sk_reuseport_cb before call_rcu
    nfc: fix memory leak in llcp_sock_bind()
    qmi_wwan: add support for Cinterion CLS8 devices
    rxrpc: Fix rxrpc_recvmsg tracepoint
    sch_dsmark: fix potential NULL deref in dsmark_init()
    udp: fix gso_segs calculations
    vsock: Fix a lockdep warning in __vsock_release()
    net: dsa: rtl8366: Check VLAN ID and not ports
    udp: only do GSO if # of segs > 1
    net/rds: Fix error handling in rds_ib_add_one()
    xen-netfront: do not use ~0U as error return value for xennet_fill_frags()
    tipc: fix unlimited bundling of small messages
    sch_cbq: validate TCA_CBQ_WRROPT to avoid crash
    soundwire: Kconfig: fix help format
    soundwire: fix regmap dependencies and align with other serial links
    Smack: Don't ignore other bprm->unsafe flags if LSM_UNSAFE_PTRACE is set
    smack: use GFP_NOFS while holding inode_smack::smk_lock
    NFC: fix attrs checks in netlink interface
    kexec: bail out upon SIGKILL when allocating memory.
    9p/cache.c: Fix memory leak in v9fs_cache_session_get_cookie
    Linux 4.19.78

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I52ea645e7237f31c01f138e92560a81c786449b5
commit d9e388f82a
118 changed files with 967 additions and 525 deletions

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 77
+SUBLEVEL = 78
 EXTRAVERSION =
 NAME = "People's Front"
@@ -1586,8 +1586,9 @@ config ARM_PATCH_IDIV
	  code to do integer division.

 config AEABI
-	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && !CPU_V7M && !CPU_V6 && !CPU_V6K
-	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K
+	bool "Use the ARM EABI to compile the kernel" if !CPU_V7 && \
+		!CPU_V7M && !CPU_V6 && !CPU_V6K && !CC_IS_CLANG
+	default CPU_V7 || CPU_V7M || CPU_V6 || CPU_V6K || CC_IS_CLANG
	help
	  This option allows for the kernel to be compiled using the latest
	  ARM ABI (aka EABI). This is only useful if you are using a user
@@ -216,7 +216,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 {
	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		mask = VM_WRITE;
	if (fsr & FSR_LNX_PF)
		mask = VM_EXEC;
@@ -287,7 +287,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
-	if (fsr & FSR_WRITE)
+	if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
		flags |= FAULT_FLAG_WRITE;

	/*
@@ -6,6 +6,7 @@
  * Fault status register encodings.  We steal bit 31 for our own purposes.
  */
 #define FSR_LNX_PF		(1 << 31)
+#define FSR_CM			(1 << 13)
 #define FSR_WRITE		(1 << 11)
 #define FSR_FS4			(1 << 10)
 #define FSR_FS3_0		(15)
@@ -18,8 +18,9 @@
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP		(128*1024*1024UL)
+#define MAX_GAP		((STACK_TOP)/6*5)
+#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))

 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -35,13 +36,22 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }

 /*
@@ -1175,6 +1175,22 @@ void __init adjust_lowmem_bounds(void)
	 */
	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;

+	/*
+	 * The first usable region must be PMD aligned. Mark its start
+	 * as MEMBLOCK_NOMAP if it isn't
+	 */
+	for_each_memblock(memory, reg) {
+		if (!memblock_is_nomap(reg)) {
+			if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
+				phys_addr_t len;
+
+				len = round_up(reg->base, PMD_SIZE) - reg->base;
+				memblock_mark_nomap(reg->base, len);
+			}
+			break;
+		}
+	}
+
	for_each_memblock(memory, reg) {
		phys_addr_t block_start = reg->base;
		phys_addr_t block_end = reg->base + reg->size;
@@ -74,7 +74,7 @@ __XCHG_CASE( ,  ,  mb_8, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE

 #define __XCHG_GEN(sfx)						\
-static inline unsigned long __xchg##sfx(unsigned long x,	\
+static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,	\
					int size)		\
 {								\
@@ -116,7 +116,7 @@ __XCHG_GEN(_mb)
 #define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)

 #define __CMPXCHG_GEN(sfx)					\
-static inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
+static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
					   unsigned long old,	\
					   unsigned long new,	\
					   int size)		\
@@ -223,7 +223,7 @@ __CMPWAIT_CASE( ,  , 8);
 #undef __CMPWAIT_CASE

 #define __CMPWAIT_GEN(sfx)					\
-static inline void __cmpwait##sfx(volatile void *ptr,		\
+static __always_inline void __cmpwait##sfx(volatile void *ptr,	\
				  unsigned long val,		\
				  int size)			\
 {								\
@@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
@@ -688,6 +688,9 @@
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)

+/* Ingenic Config7 bits */
+#define MIPS_CONF7_BTB_LOOP_EN	(_ULCAST_(1) << 4)
+
 /* Config7 Bits specific to MIPS Technologies. */

 /* Performance counters implemented Per TC */
@@ -2774,6 +2777,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
@@ -1879,6 +1879,13 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
		c->cputype = CPU_JZRISC;
		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
		__cpu_name[cpu] = "Ingenic JZRISC";
+		/*
+		 * The XBurst core by default attempts to avoid branch target
+		 * buffer lookups by detecting & special casing loops. This
+		 * feature will cause BogoMIPS and lpj calculate in error.
+		 * Set cp0 config7 bit 4 to disable this feature.
+		 */
+		set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
		break;
	default:
		panic("Unknown Ingenic Processor ID!");
@@ -21,8 +21,9 @@ unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);

 /* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
+#define MIN_GAP		(128*1024*1024UL)
+#define MAX_GAP		((TASK_SIZE)/6*5)
+#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))

 static int mmap_is_legacy(struct rlimit *rlim_stack)
 {
@@ -38,6 +39,15 @@ static int mmap_is_legacy(struct rlimit *rlim_stack)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
+
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
@@ -630,7 +630,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
		return;
	}

-	if (cpu_has_rixi && _PAGE_NO_EXEC) {
+	if (cpu_has_rixi && !!_PAGE_NO_EXEC) {
		if (fill_includes_sw_bits) {
			UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
		} else {
@@ -59,8 +59,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,

	pagefault_enable();

-	if (!ret)
-		*oval = oldval;
+	*oval = oldval;

	return ret;
 }
@@ -811,6 +811,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
			pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
				pe->freeze_count, eeh_max_freezes);

+	eeh_for_each_pe(pe, tmp_pe)
+		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+			edev->mode &= ~EEH_DEV_NO_HANDLER;
+
	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset.  Each child gets a report of the
@@ -1004,7 +1008,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
  */
 void eeh_handle_special_event(void)
 {
-	struct eeh_pe *pe, *phb_pe;
+	struct eeh_pe *pe, *phb_pe, *tmp_pe;
+	struct eeh_dev *edev, *tmp_edev;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
@@ -1075,6 +1080,10 @@ void eeh_handle_special_event(void)
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

+				eeh_for_each_pe(pe, tmp_pe)
+					eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+						edev->mode &= ~EEH_DEV_NO_HANDLER;
+
				/* Notify all devices to be down */
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
				eeh_set_channel_state(pe, pci_channel_io_perm_failure);
@@ -520,6 +520,10 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
	RFI_TO_USER_OR_KERNEL
 9:
	/* Deliver the machine check to host kernel in V mode. */
+BEGIN_FTR_SECTION
+	ld	r10,ORIG_GPR3(r1)
+	mtspr	SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries
@@ -875,15 +875,17 @@ static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
		return 0;

	for_each_cpu(cpu, cpus) {
+		struct device *dev = get_cpu_device(cpu);
+
		switch (state) {
		case DOWN:
-			cpuret = cpu_down(cpu);
+			cpuret = device_offline(dev);
			break;
		case UP:
-			cpuret = cpu_up(cpu);
+			cpuret = device_online(dev);
			break;
		}
-		if (cpuret) {
+		if (cpuret < 0) {
			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
				__func__,
				((state == UP) ? "up" : "down"),
@@ -972,6 +974,8 @@ int rtas_ibm_suspend_me(u64 handle)
	data.token = rtas_token("ibm,suspend-me");
	data.complete = &done;

+	lock_device_hotplug();
+
	/* All present CPUs must be online */
	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
	cpuret = rtas_online_cpus_mask(offline_mask);
@@ -1003,6 +1007,7 @@ int rtas_ibm_suspend_me(u64 handle)
			__func__);

 out:
+	unlock_device_hotplug();
	free_cpumask_var(offline_mask);
	return atomic_read(&data.error);
 }
@@ -399,6 +399,7 @@ void system_reset_exception(struct pt_regs *regs)
	if (debugger(regs))
		goto out;

+	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
@@ -36,7 +36,8 @@ static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift)
	struct page *tce_mem = NULL;
	__be64 *addr;

-	tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT);
+	tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN,
+			shift - PAGE_SHIFT);
	if (!tce_mem) {
		pr_err("Failed to allocate a TCE memory, level shift=%d\n",
				shift);
@@ -161,6 +162,9 @@ void pnv_tce_free(struct iommu_table *tbl, long index, long npages)

		if (ptce)
			*ptce = cpu_to_be64(0);
+		else
+			/* Skip the rest of the level */
+			i |= tbl->it_level_size - 1;
	}
 }

@@ -260,7 +264,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
	unsigned int table_shift = max_t(unsigned int, entries_shift + 3,
			PAGE_SHIFT);
	const unsigned long tce_table_size = 1UL << table_shift;
-	unsigned int tmplevels = levels;

	if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS))
		return -EINVAL;
@@ -268,9 +271,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
	if (!is_power_of_2(window_size))
		return -EINVAL;

-	if (alloc_userspace_copy && (window_size > (1ULL << 32)))
-		tmplevels = 1;
-
	/* Adjust direct table size from window_size and levels */
	entries_shift = (entries_shift + levels - 1) / levels;
	level_shift = entries_shift + 3;
@@ -281,7 +281,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,

	/* Allocate TCE table */
	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-			tmplevels, tce_table_size, &offset, &total_allocated);
+			1, tce_table_size, &offset, &total_allocated);

	/* addr==NULL means that the first level allocation failed */
	if (!addr)
@@ -292,18 +292,18 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
	 * we did not allocate as much as we wanted,
	 * release partially allocated table.
	 */
-	if (tmplevels == levels && offset < tce_table_size)
+	if (levels == 1 && offset < tce_table_size)
		goto free_tces_exit;

	/* Allocate userspace view of the TCE table */
	if (alloc_userspace_copy) {
		offset = 0;
		uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
-				tmplevels, tce_table_size, &offset,
+				1, tce_table_size, &offset,
				&total_allocated_uas);
		if (!uas)
			goto free_tces_exit;
-		if (tmplevels == levels && (offset < tce_table_size ||
+		if (levels == 1 && (offset < tce_table_size ||
				total_allocated_uas != total_allocated))
			goto free_uas_exit;
	}
@@ -318,7 +318,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,

	pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n",
			window_size, tce_table_size, bus_offset, tbl->it_base,
-			tbl->it_userspace, tmplevels, levels);
+			tbl->it_userspace, 1, levels);

	return 0;
@@ -243,7 +243,7 @@ extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
 extern int pnv_npu2_init(struct pnv_phb *phb);

 /* pci-ioda-tce.c */
-#define POWERNV_IOMMU_DEFAULT_LEVELS	1
+#define POWERNV_IOMMU_DEFAULT_LEVELS	2
 #define POWERNV_IOMMU_MAX_LEVELS	5

 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
@@ -12,6 +12,7 @@
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/stat.h>
 #include <linux/completion.h>
@@ -209,7 +210,11 @@ static int update_dt_node(__be32 phandle, s32 scope)

				prop_data += vd;
			}
+
+			cond_resched();
		}
+
+		cond_resched();
	} while (rtas_rc == 1);

	of_node_put(dn);
@@ -318,8 +323,12 @@ int pseries_devicetree_update(s32 scope)
					add_dt_node(phandle, drc_index);
					break;
				}
+
+				cond_resched();
			}
		}
+
+		cond_resched();
	} while (rc == 1);

	kfree(rtas_buf);
@@ -325,6 +325,9 @@ static void pseries_lpar_idle(void)
	 * low power mode by ceding processor to hypervisor
	 */

+	if (!prep_irq_for_idle())
+		return;
+
	/* Indicate to hypervisor that we are idle. */
	get_lppaca()->idle = 1;
@@ -2497,13 +2497,16 @@ static void dump_pacas(void)
 static void dump_one_xive(int cpu)
 {
	unsigned int hwid = get_hard_smp_processor_id(cpu);
-
-	opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
-	opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
-	opal_xive_dump(XIVE_DUMP_VP, hwid);
-	opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+	bool hv = cpu_has_feature(CPU_FTR_HVMODE);
+
+	if (hv) {
+		opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+		opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+		opal_xive_dump(XIVE_DUMP_VP, hwid);
+		opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+	}

	if (setjmp(bus_error_jmp) != 0) {
		catch_memory_errors = 0;
@@ -269,7 +269,7 @@ static int hypfs_show_options(struct seq_file *s, struct dentry *root)
 static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
 {
	struct inode *root_inode;
-	struct dentry *root_dentry;
+	struct dentry *root_dentry, *update_file;
	int rc = 0;
	struct hypfs_sb_info *sbi;

@@ -300,9 +300,10 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
		rc = hypfs_diag_create_files(root_dentry);
	if (rc)
		return rc;
-	sbi->update_file = hypfs_create_update_file(root_dentry);
-	if (IS_ERR(sbi->update_file))
-		return PTR_ERR(sbi->update_file);
+	update_file = hypfs_create_update_file(root_dentry);
+	if (IS_ERR(update_file))
+		return PTR_ERR(update_file);
+	sbi->update_file = update_file;
	hypfs_update_update(sb);
	pr_info("Hypervisor filesystem mounted\n");
	return 0;
@@ -376,13 +376,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd)
  * hardware queue, but we may return a request that is for a
  * different hardware queue. This is because mq-deadline has shared
  * state for all hardware queues, in terms of sorting, FIFOs, etc.
- *
- * For a zoned block device, __dd_dispatch_request() may return NULL
- * if all the queued write requests are directed at zones that are already
- * locked due to on-going write requests. In this case, make sure to mark
- * the queue as needing a restart to ensure that the queue is run again
- * and the pending writes dispatched once the target zones for the ongoing
- * write requests are unlocked in dd_finish_request().
  */
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
@@ -391,9 +384,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
-	if (!rq && blk_queue_is_zoned(hctx->queue) &&
-	    !list_empty(&dd->fifo_list[WRITE]))
-		blk_mq_sched_mark_restart_hctx(hctx);
	spin_unlock(&dd->lock);

	return rq;
@@ -559,6 +549,13 @@ static void dd_prepare_request(struct request *rq, struct bio *bio)
  * spinlock so that the zone is never unlocked while deadline_fifo_request()
  * or deadline_next_request() are executing. This function is called for
  * all requests, whether or not these requests complete successfully.
+ *
+ * For a zoned block device, __dd_dispatch_request() may have stopped
+ * dispatching requests if all the queued requests are write requests directed
+ * at zones that are already locked due to on-going write requests. To ensure
+ * write request dispatch progress in this case, mark the queue as needing a
+ * restart to ensure that the queue is run again after completion of the
+ * request and zones being unlocked.
  */
 static void dd_finish_request(struct request *rq)
 {
@@ -570,6 +567,12 @@ static void dd_finish_request(struct request *rq)

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
+		if (!list_empty(&dd->fifo_list[WRITE])) {
+			struct blk_mq_hw_ctx *hctx;
+
+			hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+			blk_mq_sched_mark_restart_hctx(hctx);
+		}
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
 }
@@ -44,7 +44,7 @@ config REGMAP_IRQ

 config REGMAP_SOUNDWIRE
	tristate
-	depends on SOUNDWIRE_BUS
+	depends on SOUNDWIRE

 config REGMAP_SCCB
	tristate
@@ -2596,7 +2596,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
	if (ret)
		return ret;
	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
-		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
		return -EINVAL;
	}
@@ -221,6 +221,9 @@ struct smi_info {
	 */
	bool irq_enable_broken;

+	/* Is the driver in maintenance mode? */
+	bool in_maintenance_mode;
+
	/*
	 * Did we get an attention that we did not handle?
	 */
@@ -1013,11 +1016,20 @@ static int ipmi_thread(void *data)
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
-		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
-		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
-			schedule();
-		else if (smi_result == SI_SM_IDLE) {
+		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
+			/*
+			 * In maintenance mode we run as fast as
+			 * possible to allow firmware updates to
+			 * complete as fast as possible, but normally
+			 * don't bang on the scheduler.
+			 */
+			if (smi_info->in_maintenance_mode)
+				schedule();
+			else
+				usleep_range(100, 200);
+		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
@@ -1025,8 +1037,9 @@ static int ipmi_thread(void *data)
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
-		} else
+		} else {
			schedule_timeout_interruptible(1);
+		}
	}
	return 0;
 }
@@ -1201,6 +1214,7 @@ static void set_maintenance_mode(void *send_info, bool enable)

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
+	smi_info->in_maintenance_mode = enable;
 }

 static void shutdown_smi(void *send_info);
@@ -187,12 +187,13 @@ static int tpm_class_shutdown(struct device *dev)
 {
	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);

+	down_write(&chip->ops_sem);
	if (chip->flags & TPM_CHIP_FLAG_TPM2) {
-		down_write(&chip->ops_sem);
		tpm2_shutdown(chip, TPM2_SU_CLEAR);
-		chip->ops = NULL;
-		up_write(&chip->ops_sem);
	}
+	chip->ops = NULL;
+	up_write(&chip->ops_sem);

	return 0;
 }
@@ -39,7 +39,6 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
 {
	struct tpm_buf tpm_buf;
	struct tpm_readpubek_out *out;
-	ssize_t rc;
	int i;
	char *str = buf;
	struct tpm_chip *chip = to_tpm_chip(dev);
@@ -47,19 +46,18 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,

	memset(&anti_replay, 0, sizeof(anti_replay));

-	rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK);
-	if (rc)
-		return rc;
+	if (tpm_try_get_ops(chip))
+		return 0;
+
+	if (tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK))
+		goto out_ops;

	tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay));

-	rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
+	if (tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE,
			      READ_PUBEK_RESULT_MIN_BODY_SIZE, 0,
-			      "attempting to read the PUBEK");
-	if (rc) {
-		tpm_buf_destroy(&tpm_buf);
-		return 0;
-	}
+			      "attempting to read the PUBEK"))
+		goto out_buf;

	out = (struct tpm_readpubek_out *)&tpm_buf.data[10];
	str +=
@@ -90,9 +88,11 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
		str += sprintf(str, "\n");
	}

-	rc = str - buf;
+out_buf:
	tpm_buf_destroy(&tpm_buf);
-	return rc;
+out_ops:
+	tpm_put_ops(chip);
+	return str - buf;
 }
 static DEVICE_ATTR_RO(pubek);

@@ -106,12 +106,16 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
	char *str = buf;
	struct tpm_chip *chip = to_tpm_chip(dev);

-	rc = tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
-			"attempting to determine the number of PCRS",
-			sizeof(cap.num_pcrs));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(chip, TPM_CAP_PROP_PCR, &cap,
+		       "attempting to determine the number of PCRS",
+		       sizeof(cap.num_pcrs))) {
+		tpm_put_ops(chip);
+		return 0;
+	}
+
	num_pcrs = be32_to_cpu(cap.num_pcrs);
	for (i = 0; i < num_pcrs; i++) {
		rc = tpm_pcr_read_dev(chip, i, digest);
@@ -122,6 +126,7 @@ static ssize_t pcrs_show(struct device *dev, struct device_attribute *attr,
			str += sprintf(str, "%02X ", digest[j]);
		str += sprintf(str, "\n");
	}
+	tpm_put_ops(chip);
	return str - buf;
 }
 static DEVICE_ATTR_RO(pcrs);
@@ -129,16 +134,21 @@ static DEVICE_ATTR_RO(pcrs);
 static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
	cap_t cap;
-	ssize_t rc;

-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-			"attempting to determine the permanent enabled state",
-			sizeof(cap.perm_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+		       "attempting to determine the permanent enabled state",
+		       sizeof(cap.perm_flags)))
+		goto out_ops;
+
	rc = sprintf(buf, "%d\n", !cap.perm_flags.disable);
+out_ops:
+	tpm_put_ops(chip);
	return rc;
 }
 static DEVICE_ATTR_RO(enabled);
@@ -146,16 +156,21 @@ static DEVICE_ATTR_RO(enabled);
 static ssize_t active_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
	cap_t cap;
-	ssize_t rc;

-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_PERM, &cap,
-			"attempting to determine the permanent active state",
-			sizeof(cap.perm_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(chip, TPM_CAP_FLAG_PERM, &cap,
+		       "attempting to determine the permanent active state",
+		       sizeof(cap.perm_flags)))
+		goto out_ops;
+
	rc = sprintf(buf, "%d\n", !cap.perm_flags.deactivated);
+out_ops:
+	tpm_put_ops(chip);
	return rc;
 }
 static DEVICE_ATTR_RO(active);
@@ -163,16 +178,21 @@ static DEVICE_ATTR_RO(active);
 static ssize_t owned_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
	cap_t cap;
-	ssize_t rc;

-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
-			"attempting to determine the owner state",
-			sizeof(cap.owned));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_PROP_OWNER, &cap,
+		       "attempting to determine the owner state",
+		       sizeof(cap.owned)))
+		goto out_ops;
+
	rc = sprintf(buf, "%d\n", cap.owned);
+out_ops:
+	tpm_put_ops(chip);
	return rc;
 }
 static DEVICE_ATTR_RO(owned);
@@ -180,16 +200,21 @@ static DEVICE_ATTR_RO(owned);
 static ssize_t temp_deactivated_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
 {
+	struct tpm_chip *chip = to_tpm_chip(dev);
+	ssize_t rc = 0;
	cap_t cap;
-	ssize_t rc;

-	rc = tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
-			"attempting to determine the temporary state",
-			sizeof(cap.stclear_flags));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(to_tpm_chip(dev), TPM_CAP_FLAG_VOL, &cap,
+		       "attempting to determine the temporary state",
+		       sizeof(cap.stclear_flags)))
+		goto out_ops;
+
	rc = sprintf(buf, "%d\n", cap.stclear_flags.deactivated);
+out_ops:
+	tpm_put_ops(chip);
	return rc;
 }
 static DEVICE_ATTR_RO(temp_deactivated);
@@ -198,15 +223,18 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
 {
	struct tpm_chip *chip = to_tpm_chip(dev);
-	cap_t cap;
-	ssize_t rc;
+	ssize_t rc = 0;
	char *str = buf;
+	cap_t cap;

-	rc = tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
-			"attempting to determine the manufacturer",
-			sizeof(cap.manufacturer_id));
-	if (rc)
+	if (tpm_try_get_ops(chip))
		return 0;

+	if (tpm_getcap(chip, TPM_CAP_PROP_MANUFACTURER, &cap,
+		       "attempting to determine the manufacturer",
+		       sizeof(cap.manufacturer_id)))
+		goto out_ops;
+
	str += sprintf(str, "Manufacturer: 0x%x\n",
		       be32_to_cpu(cap.manufacturer_id));

@@ -223,20 +251,22 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
			   cap.tpm_version_1_2.revMinor);
	} else {
		/* Otherwise just use TPM_STRUCT_VER */
-		rc = tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
-				"attempting to determine the 1.1 version",
-				sizeof(cap.tpm_version));
-		if (rc)
-			return 0;
+		if (tpm_getcap(chip, TPM_CAP_VERSION_1_1, &cap,
+			       "attempting to determine the 1.1 version",
+			       sizeof(cap.tpm_version)))
+			goto out_ops;
+
		str += sprintf(str,
			       "TCG version: %d.%d\nFirmware version: %d.%d\n",
			       cap.tpm_version.Major,
			       cap.tpm_version.Minor,
			       cap.tpm_version.revMajor,
			       cap.tpm_version.revMinor);
	}
-
-	return str - buf;
+	rc = str - buf;
+out_ops:
+	tpm_put_ops(chip);
+	return rc;
 }
 static DEVICE_ATTR_RO(caps);

@@ -244,10 +274,12 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
 {
	struct tpm_chip *chip = to_tpm_chip(dev);
-	if (chip == NULL)
+
+	if (tpm_try_get_ops(chip))
		return 0;

	chip->ops->cancel(chip);
+	tpm_put_ops(chip);
	return count;
 }
 static DEVICE_ATTR_WO(cancel);
@@ -67,16 +67,17 @@ int owl_clk_probe(struct device *dev, struct clk_hw_onecell_data *hw_clks)
	struct clk_hw *hw;

	for (i = 0; i < hw_clks->num; i++) {
+		const char *name;
+
		hw = hw_clks->hws[i];
-
		if (IS_ERR_OR_NULL(hw))
			continue;

+		name = hw->init->name;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "Couldn't register clock %d - %s\n",
-				i, hw->init->name);
+				i, name);
			return ret;
		}
	}
@@ -27,6 +27,10 @@

 #define MOR_KEY_MASK		(0xff << 16)

+#define clk_main_parent_select(s)	(((s) & \
+					(AT91_PMC_MOSCEN | \
+					AT91_PMC_OSCBYPASS)) ? 1 : 0)
+
 struct clk_main_osc {
	struct clk_hw hw;
	struct regmap *regmap;
@@ -119,7 +123,7 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)

	regmap_read(regmap, AT91_PMC_SR, &status);

-	return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN);
+	return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
 }

 static const struct clk_ops main_osc_ops = {
@@ -530,7 +534,7 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)

	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);

-	return status & AT91_PMC_MOSCEN ? 1 : 0;
+	return clk_main_parent_select(status);
 }

 static const struct clk_ops sam9x5_main_ops = {
@@ -572,7 +576,7 @@ at91_clk_register_sam9x5_main(struct regmap *regmap,
	clkmain->hw.init = &init;
	clkmain->regmap = regmap;
	regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status);
-	clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0;
+	clkmain->parent = clk_main_parent_select(status);

	hw = &clkmain->hw;
	ret = clk_hw_register(NULL, &clkmain->hw);
@@ -610,7 +610,7 @@ static const struct clockgen_chipinfo chipinfo[] = {
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
-			&p2041_cmux_grp1, &p2041_cmux_grp2
+			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
@@ -647,7 +647,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
		.name = "gcc_sdcc2_apps_clk_src",
		.parent_names = gcc_parent_names_10,
		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
	},
 };

@@ -671,7 +671,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
		.name = "gcc_sdcc4_apps_clk_src",
		.parent_names = gcc_parent_names_0,
		.num_parents = 4,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
	},
 };
@@ -341,7 +341,8 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
		return;

	pd->name = np->name;
-	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+	pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+		    GENPD_FLAG_ACTIVE_WAKEUP;
	pd->attach_dev = cpg_mstp_attach_dev;
	pd->detach_dev = cpg_mstp_detach_dev;
	pm_genpd_init(pd, &pm_domain_always_on_gov, false);
@@ -514,7 +514,8 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,

	genpd = &pd->genpd;
	genpd->name = np->name;
-	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
+		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = cpg_mssr_attach_dev;
	genpd->detach_dev = cpg_mssr_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
@@ -298,9 +298,10 @@ static u8 dmn_clk_get_parent(struct clk_hw *hw)
 {
	struct clk_dmn *clk = to_dmnclk(hw);
	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);

	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
		return 4;

	WARN_ON((cfg & (BIT(3) - 1)) > 4);
@@ -312,9 +313,10 @@ static int dmn_clk_set_parent(struct clk_hw *hw, u8 parent)
 {
	struct clk_dmn *clk = to_dmnclk(hw);
	u32 cfg = clkc_readl(clk->regofs);
+	const char *name = clk_hw_get_name(hw);

	/* parent of io domain can only be pll3 */
-	if (strcmp(hw->init->name, "io") == 0)
+	if (strcmp(name, "io") == 0)
		return -EINVAL;

	cfg &= ~(BIT(3) - 1);
@@ -354,7 +356,8 @@ static long dmn_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 {
	unsigned long fin;
	unsigned ratio, wait, hold;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;

	fin = *parent_rate;
	ratio = fin / rate;
@@ -376,7 +379,8 @@ static int dmn_clk_set_rate(struct clk_hw *hw, unsigned long rate,
	struct clk_dmn *clk = to_dmnclk(hw);
	unsigned long fin;
	unsigned ratio, wait, hold, reg;
-	unsigned bits = (strcmp(hw->init->name, "mem") == 0) ? 3 : 4;
+	const char *name = clk_hw_get_name(hw);
+	unsigned bits = (strcmp(name, "mem") == 0) ? 3 : 4;

	fin = parent_rate;
	ratio = fin / rate;
@@ -71,16 +71,17 @@ int sprd_clk_probe(struct device *dev, struct clk_hw_onecell_data *clkhw)
	struct clk_hw *hw;

	for (i = 0; i < clkhw->num; i++) {
+		const char *name;
+
		hw = clkhw->hws[i];
-
		if (!hw)
			continue;

+		name = hw->init->name;
		ret = devm_clk_hw_register(dev, hw);
		if (ret) {
			dev_err(dev, "Couldn't register clock %d - %s\n",
-				i, hw->init->name);
+				i, name);
			return ret;
		}
	}
@@ -136,6 +136,7 @@ static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll,
			k2 + refin * nint * CLK_PLL_1M;
	}

+	kfree(cfg);
	return rate;
 }

@@ -222,6 +223,7 @@ static int _sprd_pll_set_rate(const struct sprd_pll *pll,
	if (!ret)
		udelay(pll->udelay);

+	kfree(cfg);
	return ret;
 }
@@ -499,6 +499,9 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
		[CLK_MMC1]		= &mmc1_clk.common.hw,
		[CLK_MMC1_SAMPLE]	= &mmc1_sample_clk.common.hw,
		[CLK_MMC1_OUTPUT]	= &mmc1_output_clk.common.hw,
+		[CLK_MMC2]		= &mmc2_clk.common.hw,
+		[CLK_MMC2_SAMPLE]	= &mmc2_sample_clk.common.hw,
+		[CLK_MMC2_OUTPUT]	= &mmc2_output_clk.common.hw,
		[CLK_CE]		= &ce_clk.common.hw,
		[CLK_SPI0]		= &spi0_clk.common.hw,
		[CLK_USB_PHY0]		= &usb_phy0_clk.common.hw,
@@ -567,6 +567,7 @@ static int __init top_clocks_init(struct device_node *np)
 {
	void __iomem *reg_base;
	int i, ret;
+	const char *name;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
@@ -576,11 +577,10 @@ static int __init top_clocks_init(struct device_node *np)

	for (i = 0; i < ARRAY_SIZE(zx296718_pll_clk); i++) {
		zx296718_pll_clk[i].reg_base += (uintptr_t)reg_base;
+		name = zx296718_pll_clk[i].hw.init->name;
		ret = clk_hw_register(NULL, &zx296718_pll_clk[i].hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				zx296718_pll_clk[i].hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(top_ffactor_clk); i++) {
@@ -588,11 +588,10 @@ static int __init top_clocks_init(struct device_node *np)
		top_hw_onecell_data.hws[top_ffactor_clk[i].id] =
				&top_ffactor_clk[i].factor.hw;

+		name = top_ffactor_clk[i].factor.hw.init->name;
		ret = clk_hw_register(NULL, &top_ffactor_clk[i].factor.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_ffactor_clk[i].factor.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(top_mux_clk); i++) {
@@ -601,11 +600,10 @@ static int __init top_clocks_init(struct device_node *np)
				&top_mux_clk[i].mux.hw;

		top_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = top_mux_clk[i].mux.hw.init->name;
		ret = clk_hw_register(NULL, &top_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(top_gate_clk); i++) {
@@ -614,11 +612,10 @@ static int __init top_clocks_init(struct device_node *np)
				&top_gate_clk[i].gate.hw;

		top_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = top_gate_clk[i].gate.hw.init->name;
		ret = clk_hw_register(NULL, &top_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(top_div_clk); i++) {
@@ -627,11 +624,10 @@ static int __init top_clocks_init(struct device_node *np)
				&top_div_clk[i].div.hw;

		top_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = top_div_clk[i].div.hw.init->name;
		ret = clk_hw_register(NULL, &top_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("top clk %s init error!\n",
-				top_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("top clk %s init error!\n", name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -757,6 +753,7 @@ static int __init lsp0_clocks_init(struct device_node *np)
 {
	void __iomem *reg_base;
	int i, ret;
+	const char *name;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
@@ -770,11 +767,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
				&lsp0_mux_clk[i].mux.hw;

		lsp0_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = lsp0_mux_clk[i].mux.hw.init->name;
		ret = clk_hw_register(NULL, &lsp0_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(lsp0_gate_clk); i++) {
@@ -783,11 +779,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
				&lsp0_gate_clk[i].gate.hw;

		lsp0_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = lsp0_gate_clk[i].gate.hw.init->name;
		ret = clk_hw_register(NULL, &lsp0_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(lsp0_div_clk); i++) {
@@ -796,11 +791,10 @@ static int __init lsp0_clocks_init(struct device_node *np)
				&lsp0_div_clk[i].div.hw;

		lsp0_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = lsp0_div_clk[i].div.hw.init->name;
		ret = clk_hw_register(NULL, &lsp0_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("lsp0 clk %s init error!\n",
-				lsp0_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp0 clk %s init error!\n", name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -865,6 +859,7 @@ static int __init lsp1_clocks_init(struct device_node *np)
 {
	void __iomem *reg_base;
	int i, ret;
+	const char *name;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
@@ -878,11 +873,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
				&lsp0_mux_clk[i].mux.hw;

		lsp1_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = lsp1_mux_clk[i].mux.hw.init->name;
		ret = clk_hw_register(NULL, &lsp1_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(lsp1_gate_clk); i++) {
@@ -891,11 +885,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
				&lsp1_gate_clk[i].gate.hw;

		lsp1_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = lsp1_gate_clk[i].gate.hw.init->name;
		ret = clk_hw_register(NULL, &lsp1_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(lsp1_div_clk); i++) {
@@ -904,11 +897,10 @@ static int __init lsp1_clocks_init(struct device_node *np)
				&lsp1_div_clk[i].div.hw;

		lsp1_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = lsp1_div_clk[i].div.hw.init->name;
		ret = clk_hw_register(NULL, &lsp1_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("lsp1 clk %s init error!\n",
-				lsp1_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("lsp1 clk %s init error!\n", name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -982,6 +974,7 @@ static int __init audio_clocks_init(struct device_node *np)
 {
	void __iomem *reg_base;
	int i, ret;
+	const char *name;

	reg_base = of_iomap(np, 0);
	if (!reg_base) {
@@ -995,11 +988,10 @@ static int __init audio_clocks_init(struct device_node *np)
				&audio_mux_clk[i].mux.hw;

		audio_mux_clk[i].mux.reg += (uintptr_t)reg_base;
+		name = audio_mux_clk[i].mux.hw.init->name;
		ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_mux_clk[i].mux.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(audio_adiv_clk); i++) {
@@ -1008,11 +1000,10 @@ static int __init audio_clocks_init(struct device_node *np)
				&audio_adiv_clk[i].hw;

		audio_adiv_clk[i].reg_base += (uintptr_t)reg_base;
+		name = audio_adiv_clk[i].hw.init->name;
		ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_adiv_clk[i].hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) {
@@ -1021,11 +1012,10 @@ static int __init audio_clocks_init(struct device_node *np)
				&audio_div_clk[i].div.hw;

		audio_div_clk[i].div.reg += (uintptr_t)reg_base;
+		name = audio_div_clk[i].div.hw.init->name;
		ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_div_clk[i].div.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
	}

	for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) {
@@ -1034,11 +1024,10 @@ static int __init audio_clocks_init(struct device_node *np)
				&audio_gate_clk[i].gate.hw;

		audio_gate_clk[i].gate.reg += (uintptr_t)reg_base;
+		name = audio_gate_clk[i].gate.hw.init->name;
		ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw);
-		if (ret) {
-			pr_warn("audio clk %s init error!\n",
-				audio_gate_clk[i].gate.hw.init->name);
-		}
+		if (ret)
+			pr_warn("audio clk %s init error!\n", name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
@@ -215,17 +215,18 @@ static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
 {
	struct sec_hw_sgl *sgl_current, *sgl_next;
+	dma_addr_t sgl_next_dma;

	if (!hw_sgl)
		return;
	sgl_current = hw_sgl;
-	while (sgl_current->next) {
+	while (sgl_current) {
		sgl_next = sgl_current->next;
-		dma_pool_free(info->hw_sgl_pool, sgl_current,
-			      sgl_current->next_sgl);
+		sgl_next_dma = sgl_current->next_sgl;
+
+		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
+
		sgl_current = sgl_next;
+		psec_sgl = sgl_next_dma;
	}
-	dma_pool_free(info->hw_sgl_pool, hw_sgl, psec_sgl);
 }

 static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
|
|||
{
|
||||
struct sync_pt *pt = dma_fence_to_sync_pt(fence);
|
||||
struct sync_timeline *parent = dma_fence_parent(fence);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(fence->lock, flags);
|
||||
if (!list_empty(&pt->link)) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(fence->lock, flags);
|
||||
if (!list_empty(&pt->link)) {
|
||||
list_del(&pt->link);
|
||||
rb_erase(&pt->node, &parent->pt_tree);
|
||||
}
|
||||
spin_unlock_irqrestore(fence->lock, flags);
|
||||
list_del(&pt->link);
|
||||
rb_erase(&pt->node, &parent->pt_tree);
|
||||
}
|
||||
spin_unlock_irqrestore(fence->lock, flags);
|
||||
|
||||
sync_timeline_put(parent);
|
||||
dma_fence_free(fence);
|
||||
|
@ -274,7 +271,8 @@ static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
|
|||
p = &parent->rb_left;
|
||||
} else {
|
||||
if (dma_fence_get_rcu(&other->base)) {
|
||||
dma_fence_put(&pt->base);
|
||||
sync_timeline_put(obj);
|
||||
kfree(pt);
|
||||
pt = other;
|
||||
goto unlock;
|
||||
}
|
||||
|
|
|
@@ -1813,7 +1813,7 @@ static void si_program_aspm(struct amdgpu_device *adev)
		if (orig != data)
			si_pif_phy1_wreg(adev,PB1_PIF_PWRDOWN_1, data);

-		if ((adev->family != CHIP_OLAND) && (adev->family != CHIP_HAINAN)) {
+		if ((adev->asic_type != CHIP_OLAND) && (adev->asic_type != CHIP_HAINAN)) {
			orig = data = si_pif_phy0_rreg(adev,PB0_PIF_PWRDOWN_0);
			data &= ~PLL_RAMP_UP_TIME_0_MASK;
			if (orig != data)
@@ -1862,14 +1862,14 @@ static void si_program_aspm(struct amdgpu_device *adev)

		orig = data = si_pif_phy0_rreg(adev,PB0_PIF_CNTL);
		data &= ~LS2_EXIT_TIME_MASK;
-		if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+		if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
			data |= LS2_EXIT_TIME(5);
		if (orig != data)
			si_pif_phy0_wreg(adev,PB0_PIF_CNTL, data);

		orig = data = si_pif_phy1_rreg(adev,PB1_PIF_CNTL);
		data &= ~LS2_EXIT_TIME_MASK;
-		if ((adev->family == CHIP_OLAND) || (adev->family == CHIP_HAINAN))
+		if ((adev->asic_type == CHIP_OLAND) || (adev->asic_type == CHIP_HAINAN))
			data |= LS2_EXIT_TIME(5);
		if (orig != data)
			si_pif_phy1_wreg(adev,PB1_PIF_CNTL, data);
@ -1585,6 +1585,14 @@ void dc_set_power_state(
|
|||
dc_resource_state_construct(dc, dc->current_state);
|
||||
|
||||
dc->hwss.init_hw(dc);
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
|
||||
if (dc->hwss.init_sys_ctx != NULL &&
|
||||
dc->vm_pa_config.valid) {
|
||||
dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
|
||||
}
|
||||
#endif
|
||||
|
||||
break;
|
||||
default:
|
||||
|
||||
|
|
|
@ -229,12 +229,10 @@ bool resource_construct(
|
|||
DC_ERR("DC: failed to create audio!\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!aud->funcs->endpoint_valid(aud)) {
|
||||
aud->funcs->destroy(&aud);
|
||||
break;
|
||||
}
|
||||
|
||||
pool->audios[i] = aud;
|
||||
pool->audio_count++;
|
||||
}
|
||||
|
@ -1703,24 +1701,25 @@ static struct audio *find_first_free_audio(
|
|||
const struct resource_pool *pool,
|
||||
enum engine_id id)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < pool->audio_count; i++) {
|
||||
int i, available_audio_count;
|
||||
|
||||
available_audio_count = pool->audio_count;
|
||||
|
||||
for (i = 0; i < available_audio_count; i++) {
|
||||
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
|
||||
/*we have enough audio endpoint, find the matching inst*/
|
||||
if (id != i)
|
||||
continue;
|
||||
|
||||
return pool->audios[i];
|
||||
}
|
||||
}
|
||||
|
||||
/* use engine id to find free audio */
|
||||
if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
|
||||
/* use engine id to find free audio */
|
||||
if ((id < available_audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
|
||||
return pool->audios[id];
|
||||
}
|
||||
|
||||
/*not found the matching one, first come first serve*/
|
||||
for (i = 0; i < pool->audio_count; i++) {
|
||||
for (i = 0; i < available_audio_count; i++) {
|
||||
if (res_ctx->is_audio_acquired[i] == false) {
|
||||
return pool->audios[i];
|
||||
}
|
||||
|
|
|
@ -611,6 +611,8 @@ void dce_aud_az_configure(
|
|||
|
||||
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
|
||||
value);
|
||||
DC_LOG_HW_AUDIO("\n\tAUDIO:az_configure: index: %u data, 0x%x, displayName %s: \n",
|
||||
audio->inst, value, audio_info->display_name);
|
||||
|
||||
/*
|
||||
*write the port ID:
|
||||
|
@ -922,7 +924,6 @@ static const struct audio_funcs funcs = {
|
|||
.az_configure = dce_aud_az_configure,
|
||||
.destroy = dce_aud_destroy,
|
||||
};
|
||||
|
||||
void dce_aud_destroy(struct audio **audio)
|
||||
{
|
||||
struct dce_audio *aud = DCE_AUD(*audio);
|
||||
|
@ -953,7 +954,6 @@ struct audio *dce_audio_create(
|
|||
audio->regs = reg;
|
||||
audio->shifts = shifts;
|
||||
audio->masks = masks;
|
||||
|
||||
return &audio->base;
|
||||
}
|
||||
|
||||
|
|
|
@ -292,9 +292,10 @@ bool cm_helper_translate_curve_to_hw_format(
|
|||
seg_distr[7] = 4;
|
||||
seg_distr[8] = 4;
|
||||
seg_distr[9] = 4;
|
||||
seg_distr[10] = 1;
|
||||
|
||||
region_start = -10;
|
||||
region_end = 0;
|
||||
region_end = 1;
|
||||
}
|
||||
|
||||
for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++)
|
||||
|
|
|
@ -1040,16 +1040,17 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Check whether panel supports fast training */
|
||||
ret = analogix_dp_fast_link_train_detection(dp);
|
||||
if (ret)
|
||||
dp->psr_enable = false;
|
||||
|
||||
if (dp->psr_enable) {
|
||||
ret = analogix_dp_enable_sink_psr(dp);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Check whether panel supports fast training */
|
||||
ret = analogix_dp_fast_link_train_detection(dp);
|
||||
if (ret)
|
||||
dp->psr_enable = false;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -302,7 +302,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
|
|||
struct drm_dp_aux_msg *msg)
|
||||
{
|
||||
struct tc_data *tc = aux_to_tc(aux);
|
||||
size_t size = min_t(size_t, 8, msg->size);
|
||||
size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
|
||||
u8 request = msg->request & ~DP_AUX_I2C_MOT;
|
||||
u8 *buf = msg->buffer;
|
||||
u32 tmp = 0;
|
||||
|
|
|
@ -96,6 +96,8 @@ nvbios_volt_parse(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
|
|||
info->min = min(info->base,
|
||||
info->base + info->step * info->vidmask);
|
||||
info->max = nvbios_rd32(bios, volt + 0x0e);
|
||||
if (!info->max)
|
||||
info->max = max(info->base, info->base + info->step * info->vidmask);
|
||||
break;
|
||||
case 0x50:
|
||||
info->min = nvbios_rd32(bios, volt + 0x0a);
|
||||
|
|
|
@ -400,7 +400,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
|
|||
|
||||
/* Look up the DSI host. It needs to probe before we do. */
|
||||
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
|
||||
if (!endpoint)
|
||||
return -ENODEV;
|
||||
|
||||
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
|
||||
if (!dsi_host_node)
|
||||
goto error;
|
||||
|
||||
host = of_find_mipi_dsi_host_by_node(dsi_host_node);
|
||||
of_node_put(dsi_host_node);
|
||||
if (!host) {
|
||||
|
@ -409,6 +415,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
|
|||
}
|
||||
|
||||
info.node = of_graph_get_remote_port(endpoint);
|
||||
if (!info.node)
|
||||
goto error;
|
||||
|
||||
of_node_put(endpoint);
|
||||
|
||||
ts->dsi = mipi_dsi_device_register_full(host, &info);
|
||||
|
@ -429,6 +438,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
|
|||
return ret;
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
of_node_put(endpoint);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int rpi_touchscreen_remove(struct i2c_client *i2c)
|
||||
|
|
|
@ -689,9 +689,9 @@ static const struct panel_desc auo_g133han01 = {
|
|||
static const struct display_timing auo_g185han01_timings = {
|
||||
.pixelclock = { 120000000, 144000000, 175000000 },
|
||||
.hactive = { 1920, 1920, 1920 },
|
||||
.hfront_porch = { 18, 60, 74 },
|
||||
.hback_porch = { 12, 44, 54 },
|
||||
.hsync_len = { 10, 24, 32 },
|
||||
.hfront_porch = { 36, 120, 148 },
|
||||
.hback_porch = { 24, 88, 108 },
|
||||
.hsync_len = { 20, 48, 64 },
|
||||
.vactive = { 1080, 1080, 1080 },
|
||||
.vfront_porch = { 6, 10, 40 },
|
||||
.vback_porch = { 2, 5, 20 },
|
||||
|
|
|
@ -751,7 +751,7 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
|
|||
|
||||
radeon_encoder->output_csc = val;
|
||||
|
||||
if (connector->encoder->crtc) {
|
||||
if (connector->encoder && connector->encoder->crtc) {
|
||||
struct drm_crtc *crtc = connector->encoder->crtc;
|
||||
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
|
||||
|
||||
|
|
|
@ -364,11 +364,19 @@ radeon_pci_remove(struct pci_dev *pdev)
|
|||
static void
|
||||
radeon_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
struct drm_device *ddev = pci_get_drvdata(pdev);
|
||||
|
||||
/* if we are running in a VM, make sure the device
|
||||
* torn down properly on reboot/shutdown
|
||||
*/
|
||||
if (radeon_device_is_virtual())
|
||||
radeon_pci_remove(pdev);
|
||||
|
||||
/* Some adapters need to be suspended before a
|
||||
* shutdown occurs in order to prevent an error
|
||||
* during kexec.
|
||||
*/
|
||||
radeon_suspend_kms(ddev, true, true, false);
|
||||
}
|
||||
|
||||
static int radeon_pmops_suspend(struct device *dev)
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_fb_cma_helper.h>
|
||||
#include <drm/drm_gem_cma_helper.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_of.h>
|
||||
#include <drm/drm_bridge.h>
|
||||
#include <drm/drm_plane_helper.h>
|
||||
|
@ -825,6 +826,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
|
|||
};
|
||||
|
||||
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
|
||||
.prepare_fb = drm_gem_fb_prepare_fb,
|
||||
.atomic_check = ltdc_plane_atomic_check,
|
||||
.atomic_update = ltdc_plane_atomic_update,
|
||||
.atomic_disable = ltdc_plane_atomic_disable,
|
||||
|
|
|
@ -57,7 +57,6 @@ MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\")
|
|||
struct apple_sc {
|
||||
unsigned long quirks;
|
||||
unsigned int fn_on;
|
||||
DECLARE_BITMAP(pressed_fn, KEY_CNT);
|
||||
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
|
||||
};
|
||||
|
||||
|
@ -184,6 +183,8 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
|
|||
{
|
||||
struct apple_sc *asc = hid_get_drvdata(hid);
|
||||
const struct apple_key_translation *trans, *table;
|
||||
bool do_translate;
|
||||
u16 code = 0;
|
||||
|
||||
if (usage->code == KEY_FN) {
|
||||
asc->fn_on = !!value;
|
||||
|
@ -192,8 +193,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
|
|||
}
|
||||
|
||||
if (fnmode) {
|
||||
int do_translate;
|
||||
|
||||
if (hid->product >= USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI &&
|
||||
hid->product <= USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS)
|
||||
table = macbookair_fn_keys;
|
||||
|
@ -205,25 +204,33 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
|
|||
trans = apple_find_translation (table, usage->code);
|
||||
|
||||
if (trans) {
|
||||
if (test_bit(usage->code, asc->pressed_fn))
|
||||
do_translate = 1;
|
||||
else if (trans->flags & APPLE_FLAG_FKEY)
|
||||
do_translate = (fnmode == 2 && asc->fn_on) ||
|
||||
(fnmode == 1 && !asc->fn_on);
|
||||
else
|
||||
do_translate = asc->fn_on;
|
||||
if (test_bit(trans->from, input->key))
|
||||
code = trans->from;
|
||||
else if (test_bit(trans->to, input->key))
|
||||
code = trans->to;
|
||||
|
||||
if (do_translate) {
|
||||
if (value)
|
||||
set_bit(usage->code, asc->pressed_fn);
|
||||
else
|
||||
clear_bit(usage->code, asc->pressed_fn);
|
||||
if (!code) {
|
||||
if (trans->flags & APPLE_FLAG_FKEY) {
|
||||
switch (fnmode) {
|
||||
case 1:
|
||||
do_translate = !asc->fn_on;
|
||||
break;
|
||||
case 2:
|
||||
do_translate = asc->fn_on;
|
||||
break;
|
||||
default:
|
||||
/* should never happen */
|
||||
do_translate = false;
|
||||
}
|
||||
} else {
|
||||
do_translate = asc->fn_on;
|
||||
}
|
||||
|
||||
input_event(input, usage->type, trans->to,
|
||||
value);
|
||||
|
||||
return 1;
|
||||
code = do_translate ? trans->to : trans->from;
|
||||
}
|
||||
|
||||
input_event(input, usage->type, code, value);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (asc->quirks & APPLE_NUMLOCK_EMULATION &&
|
||||
|
|
|
@ -91,7 +91,7 @@ static void wacom_wac_queue_flush(struct hid_device *hdev,
|
|||
}
|
||||
|
||||
static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
|
||||
struct hid_report *report, u8 *raw_data, int size)
|
||||
struct hid_report *report, u8 *raw_data, int report_size)
|
||||
{
|
||||
struct wacom *wacom = hid_get_drvdata(hdev);
|
||||
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
|
||||
|
@ -152,7 +152,8 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
|
|||
if (flush)
|
||||
wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
|
||||
else if (insert)
|
||||
wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, raw_data, size);
|
||||
wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
|
||||
raw_data, report_size);
|
||||
|
||||
return insert && !flush;
|
||||
}
|
||||
|
@ -2147,7 +2148,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
|
|||
{
|
||||
struct wacom_wac *wacom_wac = &wacom->wacom_wac;
|
||||
struct wacom_features *features = &wacom_wac->features;
|
||||
char name[WACOM_NAME_MAX];
|
||||
char name[WACOM_NAME_MAX - 20]; /* Leave some room for suffixes */
|
||||
|
||||
/* Generic devices name unspecified */
|
||||
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
|
||||
|
|
|
@ -255,7 +255,7 @@ static int wacom_dtu_irq(struct wacom_wac *wacom)
|
|||
|
||||
static int wacom_dtus_irq(struct wacom_wac *wacom)
|
||||
{
|
||||
char *data = wacom->data;
|
||||
unsigned char *data = wacom->data;
|
||||
struct input_dev *input = wacom->pen_input;
|
||||
unsigned short prox, pressure = 0;
|
||||
|
||||
|
@ -576,7 +576,7 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
|
|||
strip2 = ((data[3] & 0x1f) << 8) | data[4];
|
||||
}
|
||||
|
||||
prox = (buttons & ~(~0 << nbuttons)) | (keys & ~(~0 << nkeys)) |
|
||||
prox = (buttons & ~(~0U << nbuttons)) | (keys & ~(~0U << nkeys)) |
|
||||
(ring1 & 0x80) | (ring2 & 0x80) | strip1 | strip2;
|
||||
|
||||
wacom_report_numbered_buttons(input, nbuttons, buttons);
|
||||
|
|
|
@ -187,6 +187,51 @@ static const struct i2c_algorithm cht_wc_i2c_adap_algo = {
|
|||
.smbus_xfer = cht_wc_i2c_adap_smbus_xfer,
|
||||
};
|
||||
|
||||
/*
|
||||
* We are an i2c-adapter which itself is part of an i2c-client. This means that
|
||||
* transfers done through us take adapter->bus_lock twice, once for our parent
|
||||
* i2c-adapter and once to take our own bus_lock. Lockdep does not like this
|
||||
* nested locking, to make lockdep happy in the case of busses with muxes, the
|
||||
* i2c-core's i2c_adapter_lock_bus function calls:
|
||||
* rt_mutex_lock_nested(&adapter->bus_lock, i2c_adapter_depth(adapter));
|
||||
*
|
||||
* But i2c_adapter_depth only works when the direct parent of the adapter is
|
||||
* another adapter, as it is only meant for muxes. In our case there is an
|
||||
* i2c-client and MFD instantiated platform_device in the parent->child chain
|
||||
* between the 2 devices.
|
||||
*
|
||||
* So we override the default i2c_lock_operations and pass a hardcoded
|
||||
* depth of 1 to rt_mutex_lock_nested, to make lockdep happy.
|
||||
*
|
||||
* Note that if there were to be a mux attached to our adapter, this would
|
||||
* break things again since the i2c-mux code expects the root-adapter to have
|
||||
* a locking depth of 0. But we always have only 1 client directly attached
|
||||
* in the form of the Charger IC paired with the CHT Whiskey Cove PMIC.
|
||||
*/
|
||||
static void cht_wc_i2c_adap_lock_bus(struct i2c_adapter *adapter,
|
||||
unsigned int flags)
|
||||
{
|
||||
rt_mutex_lock_nested(&adapter->bus_lock, 1);
|
||||
}
|
||||
|
||||
static int cht_wc_i2c_adap_trylock_bus(struct i2c_adapter *adapter,
|
||||
unsigned int flags)
|
||||
{
|
||||
return rt_mutex_trylock(&adapter->bus_lock);
|
||||
}
|
||||
|
||||
static void cht_wc_i2c_adap_unlock_bus(struct i2c_adapter *adapter,
|
||||
unsigned int flags)
|
||||
{
|
||||
rt_mutex_unlock(&adapter->bus_lock);
|
||||
}
|
||||
|
||||
static const struct i2c_lock_operations cht_wc_i2c_adap_lock_ops = {
|
||||
.lock_bus = cht_wc_i2c_adap_lock_bus,
|
||||
.trylock_bus = cht_wc_i2c_adap_trylock_bus,
|
||||
.unlock_bus = cht_wc_i2c_adap_unlock_bus,
|
||||
};
|
||||
|
||||
/**** irqchip for the client connected to the extchgr i2c adapter ****/
|
||||
static void cht_wc_i2c_irq_lock(struct irq_data *data)
|
||||
{
|
||||
|
@ -295,6 +340,7 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
|
|||
adap->adapter.owner = THIS_MODULE;
|
||||
adap->adapter.class = I2C_CLASS_HWMON;
|
||||
adap->adapter.algo = &cht_wc_i2c_adap_algo;
|
||||
adap->adapter.lock_ops = &cht_wc_i2c_adap_lock_ops;
|
||||
strlcpy(adap->adapter.name, "PMIC I2C Adapter",
|
||||
sizeof(adap->adapter.name));
|
||||
adap->adapter.dev.parent = &pdev->dev;
|
||||
|
|
|
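The comment block in the hunk above carries the whole rationale: lockdep keys an adapter's bus_lock by i2c_adapter_depth(), which only counts adapter-to-adapter parents, so this adapter hardcodes subclass 1. As a minimal, hedged sketch of the same annotation idiom outside i2c (the two mutexes and the function are illustrative; only rt_mutex_lock_nested() itself is the real kernel API used by the patch):

    /* Tell lockdep that the inner rt_mutex nests one level below an
     * outer lock of the same class, so the double acquisition is not
     * reported as a self-deadlock.
     */
    static void lock_parent_then_child(struct rt_mutex *parent,
                                       struct rt_mutex *child)
    {
        rt_mutex_lock(parent);                  /* subclass 0 */
        rt_mutex_lock_nested(child, 1);         /* subclass 1: no splat */
    }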
@@ -55,7 +55,6 @@ static const struct mbox_chan_ops qcom_apcs_ipc_ops = {

 static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
 	struct qcom_apcs_ipc *apcs;
 	struct regmap *regmap;
 	struct resource *res;

@@ -63,6 +62,11 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 	void __iomem *base;
 	unsigned long i;
 	int ret;
+	const struct of_device_id apcs_clk_match_table[] = {
+		{ .compatible = "qcom,msm8916-apcs-kpss-global", },
+		{ .compatible = "qcom,qcs404-apcs-apps-global", },
+		{}
+	};

 	apcs = devm_kzalloc(&pdev->dev, sizeof(*apcs), GFP_KERNEL);
 	if (!apcs)

@@ -97,7 +101,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
 		return ret;
 	}

-	if (of_device_is_compatible(np, "qcom,msm8916-apcs-kpss-global")) {
+	if (of_match_device(apcs_clk_match_table, &pdev->dev)) {
 		apcs->clk = platform_device_register_data(&pdev->dev,
 							  "qcom-apcs-msm8916-clk",
 							  -1, NULL, 0);

@@ -39,6 +39,8 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
 	info->mem = &pdev->resource[0];
 	info->irq = pdev->irq;

+	pdev->d3cold_delay = 0;
+
 	/* Probably it is enough to set this for iDMA capable devices only */
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);

@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
 			 const struct switchdev_obj_port_vlan *vlan)
 {
 	struct realtek_smi *smi = ds->priv;
+	u16 vid;
 	int ret;

-	if (!smi->ops->is_vlan_valid(smi, port))
-		return -EINVAL;
+	for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+		if (!smi->ops->is_vlan_valid(smi, vid))
+			return -EINVAL;

 	dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
 		 vlan->vid_begin, vlan->vid_end);

@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
 	u16 vid;
 	int ret;

-	if (!smi->ops->is_vlan_valid(smi, port))
-		return;
+	for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+		if (!smi->ops->is_vlan_valid(smi, vid))
+			return;

 	dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
 		 port,

@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 static int alloc_uld_rxqs(struct adapter *adap,
 			  struct sge_uld_rxq_info *rxq_info, bool lro)
 {
-	struct sge *s = &adap->sge;
 	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
-	int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
 	struct sge_ofld_rxq *q = rxq_info->uldrxq;
 	unsigned short *ids = rxq_info->rspq_id;
+	unsigned int bmap_idx = 0;
+	struct sge *s = &adap->sge;
 	unsigned int per_chan;
+	int i, err, msi_idx, que_idx = 0;

 	per_chan = rxq_info->nrxq / adap->params.nports;

@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,

 		if (msi_idx >= 0) {
 			bmap_idx = get_msix_idx_from_bmap(adap);
+			if (bmap_idx < 0) {
+				err = -ENOSPC;
+				goto freeout;
+			}
 			msi_idx = adap->msix_info_ulds[bmap_idx].idx;
 		}
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false,

@@ -2788,6 +2788,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
 			netdev_err(qdev->ndev,
 				   "PCI mapping failed with error: %d\n",
 				   err);
+			dev_kfree_skb_irq(skb);
 			ql_free_large_buffers(qdev);
 			return -ENOMEM;
 		}

@@ -2634,14 +2634,18 @@ static struct hso_device *hso_create_bulk_serial_device(
 	 */
 	if (serial->tiocmget) {
 		tiocmget = serial->tiocmget;
+		tiocmget->endp = hso_get_ep(interface,
+					    USB_ENDPOINT_XFER_INT,
+					    USB_DIR_IN);
+		if (!tiocmget->endp) {
+			dev_err(&interface->dev, "Failed to find INT IN ep\n");
+			goto exit;
+		}
+
 		tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (tiocmget->urb) {
 			mutex_init(&tiocmget->mutex);
 			init_waitqueue_head(&tiocmget->waitq);
-			tiocmget->endp = hso_get_ep(
-				interface,
-				USB_ENDPOINT_XFER_INT,
-				USB_DIR_IN);
 		} else
 			hso_free_tiomget(serial);
 	}

@@ -1286,6 +1286,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},	/* Cinterion PHxx,PXxx (2 RmNet) */
 	{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},	/* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+	{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)},	/* Cinterion CLS8 */
 	{QMI_FIXED_INTF(0x413c, 0x81a2, 8)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a3, 8)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
 	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */

@@ -890,9 +890,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
 	return 0;
 }

-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
-				  struct sk_buff *skb,
-				  struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+			     struct sk_buff *skb,
+			     struct sk_buff_head *list)
 {
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;

@@ -911,7 +911,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
 			queue->rx.rsp_cons = ++cons + skb_queue_len(list);
 			kfree_skb(nskb);
-			return ~0U;
+			return -ENOENT;
 		}

 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,

@@ -922,7 +922,9 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 		kfree_skb(nskb);
 	}

-	return cons;
+	queue->rx.rsp_cons = cons;
+
+	return 0;
 }

 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)

@@ -1048,8 +1050,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 			skb->data_len = rx->status;
 			skb->len += rx->status;

-			i = xennet_fill_frags(queue, skb, &tmpq);
-			if (unlikely(i == ~0U))
+			if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
 				goto err;

 			if (rx->flags & XEN_NETRXF_csum_blank)

@@ -1059,7 +1060,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)

 			__skb_queue_tail(&rxq, skb);

-			queue->rx.rsp_cons = ++i;
+			i = ++queue->rx.rsp_cons;
 			work_done++;
 		}

@@ -465,7 +465,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)

 	ep->phy = devm_of_phy_get(dev, np, NULL);
 	if (IS_ERR(ep->phy)) {
-		if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
+		if (PTR_ERR(ep->phy) != -ENODEV)
 			return PTR_ERR(ep->phy);

 		ep->phy = NULL;

@@ -807,8 +807,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)

 	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
 	if (IS_ERR(imx6_pcie->vpcie)) {
-		if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
+			return PTR_ERR(imx6_pcie->vpcie);
 		imx6_pcie->vpcie = NULL;
 	}

@@ -340,8 +340,8 @@ static int histb_pcie_probe(struct platform_device *pdev)

 	hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie");
 	if (IS_ERR(hipcie->vpcie)) {
-		if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(hipcie->vpcie) != -ENODEV)
+			return PTR_ERR(hipcie->vpcie);
 		hipcie->vpcie = NULL;
 	}

@@ -1975,14 +1975,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		err = of_pci_get_devfn(port);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}

 		index = PCI_SLOT(err);

 		if (index < 1 || index > soc->num_ports) {
 			dev_err(dev, "invalid port number: %d\n", index);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}

 		index--;

@@ -1991,12 +1992,13 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		if (err < 0) {
 			dev_err(dev, "failed to parse # of lanes: %d\n",
 				err);
-			return err;
+			goto err_node_put;
 		}

 		if (value > 16) {
 			dev_err(dev, "invalid # of lanes: %u\n", value);
-			return -EINVAL;
+			err = -EINVAL;
+			goto err_node_put;
 		}

 		lanes |= value << (index << 3);

@@ -2010,13 +2012,15 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		lane += value;

 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
-		if (!rp)
-			return -ENOMEM;
+		if (!rp) {
+			err = -ENOMEM;
+			goto err_node_put;
+		}

 		err = of_address_to_resource(port, 0, &rp->regs);
 		if (err < 0) {
 			dev_err(dev, "failed to parse address: %d\n", err);
-			return err;
+			goto err_node_put;
 		}

 		INIT_LIST_HEAD(&rp->list);

@@ -2043,6 +2047,10 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
 		return err;

 	return 0;
+
+err_node_put:
+	of_node_put(port);
+	return err;
 }

 /*

@@ -608,29 +608,29 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)

 	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
 	if (IS_ERR(rockchip->vpcie12v)) {
-		if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie12v);
 		dev_info(dev, "no vpcie12v regulator found\n");
 	}

 	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
 	if (IS_ERR(rockchip->vpcie3v3)) {
-		if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie3v3);
 		dev_info(dev, "no vpcie3v3 regulator found\n");
 	}

 	rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
 	if (IS_ERR(rockchip->vpcie1v8)) {
-		if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie1v8);
 		dev_info(dev, "no vpcie1v8 regulator found\n");
 	}

 	rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
 	if (IS_ERR(rockchip->vpcie0v9)) {
-		if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
+		if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
+			return PTR_ERR(rockchip->vpcie0v9);
 		dev_info(dev, "no vpcie0v9 regulator found\n");
 	}

@@ -230,7 +230,7 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,
 	struct of_drc_info drc;
 	const __be32 *value;
 	char cell_drc_name[MAX_DRC_NAME_LEN];
-	int j, fndit;
+	int j;

 	info = of_find_property(dn->parent, "ibm,drc-info", NULL);
 	if (info == NULL)

@@ -245,17 +245,13 @@ static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name,

 		/* Should now know end of current entry */

-		if (my_index > drc.last_drc_index)
-			continue;
-
-		fndit = 1;
-		break;
+		/* Found it */
+		if (my_index <= drc.last_drc_index) {
+			sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
+				my_index);
+			break;
+		}
 	}
-	/* Found it */
-
-	if (fndit)
-		sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix,
-			my_index);

 	if (((drc_name == NULL) ||
 	     (drc_name && !strcmp(drc_name, cell_drc_name))) &&

@@ -198,8 +198,8 @@ static const unsigned int uart_rts_b_pins[] = { GPIODV_27 };

 static const unsigned int uart_tx_c_pins[] = { GPIOY_13 };
 static const unsigned int uart_rx_c_pins[] = { GPIOY_14 };
-static const unsigned int uart_cts_c_pins[] = { GPIOX_11 };
-static const unsigned int uart_rts_c_pins[] = { GPIOX_12 };
+static const unsigned int uart_cts_c_pins[] = { GPIOY_11 };
+static const unsigned int uart_rts_c_pins[] = { GPIOY_12 };

 static const unsigned int i2c_sck_a_pins[] = { GPIODV_25 };
 static const unsigned int i2c_sda_a_pins[] = { GPIODV_24 };

@@ -445,10 +445,10 @@ static struct meson_pmx_group meson_gxbb_periphs_groups[] = {
 	GROUP(pwm_f_x,		3,	18),

 	/* Bank Y */
-	GROUP(uart_cts_c,	1,	19),
-	GROUP(uart_rts_c,	1,	18),
-	GROUP(uart_tx_c,	1,	17),
-	GROUP(uart_rx_c,	1,	16),
+	GROUP(uart_cts_c,	1,	17),
+	GROUP(uart_rts_c,	1,	16),
+	GROUP(uart_tx_c,	1,	19),
+	GROUP(uart_rx_c,	1,	18),
 	GROUP(pwm_a_y,		1,	21),
 	GROUP(pwm_f_y,		1,	20),
 	GROUP(i2s_out_ch23_y,	1,	5),

@@ -569,15 +569,25 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
 			    !(regval & BIT(INTERRUPT_MASK_OFF)))
 				continue;
 			irq = irq_find_mapping(gc->irq.domain, irqnr + i);
-			generic_handle_irq(irq);
+			if (irq != 0)
+				generic_handle_irq(irq);

 			/* Clear interrupt.
 			 * We must read the pin register again, in case the
 			 * value was changed while executing
 			 * generic_handle_irq() above.
+			 * If we didn't find a mapping for the interrupt,
+			 * disable it in order to avoid a system hang caused
+			 * by an interrupt storm.
 			 */
 			raw_spin_lock_irqsave(&gpio_dev->lock, flags);
 			regval = readl(regs + i);
+			if (irq == 0) {
+				regval &= ~BIT(INTERRUPT_ENABLE_OFF);
+				dev_dbg(&gpio_dev->pdev->dev,
+					"Disabling spurious GPIO IRQ %d\n",
+					irqnr + i);
+			}
 			writel(regval, regs + i);
 			raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 			ret = IRQ_HANDLED;

@@ -40,7 +40,9 @@ static inline u32 pmx_readl(struct tegra_pmx *pmx, u32 bank, u32 reg)

 static inline void pmx_writel(struct tegra_pmx *pmx, u32 val, u32 bank, u32 reg)
 {
-	writel(val, pmx->regs[bank] + reg);
+	writel_relaxed(val, pmx->regs[bank] + reg);
+	/* make sure pinmux register write completed */
+	pmx_readl(pmx, bank, reg);
 }

 static int tegra_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
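The pmx_writel() change just above is the standard fix for MMIO writes that sit posted on the bus: writel() orders the write against other accesses but does not wait for it to land, so the patch pairs a relaxed write with a read-back of the same register to force completion. A hedged, generic sketch of the idiom (the base pointer, offset and value are illustrative, not Tegra's):

    /* Write a config register and stall until the device has accepted it.
     * writel_relaxed() skips the heavyweight barrier; the readl() of the
     * same register cannot complete before the posted write does.
     */
    static inline void cfg_write_flushed(void __iomem *base, u32 val)
    {
        writel_relaxed(val, base + 0x40);   /* 0x40: example offset */
        (void)readl(base + 0x40);           /* read back: flush posted write */
    }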
@@ -169,7 +169,12 @@ static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm)
 	buf[DT_YEARS] = bin2bcd(tm->tm_year % 100);

 	ret = regmap_bulk_write(pcf85363->regmap, CTRL_STOP_EN,
-				tmp, sizeof(tmp));
+				tmp, 2);
+	if (ret)
+		return ret;
+
+	ret = regmap_bulk_write(pcf85363->regmap, DT_100THS,
+				buf, sizeof(tmp) - 2);
 	if (ret)
 		return ret;

@@ -273,6 +273,10 @@ static int snvs_rtc_probe(struct platform_device *pdev)
 	if (!data)
 		return -ENOMEM;

+	data->rtc = devm_rtc_allocate_device(&pdev->dev);
+	if (IS_ERR(data->rtc))
+		return PTR_ERR(data->rtc);
+
 	data->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");

 	if (IS_ERR(data->regmap)) {

@@ -335,10 +339,9 @@ static int snvs_rtc_probe(struct platform_device *pdev)
 		goto error_rtc_device_register;
 	}

-	data->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
-					&snvs_rtc_ops, THIS_MODULE);
-	if (IS_ERR(data->rtc)) {
-		ret = PTR_ERR(data->rtc);
+	data->rtc->ops = &snvs_rtc_ops;
+	ret = rtc_register_device(data->rtc);
+	if (ret) {
 		dev_err(&pdev->dev, "failed to register rtc: %d\n", ret);
 		goto error_rtc_device_register;
 	}

@@ -16,57 +16,15 @@
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_dbg.h>

-#define SCSI_LOG_SPOOLSIZE 4096
-
-#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG
-#warning SCSI logging bitmask too large
-#endif
-
-struct scsi_log_buf {
-	char buffer[SCSI_LOG_SPOOLSIZE];
-	unsigned long map;
-};
-
-static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log);
-
 static char *scsi_log_reserve_buffer(size_t *len)
 {
-	struct scsi_log_buf *buf;
-	unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE;
-	unsigned long idx = 0;
-
-	preempt_disable();
-	buf = this_cpu_ptr(&scsi_format_log);
-	idx = find_first_zero_bit(&buf->map, map_bits);
-	if (likely(idx < map_bits)) {
-		while (test_and_set_bit(idx, &buf->map)) {
-			idx = find_next_zero_bit(&buf->map, map_bits, idx);
-			if (idx >= map_bits)
-				break;
-		}
-	}
-	if (WARN_ON(idx >= map_bits)) {
-		preempt_enable();
-		return NULL;
-	}
-	*len = SCSI_LOG_BUFSIZE;
-	return buf->buffer + idx * SCSI_LOG_BUFSIZE;
+	*len = 128;
+	return kmalloc(*len, GFP_ATOMIC);
 }

 static void scsi_log_release_buffer(char *bufptr)
 {
-	struct scsi_log_buf *buf;
-	unsigned long idx;
-	int ret;
-
-	buf = this_cpu_ptr(&scsi_format_log);
-	if (bufptr >= buf->buffer &&
-	    bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) {
-		idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE;
-		ret = test_and_clear_bit(idx, &buf->map);
-		WARN_ON(!ret);
-	}
-	preempt_enable();
+	kfree(bufptr);
 }

 static inline const char *scmd_name(const struct scsi_cmnd *scmd)

@@ -3,8 +3,8 @@
 #

 menuconfig SOUNDWIRE
-	bool "SoundWire support"
-	---help---
+	tristate "SoundWire support"
+	help
 	  SoundWire is a 2-Pin interface with data and clock line ratified
 	  by the MIPI Alliance. SoundWire is used for transporting data
 	  typically related to audio functions. SoundWire interface is

@@ -16,17 +16,12 @@ if SOUNDWIRE

 comment "SoundWire Devices"

-config SOUNDWIRE_BUS
-	tristate
-	select REGMAP_SOUNDWIRE
-
 config SOUNDWIRE_CADENCE
 	tristate

 config SOUNDWIRE_INTEL
 	tristate "Intel SoundWire Master driver"
 	select SOUNDWIRE_CADENCE
-	select SOUNDWIRE_BUS
 	depends on X86 && ACPI && SND_SOC
 	---help---
 	  SoundWire Intel Master driver.

@@ -4,7 +4,7 @@

 #Bus Objs
 soundwire-bus-objs := bus_type.o bus.o slave.o mipi_disco.o stream.o
-obj-$(CONFIG_SOUNDWIRE_BUS) += soundwire-bus.o
+obj-$(CONFIG_SOUNDWIRE) += soundwire-bus.o

 #Cadence Objs
 soundwire-cadence-objs := cadence_master.o

@@ -282,6 +282,16 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)

 	if (pcm) {
 		count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
+
+		/*
+		 * WORKAROUND: on all existing Intel controllers, pdi
+		 * number 2 reports channel count as 1 even though it
+		 * supports 8 channels. Performing hardcoding for pdi
+		 * number 2.
+		 */
+		if (pdi_num == 2)
+			count = 7;
+
 	} else {
 		count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
 		count = ((count & SDW_SHIM_PDMSCAP_CPSS) >>

@@ -373,11 +373,20 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

 	/*
-	 * Try to reset the device.  The success of this is dependent on
-	 * being able to lock the device, which is not always possible.
+	 * Try to get the locks ourselves to prevent a deadlock. The
+	 * success of this is dependent on being able to lock the device,
+	 * which is not always possible.
+	 * We can not use the "try" reset interface here, which will
+	 * overwrite the previously restored configuration information.
 	 */
-	if (vdev->reset_works && !pci_try_reset_function(pdev))
-		vdev->needs_reset = false;
+	if (vdev->reset_works && pci_cfg_access_trylock(pdev)) {
+		if (device_trylock(&pdev->dev)) {
+			if (!__pci_reset_function_locked(pdev))
+				vdev->needs_reset = false;
+			device_unlock(&pdev->dev);
+		}
+		pci_cfg_access_unlock(pdev);
+	}

 	pci_restore_state(pdev);
out:
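The rewritten vfio block replaces pci_try_reset_function() with an open-coded trylock ladder so the just-restored config space is not clobbered. The shape of that idiom, sketched generically (do_reset() is a placeholder for the real work; the two lock APIs are the ones the patch actually uses):

    /* Take outer then inner lock opportunistically; only act when both
     * are held, and unwind in reverse order. Skipping rather than
     * blocking avoids deadlock against paths that lock in the same
     * order but may sleep.
     */
    if (pci_cfg_access_trylock(pdev)) {
        if (device_trylock(&pdev->dev)) {
            do_reset(pdev);             /* placeholder for the real work */
            device_unlock(&pdev->dev);
        }
        pci_cfg_access_unlock(pdev);
    }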
@@ -433,7 +433,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par)
 	if (ret < 0)
 		return ret;

-	ret = ssd1307fb_write_cmd(par->client, 0x0);
+	ret = ssd1307fb_write_cmd(par->client, par->page_offset);
 	if (ret < 0)
 		return ret;

@@ -66,6 +66,8 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 	if (!v9ses->cachetag) {
 		if (v9fs_random_cachetag(v9ses) < 0) {
 			v9ses->fscache = NULL;
+			kfree(v9ses->cachetag);
+			v9ses->cachetag = NULL;
 			return;
 		}
 	}

@@ -38,6 +38,7 @@ int __init ext4_init_system_zone(void)

 void ext4_exit_system_zone(void)
 {
+	rcu_barrier();
 	kmem_cache_destroy(ext4_system_zone_cachep);
 }

@@ -49,17 +50,26 @@ static inline int can_merge(struct ext4_system_zone *entry1,
 	return 0;
 }

+static void release_system_zone(struct ext4_system_blocks *system_blks)
+{
+	struct ext4_system_zone *entry, *n;
+
+	rbtree_postorder_for_each_entry_safe(entry, n,
+				&system_blks->root, node)
+		kmem_cache_free(ext4_system_zone_cachep, entry);
+}
+
 /*
  * Mark a range of blocks as belonging to the "system zone" --- that
  * is, filesystem metadata blocks which should never be used by
  * inodes.
  */
-static int add_system_zone(struct ext4_sb_info *sbi,
+static int add_system_zone(struct ext4_system_blocks *system_blks,
 			   ext4_fsblk_t start_blk,
 			   unsigned int count)
 {
 	struct ext4_system_zone *new_entry = NULL, *entry;
-	struct rb_node **n = &sbi->system_blks.rb_node, *node;
+	struct rb_node **n = &system_blks->root.rb_node, *node;
 	struct rb_node *parent = NULL, *new_node = NULL;

 	while (*n) {

@@ -91,7 +101,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		new_node = &new_entry->node;

 		rb_link_node(new_node, parent, n);
-		rb_insert_color(new_node, &sbi->system_blks);
+		rb_insert_color(new_node, &system_blks->root);
 	}

 	/* Can we merge to the left? */

@@ -101,7 +111,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		if (can_merge(entry, new_entry)) {
 			new_entry->start_blk = entry->start_blk;
 			new_entry->count += entry->count;
-			rb_erase(node, &sbi->system_blks);
+			rb_erase(node, &system_blks->root);
 			kmem_cache_free(ext4_system_zone_cachep, entry);
 		}
 	}

@@ -112,7 +122,7 @@ static int add_system_zone(struct ext4_sb_info *sbi,
 		entry = rb_entry(node, struct ext4_system_zone, node);
 		if (can_merge(new_entry, entry)) {
 			new_entry->count += entry->count;
-			rb_erase(node, &sbi->system_blks);
+			rb_erase(node, &system_blks->root);
 			kmem_cache_free(ext4_system_zone_cachep, entry);
 		}
 	}

@@ -126,7 +136,7 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
 	int first = 1;

 	printk(KERN_INFO "System zones: ");
-	node = rb_first(&sbi->system_blks);
+	node = rb_first(&sbi->system_blks->root);
 	while (node) {
 		entry = rb_entry(node, struct ext4_system_zone, node);
 		printk(KERN_CONT "%s%llu-%llu", first ? "" : ", ",

@@ -137,7 +147,47 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
 	printk(KERN_CONT "\n");
 }

-static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
+/*
+ * Returns 1 if the passed-in block region (start_blk,
+ * start_blk+count) is valid; 0 if some part of the block region
+ * overlaps with filesystem metadata blocks.
+ */
+static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
+				     struct ext4_system_blocks *system_blks,
+				     ext4_fsblk_t start_blk,
+				     unsigned int count)
+{
+	struct ext4_system_zone *entry;
+	struct rb_node *n;
+
+	if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+	    (start_blk + count < start_blk) ||
+	    (start_blk + count > ext4_blocks_count(sbi->s_es))) {
+		sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+		return 0;
+	}
+
+	if (system_blks == NULL)
+		return 1;
+
+	n = system_blks->root.rb_node;
+	while (n) {
+		entry = rb_entry(n, struct ext4_system_zone, node);
+		if (start_blk + count - 1 < entry->start_blk)
+			n = n->rb_left;
+		else if (start_blk >= (entry->start_blk + entry->count))
+			n = n->rb_right;
+		else {
+			sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+static int ext4_protect_reserved_inode(struct super_block *sb,
+				       struct ext4_system_blocks *system_blks,
+				       u32 ino)
 {
 	struct inode *inode;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);

@@ -163,14 +213,15 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
 		if (n == 0) {
 			i++;
 		} else {
-			if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
+			if (!ext4_data_block_valid_rcu(sbi, system_blks,
+						map.m_pblk, n)) {
 				ext4_error(sb, "blocks %llu-%llu from inode %u "
 					   "overlap system zone", map.m_pblk,
 					   map.m_pblk + map.m_len - 1, ino);
 				err = -EFSCORRUPTED;
 				break;
 			}
-			err = add_system_zone(sbi, map.m_pblk, n);
+			err = add_system_zone(system_blks, map.m_pblk, n);
 			if (err < 0)
 				break;
 			i += n;

@@ -180,93 +231,129 @@ static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
 	return err;
 }

+static void ext4_destroy_system_zone(struct rcu_head *rcu)
+{
+	struct ext4_system_blocks *system_blks;
+
+	system_blks = container_of(rcu, struct ext4_system_blocks, rcu);
+	release_system_zone(system_blks);
+	kfree(system_blks);
+}
+
+/*
+ * Build system zone rbtree which is used for block validity checking.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. That's why we first build the rbtree and then
+ * swap it in place.
+ */
 int ext4_setup_system_zone(struct super_block *sb)
 {
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct ext4_system_blocks *system_blks;
 	struct ext4_group_desc *gdp;
 	ext4_group_t i;
 	int flex_size = ext4_flex_bg_size(sbi);
 	int ret;

 	if (!test_opt(sb, BLOCK_VALIDITY)) {
-		if (sbi->system_blks.rb_node)
+		if (sbi->system_blks)
 			ext4_release_system_zone(sb);
 		return 0;
 	}
-	if (sbi->system_blks.rb_node)
+	if (sbi->system_blks)
 		return 0;

+	system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
+	if (!system_blks)
+		return -ENOMEM;
+
 	for (i=0; i < ngroups; i++) {
 		if (ext4_bg_has_super(sb, i) &&
 		    ((i < 5) || ((i % flex_size) == 0)))
-			add_system_zone(sbi, ext4_group_first_block_no(sb, i),
+			add_system_zone(system_blks,
+					ext4_group_first_block_no(sb, i),
 					ext4_bg_num_gdb(sb, i) + 1);
 		gdp = ext4_get_group_desc(sb, i, NULL);
-		ret = add_system_zone(sbi, ext4_block_bitmap(sb, gdp), 1);
+		ret = add_system_zone(system_blks,
+				ext4_block_bitmap(sb, gdp), 1);
 		if (ret)
-			return ret;
-		ret = add_system_zone(sbi, ext4_inode_bitmap(sb, gdp), 1);
+			goto err;
+		ret = add_system_zone(system_blks,
+				ext4_inode_bitmap(sb, gdp), 1);
 		if (ret)
-			return ret;
-		ret = add_system_zone(sbi, ext4_inode_table(sb, gdp),
+			goto err;
+		ret = add_system_zone(system_blks,
+				ext4_inode_table(sb, gdp),
 				sbi->s_itb_per_group);
 		if (ret)
-			return ret;
+			goto err;
 	}
 	if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
-		ret = ext4_protect_reserved_inode(sb,
+		ret = ext4_protect_reserved_inode(sb, system_blks,
 				le32_to_cpu(sbi->s_es->s_journal_inum));
 		if (ret)
-			return ret;
+			goto err;
 	}

+	/*
+	 * System blks rbtree complete, announce it once to prevent racing
+	 * with ext4_data_block_valid() accessing the rbtree at the same
+	 * time.
+	 */
+	rcu_assign_pointer(sbi->system_blks, system_blks);
+
 	if (test_opt(sb, DEBUG))
 		debug_print_tree(sbi);
 	return 0;
+err:
+	release_system_zone(system_blks);
+	kfree(system_blks);
+	return ret;
 }

-/* Called when the filesystem is unmounted */
+/*
+ * Called when the filesystem is unmounted or when remounting it with
+ * noblock_validity specified.
+ *
+ * The update of system_blks pointer in this function is protected by
+ * sb->s_umount semaphore. However we have to be careful as we can be
+ * racing with ext4_data_block_valid() calls reading system_blks rbtree
+ * protected only by RCU. So we first clear the system_blks pointer and
+ * then free the rbtree only after RCU grace period expires.
+ */
 void ext4_release_system_zone(struct super_block *sb)
 {
-	struct ext4_system_zone *entry, *n;
+	struct ext4_system_blocks *system_blks;

-	rbtree_postorder_for_each_entry_safe(entry, n,
-			&EXT4_SB(sb)->system_blks, node)
-		kmem_cache_free(ext4_system_zone_cachep, entry);
-
-	EXT4_SB(sb)->system_blks = RB_ROOT;
+	system_blks = rcu_dereference_protected(EXT4_SB(sb)->system_blks,
+					lockdep_is_held(&sb->s_umount));
+	rcu_assign_pointer(EXT4_SB(sb)->system_blks, NULL);
+
+	if (system_blks)
+		call_rcu(&system_blks->rcu, ext4_destroy_system_zone);
 }

-/*
- * Returns 1 if the passed-in block region (start_blk,
- * start_blk+count) is valid; 0 if some part of the block region
- * overlaps with filesystem metadata blocks.
- */
 int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
 			  unsigned int count)
 {
-	struct ext4_system_zone *entry;
-	struct rb_node *n = sbi->system_blks.rb_node;
+	struct ext4_system_blocks *system_blks;
+	int ret;

-	if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
-	    (start_blk + count < start_blk) ||
-	    (start_blk + count > ext4_blocks_count(sbi->s_es))) {
-		sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
-		return 0;
-	}
-	while (n) {
-		entry = rb_entry(n, struct ext4_system_zone, node);
-		if (start_blk + count - 1 < entry->start_blk)
-			n = n->rb_left;
-		else if (start_blk >= (entry->start_blk + entry->count))
-			n = n->rb_right;
-		else {
-			sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
-			return 0;
-		}
-	}
-	return 1;
+	/*
+	 * Lock the system zone to prevent it being released concurrently
+	 * when doing a remount which inverse current "[no]block_validity"
+	 * mount option.
+	 */
+	rcu_read_lock();
+	system_blks = rcu_dereference(sbi->system_blks);
+	ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk,
+					count);
+	rcu_read_unlock();
+	return ret;
 }

 int ext4_check_blockref(const char *function, unsigned int line,

@@ -194,6 +194,14 @@ struct ext4_map_blocks {
 	unsigned int m_flags;
 };

+/*
+ * Block validity checking, system zone rbtree.
+ */
+struct ext4_system_blocks {
+	struct rb_root root;
+	struct rcu_head rcu;
+};
+
 /*
  * Flags for ext4_io_end->flags
  */

@@ -1409,7 +1417,7 @@ struct ext4_sb_info {
 	int s_jquota_fmt;			/* Format of quota to use */
#endif
 	unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
-	struct rb_root system_blks;
+	struct ext4_system_blocks __rcu *system_blks;

 #ifdef EXTENTS_STATS
 	/* ext4 extents stats */
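The ext4 change above is a textbook RCU publish/retire sequence: build the whole rbtree off to the side, publish it with rcu_assign_pointer(), and on teardown clear the pointer and defer the free with call_rcu() so lockless readers never observe a half-built or freed tree. A stripped-down sketch of just that pattern (all names here are illustrative, not ext4's):

    struct zone_set {
        struct rb_root root;
        struct rcu_head rcu;
    };
    static struct zone_set __rcu *active_zones;

    static void zone_set_free(struct rcu_head *rcu)
    {
        /* runs after a grace period: no reader still holds a pointer */
        kfree(container_of(rcu, struct zone_set, rcu));
    }

    static void publish_zones(struct zone_set *fresh)
    {
        struct zone_set *old;

        /* assumes the caller serializes updaters, as s_umount does in ext4 */
        old = rcu_dereference_protected(active_zones, 1);
        rcu_assign_pointer(active_zones, fresh);   /* readers switch atomically */
        if (old)
            call_rcu(&old->rcu, zone_set_free);    /* freed after grace period */
    }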
fs/fat/dir.c | 13

@@ -1097,8 +1097,11 @@ static int fat_zeroed_cluster(struct inode *dir, sector_t blknr, int nr_used,
 			err = -ENOMEM;
 			goto error;
 		}
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(bhs[n]);
 		memset(bhs[n]->b_data, 0, sb->s_blocksize);
 		set_buffer_uptodate(bhs[n]);
+		unlock_buffer(bhs[n]);
 		mark_buffer_dirty_inode(bhs[n], dir);

 		n++;

@@ -1155,6 +1158,8 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);

 	de = (struct msdos_dir_entry *)bhs[0]->b_data;
+	/* Avoid race with userspace read via bdev */
+	lock_buffer(bhs[0]);
 	/* filling the new directory slots ("." and ".." entries) */
 	memcpy(de[0].name, MSDOS_DOT, MSDOS_NAME);
 	memcpy(de[1].name, MSDOS_DOTDOT, MSDOS_NAME);

@@ -1177,6 +1182,7 @@ int fat_alloc_new_dir(struct inode *dir, struct timespec64 *ts)
 	de[0].size = de[1].size = 0;
 	memset(de + 2, 0, sb->s_blocksize - 2 * sizeof(*de));
 	set_buffer_uptodate(bhs[0]);
+	unlock_buffer(bhs[0]);
 	mark_buffer_dirty_inode(bhs[0], dir);

 	err = fat_zeroed_cluster(dir, blknr, 1, bhs, MAX_BUF_PER_PAGE);

@@ -1234,11 +1240,14 @@ static int fat_add_new_entries(struct inode *dir, void *slots, int nr_slots,

 		/* fill the directory entry */
 		copy = min(size, sb->s_blocksize);
+		/* Avoid race with userspace read via bdev */
+		lock_buffer(bhs[n]);
 		memcpy(bhs[n]->b_data, slots, copy);
+		set_buffer_uptodate(bhs[n]);
+		unlock_buffer(bhs[n]);
+		mark_buffer_dirty_inode(bhs[n], dir);
 		slots += copy;
 		size -= copy;
-		set_buffer_uptodate(bhs[n]);
-		mark_buffer_dirty_inode(bhs[n], dir);
 		if (!size)
 			break;
 		n++;

@@ -390,8 +390,11 @@ static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
 				err = -ENOMEM;
 				goto error;
 			}
+			/* Avoid race with userspace read via bdev */
+			lock_buffer(c_bh);
 			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
 			set_buffer_uptodate(c_bh);
+			unlock_buffer(c_bh);
 			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
 			if (sb->s_flags & SB_SYNCHRONOUS)
 				err = sync_dirty_buffer(c_bh);

@@ -105,7 +105,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 	enum dlm_status status;
 	int actions = 0;
 	int in_use;
-	u8 owner;
+	u8 owner;
+	int recovery_wait = 0;

 	mlog(0, "master_node = %d, valblk = %d\n", master_node,
 	     flags & LKM_VALBLK);

@@ -208,9 +209,12 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 		}
 		if (flags & LKM_CANCEL)
 			lock->cancel_pending = 0;
-		else
-			lock->unlock_pending = 0;
-
+		else {
+			if (!lock->unlock_pending)
+				recovery_wait = 1;
+			else
+				lock->unlock_pending = 0;
+		}
 	}

 	/* get an extra ref on lock. if we are just switching

@@ -244,6 +248,17 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
 	spin_unlock(&res->spinlock);
 	wake_up(&res->wq);

+	if (recovery_wait) {
+		spin_lock(&res->spinlock);
+		/* Unlock request will directly succeed after owner dies,
+		 * and the lock is already removed from grant list. We have to
+		 * wait for RECOVERING done or we miss the chance to purge it
+		 * since the removement is much faster than RECOVERING proc.
+		 */
+		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
+		spin_unlock(&res->spinlock);
+	}
+
 	/* let the caller's final dlm_lock_put handle the actual kfree */
 	if (actions & DLM_UNLOCK_FREE_LOCK) {
 		/* this should always be coupled with list removal */

@@ -162,6 +162,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
 	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
 		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
 		   &header_length) == 3) {
+		time->tv_nsec *= 1000;
 		if (data_type == 'C')
 			*compressed = true;
 		else

@@ -169,6 +170,7 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
 	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
 			  (time64_t *)&time->tv_sec, &time->tv_nsec,
 			  &header_length) == 2) {
+		time->tv_nsec *= 1000;
 		*compressed = false;
 	} else {
 		time->tv_sec = 0;

@@ -6,8 +6,6 @@ struct scsi_cmnd;
 struct scsi_device;
 struct scsi_sense_hdr;

-#define SCSI_LOG_BUFSIZE 128
-
 extern void scsi_print_command(struct scsi_cmnd *);
 extern size_t __scsi_format_command(char *, size_t,
 				   const unsigned char *, size_t);

@@ -1073,7 +1073,7 @@ TRACE_EVENT(rxrpc_recvmsg,
 	    ),

 	    TP_fast_assign(
-		    __entry->call = call->debug_id;
+		    __entry->call = call ? call->debug_id : 0;
 		    __entry->why = why;
 		    __entry->seq = seq;
 		    __entry->offset = offset;

@@ -1454,19 +1454,25 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err)
 		goto free_used_maps;

-	err = bpf_prog_new_fd(prog);
-	if (err < 0) {
-		/* failed to allocate fd.
-		 * bpf_prog_put() is needed because the above
-		 * bpf_prog_alloc_id() has published the prog
-		 * to the userspace and the userspace may
-		 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-		 */
-		bpf_prog_put(prog);
-		return err;
-	}
-
+	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
+	 * effectively publicly exposed. However, retrieving via
+	 * bpf_prog_get_fd_by_id() will take another reference,
+	 * therefore it cannot be gone underneath us.
+	 *
+	 * Only for the time /after/ successful bpf_prog_new_fd()
+	 * and before returning to userspace, we might just hold
+	 * one reference and any parallel close on that fd could
+	 * rip everything out. Hence, below notifications must
+	 * happen before bpf_prog_new_fd().
+	 *
+	 * Also, any failure handling from this point onwards must
+	 * be using bpf_prog_put() given the program is exposed.
+	 */
 	bpf_prog_kallsyms_add(prog);
+
+	err = bpf_prog_new_fd(prog);
+	if (err < 0)
+		bpf_prog_put(prog);
 	return err;

 free_used_maps:

@@ -301,6 +301,8 @@ static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page *pages;

+	if (fatal_signal_pending(current))
+		return NULL;
 	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 	if (pages) {
 		unsigned int count, i;

@@ -1027,6 +1027,7 @@ int klp_module_coming(struct module *mod)
 	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
 		patch->mod->name, obj->mod->name, obj->mod->name);
 	mod->klp_alive = false;
+	obj->mod = NULL;
 	klp_cleanup_module_patches_limited(mod, patch);
 	mutex_unlock(&klp_mutex);

@@ -570,7 +570,7 @@ config DEBUG_KMEMLEAK_EARLY_LOG_SIZE
 	int "Maximum kmemleak early log entries"
 	depends on DEBUG_KMEMLEAK
 	range 200 40000
-	default 400
+	default 16000
 	help
 	  Kmemleak must track all the memory allocations to avoid
 	  reporting false positives. Since memory may be allocated or

@@ -1563,8 +1563,6 @@ static void __sk_destruct(struct rcu_head *head)
 		sk_filter_uncharge(sk, filter);
 		RCU_INIT_POINTER(sk->sk_filter, NULL);
 	}
-	if (rcu_access_pointer(sk->sk_reuseport_cb))
-		reuseport_detach_sock(sk);

 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

@@ -1587,7 +1585,14 @@ static void __sk_destruct(struct rcu_head *head)

 void sk_destruct(struct sock *sk)
 {
-	if (sock_flag(sk, SOCK_RCU_FREE))
+	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+		reuseport_detach_sock(sk);
+		use_call_rcu = true;
+	}
+
+	if (use_call_rcu)
 		call_rcu(&sk->sk_rcu, __sk_destruct);
 	else
 		__sk_destruct(&sk->sk_rcu);

@@ -1531,6 +1531,7 @@ static void erspan_setup(struct net_device *dev)
 	struct ip_tunnel *t = netdev_priv(dev);

 	ether_setup(dev);
+	dev->max_mtu = 0;
 	dev->netdev_ops = &erspan_netdev_ops;
 	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

@@ -908,16 +908,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	if (peer->rate_tokens == 0 ||
 	    time_after(jiffies,
 		       (peer->rate_last +
-			(ip_rt_redirect_load << peer->rate_tokens)))) {
+			(ip_rt_redirect_load << peer->n_redirects)))) {
 		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
-		++peer->rate_tokens;
+		++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    peer->rate_tokens == ip_rt_redirect_number)
+		    peer->n_redirects == ip_rt_redirect_number)
 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 					     &ip_hdr(skb)->saddr, inet_iif(skb),
 					     &ip_hdr(skb)->daddr, &gw);