Merge 4.19.133 into android-4.19-stable
Changes in 4.19.133
    KVM: s390: reduce number of IO pins to 1
    spi: spi-fsl-dspi: Adding shutdown hook
    spi: spi-fsl-dspi: Fix lockup if device is removed during SPI transfer
    spi: spi-fsl-dspi: use IRQF_SHARED mode to request IRQ
    spi: spi-fsl-dspi: Fix external abort on interrupt in resume or exit paths
    regmap: fix alignment issue
    ARM: dts: omap4-droid4: Fix spi configuration and increase rate
    drm/tegra: hub: Do not enable orphaned window group
    gpu: host1x: Detach driver on unregister
    spi: spidev: fix a race between spidev_release and spidev_remove
    spi: spidev: fix a potential use-after-free in spidev_release()
    ixgbe: protect ring accesses with READ- and WRITE_ONCE
    i40e: protect ring accesses with READ- and WRITE_ONCE
    drm: panel-orientation-quirks: Add quirk for Asus T101HA panel
    drm: panel-orientation-quirks: Use generic orientation-data for Acer S1003
    s390/kasan: fix early pgm check handler execution
    cifs: update ctime and mtime during truncate
    ARM: imx6: add missing put_device() call in imx6q_suspend_init()
    scsi: mptscsih: Fix read sense data size
    usb: dwc3: pci: Fix reference count leak in dwc3_pci_resume_work
    block: release bip in a right way in error path
    nvme-rdma: assign completion vector correctly
    x86/entry: Increase entry_stack size to a full page
    net: qrtr: Fix an out of bounds read qrtr_endpoint_post()
    drm/mediatek: Check plane visibility in atomic_update
    net: cxgb4: fix return error value in t4_prep_fw
    smsc95xx: check return value of smsc95xx_reset
    smsc95xx: avoid memory leak in smsc95xx_bind
    net: hns3: fix use-after-free when doing self test
    ALSA: compress: fix partial_drain completion state
    arm64: kgdb: Fix single-step exception handling oops
    nbd: Fix memory leak in nbd_add_socket
    cxgb4: fix all-mask IP address comparison
    bnxt_en: fix NULL dereference in case SR-IOV configuration fails
    net: macb: mark device wake capable when "magic-packet" property present
    mlxsw: spectrum_router: Remove inappropriate usage of WARN_ON()
    ALSA: opl3: fix infoleak in opl3
    ALSA: hda - let hs_mic be picked ahead of hp_mic
    ALSA: usb-audio: add quirk for MacroSilicon MS2109
    KVM: arm64: Fix definition of PAGE_HYP_DEVICE
    KVM: arm64: Stop clobbering x0 for HVC_SOFT_RESTART
    KVM: x86: bit 8 of non-leaf PDPEs is not reserved
    KVM: x86: Inject #GP if guest attempts to toggle CR4.LA57 in 64-bit mode
    KVM: x86: Mark CR4.TSD as being possibly owned by the guest
    kallsyms: Refactor kallsyms_show_value() to take cred
    kernel: module: Use struct_size() helper
    module: Refactor section attr into bin attribute
    module: Do not expose section addresses to non-CAP_SYSLOG
    kprobes: Do not expose probe addresses to non-CAP_SYSLOG
    bpf: Check correct cred for CAP_SYSLOG in bpf_dump_raw_ok()
    Revert "ath9k: Fix general protection fault in ath9k_hif_usb_rx_cb"
    btrfs: fix fatal extent_buffer readahead vs releasepage race
    drm/radeon: fix double free
    dm: use noio when sending kobject event
    ARC: entry: fix potential EFA clobber when TIF_SYSCALL_TRACE
    ARC: elf: use right ELF_ARCH
    s390/mm: fix huge pte soft dirty copying
    Linux 4.19.133

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I0a0198d501017d2bc701b653d75dc9cedd1ebbd9
commit 588b1f9c92

57 changed files with 478 additions and 301 deletions

Makefile | 2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 132
+SUBLEVEL = 133
 EXTRAVERSION =
 NAME = "People's Front"
@@ -26,7 +26,7 @@
 #define R_ARC_32_PCREL 0x31

 /*to set parameters in the core dumps */
-#define ELF_ARCH EM_ARCOMPACT
+#define ELF_ARCH EM_ARC_INUSE
 #define ELF_CLASS ELFCLASS32

 #ifdef CONFIG_CPU_BIG_ENDIAN
@@ -156,7 +156,6 @@ END(EV_Extension)
 tracesys:
 ; save EFA in case tracer wants the PC of traced task
 ; using ERET won't work since next-PC has already committed
-lr r12, [efa]
 GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
 st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address

@@ -199,15 +198,9 @@ tracesys_exit:
 ; Breakpoint TRAP
 ; ---------------------------------------------
 trap_with_param:
-
-; stop_pc info by gdb needs this info
-lr r0, [efa]
+mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
 mov r1, sp

-; Now that we have read EFA, it is safe to do "fake" rtie
-; and get out of CPU exception mode
-FAKE_RET_FROM_EXCPN
-
 ; Save callee regs in case gdb wants to have a look
 ; SP will grow up by size of CALLEE Reg-File
 ; NOTE: clobbers r12
@@ -234,6 +227,10 @@ ENTRY(EV_Trap)

 EXCEPTION_PROLOGUE

+lr r12, [efa]
+
+FAKE_RET_FROM_EXCPN
+
 ;============ TRAP 1 :breakpoints
 ; Check ECR for trap with arg (PROLOGUE ensures r9 has ECR)
 bmsk.f 0, r9, 7
@@ -241,9 +238,6 @@ ENTRY(EV_Trap)

 ;============ TRAP (no param): syscall top level

-; First return from Exception to pure K mode (Exception/IRQs renabled)
-FAKE_RET_FROM_EXCPN
-
 ; If syscall tracing ongoing, invoke pre-post-hooks
 GET_CURR_THR_INFO_FLAGS r10
 btst r10, TIF_SYSCALL_TRACE
@@ -16,8 +16,10 @@
 #interrupt-cells = <2>;
 #address-cells = <1>;
 #size-cells = <0>;
-spi-max-frequency = <3000000>;
+spi-max-frequency = <9600000>;
 spi-cs-high;
+spi-cpol;
+spi-cpha;

 cpcap_adc: adc {
 compatible = "motorola,mapphone-cpcap-adc";
@@ -497,14 +497,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 if (!ocram_pool) {
 pr_warn("%s: ocram pool unavailable!\n", __func__);
 ret = -ENODEV;
-goto put_node;
+goto put_device;
 }

 ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
 if (!ocram_base) {
 pr_warn("%s: unable to alloc ocram!\n", __func__);
 ret = -ENOMEM;
-goto put_node;
+goto put_device;
 }

 ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -527,7 +527,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
 if (ret) {
 pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
-goto put_node;
+goto put_device;
 }

 ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
@@ -574,7 +574,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 &imx6_suspend,
 MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));

-goto put_node;
+goto put_device;

 pl310_cache_map_failed:
 iounmap(pm_info->gpc_base.vbase);
@@ -584,6 +584,8 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
 iounmap(pm_info->src_base.vbase);
 src_map_failed:
 iounmap(pm_info->mmdc_base.vbase);
+put_device:
+put_device(&pdev->dev);
 put_node:
 of_node_put(node);

@@ -65,7 +65,7 @@
 #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)

 #define PAGE_S2_MEMATTR(attr) \
 ({ \
@@ -269,7 +269,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 if (user_mode(regs) || !kgdb_single_step)
 return DBG_HOOK_ERROR;

-kgdb_handle_exception(1, SIGTRAP, 0, regs);
+kgdb_handle_exception(0, SIGTRAP, 0, regs);
 return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
@@ -144,11 +144,15 @@ ENTRY(__kvm_handle_stub_hvc)

 1: cmp x0, #HVC_RESET_VECTORS
 b.ne 1f
-reset:
+
 /*
-* Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-* case we coming via HVC_SOFT_RESTART.
+* Set the HVC_RESET_VECTORS return code before entering the common
+* path so that we do not clobber x0-x2 in case we are coming via
+* HVC_SOFT_RESTART.
 */
+mov x0, xzr
+reset:
+/* Reset kvm back to the hyp stub. */
 mrs x5, sctlr_el2
 ldr x6, =SCTLR_ELx_FLAGS
 bic x5, x5, x6 // Clear SCTL_M and etc
@@ -159,7 +163,6 @@ reset:
 /* Install stub vectors */
 adr_l x5, __hyp_stub_vectors
 msr vbar_el2, x5
-mov x0, xzr
 eret

 1: /* Bad stub call */
@@ -30,12 +30,12 @@
 #define KVM_USER_MEM_SLOTS 32

 /*
-* These seem to be used for allocating ->chip in the routing table,
-* which we don't use. 4096 is an out-of-thin-air value. If we need
-* to look at ->chip later on, we'll need to revisit this.
+* These seem to be used for allocating ->chip in the routing table, which we
+* don't use. 1 is as small as we can get to reduce the needed memory. If we
+* need to look at ->chip later on, we'll need to revisit this.
 */
 #define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
 #define KVM_HALT_POLL_NS_DEFAULT 80000

 /* s390-specific vcpu->requests bit members */
@@ -155,6 +155,8 @@ static noinline __init void setup_lowcore_early(void)
 psw_t psw;

 psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+if (IS_ENABLED(CONFIG_KASAN))
+psw.mask |= PSW_MASK_DAT;
 psw.addr = (unsigned long) s390_base_ext_handler;
 S390_lowcore.external_new_psw = psw;
 psw.addr = (unsigned long) s390_base_pgm_handler;
@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
 _PAGE_YOUNG);
 #ifdef CONFIG_MEM_SOFT_DIRTY
 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
-_PAGE_DIRTY);
+_PAGE_SOFT_DIRTY);
 #endif
 pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
 _PAGE_NOEXEC);
@@ -337,7 +337,7 @@ struct x86_hw_tss {
 #define INVALID_IO_BITMAP_OFFSET 0x8000

 struct entry_stack {
-unsigned long words[64];
+char stack[PAGE_SIZE];
 };

 struct entry_stack_page {
@@ -5,7 +5,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS \
 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
-| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)

 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
 enum kvm_reg reg)
@@ -4474,7 +4474,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
 rsvd_bits(maxphyaddr, 51);
 rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+gbpages_bit_rsvd |
 rsvd_bits(maxphyaddr, 51);
 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
 rsvd_bits(maxphyaddr, 51);
@@ -6335,6 +6335,8 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)

 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
+BUILD_BUG_ON(KVM_CR4_GUEST_OWNED_BITS & ~KVM_POSSIBLE_CR4_GUEST_BITS);
+
 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
 if (enable_ept)
 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
@@ -865,6 +865,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 if (is_long_mode(vcpu)) {
 if (!(cr4 & X86_CR4_PAE))
 return 1;
+if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+return 1;
 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 && ((cr4 ^ old_cr4) & pdptr_bits)
 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -38,6 +38,18 @@ void blk_flush_integrity(void)
 flush_workqueue(kintegrityd_wq);
 }

+void __bio_integrity_free(struct bio_set *bs, struct bio_integrity_payload *bip)
+{
+if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
+if (bip->bip_vec)
+bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
+bip->bip_slab);
+mempool_free(bip, &bs->bio_integrity_pool);
+} else {
+kfree(bip);
+}
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio: bio to attach integrity metadata to
@@ -90,7 +102,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,

 return bip;
 err:
-mempool_free(bip, &bs->bio_integrity_pool);
+__bio_integrity_free(bs, bip);
 return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
@@ -111,14 +123,7 @@ static void bio_integrity_free(struct bio *bio)
 kfree(page_address(bip->bip_vec->bv_page) +
 bip->bip_vec->bv_offset);

-if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
-bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
-
-mempool_free(bip, &bs->bio_integrity_pool);
-} else {
-kfree(bip);
-}
-
+__bio_integrity_free(bs, bip);
 bio->bi_integrity = NULL;
 bio->bi_opf &= ~REQ_INTEGRITY;
 }
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <asm/unaligned.h>

 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -232,22 +233,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)

 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
-__be16 *b = buf;
-
-b[0] = cpu_to_be16(val << shift);
+put_unaligned_be16(val << shift, buf);
 }

 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 {
-__le16 *b = buf;
-
-b[0] = cpu_to_le16(val << shift);
+put_unaligned_le16(val << shift, buf);
 }

 static void regmap_format_16_native(void *buf, unsigned int val,
 unsigned int shift)
 {
-*(u16 *)buf = val << shift;
+u16 v = val << shift;
+
+memcpy(buf, &v, sizeof(v));
 }

 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -263,43 +262,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)

 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
-__be32 *b = buf;
-
-b[0] = cpu_to_be32(val << shift);
+put_unaligned_be32(val << shift, buf);
 }

 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 {
-__le32 *b = buf;
-
-b[0] = cpu_to_le32(val << shift);
+put_unaligned_le32(val << shift, buf);
 }

 static void regmap_format_32_native(void *buf, unsigned int val,
 unsigned int shift)
 {
-*(u32 *)buf = val << shift;
+u32 v = val << shift;
+
+memcpy(buf, &v, sizeof(v));
 }

 #ifdef CONFIG_64BIT
 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 {
-__be64 *b = buf;
-
-b[0] = cpu_to_be64((u64)val << shift);
+put_unaligned_be64((u64) val << shift, buf);
 }

 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 {
-__le64 *b = buf;
-
-b[0] = cpu_to_le64((u64)val << shift);
+put_unaligned_le64((u64) val << shift, buf);
 }

 static void regmap_format_64_native(void *buf, unsigned int val,
 unsigned int shift)
 {
-*(u64 *)buf = (u64)val << shift;
+u64 v = (u64) val << shift;
+
+memcpy(buf, &v, sizeof(v));
 }
 #endif

@@ -316,35 +311,34 @@ static unsigned int regmap_parse_8(const void *buf)

 static unsigned int regmap_parse_16_be(const void *buf)
 {
-const __be16 *b = buf;
-
-return be16_to_cpu(b[0]);
+return get_unaligned_be16(buf);
 }

 static unsigned int regmap_parse_16_le(const void *buf)
 {
-const __le16 *b = buf;
-
-return le16_to_cpu(b[0]);
+return get_unaligned_le16(buf);
 }

 static void regmap_parse_16_be_inplace(void *buf)
 {
-__be16 *b = buf;
+u16 v = get_unaligned_be16(buf);

-b[0] = be16_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static void regmap_parse_16_le_inplace(void *buf)
 {
-__le16 *b = buf;
+u16 v = get_unaligned_le16(buf);

-b[0] = le16_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static unsigned int regmap_parse_16_native(const void *buf)
 {
-return *(u16 *)buf;
+u16 v;
+
+memcpy(&v, buf, sizeof(v));
+return v;
 }

 static unsigned int regmap_parse_24(const void *buf)
@@ -359,69 +353,67 @@ static unsigned int regmap_parse_24(const void *buf)

 static unsigned int regmap_parse_32_be(const void *buf)
 {
-const __be32 *b = buf;
-
-return be32_to_cpu(b[0]);
+return get_unaligned_be32(buf);
 }

 static unsigned int regmap_parse_32_le(const void *buf)
 {
-const __le32 *b = buf;
-
-return le32_to_cpu(b[0]);
+return get_unaligned_le32(buf);
 }

 static void regmap_parse_32_be_inplace(void *buf)
 {
-__be32 *b = buf;
+u32 v = get_unaligned_be32(buf);

-b[0] = be32_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static void regmap_parse_32_le_inplace(void *buf)
 {
-__le32 *b = buf;
+u32 v = get_unaligned_le32(buf);

-b[0] = le32_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static unsigned int regmap_parse_32_native(const void *buf)
 {
-return *(u32 *)buf;
+u32 v;
+
+memcpy(&v, buf, sizeof(v));
+return v;
 }

 #ifdef CONFIG_64BIT
 static unsigned int regmap_parse_64_be(const void *buf)
 {
-const __be64 *b = buf;
-
-return be64_to_cpu(b[0]);
+return get_unaligned_be64(buf);
 }

 static unsigned int regmap_parse_64_le(const void *buf)
 {
-const __le64 *b = buf;
-
-return le64_to_cpu(b[0]);
+return get_unaligned_le64(buf);
 }

 static void regmap_parse_64_be_inplace(void *buf)
 {
-__be64 *b = buf;
+u64 v = get_unaligned_be64(buf);

-b[0] = be64_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static void regmap_parse_64_le_inplace(void *buf)
 {
-__le64 *b = buf;
+u64 v = get_unaligned_le64(buf);

-b[0] = le64_to_cpu(b[0]);
+memcpy(buf, &v, sizeof(v));
 }

 static unsigned int regmap_parse_64_native(const void *buf)
 {
-return *(u64 *)buf;
+u64 v;
+
+memcpy(&v, buf, sizeof(v));
+return v;
 }
 #endif

@@ -974,25 +974,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
 test_bit(NBD_BOUND, &config->runtime_flags))) {
 dev_err(disk_to_dev(nbd->disk),
 "Device being setup by another task");
-sockfd_put(sock);
-return -EBUSY;
+err = -EBUSY;
+goto put_socket;
+}
+
+nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+if (!nsock) {
+err = -ENOMEM;
+goto put_socket;
 }

 socks = krealloc(config->socks, (config->num_connections + 1) *
 sizeof(struct nbd_sock *), GFP_KERNEL);
 if (!socks) {
-sockfd_put(sock);
-return -ENOMEM;
+kfree(nsock);
+err = -ENOMEM;
+goto put_socket;
 }
 config->socks = socks;

-nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
-if (!nsock) {
-sockfd_put(sock);
-return -ENOMEM;
-}
-
 nsock->fallback_index = -1;
 nsock->dead = false;
 mutex_init(&nsock->tx_lock);
@@ -1004,6 +1005,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
 atomic_inc(&config->live_connections);

 return 0;
+
+put_socket:
+sockfd_put(sock);
+return err;
 }

 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
 int orientation;
 };

-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
-.width = 800,
-.height = 1280,
-.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
 static const struct drm_dmi_panel_orientation_data asus_t100ha = {
 .width = 800,
 .height = 1280,
@@ -100,13 +94,19 @@ static const struct dmi_system_id orientation_data[] = {
 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
 },
-.driver_data = (void *)&acer_s1003,
+.driver_data = (void *)&lcd800x1280_rightside_up,
 }, { /* Asus T100HA */
 .matches = {
 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
 },
 .driver_data = (void *)&asus_t100ha,
+}, { /* Asus T101HA */
+.matches = {
+DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+},
+.driver_data = (void *)&lcd800x1280_rightside_up,
 }, { /* GPD MicroPC (generic strings, also match on bios date) */
 .matches = {
 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -108,6 +108,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
 true, true);
 }

+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+struct drm_plane_state *old_state)
+{
+struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+state->pending.enable = false;
+wmb(); /* Make sure the above parameter is set before update */
+state->pending.dirty = true;
+}
+
 static void mtk_plane_atomic_update(struct drm_plane *plane,
 struct drm_plane_state *old_state)
 {
@@ -122,6 +132,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
 if (!crtc || WARN_ON(!fb))
 return;

+if (!plane->state->visible) {
+mtk_plane_atomic_disable(plane, old_state);
+return;
+}
+
 gem = fb->obj[0];
 mtk_gem = to_mtk_gem_obj(gem);
 addr = mtk_gem->dma_addr;
@@ -143,16 +158,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
 state->pending.dirty = true;
 }

-static void mtk_plane_atomic_disable(struct drm_plane *plane,
-struct drm_plane_state *old_state)
-{
-struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
-state->pending.enable = false;
-wmb(); /* Make sure the above parameter is set before update */
-state->pending.dirty = true;
-}
-
 static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
 .atomic_check = mtk_plane_atomic_check,
 .atomic_update = mtk_plane_atomic_update,
@@ -5574,6 +5574,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
 if (!rdev->pm.dpm.ps)
 return -ENOMEM;
 power_state_offset = (u8 *)state_array->states;
+rdev->pm.dpm.num_ps = 0;
 for (i = 0; i < state_array->ucNumEntries; i++) {
 u8 *idx;
 power_state = (union pplib_power_state *)power_state_offset;
@@ -5583,10 +5584,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
 if (!rdev->pm.power_state[i].clock_info)
 return -EINVAL;
 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
-if (ps == NULL) {
-kfree(rdev->pm.dpm.ps);
+if (ps == NULL)
 return -ENOMEM;
-}
 rdev->pm.dpm.ps[i].ps_priv = ps;
 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
 non_clock_info,
@@ -5608,8 +5607,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
 k++;
 }
 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+rdev->pm.dpm.num_ps = i + 1;
 }
-rdev->pm.dpm.num_ps = state_array->ucNumEntries;

 /* fill in the vce power states */
 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
@@ -143,6 +143,8 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
 for (i = 0; i < hub->soc->num_wgrps; i++) {
 struct tegra_windowgroup *wgrp = &hub->wgrps[i];

+/* Skip orphaned window group whose parent DC is disabled */
+if (wgrp->parent)
 tegra_windowgroup_enable(wgrp);
 }

@@ -160,6 +162,8 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
 for (i = 0; i < hub->soc->num_wgrps; i++) {
 struct tegra_windowgroup *wgrp = &hub->wgrps[i];

+/* Skip orphaned window group whose parent DC is disabled */
+if (wgrp->parent)
 tegra_windowgroup_disable(wgrp);
 }
 }
@@ -632,8 +632,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
  */
 void host1x_driver_unregister(struct host1x_driver *driver)
 {
+struct host1x *host1x;
+
 driver_unregister(&driver->driver);

+mutex_lock(&devices_lock);
+
+list_for_each_entry(host1x, &devices, list)
+host1x_detach_driver(host1x, driver);
+
+mutex_unlock(&devices_lock);
+
 mutex_lock(&drivers_lock);
 list_del_init(&driver->list);
 mutex_unlock(&drivers_lock);
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -3027,17 +3028,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
 unsigned cookie)
 {
+int r;
+unsigned noio_flag;
 char udev_cookie[DM_COOKIE_LENGTH];
 char *envp[] = { udev_cookie, NULL };

+noio_flag = memalloc_noio_save();
+
 if (!cookie)
-return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
 else {
 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
 DM_COOKIE_ENV_VAR_NAME, cookie);
-return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
 action, envp);
 }
+
+memalloc_noio_restore(noio_flag);
+
+return r;
 }

 uint32_t dm_next_uevent_seq(struct mapped_device *md)
@@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
 int mptscsih_resume(struct pci_dev *pdev);
 #endif

-#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
-

 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -2420,7 +2418,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
 /* Copy the sense received into the scsi command block. */
 req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
 sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
-memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);

 /* Log SMART data (asc = 0x5D, non-IM case only) if required.
  */
@@ -365,6 +365,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
 }
 }

+bp->pf.active_vfs = 0;
 kfree(bp->pf.vf);
 bp->pf.vf = NULL;
 }
@@ -750,7 +751,6 @@ void bnxt_sriov_disable(struct bnxt *bp)

 bnxt_free_vf_resources(bp);

-bp->pf.active_vfs = 0;
 /* Reclaim all resources for the PF. */
 rtnl_lock();
 bnxt_restore_pf_fw_resources(bp);
@@ -4060,7 +4060,7 @@ static int macb_probe(struct platform_device *pdev)
 bp->wol = 0;
 if (of_get_property(np, "magic-packet", NULL))
 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
-device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

 spin_lock_init(&bp->lock);

@@ -810,16 +810,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
 struct in_addr *addr;

 addr = (struct in_addr *)ipmask;
-if (ntohl(addr->s_addr) == 0xffffffff)
+if (addr->s_addr == htonl(0xffffffff))
 return true;
 } else if (family == AF_INET6) {
 struct in6_addr *addr6;

 addr6 = (struct in6_addr *)ipmask;
-if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
-ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
-ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
-ntohl(addr6->s6_addr32[3]) == 0xffffffff)
+if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+addr6->s6_addr32[1] == htonl(0xffffffff) &&
+addr6->s6_addr32[2] == htonl(0xffffffff) &&
+addr6->s6_addr32[3] == htonl(0xffffffff))
 return true;
 }
 return false;
@@ -3499,7 +3499,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 drv_fw = &fw_info->fw_hdr;

 /* Read the header of the firmware on the card */
-ret = -t4_read_flash(adap, FLASH_FW_START,
+ret = t4_read_flash(adap, FLASH_FW_START,
 sizeof(*card_fw) / sizeof(uint32_t),
 (uint32_t *)card_fw, 1);
 if (ret == 0) {
@@ -3528,7 +3528,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 should_install_fs_fw(adap, card_fw_usable,
 be32_to_cpu(fs_fw->fw_ver),
 be32_to_cpu(card_fw->fw_ver))) {
-ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
+ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
 fw_size, 0);
 if (ret != 0) {
 dev_err(adap->pdev_dev,
@@ -3560,7 +3560,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
-ret = EINVAL;
+ret = -EINVAL;
 goto bye;
 }

@@ -151,18 +151,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
 {
 struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
 unsigned char *packet = skb->data;
+u32 len = skb_headlen(skb);
 u32 i;

-for (i = 0; i < skb->len; i++)
+len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+for (i = 0; i < len; i++)
 if (packet[i] != (unsigned char)(i & 0xff))
 break;

 /* The packet is correctly received */
-if (i == skb->len)
+if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
 tqp_vector->rx_group.total_packets++;
 else
 print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
-skb->data, skb->len, true);
+skb->data, len, true);

 dev_kfree_skb_any(skb);
 }
@@ -446,11 +446,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
 i40e_get_netdev_stats_struct_tx(ring, stats);

 if (i40e_enabled_xdp_vsi(vsi)) {
-ring++;
+ring = READ_ONCE(vsi->xdp_rings[i]);
+if (!ring)
+continue;
 i40e_get_netdev_stats_struct_tx(ring, stats);
 }

-ring++;
+ring = READ_ONCE(vsi->rx_rings[i]);
+if (!ring)
+continue;
 do {
 start = u64_stats_fetch_begin_irq(&ring->syncp);
 packets = ring->stats.packets;
@@ -793,6 +797,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 for (q = 0; q < vsi->num_queue_pairs; q++) {
 /* locate Tx ring */
 p = READ_ONCE(vsi->tx_rings[q]);
+if (!p)
+continue;

 do {
 start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -806,8 +812,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 tx_linearize += p->tx_stats.tx_linearize;
 tx_force_wb += p->tx_stats.tx_force_wb;

-/* Rx queue is part of the same block as Tx queue */
-p = &p[1];
+/* locate Rx ring */
+p = READ_ONCE(vsi->rx_rings[q]);
+if (!p)
+continue;
+
 do {
 start = u64_stats_fetch_begin_irq(&p->syncp);
 packets = p->stats.packets;
@@ -10196,10 +10205,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
 if (vsi->tx_rings && vsi->tx_rings[0]) {
 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
 kfree_rcu(vsi->tx_rings[i], rcu);
-vsi->tx_rings[i] = NULL;
-vsi->rx_rings[i] = NULL;
+WRITE_ONCE(vsi->tx_rings[i], NULL);
+WRITE_ONCE(vsi->rx_rings[i], NULL);
 if (vsi->xdp_rings)
-vsi->xdp_rings[i] = NULL;
+WRITE_ONCE(vsi->xdp_rings[i], NULL);
 }
 }
 }
@@ -10233,7 +10242,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
 ring->itr_setting = pf->tx_itr_default;
-vsi->tx_rings[i] = ring++;
+WRITE_ONCE(vsi->tx_rings[i], ring++);

 if (!i40e_enabled_xdp_vsi(vsi))
 goto setup_rx;
@@ -10251,7 +10260,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
 set_ring_xdp(ring);
 ring->itr_setting = pf->tx_itr_default;
-vsi->xdp_rings[i] = ring++;
+WRITE_ONCE(vsi->xdp_rings[i], ring++);

 setup_rx:
 ring->queue_index = i;
@@ -10264,7 +10273,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 ring->size = 0;
 ring->dcb_tc = 0;
 ring->itr_setting = pf->rx_itr_default;
-vsi->rx_rings[i] = ring;
+WRITE_ONCE(vsi->rx_rings[i], ring);
 }

 return 0;
@@ -923,7 +923,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 ring->queue_index = txr_idx;

 /* assign ring to adapter */
-adapter->tx_ring[txr_idx] = ring;
+WRITE_ONCE(adapter->tx_ring[txr_idx], ring);

 /* update count and index */
 txr_count--;
@@ -950,7 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 set_ring_xdp(ring);

 /* assign ring to adapter */
-adapter->xdp_ring[xdp_idx] = ring;
+WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);

 /* update count and index */
 xdp_count--;
@@ -993,7 +993,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 ring->queue_index = rxr_idx;

 /* assign ring to adapter */
-adapter->rx_ring[rxr_idx] = ring;
+WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);

 /* update count and index */
 rxr_count--;
@@ -1022,13 +1022,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)

 ixgbe_for_each_ring(ring, q_vector->tx) {
 if (ring_is_xdp(ring))
-adapter->xdp_ring[ring->queue_index] = NULL;
+WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
 else
-adapter->tx_ring[ring->queue_index] = NULL;
+WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
 }

 ixgbe_for_each_ring(ring, q_vector->rx)
-adapter->rx_ring[ring->queue_index] = NULL;
+WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);

 adapter->q_vector[v_idx] = NULL;
 napi_hash_del(&q_vector->napi);
@@ -7005,7 +7005,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 }

 for (i = 0; i < adapter->num_rx_queues; i++) {
-struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+if (!rx_ring)
+continue;
 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7026,15 +7029,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 packets = 0;
 /* gather some stats to the adapter struct that are per queue */
 for (i = 0; i < adapter->num_tx_queues; i++) {
-struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+if (!tx_ring)
+continue;
 restart_queue += tx_ring->tx_stats.restart_queue;
 tx_busy += tx_ring->tx_stats.tx_busy;
 bytes += tx_ring->stats.bytes;
 packets += tx_ring->stats.packets;
 }
 for (i = 0; i < adapter->num_xdp_queues; i++) {
-struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
+
+if (!xdp_ring)
+continue;
 restart_queue += xdp_ring->tx_stats.restart_queue;
 tx_busy += xdp_ring->tx_stats.tx_busy;
 bytes += xdp_ring->stats.bytes;
@@ -5982,7 +5982,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
 }

 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
-if (WARN_ON(!fib_work))
+if (!fib_work)
 return NOTIFY_BAD;

 fib_work->mlxsw_sp = router->mlxsw_sp;
@@ -1301,11 +1301,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)

 /* Init all registers */
 ret = smsc95xx_reset(dev);
+if (ret)
+goto free_pdata;

 /* detect device revision as different features may be available */
 ret = smsc95xx_read_reg(dev, ID_REV, &val);
 if (ret < 0)
-return ret;
+goto free_pdata;
+
 val >>= 16;
 pdata->chip_id = val;
 pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1331,6 +1334,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);

 return 0;
+
+free_pdata:
+kfree(pdata);
+return ret;
 }

 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -643,9 +643,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,

 static void ath9k_hif_usb_rx_cb(struct urb *urb)
 {
-struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
-struct hif_device_usb *hif_dev = rx_buf->hif_dev;
-struct sk_buff *skb = rx_buf->skb;
+struct sk_buff *skb = (struct sk_buff *) urb->context;
+struct hif_device_usb *hif_dev =
+usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
 int ret;

 if (!skb)
@@ -685,15 +685,14 @@ static void ath9k_hif_usb_rx_cb(struct urb *urb)
 return;
 free:
 kfree_skb(skb);
-kfree(rx_buf);
 }

 static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
 {
-struct rx_buf *rx_buf = (struct rx_buf *)urb->context;
-struct hif_device_usb *hif_dev = rx_buf->hif_dev;
-struct sk_buff *skb = rx_buf->skb;
+struct sk_buff *skb = (struct sk_buff *) urb->context;
 struct sk_buff *nskb;
+struct hif_device_usb *hif_dev =
+usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
 int ret;

 if (!skb)
@@ -751,7 +750,6 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
 return;
 free:
 kfree_skb(skb);
-kfree(rx_buf);
 urb->context = NULL;
 }

@@ -797,7 +795,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
 init_usb_anchor(&hif_dev->mgmt_submitted);

 for (i = 0; i < MAX_TX_URB_NUM; i++) {
-tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL);
+tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
 if (!tx_buf)
 goto err;

@@ -834,9 +832,8 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)

 static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
 {
-struct rx_buf *rx_buf = NULL;
-struct sk_buff *skb = NULL;
 struct urb *urb = NULL;
+struct sk_buff *skb = NULL;
 int i, ret;

 init_usb_anchor(&hif_dev->rx_submitted);
@@ -844,12 +841,6 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)

 for (i = 0; i < MAX_RX_URB_NUM; i++) {

-rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
-if (!rx_buf) {
-ret = -ENOMEM;
-goto err_rxb;
-}
-
 /* Allocate URB */
 urb = usb_alloc_urb(0, GFP_KERNEL);
 if (urb == NULL) {
@@ -864,14 +855,11 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
 goto err_skb;
 }

-rx_buf->hif_dev = hif_dev;
-rx_buf->skb = skb;
-
 usb_fill_bulk_urb(urb, hif_dev->udev,
 usb_rcvbulkpipe(hif_dev->udev,
 USB_WLAN_RX_PIPE),
 skb->data, MAX_RX_BUF_SIZE,
-ath9k_hif_usb_rx_cb, rx_buf);
+ath9k_hif_usb_rx_cb, skb);

 /* Anchor URB */
 usb_anchor_urb(urb, &hif_dev->rx_submitted);
@@ -897,8 +885,6 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
 err_skb:
 usb_free_urb(urb);
 err_urb:
-kfree(rx_buf);
-err_rxb:
 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
 return ret;
 }
@@ -910,21 +896,14 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)

 static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
 {
-struct rx_buf *rx_buf = NULL;
-struct sk_buff *skb = NULL;
 struct urb *urb = NULL;
+struct sk_buff *skb = NULL;
 int i, ret;

 init_usb_anchor(&hif_dev->reg_in_submitted);

 for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {

-rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL);
-if (!rx_buf) {
-ret = -ENOMEM;
-goto err_rxb;
-}
-
 /* Allocate URB */
 urb = usb_alloc_urb(0, GFP_KERNEL);
 if (urb == NULL) {
@@ -939,14 +918,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
 goto err_skb;
 }

-rx_buf->hif_dev = hif_dev;
-rx_buf->skb = skb;
-
 usb_fill_int_urb(urb, hif_dev->udev,
 usb_rcvintpipe(hif_dev->udev,
 USB_REG_IN_PIPE),
 skb->data, MAX_REG_IN_BUF_SIZE,
-ath9k_hif_usb_reg_in_cb, rx_buf, 1);
+ath9k_hif_usb_reg_in_cb, skb, 1);

 /* Anchor URB */
 usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -972,8 +948,6 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
 err_skb:
 usb_free_urb(urb);
 err_urb:
-kfree(rx_buf);
-err_rxb:
 ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
 return ret;
 }
@@ -86,11 +86,6 @@ struct tx_buf {
 struct list_head list;
 };

-struct rx_buf {
-struct sk_buff *skb;
-struct hif_device_usb *hif_dev;
-};
-
 #define HIF_USB_TX_STOP BIT(0)
 #define HIF_USB_TX_FLUSH BIT(1)

@@ -447,7 +447,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
  * Spread I/O queues completion vectors according their queue index.
  * Admin queues can always go on completion vector 0.
  */
-comp_vector = idx == 0 ? idx : idx - 1;
+comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;

 /* +1 for ib_stop_cq */
 queue->ib_cq = ib_alloc_cq(ibdev, queue,
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 //
 // Copyright 2013 Freescale Semiconductor, Inc.
+// Copyright 2020 NXP
 //
 // Freescale DSPI driver
 // This file contains a driver for the Freescale DSPI
@@ -43,6 +44,9 @@
 #define SPI_MCR_CLR_TXF (1 << 11)
 #define SPI_MCR_CLR_RXF (1 << 10)
 #define SPI_MCR_XSPI (1 << 3)
+#define SPI_MCR_DIS_TXF (1 << 13)
+#define SPI_MCR_DIS_RXF (1 << 12)
+#define SPI_MCR_HALT (1 << 0)

 #define SPI_TCR 0x08
 #define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
@@ -893,6 +897,8 @@ static int dspi_suspend(struct device *dev)
 struct spi_master *master = dev_get_drvdata(dev);
 struct fsl_dspi *dspi = spi_master_get_devdata(master);

+if (dspi->irq)
+disable_irq(dspi->irq);
 spi_master_suspend(master);
 clk_disable_unprepare(dspi->clk);

@@ -913,6 +919,8 @@ static int dspi_resume(struct device *dev)
 if (ret)
 return ret;
 spi_master_resume(master);
+if (dspi->irq)
+enable_irq(dspi->irq);

 return 0;
 }
@@ -1090,8 +1098,8 @@ static int dspi_probe(struct platform_device *pdev)
 goto out_clk_put;
 }

-ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
-pdev->name, dspi);
+ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+IRQF_SHARED, pdev->name, dspi);
 if (ret < 0) {
 dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
 goto out_clk_put;
@@ -1101,7 +1109,7 @@ static int dspi_probe(struct platform_device *pdev)
 ret = dspi_request_dma(dspi, res->start);
 if (ret < 0) {
 dev_err(&pdev->dev, "can't get dma channels\n");
-goto out_clk_put;
+goto out_free_irq;
 }
 }

@@ -1114,11 +1122,14 @@ static int dspi_probe(struct platform_device *pdev)
 ret = spi_register_master(master);
 if (ret != 0) {
 dev_err(&pdev->dev, "Problem registering DSPI master\n");
-goto out_clk_put;
+goto out_free_irq;
 }

 return ret;

+out_free_irq:
+if (dspi->irq)
+free_irq(dspi->irq, dspi);
 out_clk_put:
 clk_disable_unprepare(dspi->clk);
 out_master_put:
@@ -1133,13 +1144,42 @@ static int dspi_remove(struct platform_device *pdev)
 struct fsl_dspi *dspi = spi_master_get_devdata(master);

 /* Disconnect from the SPI framework */
+spi_unregister_controller(dspi->master);
+
+/* Disable RX and TX */
+regmap_update_bits(dspi->regmap, SPI_MCR,
+SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
+SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
+
+/* Stop Running */
+regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
+
 dspi_release_dma(dspi);
+if (dspi->irq)
+free_irq(dspi->irq, dspi);
 clk_disable_unprepare(dspi->clk);
 spi_unregister_master(dspi->master);

 return 0;
 }

+static void dspi_shutdown(struct platform_device *pdev)
+{
+struct spi_controller *ctlr = platform_get_drvdata(pdev);
+struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+
+/* Disable RX and TX */
+regmap_update_bits(dspi->regmap, SPI_MCR,
+SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
+SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF);
+
+/* Stop Running */
+regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
+
+dspi_release_dma(dspi);
+clk_disable_unprepare(dspi->clk);
+spi_unregister_controller(dspi->master);
+}
+
 static struct platform_driver fsl_dspi_driver = {
 .driver.name = DRIVER_NAME,
 .driver.of_match_table = fsl_dspi_dt_ids,
@@ -1147,6 +1187,7 @@ static struct platform_driver fsl_dspi_driver = {
 .driver.pm = &dspi_pm,
 .probe = dspi_probe,
 .remove = dspi_remove,
+.shutdown = dspi_shutdown,
 };
 module_platform_driver(fsl_dspi_driver);

@@ -607,15 +607,20 @@ static int spidev_open(struct inode *inode, struct file *filp)
 static int spidev_release(struct inode *inode, struct file *filp)
 {
 struct spidev_data *spidev;
+int dofree;

 mutex_lock(&device_list_lock);
 spidev = filp->private_data;
 filp->private_data = NULL;

+spin_lock_irq(&spidev->spi_lock);
+/* ... after we unbound from the underlying device? */
+dofree = (spidev->spi == NULL);
+spin_unlock_irq(&spidev->spi_lock);
+
 /* last close? */
 spidev->users--;
 if (!spidev->users) {
-int dofree;

 kfree(spidev->tx_buffer);
 spidev->tx_buffer = NULL;
@@ -623,18 +628,13 @@ static int spidev_release(struct inode *inode, struct file *filp)
 kfree(spidev->rx_buffer);
 spidev->rx_buffer = NULL;

-spin_lock_irq(&spidev->spi_lock);
-if (spidev->spi)
-spidev->speed_hz = spidev->spi->max_speed_hz;
-
-/* ... after we unbound from the underlying device? */
-dofree = (spidev->spi == NULL);
-spin_unlock_irq(&spidev->spi_lock);
-
 if (dofree)
 kfree(spidev);
+else
+spidev->speed_hz = spidev->spi->max_speed_hz;
 }
 #ifdef CONFIG_SPI_SLAVE
+if (!dofree)
 spi_slave_abort(spidev->spi);
 #endif
 mutex_unlock(&device_list_lock);
@@ -782,13 +782,13 @@ static int spidev_remove(struct spi_device *spi)
 {
 struct spidev_data *spidev = spi_get_drvdata(spi);

+/* prevent new opens */
+mutex_lock(&device_list_lock);
 /* make sure ops on existing fds can abort cleanly */
 spin_lock_irq(&spidev->spi_lock);
 spidev->spi = NULL;
 spin_unlock_irq(&spidev->spi_lock);

-/* prevent new opens */
-mutex_lock(&device_list_lock);
 list_del(&spidev->device_entry);
 device_destroy(spidev_class, spidev->devt);
 clear_bit(MINOR(spidev->devt), minors);
@@ -204,8 +204,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
 int ret;

 ret = pm_runtime_get_sync(&dwc3->dev);
-if (ret)
+if (ret) {
+pm_runtime_put_sync_autosuspend(&dwc3->dev);
 return;
+}

 pm_runtime_mark_last_busy(&dwc3->dev);
 pm_runtime_put_sync_autosuspend(&dwc3->dev);
@@ -4801,25 +4801,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
 int refs;
-/* the ref bit is tricky. We have to make sure it is set
-* if we have the buffer dirty. Otherwise the
-* code to free a buffer can end up dropping a dirty
-* page
+/*
+* The TREE_REF bit is first set when the extent_buffer is added
+* to the radix tree. It is also reset, if unset, when a new reference
+* is created by find_extent_buffer.
 *
-* Once the ref bit is set, it won't go away while the
-* buffer is dirty or in writeback, and it also won't
-* go away while we have the reference count on the
-* eb bumped.
+* It is only cleared in two cases: freeing the last non-tree
+* reference to the extent_buffer when its STALE bit is set or
+* calling releasepage when the tree reference is the only reference.
 *
-* We can't just set the ref bit without bumping the
-* ref on the eb because free_extent_buffer might
-* see the ref bit and try to clear it. If this happens
-* free_extent_buffer might end up dropping our original
-* ref by mistake and freeing the page before we are able
-* to add one more ref.
+* In both cases, care is taken to ensure that the extent_buffer's
+* pages are not under io. However, releasepage can be concurrently
+* called with creating new references, which is prone to race
+* conditions between the calls to check_buffer_tree_ref in those
+* codepaths and clearing TREE_REF in try_release_extent_buffer.
 *
-* So bump the ref count first, then set the bit. If someone
-* beat us to it, drop the ref we added.
+* The actual lifetime of the extent_buffer in the radix tree is
+* adequately protected by the refcount, but the TREE_REF bit and
+* its corresponding reference are not. To protect against this
+* class of races, we call check_buffer_tree_ref from the codepaths
+* which trigger io after they set eb->io_pages. Note that once io is
+* initiated, TREE_REF can no longer be cleared, so that is the
+* moment at which any such race is best fixed.
 */
 refs = atomic_read(&eb->refs);
 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5273,6 +5276,11 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 eb->read_mirror = 0;
 atomic_set(&eb->io_pages, num_reads);
+/*
+* It is possible for releasepage to clear the TREE_REF bit before we
+* set io_pages. See check_buffer_tree_ref for a more detailed comment.
+*/
+check_buffer_tree_ref(eb);
 for (i = 0; i < num_pages; i++) {
 page = eb->pages[i];

@@ -2225,6 +2225,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
 if (rc == 0) {
 cifsInode->server_eof = attrs->ia_size;
 cifs_setsize(inode, attrs->ia_size);
+
+/*
+* The man page of truncate says if the size changed,
+* then the st_ctime and st_mtime fields for the file
+* are updated.
+*/
+attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+
 cifs_truncate_page(inode->i_mapping, inode->i_size);
 }

@@ -812,12 +812,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);

-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
 {
 /* Reconstruction of call-sites is dependent on kallsyms,
  * thus make dump the same restriction.
  */
-return kallsyms_show_value() == 1;
+return kallsyms_show_value(cred);
 }

 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -18,6 +18,7 @@
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)

+struct cred;
 struct module;

 static inline int is_kernel_inittext(unsigned long addr)
@@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);

 /* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);

 #else /* !CONFIG_KALLSYMS */

@@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
 return -ERANGE;
 }

-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
 {
 return false;
 }
@@ -57,6 +57,7 @@ struct snd_compr_runtime {
  * @direction: stream direction, playback/recording
  * @metadata_set: metadata set flag, true when set
  * @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
  * @private_data: pointer to DSP private data
  */
 struct snd_compr_stream {

@@ -68,6 +69,7 @@ struct snd_compr_stream {
 	enum snd_compr_direction direction;
 	bool metadata_set;
 	bool next_track;
+	bool partial_drain;
 	void *private_data;
 	struct snd_soc_pcm_runtime *be;
 };

@@ -176,7 +178,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
 	if (snd_BUG_ON(!stream))
 		return;

-	stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	/* for partial_drain case we are back to running state on success */
+	if (stream->partial_drain) {
+		stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+		stream->partial_drain = false; /* clear this flag as well */
+	} else {
+		stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+	}

 	wake_up(&stream->runtime->sleep);
 }

@@ -1903,7 +1903,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
 	return NULL;
 }

-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+					      const struct cred *f_cred)
 {
 	const struct bpf_map *map;
 	struct bpf_insn *insns;

@@ -1925,7 +1926,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
 			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
 				insns[i].code = BPF_JMP | BPF_CALL;
-			if (!bpf_dump_raw_ok())
+			if (!bpf_dump_raw_ok(f_cred))
 				insns[i].imm = 0;
 			continue;
 		}

@@ -1942,7 +1943,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 			continue;
 		}

-		if (!bpf_dump_raw_ok() &&
+		if (!bpf_dump_raw_ok(f_cred) &&
 		    imm == (unsigned long)prog->aux) {
 			insns[i].imm = 0;
 			insns[i + 1].imm = 0;

@@ -1953,7 +1954,8 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 	return insns;
 }

-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+				   struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
 {

@@ -2010,11 +2012,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 		struct bpf_insn *insns_sanitized;
 		bool fault;

-		if (prog->blinded && !bpf_dump_raw_ok()) {
+		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
 			info.xlated_prog_insns = 0;
 			goto done;
 		}
-		insns_sanitized = bpf_insn_prepare_dump(prog);
+		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
 		if (!insns_sanitized)
 			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);

@@ -2048,7 +2050,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	}

 	if (info.jited_prog_len && ulen) {
-		if (bpf_dump_raw_ok()) {
+		if (bpf_dump_raw_ok(file->f_cred)) {
 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
 			ulen = min_t(u32, info.jited_prog_len, ulen);

@@ -2083,7 +2085,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.nr_jited_ksyms;
 	info.nr_jited_ksyms = prog->aux->func_cnt;
 	if (info.nr_jited_ksyms && ulen) {
-		if (bpf_dump_raw_ok()) {
+		if (bpf_dump_raw_ok(file->f_cred)) {
 			u64 __user *user_ksyms;
 			ulong ksym_addr;
 			u32 i;

@@ -2107,7 +2109,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.nr_jited_func_lens;
 	info.nr_jited_func_lens = prog->aux->func_cnt;
 	if (info.nr_jited_func_lens && ulen) {
-		if (bpf_dump_raw_ok()) {
+		if (bpf_dump_raw_ok(file->f_cred)) {
 			u32 __user *user_lens;
 			u32 func_len, i;

@@ -2132,7 +2134,8 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	return 0;
 }

-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+				  struct bpf_map *map,
 				  const union bpf_attr *attr,
 				  union bpf_attr __user *uattr)
 {

@@ -2174,7 +2177,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
 	return 0;
 }

-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+				  struct btf *btf,
 				  const union bpf_attr *attr,
 				  union bpf_attr __user *uattr)
 {

@@ -2206,13 +2210,13 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
 		return -EBADFD;

 	if (f.file->f_op == &bpf_prog_fops)
-		err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
 					      uattr);
 	else if (f.file->f_op == &bpf_map_fops)
-		err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
 					     uattr);
 	else if (f.file->f_op == &btf_fops)
-		err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
 	else
 		err = -EINVAL;

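The user-visible end of this cred plumbing is BPF_OBJ_GET_INFO_BY_FD: the translated and JITed dumps are now gated on the credentials of whoever opened the prog fd, not on whoever happens to issue the syscall. A rough sketch of how one might poke at it (the program ID is passed on the command line; BPF_PROG_GET_FD_BY_ID itself needs CAP_SYS_ADMIN; buffer sizes are arbitrary):

#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}

int main(int argc, char **argv)
{
	union bpf_attr attr;
	struct bpf_prog_info info;
	struct bpf_insn *xlated;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <prog_id>\n", argv[0]);
		return 1;
	}

	/* Turn a program ID into an fd; this step requires CAP_SYS_ADMIN. */
	memset(&attr, 0, sizeof(attr));
	attr.prog_id = (unsigned int)strtoul(argv[1], NULL, 0);
	fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
	if (fd < 0) {
		perror("BPF_PROG_GET_FD_BY_ID");
		return 1;
	}

	/* Ask for the translated instructions of that program. */
	xlated = calloc(4096, sizeof(*xlated));
	memset(&info, 0, sizeof(info));
	info.xlated_prog_len = 4096 * sizeof(*xlated);
	info.xlated_prog_insns = (unsigned long)xlated;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (unsigned long)&info;
	if (sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		perror("BPF_OBJ_GET_INFO_BY_FD");
	else
		/* Without CAP_SYSLOG on the fd's opener, map references and
		 * blinded programs come back with their immediates zeroed. */
		printf("xlated bytes returned: %u\n", info.xlated_prog_len);

	free(xlated);
	close(fd);
	return 0;
}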
@@ -687,19 +687,20 @@ static inline int kallsyms_for_perf(void)
  * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
  * block even that).
  */
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
 {
 	switch (kptr_restrict) {
 	case 0:
 		if (kallsyms_for_perf())
-			return 1;
+			return true;
 	/* fallthrough */
 	case 1:
-		if (has_capability_noaudit(current, CAP_SYSLOG))
-			return 1;
+		if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+				     CAP_OPT_NOAUDIT) == 0)
+			return true;
 	/* fallthrough */
 	default:
-		return 0;
+		return false;
 	}
 }

@@ -716,7 +717,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	reset_iter(iter, 0);

-	iter->show_value = kallsyms_show_value();
+	/*
+	 * Instead of checking this on every s_show() call, cache
+	 * the result here at open time.
+	 */
+	iter->show_value = kallsyms_show_value(file->f_cred);
 	return 0;
 }

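The comment added in kallsyms_open() is the core idea of the whole series: the visibility decision is taken once, against the credentials attached to the struct file, instead of being re-evaluated against current at read time. A small sketch (not part of the patch) that makes the cached-at-open semantics visible; it assumes it is started as root and that uid 65534 (nobody) exists:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}

	/* Drop to an unprivileged uid *after* the open. */
	if (setresuid(65534, 65534, 65534)) {
		perror("setresuid");
		return 1;
	}

	/*
	 * The decision was cached from the opener's credentials, so this read
	 * still shows whatever the opener was allowed to see; a fresh open by
	 * uid 65534 with kptr_restrict >= 1 would read the addresses as zeroes.
	 */
	if (fgets(line, sizeof(line), f))
		printf("read as uid %d: %s", getuid(), line);

	fclose(f);
	return 0;
}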
@@ -2334,7 +2334,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
 	else
 		kprobe_type = "k";

-	if (!kallsyms_show_value())
+	if (!kallsyms_show_value(pi->file->f_cred))
 		addr = NULL;

 	if (sym)

@@ -2435,7 +2435,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
 	 * If /proc/kallsyms is not showing kernel address, we won't
 	 * show them here either.
 	 */
-	if (!kallsyms_show_value())
+	if (!kallsyms_show_value(m->file->f_cred))
 		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
 			   (void *)ent->start_addr);
 	else

@@ -1451,8 +1451,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
 }

 struct module_sect_attr {
-	struct module_attribute mattr;
-	char *name;
+	struct bin_attribute battr;
 	unsigned long address;
 };

@@ -1462,13 +1461,18 @@ struct module_sect_attrs {
 	struct module_sect_attr attrs[0];
 };

-static ssize_t module_sect_show(struct module_attribute *mattr,
-				struct module_kobject *mk, char *buf)
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+				struct bin_attribute *battr,
+				char *buf, loff_t pos, size_t count)
 {
 	struct module_sect_attr *sattr =
-		container_of(mattr, struct module_sect_attr, mattr);
-	return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
-		       (void *)sattr->address : NULL);
+		container_of(battr, struct module_sect_attr, battr);
+
+	if (pos != 0)
+		return -EINVAL;
+
+	return sprintf(buf, "0x%px\n",
+		       kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
 }

 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)

@@ -1476,7 +1480,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
 	unsigned int section;

 	for (section = 0; section < sect_attrs->nsections; section++)
-		kfree(sect_attrs->attrs[section].name);
+		kfree(sect_attrs->attrs[section].battr.attr.name);
 	kfree(sect_attrs);
 }

@@ -1485,43 +1489,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
 	unsigned int nloaded = 0, i, size[2];
 	struct module_sect_attrs *sect_attrs;
 	struct module_sect_attr *sattr;
-	struct attribute **gattr;
+	struct bin_attribute **gattr;

 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < info->hdr->e_shnum; i++)
 		if (!sect_empty(&info->sechdrs[i]))
 			nloaded++;
-	size[0] = ALIGN(sizeof(*sect_attrs)
-			+ nloaded * sizeof(sect_attrs->attrs[0]),
-			sizeof(sect_attrs->grp.attrs[0]));
-	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+	size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
+			sizeof(sect_attrs->grp.bin_attrs[0]));
+	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
 	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
 	if (sect_attrs == NULL)
 		return;

 	/* Setup section attributes. */
 	sect_attrs->grp.name = "sections";
-	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+	sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];

 	sect_attrs->nsections = 0;
 	sattr = &sect_attrs->attrs[0];
-	gattr = &sect_attrs->grp.attrs[0];
+	gattr = &sect_attrs->grp.bin_attrs[0];
 	for (i = 0; i < info->hdr->e_shnum; i++) {
 		Elf_Shdr *sec = &info->sechdrs[i];
 		if (sect_empty(sec))
 			continue;
+		sysfs_bin_attr_init(&sattr->battr);
 		sattr->address = sec->sh_addr;
-		sattr->name = kstrdup(info->secstrings + sec->sh_name,
-				      GFP_KERNEL);
-		if (sattr->name == NULL)
+		sattr->battr.attr.name =
+			kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+		if (sattr->battr.attr.name == NULL)
 			goto out;
 		sect_attrs->nsections++;
-		sysfs_attr_init(&sattr->mattr.attr);
-		sattr->mattr.show = module_sect_show;
-		sattr->mattr.store = NULL;
-		sattr->mattr.attr.name = sattr->name;
-		sattr->mattr.attr.mode = S_IRUSR;
-		*(gattr++) = &(sattr++)->mattr.attr;
+		sattr->battr.read = module_sect_read;
+		sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
+		sattr->battr.attr.mode = 0400;
+		*(gattr++) = &(sattr++)->battr;
 	}
 	*gattr = NULL;

@@ -1611,7 +1613,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
 			continue;
 		if (info->sechdrs[i].sh_type == SHT_NOTE) {
 			sysfs_bin_attr_init(nattr);
-			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+			nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
 			nattr->attr.mode = S_IRUGO;
 			nattr->size = info->sechdrs[i].sh_size;
 			nattr->private = (void *) info->sechdrs[i].sh_addr;

@@ -4287,7 +4289,7 @@ static int modules_open(struct inode *inode, struct file *file)

 	if (!err) {
 		struct seq_file *m = file->private_data;
-		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+		m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
 	}

 	return err;

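With the bin_attribute conversion, /sys/module/<name>/sections/<section> stays mode 0400, but its content is now gated by kallsyms_show_value() against the opener's credentials instead of a bare kptr_restrict < 2 test. A trivial reader sketch (the default module name below is only an example; pass any loaded module's section file as an argument):

#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1]
				    : "/sys/module/xt_tcpudp/sections/.text"; /* example path */
	char buf[64];
	FILE *f = fopen(path, "r");     /* mode is 0400, so this needs root */

	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf); /* prints 0x0000000000000000 when hidden */
	fclose(f);
	return 0;
}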
@@ -270,7 +270,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 	if (write && !ret) {
 		if (jit_enable < 2 ||
-		    (jit_enable == 2 && bpf_dump_raw_ok())) {
+		    (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
 			*(int *)table->data = jit_enable;
 			if (jit_enable == 2)
 				pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");

@@ -266,7 +266,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	unsigned int ver;
 	size_t hdrlen;

-	if (len & 3)
+	if (len == 0 || len & 3)
 		return -EINVAL;

 	skb = netdev_alloc_skb(NULL, len);

@@ -280,6 +280,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)

 	switch (ver) {
 	case QRTR_PROTO_VER_1:
+		if (len < sizeof(*v1))
+			goto err;
 		v1 = data;
 		hdrlen = sizeof(*v1);

@@ -293,6 +295,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 		size = le32_to_cpu(v1->size);
 		break;
 	case QRTR_PROTO_VER_2:
+		if (len < sizeof(*v2))
+			goto err;
 		v2 = data;
 		hdrlen = sizeof(*v2) + v2->optlen;

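The qrtr change is the usual bounds-check pattern for frame parsers: reject empty and unaligned buffers up front, and confirm the buffer actually covers the version-specific header before dereferencing it. A stand-alone sketch of that shape (the struct below is a stand-in for illustration, not the real wire header):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hdr_v1_like {            /* stand-in for an on-the-wire header */
	uint32_t version;
	uint32_t type;
	uint32_t src_node;
	uint32_t src_port;
	uint32_t confirm_rx;
	uint32_t size;
	uint32_t dst_node;
	uint32_t dst_port;
};

static int check_frame(const void *data, size_t len)
{
	const struct hdr_v1_like *hdr;

	/* Reject empty and unaligned frames before touching the payload. */
	if (len == 0 || len & 3)
		return -1;
	/* Only dereference the header once we know the buffer covers it. */
	if (len < sizeof(*hdr))
		return -1;
	hdr = data;
	return (int)hdr->version;
}

int main(void)
{
	unsigned char buf[64] = { 0 };

	printf("len=0  -> %d\n", check_frame(buf, 0));
	printf("len=3  -> %d\n", check_frame(buf, 3));
	printf("len=8  -> %d\n", check_frame(buf, 8));
	printf("len=64 -> %d\n", check_frame(buf, sizeof(buf)));
	return 0;
}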
@@ -723,6 +723,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)

 	retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
 	if (!retval) {
+		/* clear flags and stop any drain wait */
+		stream->partial_drain = false;
+		stream->metadata_set = false;
 		snd_compr_drain_notify(stream);
 		stream->runtime->total_bytes_available = 0;
 		stream->runtime->total_bytes_transferred = 0;

@@ -880,6 +883,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
 	if (stream->next_track == false)
 		return -EPERM;

+	stream->partial_drain = true;
 	retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
 	if (retval) {
 		pr_debug("Partial drain returned failure\n");

@@ -105,6 +105,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
 	{
 		struct snd_dm_fm_info info;

+		memset(&info, 0, sizeof(info));
+
 		info.fm_mode = opl3->fm_mode;
 		info.rhythm = opl3->rhythm;
 		if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))

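The opl3 hunk is the classic infoleak fix: the struct has padding and unwritten bytes, and copying the whole thing to userspace without zeroing it first can leak stale stack contents. A userspace illustration of the same hazard, using a stand-in struct (build with -O0; whether the 0xaa filler actually shows up in the padding depends on the compiler's stack layout):

#include <stdio.h>
#include <string.h>

struct fm_info_like {           /* stand-in with the same padding hazard */
	unsigned char fm_mode;  /* typically followed by 3 padding bytes */
	unsigned int rhythm;
};

static void fill_stack(void)
{
	volatile unsigned char junk[64];
	size_t i;

	for (i = 0; i < sizeof(junk); i++)
		junk[i] = 0xaa;
}

static void show(const char *tag, int do_memset)
{
	struct fm_info_like info;
	unsigned char raw[sizeof(info)];
	size_t i;

	if (do_memset)
		memset(&info, 0, sizeof(info));
	info.fm_mode = 1;
	info.rhythm = 2;

	memcpy(raw, &info, sizeof(info)); /* what copy_to_user() would push out */
	printf("%-11s:", tag);
	for (i = 0; i < sizeof(raw); i++)
		printf(" %02x", raw[i]);
	printf("\n");
}

int main(void)
{
	fill_stack();
	show("no memset", 0);   /* padding bytes may still hold 0xaa */
	fill_stack();
	show("with memset", 1);
	return 0;
}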
@@ -76,6 +76,12 @@ static int compare_input_type(const void *ap, const void *bp)
 	if (a->type != b->type)
 		return (int)(a->type - b->type);

+	/* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */
+	if (a->is_headset_mic && b->is_headphone_mic)
+		return -1; /* don't swap */
+	else if (a->is_headphone_mic && b->is_headset_mic)
+		return 1; /* swap */
+
 	/* In case one has boost and the other one has not,
 	   pick the one with boost first. */
 	return (int)(b->has_boost_on_pin - a->has_boost_on_pin);

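The new comparator clause only matters as a tie-break between two inputs of the same type. A small stand-alone demo of the ordering it produces, using qsort() (the struct and labels are stand-ins for the real auto-parser types):

#include <stdio.h>
#include <stdlib.h>

struct input_like {             /* trimmed stand-in for the parsed input entries */
	int type;
	int is_headset_mic;
	int is_headphone_mic;
	int has_boost_on_pin;
	const char *label;
};

static int compare_input_type(const void *ap, const void *bp)
{
	const struct input_like *a = ap, *b = bp;

	if (a->type != b->type)
		return a->type - b->type;

	/* With equal types, a headset mic sorts ahead of a headphone mic. */
	if (a->is_headset_mic && b->is_headphone_mic)
		return -1;
	if (a->is_headphone_mic && b->is_headset_mic)
		return 1;

	/* Otherwise prefer the pin that has a boost control. */
	return b->has_boost_on_pin - a->has_boost_on_pin;
}

int main(void)
{
	struct input_like pins[] = {
		{ 1, 0, 1, 0, "hp_mic" },
		{ 1, 1, 0, 0, "hs_mic" },
	};
	size_t i;

	qsort(pins, 2, sizeof(pins[0]), compare_input_type);
	for (i = 0; i < 2; i++)
		printf("%zu: %s\n", i, pins[i].label); /* hs_mic comes first */
	return 0;
}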
@@ -3460,4 +3460,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
 	}
 },

+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have swapped L-R channels, but that's for userspace to deal
+ * with.
+ */
+{
+	USB_DEVICE(0x534d, 0x2109),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.vendor_name = "MacroSilicon",
+		.product_name = "MS2109",
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = &(const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_ALIGN_TRANSFER,
+			},
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_STANDARD_MIXER,
+			},
+			{
+				.ifnum = 3,
+				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
+				.data = &(const struct audioformat) {
+					.formats = SNDRV_PCM_FMTBIT_S16_LE,
+					.channels = 2,
+					.iface = 3,
+					.altsetting = 1,
+					.altset_idx = 1,
+					.attributes = 0,
+					.endpoint = 0x82,
+					.ep_attr = USB_ENDPOINT_XFER_ISOC |
+						USB_ENDPOINT_SYNC_ASYNC,
+					.rates = SNDRV_PCM_RATE_CONTINUOUS,
+					.rate_min = 48000,
+					.rate_max = 48000,
+				}
+			},
+			{
+				.ifnum = -1
+			}
+		}
+	}
+},
+
 #undef USB_DEVICE_VENDOR_SPEC