Merge android-4.19.60 (bafa20f) into msm-4.19
* refs/heads/tmp-bafa20f:
  Linux 4.19.60
  x86/entry/32: Fix ENDPROC of common_spurious
  drm/udl: move to embedding drm device inside udl device.
  drm/udl: Replace drm_dev_unref with drm_dev_put
  drm/udl: introduce a macro to convert dev to udl.
  regmap-irq: do not write mask register if mask_base is zero
  crypto/NX: Set receive window credits to max number of CRBs in RxFIFO
  crypto: talitos - fix hash on SEC1.
  crypto: talitos - move struct talitos_edesc into talitos.h
  s390/qdio: don't touch the dsci in tiqdio_add_input_queues()
  s390/qdio: (re-)initialize tiqdio list entries
  s390: fix stfle zero padding
  ARC: hide unused function unw_hdr_alloc
  x86/irq: Seperate unused system vectors from spurious entry again
  x86/irq: Handle spurious interrupt after shutdown gracefully
  x86/ioapic: Implement irq_get_irqchip_state() callback
  genirq: Add optional hardware synchronization for shutdown
  genirq: Fix misleading synchronize_irq() documentation
  genirq: Delay deactivation in free_irq()
  linux/kernel.h: fix overflow for DIV_ROUND_UP_ULL
  pinctrl: mediatek: Update cur_mask in mask/mask ops
  cpu/hotplug: Fix out-of-bounds read when setting fail state
  pinctrl: mediatek: Ignore interrupts that are wake only during resume
  HID: multitouch: Add pointstick support for ALPS Touchpad
  HID: chicony: add another quirk for PixArt mouse
  x86/boot/64: Add missing fixup_pointer() for next_early_pgt access
  x86/boot/64: Fix crash if kernel image crosses page table boundary
  dm verity: use message limit for data block corruption message
  dm table: don't copy from a NULL pointer in realloc_argv()
  pinctrl: mcp23s08: Fix add_data and irqchip_add_nested call order
  ARM: dts: imx6ul: fix PWM[1-4] interrupts
  sis900: fix TX completion
  ppp: mppe: Add softdep to arc4
  be2net: fix link failure after ethtool offline test
  x86/apic: Fix integer overflow on 10 bit left shift of cpu_khz
  afs: Fix uninitialised spinlock afs_volume::cb_break_lock
  ARM: omap2: remove incorrect __init annotation
  ARM: dts: gemini Fix up DNS-313 compatible string
  perf/core: Fix perf_sample_regs_user() mm check
  efi/bgrt: Drop BGRT status field reserved bits check
  clk: ti: clkctrl: Fix returning uninitialized data
  irqchip/gic-v3-its: Fix command queue pointer comparison bug
  firmware: improve LSM/IMA security behaviour
  drivers: base: cacheinfo: Ensure cpu hotplug work is done before Intel RDT
  nilfs2: do not use unexported cpu_to_le32()/le32_to_cpu() in uapi header
  Input: synaptics - enable SMBUS on T480 thinkpad trackpad
  e1000e: start network tx queue only when link is up
  Revert "e1000e: fix cyclic resets at link up with active tx"
  ANDROID: overlayfs: override_creds=off option bypass creator_cred (part deux)
  ANDROID: f2fs: add android fsync tracepoint
  ANDROID: f2fs: fix wrong android tracepoint

Conflicts:
	include/linux/cpuhotplug.h

Change-Id: I8bdec8958ec0a3212ef8a8872bf7b079b4781b3a
Signed-off-by: Ivaylo Georgiev <irgeorgiev@codeaurora.org>
commit 77606b136a
61 changed files with 594 additions and 269 deletions
Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 19
-SUBLEVEL = 59
+SUBLEVEL = 60
 EXTRAVERSION =
 NAME = "People's Front"
 
arch/arc/kernel/unwind.c

@@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz)
 				       MAX_DMA_ADDRESS);
 }
 
-static void *unw_hdr_alloc(unsigned long sz)
-{
-	return kmalloc(sz, GFP_KERNEL);
-}
-
 static void init_unwind_table(struct unwind_table *table, const char *name,
 			      const void *core_start, unsigned long core_size,
 			      const void *init_start, unsigned long init_size,
@@ -370,6 +365,10 @@ static void init_unwind_hdr(struct unwind_table *table,
 }
 
 #ifdef CONFIG_MODULES
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}
 
 static struct unwind_table *last_table;
 
arch/arm/boot/dts/gemini-dns313.dts

@@ -11,7 +11,7 @@
 
 / {
 	model = "D-Link DNS-313 1-Bay Network Storage Enclosure";
-	compatible = "dlink,dir-313", "cortina,gemini";
+	compatible = "dlink,dns-313", "cortina,gemini";
 	#address-cells = <1>;
 	#size-cells = <1>;
 
arch/arm/boot/dts/imx6ul.dtsi

@@ -359,7 +359,7 @@
 			pwm1: pwm@2080000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02080000 0x4000>;
-				interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM1>,
 					 <&clks IMX6UL_CLK_PWM1>;
 				clock-names = "ipg", "per";
@@ -370,7 +370,7 @@
 			pwm2: pwm@2084000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02084000 0x4000>;
-				interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM2>,
 					 <&clks IMX6UL_CLK_PWM2>;
 				clock-names = "ipg", "per";
@@ -381,7 +381,7 @@
 			pwm3: pwm@2088000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x02088000 0x4000>;
-				interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM3>,
 					 <&clks IMX6UL_CLK_PWM3>;
 				clock-names = "ipg", "per";
@@ -392,7 +392,7 @@
 			pwm4: pwm@208c000 {
 				compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm";
 				reg = <0x0208c000 0x4000>;
-				interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+				interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 				clocks = <&clks IMX6UL_CLK_PWM4>,
 					 <&clks IMX6UL_CLK_PWM4>;
 				clock-names = "ipg", "per";
arch/arm/mach-omap2/prm3xxx.c

@@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void)
  * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
  * No return value.
  */
-static void __init omap3xxx_prm_enable_io_wakeup(void)
+static void omap3xxx_prm_enable_io_wakeup(void)
 {
 	if (prm_features & PRM_HAS_IO_WAKEUP)
 		omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
arch/s390/include/asm/facility.h

@@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr)
 	return __test_facility(nr, &S390_lowcore.stfle_fac_list);
 }
 
+static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size)
+{
+	register unsigned long reg0 asm("0") = size - 1;
+
+	asm volatile(
+		".insn s,0xb2b00000,0(%1)" /* stfle */
+		: "+d" (reg0)
+		: "a" (stfle_fac_list)
+		: "memory", "cc");
+	return reg0;
+}
+
 /**
  * stfle - Store facility list extended
  * @stfle_fac_list: array where facility list can be stored
@@ -76,13 +88,8 @@ static inline void stfle(u64 *stfle_fac_list, int size)
 	memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4);
 	if (S390_lowcore.stfl_fac_list & 0x01000000) {
 		/* More facility bits available with stfle */
-		register unsigned long reg0 asm("0") = size - 1;
-
-		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
-			     : "+d" (reg0)
-			     : "a" (stfle_fac_list)
-			     : "memory", "cc");
-		nr = (reg0 + 1) * 8; /* # bytes stored by stfle */
+		nr = __stfle_asm(stfle_fac_list, size);
+		nr = min_t(unsigned long, (nr + 1) * 8, size * 8);
 	}
 	memset((char *) stfle_fac_list + nr, 0, size * 8 - nr);
 	preempt_enable();
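Why the min_t() clamp in the stfle hunk matters: the machine returns, in reg0, one less than the number of doublewords it wanted to store, which can exceed what the caller's buffer holds; without the clamp, size * 8 - nr in the trailing memset() underflows. A minimal sketch with hypothetical values (editorial aside, not part of the diff):

	/* Caller passes a 4-doubleword buffer; machine reports 6 doublewords. */
	unsigned long reg0 = 5;	/* as returned by __stfle_asm() */
	int size = 4;
	unsigned long nr = (reg0 + 1) * 8;	/* 48 > size * 8 == 32: memset length underflows */

	nr = min_t(unsigned long, (reg0 + 1) * 8, size * 8);	/* clamped to 32 */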
arch/x86/entry/entry_32.S

@@ -1098,6 +1098,30 @@ ENTRY(irq_entries_start)
     .endr
 END(irq_entries_start)
 
+#ifdef CONFIG_X86_LOCAL_APIC
+	.align 8
+ENTRY(spurious_entries_start)
+    vector=FIRST_SYSTEM_VECTOR
+    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
+    vector=vector+1
+	jmp	common_spurious
+	.align	8
+    .endr
+END(spurious_entries_start)
+
+common_spurious:
+	ASM_CLAC
+	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
+	SAVE_ALL switch_stacks=1
+	ENCODE_FRAME_POINTER
+	TRACE_IRQS_OFF
+	movl	%esp, %eax
+	call	smp_spurious_interrupt
+	jmp	ret_from_intr
+ENDPROC(common_spurious)
+#endif
+
 /*
  * the CPU automatically disables interrupts when executing an IRQ vector,
  * so IRQ-flags tracing has to follow that:
arch/x86/entry/entry_64.S

@@ -438,6 +438,18 @@ ENTRY(irq_entries_start)
     .endr
 END(irq_entries_start)
 
+	.align 8
+ENTRY(spurious_entries_start)
+    vector=FIRST_SYSTEM_VECTOR
+    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+	UNWIND_HINT_IRET_REGS
+	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
+	jmp	common_spurious
+	.align	8
+	vector=vector+1
+    .endr
+END(spurious_entries_start)
+
 .macro DEBUG_ENTRY_ASSERT_IRQS_OFF
 #ifdef CONFIG_DEBUG_ENTRY
 	pushq %rax
@@ -634,10 +646,20 @@ _ASM_NOKPROBE(interrupt_entry)
 
 /* Interrupt entry/exit. */
 
 /*
  * The interrupt stubs push (~vector+0x80) onto the stack and
- * then jump to common_interrupt.
+ * then jump to common_spurious/interrupt.
  */
+common_spurious:
+	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
+	call	interrupt_entry
+	UNWIND_HINT_REGS indirect=1
+	call	smp_spurious_interrupt		/* rdi points to pt_regs */
+	jmp	ret_from_intr
+END(common_spurious)
+_ASM_NOKPROBE(common_spurious)
+
+/* common_interrupt is a hotpath. Align it */
 	.p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
 	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
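A note on the stub encoding above (editorial aside, not part of the diff): for any vector in [0, 255], ~vector + 0x80 equals 127 - vector, so the pushed immediate always fits in a sign-extended byte and every stub stays short; the common entry code then undoes the bias with addq $-0x80, (%rsp), leaving ~vector for the handler to invert. A standalone C sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned char vector = 0xec;		/* example vector */
		signed char pushed = ~vector + 0x80;	/* 127 - 236 = -109: fits in imm8 */
		long adjusted = (long)pushed - 0x80;	/* addq $-0x80 -> -237 == ~vector */

		/* prints: -109 -237 236 */
		printf("%d %ld %d\n", pushed, adjusted, (unsigned char)~adjusted);
		return 0;
	}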
arch/x86/include/asm/hw_irq.h

@@ -150,8 +150,11 @@ extern char irq_entries_start[];
 #define trace_irq_entries_start irq_entries_start
 #endif
 
+extern char spurious_entries_start[];
+
 #define VECTOR_UNUSED		NULL
-#define VECTOR_RETRIGGERED	((void *)~0UL)
+#define VECTOR_SHUTDOWN		((void *)~0UL)
+#define VECTOR_RETRIGGERED	((void *)~1UL)
 
 typedef struct irq_desc* vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
arch/x86/kernel/apic/apic.c

@@ -1452,7 +1452,8 @@ static void apic_pending_intr_clear(void)
 		if (queued) {
 			if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
 				ntsc = rdtsc();
-				max_loops = (cpu_khz << 10) - (ntsc - tsc);
+				max_loops = (long long)cpu_khz << 10;
+				max_loops -= ntsc - tsc;
 			} else {
 				max_loops--;
 			}
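Why the cast matters (editorial aside, not part of the diff): cpu_khz is a 32-bit unsigned int, so cpu_khz << 10 is evaluated in 32-bit arithmetic and wraps for clock rates above 2^32 / 1024 kHz, about 4.2 GHz, before ever reaching the 64-bit max_loops. A minimal sketch, assuming a hypothetical 4.5 GHz part:

	unsigned int cpu_khz = 4500000;
	long long bad  = cpu_khz << 10;			/* wraps to 313032704 */
	long long good = (long long)cpu_khz << 10;	/* 4608000000 as intended */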
@@ -2026,21 +2027,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
 	entering_irq();
 	trace_spurious_apic_entry(vector);
 
-	/*
-	 * Check if this really is a spurious interrupt and ACK it
-	 * if it is a vectored one. Just in case...
-	 * Spurious interrupts should not be ACKed.
-	 */
-	v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
-	if (v & (1 << (vector & 0x1f)))
-		ack_APIC_irq();
-
 	inc_irq_stat(irq_spurious_count);
 
-	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
-	pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
-		"should never happen.\n", vector, smp_processor_id());
+	/*
+	 * If this is a spurious interrupt then do not acknowledge
+	 */
+	if (vector == SPURIOUS_APIC_VECTOR) {
+		/* See SDM vol 3 */
+		pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
+			smp_processor_id());
+		goto out;
+	}
+
+	/*
+	 * If it is a vectored one, verify it's set in the ISR. If set,
+	 * acknowledge it.
+	 */
+	v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+	if (v & (1 << (vector & 0x1f))) {
+		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
+			vector, smp_processor_id());
+		ack_APIC_irq();
+	} else {
+		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
+			vector, smp_processor_id());
+	}
+out:
 	trace_spurious_apic_exit(vector);
 	exiting_irq();
 }
arch/x86/kernel/apic/io_apic.c

@@ -1891,6 +1891,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data,
 	return ret;
 }
 
+/*
+ * Interrupt shutdown masks the ioapic pin, but the interrupt might already
+ * be in flight, but not yet serviced by the target CPU. That means
+ * __synchronize_hardirq() would return and claim that everything is calmed
+ * down. So free_irq() would proceed and deactivate the interrupt and free
+ * resources.
+ *
+ * Once the target CPU comes around to service it it will find a cleared
+ * vector and complain. While the spurious interrupt is harmless, the full
+ * release of resources might prevent the interrupt from being acknowledged
+ * which keeps the hardware in a weird state.
+ *
+ * Verify that the corresponding Remote-IRR bits are clear.
+ */
+static int ioapic_irq_get_chip_state(struct irq_data *irqd,
+				     enum irqchip_irq_state which,
+				     bool *state)
+{
+	struct mp_chip_data *mcd = irqd->chip_data;
+	struct IO_APIC_route_entry rentry;
+	struct irq_pin_list *p;
+
+	if (which != IRQCHIP_STATE_ACTIVE)
+		return -EINVAL;
+
+	*state = false;
+	raw_spin_lock(&ioapic_lock);
+	for_each_irq_pin(p, mcd->irq_2_pin) {
+		rentry = __ioapic_read_entry(p->apic, p->pin);
+		/*
+		 * The remote IRR is only valid in level trigger mode. It's
+		 * meaning is undefined for edge triggered interrupts and
+		 * irrelevant because the IO-APIC treats them as fire and
+		 * forget.
+		 */
+		if (rentry.irr && rentry.trigger) {
+			*state = true;
+			break;
+		}
+	}
+	raw_spin_unlock(&ioapic_lock);
+	return 0;
+}
+
 static struct irq_chip ioapic_chip __read_mostly = {
 	.name			= "IO-APIC",
 	.irq_startup		= startup_ioapic_irq,
@@ -1900,6 +1944,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_eoi		= ioapic_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1912,6 +1957,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
 	.irq_eoi		= ioapic_ir_ack_level,
 	.irq_set_affinity	= ioapic_set_affinity,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE,
 };
 
arch/x86/kernel/apic/vector.c

@@ -342,7 +342,7 @@ static void clear_irq_vector(struct irq_data *irqd)
 	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
 			   apicd->prev_cpu);
 
-	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED;
+	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
 	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
 	apicd->vector = 0;
 
@@ -351,7 +351,7 @@ static void clear_irq_vector(struct irq_data *irqd)
 	if (!vector)
 		return;
 
-	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED;
+	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
 	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
 	apicd->prev_vector = 0;
 	apicd->move_in_progress = 0;
arch/x86/kernel/head64.c

@@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
 	if (la57) {
-		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
+		p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
+				    physaddr);
 
 		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
 		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
 		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;
 
-		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
-		p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
-		p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
+		i = physaddr >> P4D_SHIFT;
+		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
+		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
 	} else {
 		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
 		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
 		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
 	}
 
-	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
-	pud[i + 0] = (pudval_t)pmd + pgtable_flags;
-	pud[i + 1] = (pudval_t)pmd + pgtable_flags;
+	i = physaddr >> PUD_SHIFT;
+	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
+	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
 
 	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
 	/* Filter out unsupported __PAGE_KERNEL_* bits: */
@@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr,
 	pmd_entry += physaddr;
 
 	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
-		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
-		pmd[idx] = pmd_entry + i * PMD_SIZE;
+		int idx = i + (physaddr >> PMD_SHIFT);
+
+		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
 	}
 
 	/*
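Why the % moved inside the index expression (editorial aside, not part of the diff): reducing the index before the + 1 goes wrong when the kernel image starts in the last slot of a page table, because the neighbouring entry must wrap back to slot 0 instead of being written one entry past the table. A worked example with PTRS_PER_P4D = 512:

	unsigned long i = 511;			/* image starts in the last slot */
	unsigned long before = (i % 512) + 1;	/* 512: out-of-bounds write */
	unsigned long after  = (i + 1) % 512;	/* 0: wraps correctly */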
arch/x86/kernel/idt.c

@@ -321,7 +321,8 @@ void __init idt_setup_apic_and_irq_gates(void)
 #ifdef CONFIG_X86_LOCAL_APIC
 	for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
 		set_bit(i, system_vectors);
-		set_intr_gate(i, spurious_interrupt);
+		entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR);
+		set_intr_gate(i, entry);
 	}
 #endif
 }
arch/x86/kernel/irq.c

@@ -246,7 +246,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	if (!handle_irq(desc, regs)) {
 		ack_APIC_irq();
 
-		if (desc != VECTOR_RETRIGGERED) {
+		if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) {
 			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
 					     __func__, smp_processor_id(),
 					     vector);
drivers/base/cacheinfo.c

@@ -653,7 +653,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
 
 static int __init cacheinfo_sysfs_init(void)
 {
-	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
+	return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE,
+				 "base/cacheinfo:online",
 				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
 }
 device_initcall(cacheinfo_sysfs_init);
drivers/base/firmware_loader/fallback.c

@@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
 	/* Also permit LSMs and IMA to fail firmware sysfs fallback */
 	ret = security_kernel_load_data(LOADING_FIRMWARE);
 	if (ret < 0)
-		return ret;
+		return false;
 
 	return fw_force_sysfs_fallback(opt_flags);
 }
drivers/base/regmap/regmap-irq.c

@@ -91,6 +91,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
 	 * suppress pointless writes.
 	 */
 	for (i = 0; i < d->chip->num_regs; i++) {
+		if (!d->chip->mask_base)
+			continue;
+
 		reg = d->chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (d->chip->mask_invert) {
@@ -526,6 +529,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	/* Mask all the interrupts by default */
 	for (i = 0; i < chip->num_regs; i++) {
 		d->mask_buf[i] = d->mask_buf_def[i];
+		if (!chip->mask_base)
+			continue;
+
 		reg = chip->mask_base +
 			(i * map->reg_stride * d->irq_reg_stride);
 		if (chip->mask_invert)
drivers/clk/ti/clkctrl.c

@@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 {
 	struct omap_clkctrl_provider *provider = data;
 	struct omap_clkctrl_clk *entry;
+	bool found = false;
 
 	if (clkspec->args_count != 2)
 		return ERR_PTR(-EINVAL);
@@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
 
 	list_for_each_entry(entry, &provider->clocks, node) {
 		if (entry->reg_offset == clkspec->args[0] &&
-		    entry->bit_offset == clkspec->args[1])
+		    entry->bit_offset == clkspec->args[1]) {
+			found = true;
 			break;
+		}
 	}
 
-	if (!entry)
+	if (!found)
 		return ERR_PTR(-EINVAL);
 
 	return entry->clk;
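Background on the found flag (editorial aside, not part of the diff): when list_for_each_entry() walks the whole list without a break, the cursor ends up pointing at the container of the list head itself, never NULL, so the old if (!entry) test could not detect a failed lookup and uninitialized data was returned. A minimal sketch of the idiom, assuming a hypothetical struct item with an embedded list_head:

	bool found = false;
	struct item *it;

	list_for_each_entry(it, &items, node) {
		if (it->key == key) {
			found = true;
			break;
		}
	}
	/* 'it' is never NULL here, even when nothing matched */
	if (!found)
		return ERR_PTR(-ENOENT);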
drivers/crypto/nx/nx-842-powernv.c

@@ -36,8 +36,6 @@ MODULE_ALIAS_CRYPTO("842-nx");
 #define WORKMEM_ALIGN	(CRB_ALIGN)
 #define CSB_WAIT_MAX	(5000) /* ms */
 #define VAS_RETRIES	(10)
-/* # of requests allowed per RxFIFO at a time. 0 for unlimited */
-#define MAX_CREDITS_PER_RXFIFO	(1024)
 
 struct nx842_workmem {
 	/* Below fields must be properly aligned */
@@ -821,7 +819,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id,
 	rxattr.lnotify_lpid = lpid;
 	rxattr.lnotify_pid = pid;
 	rxattr.lnotify_tid = tid;
-	rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO;
+	/*
+	 * Maximum RX window credits can not be more than #CRBs in
+	 * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns.
+	 */
+	rxattr.wcreds_max = fifo_size / CRB_SIZE;
 
 	/*
 	 * Open a VAS receice window which is used to configure RxFIFO
drivers/crypto/talitos.c

@@ -334,6 +334,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 }
 EXPORT_SYMBOL(talitos_submit);
 
+static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
+{
+	struct talitos_edesc *edesc;
+
+	if (!is_sec1)
+		return request->desc->hdr;
+
+	if (!request->desc->next_desc)
+		return request->desc->hdr1;
+
+	edesc = container_of(request->desc, struct talitos_edesc, desc);
+
+	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
+}
+
 /*
  * process what was done, notify callback of error if not
  */
@@ -355,12 +370,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 
 		/* descriptors with their done bits set don't get the error */
 		rmb();
-		if (!is_sec1)
-			hdr = request->desc->hdr;
-		else if (request->desc->next_desc)
-			hdr = (request->desc + 1)->hdr1;
-		else
-			hdr = request->desc->hdr1;
+		hdr = get_request_hdr(request, is_sec1);
 
 		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 			status = 0;
@@ -490,8 +500,14 @@ static u32 current_desc_hdr(struct device *dev, int ch)
 		}
 	}
 
-	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
-		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
+	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
+		struct talitos_edesc *edesc;
+
+		edesc = container_of(priv->chan[ch].fifo[iter].desc,
+				     struct talitos_edesc, desc);
+		return ((struct talitos_desc *)
+			(edesc->buf + edesc->dma_len))->hdr;
+	}
 
 	return priv->chan[ch].fifo[iter].desc->hdr;
 }
@@ -913,36 +929,6 @@ static int aead_setkey(struct crypto_aead *authenc,
 	return -EINVAL;
 }
 
-/*
- * talitos_edesc - s/w-extended descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
- * @icv_ool: whether ICV is out-of-line
- * @iv_dma: dma address of iv for checking continuity and link table
- * @dma_len: length of dma mapped link_tbl space
- * @dma_link_tbl: bus physical address of link_tbl/buf
- * @desc: h/w descriptor
- * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
- * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
- *
- * if decrypting (with authcheck), or either one of src_nents or dst_nents
- * is greater than 1, an integrity check value is concatenated to the end
- * of link_tbl data
- */
-struct talitos_edesc {
-	int src_nents;
-	int dst_nents;
-	bool icv_ool;
-	dma_addr_t iv_dma;
-	int dma_len;
-	dma_addr_t dma_link_tbl;
-	struct talitos_desc desc;
-	union {
-		struct talitos_ptr link_tbl[0];
-		u8 buf[0];
-	};
-};
-
 static void talitos_sg_unmap(struct device *dev,
 			     struct talitos_edesc *edesc,
 			     struct scatterlist *src,
@@ -1431,15 +1417,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
 	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
 	edesc->dma_len = dma_len;
-	if (dma_len) {
-		void *addr = &edesc->link_tbl[0];
-
-		if (is_sec1 && !dst)
-			addr += sizeof(struct talitos_desc);
-		edesc->dma_link_tbl = dma_map_single(dev, addr,
+	if (dma_len)
+		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
 						     edesc->dma_len,
 						     DMA_BIDIRECTIONAL);
-	}
+
 	return edesc;
 }
 
@@ -1706,14 +1688,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev,
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
 	struct talitos_desc *desc = &edesc->desc;
-	struct talitos_desc *desc2 = desc + 1;
+	struct talitos_desc *desc2 = (struct talitos_desc *)
+				     (edesc->buf + edesc->dma_len);
 
 	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
 	if (desc->next_desc &&
 	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
 		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
 
-	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
+	if (req_ctx->psrc)
+		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
 
 	/* When using hashctx-in, must unmap it. */
 	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
@@ -1780,7 +1764,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
 
 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 				struct ahash_request *areq, unsigned int length,
-				unsigned int offset,
 				void (*callback) (struct device *dev,
 						  struct talitos_desc *desc,
 						  void *context, int error))
@@ -1819,9 +1802,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 
 	sg_count = edesc->src_nents ?: 1;
 	if (is_sec1 && sg_count > 1)
-		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
-				   edesc->buf + sizeof(struct talitos_desc),
-				   length, req_ctx->nbuf);
+		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
 	else if (length)
 		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
 				      DMA_TO_DEVICE);
@@ -1834,7 +1815,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 				      DMA_TO_DEVICE);
 	} else {
 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-					  &desc->ptr[3], sg_count, offset, 0);
+					  &desc->ptr[3], sg_count, 0, 0);
 		if (sg_count > 1)
 			sync_needed = true;
 	}
@@ -1858,7 +1839,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 	talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
 
 	if (is_sec1 && req_ctx->nbuf && length) {
-		struct talitos_desc *desc2 = desc + 1;
+		struct talitos_desc *desc2 = (struct talitos_desc *)
+					     (edesc->buf + edesc->dma_len);
 		dma_addr_t next_desc;
 
 		memset(desc2, 0, sizeof(*desc2));
@@ -1879,7 +1861,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
 					 DMA_TO_DEVICE);
 		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
 		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
-					  &desc2->ptr[3], sg_count, offset, 0);
+					  &desc2->ptr[3], sg_count, 0, 0);
 		if (sg_count > 1)
 			sync_needed = true;
 		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
@@ -1990,7 +1972,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 	struct device *dev = ctx->dev;
 	struct talitos_private *priv = dev_get_drvdata(dev);
 	bool is_sec1 = has_ftr_sec1(priv);
-	int offset = 0;
 	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
 
 	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
@@ -2030,6 +2011,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 		sg_chain(req_ctx->bufsl, 2, areq->src);
 		req_ctx->psrc = req_ctx->bufsl;
 	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
+		int offset;
+
 		if (nbytes_to_hash > blocksize)
 			offset = blocksize - req_ctx->nbuf;
 		else
@@ -2042,7 +2025,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 		sg_copy_to_buffer(areq->src, nents,
 				  ctx_buf + req_ctx->nbuf, offset);
 		req_ctx->nbuf += offset;
-		req_ctx->psrc = areq->src;
+		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
+						 offset);
 	} else
 		req_ctx->psrc = areq->src;
 
@@ -2082,8 +2066,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
 	if (ctx->keylen && (req_ctx->first || req_ctx->last))
 		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
 
-	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
-				    ahash_done);
+	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
 }
 
 static int ahash_update(struct ahash_request *areq)
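The recurring pattern in the talitos hunks (editorial aside, not part of the diff): on SEC1 the chained second hardware descriptor no longer sits immediately after the first (the old desc + 1); it lives past the DMA-mapped link-table area and is reached through the enclosing software descriptor:

	/* recover the s/w-extended wrapper around a h/w descriptor ... */
	edesc = container_of(request->desc, struct talitos_edesc, desc);
	/* ... the second h/w descriptor follows the mapped area */
	desc2 = (struct talitos_desc *)(edesc->buf + edesc->dma_len);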
drivers/crypto/talitos.h

@@ -65,6 +65,36 @@ struct talitos_desc {
 
 #define TALITOS_DESC_SIZE	(sizeof(struct talitos_desc) - sizeof(__be32))
 
+/*
+ * talitos_edesc - s/w-extended descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @icv_ool: whether ICV is out-of-line
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @dma_len: length of dma mapped link_tbl space
+ * @dma_link_tbl: bus physical address of link_tbl/buf
+ * @desc: h/w descriptor
+ * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
+ * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
+ *
+ * if decrypting (with authcheck), or either one of src_nents or dst_nents
+ * is greater than 1, an integrity check value is concatenated to the end
+ * of link_tbl data
+ */
+struct talitos_edesc {
+	int src_nents;
+	int dst_nents;
+	bool icv_ool;
+	dma_addr_t iv_dma;
+	int dma_len;
+	dma_addr_t dma_link_tbl;
+	struct talitos_desc desc;
+	union {
+		struct talitos_ptr link_tbl[0];
+		u8 buf[0];
+	};
+};
+
 /**
  * talitos_request - descriptor submission request
  * @desc: descriptor pointer (kernel virtual)
drivers/firmware/efi/efi-bgrt.c

@@ -50,11 +50,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table)
 			  bgrt->version);
 		goto out;
 	}
-	if (bgrt->status & 0xfe) {
-		pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
-			  bgrt->status);
-		goto out;
-	}
 	if (bgrt->image_type != 0) {
 		pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
 			  bgrt->image_type);
drivers/gpu/drm/udl/udl_drv.c

@@ -47,10 +47,16 @@ static const struct file_operations udl_driver_fops = {
 	.llseek = noop_llseek,
 };
 
+static void udl_driver_release(struct drm_device *dev)
+{
+	udl_fini(dev);
+	udl_modeset_cleanup(dev);
+	drm_dev_fini(dev);
+	kfree(dev);
+}
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
-	.load = udl_driver_load,
-	.unload = udl_driver_unload,
 	.release = udl_driver_release,
 
 	/* gem hooks */
@@ -74,28 +80,56 @@ static struct drm_driver driver = {
 	.patchlevel = DRIVER_PATCHLEVEL,
 };
 
+static struct udl_device *udl_driver_create(struct usb_interface *interface)
+{
+	struct usb_device *udev = interface_to_usbdev(interface);
+	struct udl_device *udl;
+	int r;
+
+	udl = kzalloc(sizeof(*udl), GFP_KERNEL);
+	if (!udl)
+		return ERR_PTR(-ENOMEM);
+
+	r = drm_dev_init(&udl->drm, &driver, &interface->dev);
+	if (r) {
+		kfree(udl);
+		return ERR_PTR(r);
+	}
+
+	udl->udev = udev;
+	udl->drm.dev_private = udl;
+
+	r = udl_init(udl);
+	if (r) {
+		drm_dev_fini(&udl->drm);
+		kfree(udl);
+		return ERR_PTR(r);
+	}
+
+	usb_set_intfdata(interface, udl);
+	return udl;
+}
+
 static int udl_usb_probe(struct usb_interface *interface,
 			 const struct usb_device_id *id)
 {
-	struct usb_device *udev = interface_to_usbdev(interface);
-	struct drm_device *dev;
 	int r;
+	struct udl_device *udl;
 
-	dev = drm_dev_alloc(&driver, &interface->dev);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
+	udl = udl_driver_create(interface);
+	if (IS_ERR(udl))
+		return PTR_ERR(udl);
 
-	r = drm_dev_register(dev, (unsigned long)udev);
+	r = drm_dev_register(&udl->drm, 0);
 	if (r)
 		goto err_free;
 
-	usb_set_intfdata(interface, dev);
-	DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+	DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
 
 	return 0;
 
 err_free:
-	drm_dev_unref(dev);
+	drm_dev_put(&udl->drm);
 	return r;
 }
 
drivers/gpu/drm/udl/udl_drv.h

@@ -50,8 +50,8 @@ struct urb_list {
 struct udl_fbdev;
 
 struct udl_device {
+	struct drm_device drm;
 	struct device *dev;
-	struct drm_device *ddev;
 	struct usb_device *udev;
 	struct drm_crtc *crtc;
 
@@ -71,6 +71,8 @@ struct udl_device {
 	atomic_t cpu_kcycles_used; /* transpired during pixel processing */
 };
 
+#define to_udl(x) container_of(x, struct udl_device, drm)
+
 struct udl_gem_object {
 	struct drm_gem_object base;
 	struct page **pages;
@@ -102,9 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
 void udl_urb_completion(struct urb *urb);
 
-int udl_driver_load(struct drm_device *dev, unsigned long flags);
-void udl_driver_unload(struct drm_device *dev);
-void udl_driver_release(struct drm_device *dev);
+int udl_init(struct udl_device *udl);
+void udl_fini(struct drm_device *dev);
 
 int udl_fbdev_init(struct drm_device *dev);
 void udl_fbdev_cleanup(struct drm_device *dev);
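With struct drm_device embedded in struct udl_device, the driver recovers its state with container_of arithmetic instead of chasing dev->dev_private (editorial aside, not part of the diff):

	struct udl_device *udl = to_udl(dev);	/* container_of, no extra pointer */
	struct drm_device *drm = &udl->drm;	/* and back again */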
drivers/gpu/drm/udl/udl_fb.c

@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 		      int width, int height)
 {
 	struct drm_device *dev = fb->base.dev;
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	int i, ret;
 	char *cmd;
 	cycles_t start_cycles, end_cycles;
@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
 {
 	struct udl_fbdev *ufbdev = info->par;
 	struct drm_device *dev = ufbdev->ufb.base.dev;
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 
 	/* If the USB device is gone, we don't accept new opens */
-	if (drm_dev_is_unplugged(udl->ddev))
+	if (drm_dev_is_unplugged(&udl->drm))
 		return -ENODEV;
 
 	ufbdev->fb_count++;
@@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 
 int udl_fbdev_init(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	int bpp_sel = fb_bpp;
 	struct udl_fbdev *ufbdev;
 	int ret;
@@ -480,7 +480,7 @@ int udl_fbdev_init(struct drm_device *dev)
 
 void udl_fbdev_cleanup(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	if (!udl->fbdev)
 		return;
 
@@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
 
 void udl_fbdev_unplug(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	struct udl_fbdev *ufbdev;
 	if (!udl->fbdev)
 		return;
|
@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
|
||||||
{
|
{
|
||||||
struct udl_gem_object *gobj;
|
struct udl_gem_object *gobj;
|
||||||
struct drm_gem_object *obj;
|
struct drm_gem_object *obj;
|
||||||
struct udl_device *udl = dev->dev_private;
|
struct udl_device *udl = to_udl(dev);
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
mutex_lock(&udl->gem_lock);
|
mutex_lock(&udl->gem_lock);
|
||||||
|
|
|
drivers/gpu/drm/udl/udl_main.c

@@ -29,7 +29,7 @@
 static int udl_parse_vendor_descriptor(struct drm_device *dev,
 				       struct usb_device *usbdev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	char *desc;
 	char *buf;
 	char *desc_end;
@@ -165,7 +165,7 @@ void udl_urb_completion(struct urb *urb)
 
 static void udl_free_urb_list(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	int count = udl->urbs.count;
 	struct list_head *node;
 	struct urb_node *unode;
@@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev)
 
 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	struct urb *urb;
 	struct urb_node *unode;
 	char *buf;
@@ -262,7 +262,7 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
 
 struct urb *udl_get_urb(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	int ret = 0;
 	struct list_head *entry;
 	struct urb_node *unode;
@@ -295,7 +295,7 @@ struct urb *udl_get_urb(struct drm_device *dev)
 
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 	int ret;
 
 	BUG_ON(len > udl->urbs.size);
@@ -310,20 +310,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
 	return ret;
 }
 
-int udl_driver_load(struct drm_device *dev, unsigned long flags)
+int udl_init(struct udl_device *udl)
 {
-	struct usb_device *udev = (void*)flags;
-	struct udl_device *udl;
+	struct drm_device *dev = &udl->drm;
 	int ret = -ENOMEM;
 
 	DRM_DEBUG("\n");
-	udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
-	if (!udl)
-		return -ENOMEM;
-
-	udl->udev = udev;
-	udl->ddev = dev;
-	dev->dev_private = udl;
 
 	mutex_init(&udl->gem_lock);
 
@@ -357,7 +349,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
 err:
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
-	kfree(udl);
 	DRM_ERROR("%d\n", ret);
 	return ret;
 }
@@ -368,9 +359,9 @@ int udl_drop_usb(struct drm_device *dev)
 	return 0;
 }
 
-void udl_driver_unload(struct drm_device *dev)
+void udl_fini(struct drm_device *dev)
 {
-	struct udl_device *udl = dev->dev_private;
+	struct udl_device *udl = to_udl(dev);
 
 	drm_kms_helper_poll_fini(dev);
 
@@ -378,12 +369,4 @@ void udl_driver_unload(struct drm_device *dev)
 		udl_free_urb_list(dev);
 
 	udl_fbdev_cleanup(dev);
-	kfree(udl);
-}
-
-void udl_driver_release(struct drm_device *dev)
-{
-	udl_modeset_cleanup(dev);
-	drm_dev_fini(dev);
-	kfree(dev);
 }
drivers/hid/hid-ids.h

@@ -82,6 +82,7 @@
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP	0x1220
 #define HID_DEVICE_ID_ALPS_U1		0x1215
 #define HID_DEVICE_ID_ALPS_T4_BTNLESS	0x120C
+#define HID_DEVICE_ID_ALPS_1222		0x1222
 
 
 #define USB_VENDOR_ID_AMI		0x046b
@@ -265,6 +266,7 @@
 #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH	0xb19d
 #define USB_DEVICE_ID_CHICONY_WIRELESS	0x0618
 #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE	0x1053
+#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2	0x0939
 #define USB_DEVICE_ID_CHICONY_WIRELESS2	0x1123
 #define USB_DEVICE_ID_ASUS_AK1D		0x1125
 #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12	0x1421
drivers/hid/hid-multitouch.c

@@ -1788,6 +1788,10 @@ static const struct hid_device_id mt_devices[] = {
 		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
 			USB_VENDOR_ID_ALPS_JP,
 			HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
+	{ .driver_data = MT_CLS_WIN_8_DUAL,
+		HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+			USB_VENDOR_ID_ALPS_JP,
+			HID_DEVICE_ID_ALPS_1222) },
 
 	/* Lenovo X1 TAB Gen 2 */
 	{ .driver_data = MT_CLS_WIN_8_DUAL,
drivers/hid/hid-quirks.c

@@ -45,6 +45,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET },
@@ -176,6 +176,7 @@ static const char * const smbus_pnp_ids[] = {
         "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
         "LEN0073", /* X1 Carbon G5 (Elantech) */
         "LEN0092", /* X1 Carbon 6 */
+        "LEN0093", /* T480 */
         "LEN0096", /* X280 */
         "LEN0097", /* X280 -> ALPS trackpoint */
         "LEN200f", /* T450s */

@@ -740,32 +740,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
 }
 
 static int its_wait_for_range_completion(struct its_node *its,
-                                         struct its_cmd_block *from,
+                                         u64 prev_idx,
                                          struct its_cmd_block *to)
 {
-        u64 rd_idx, from_idx, to_idx;
+        u64 rd_idx, to_idx, linear_idx;
         u32 count = 1000000;    /* 1s! */
 
-        from_idx = its_cmd_ptr_to_offset(its, from);
+        /* Linearize to_idx if the command set has wrapped around */
         to_idx = its_cmd_ptr_to_offset(its, to);
+        if (to_idx < prev_idx)
+                to_idx += ITS_CMD_QUEUE_SZ;
+
+        linear_idx = prev_idx;
 
         while (1) {
+                s64 delta;
+
                 rd_idx = readl_relaxed(its->base + GITS_CREADR);
 
-                /* Direct case */
-                if (from_idx < to_idx && rd_idx >= to_idx)
-                        break;
+                /*
+                 * Compute the read pointer progress, taking the
+                 * potential wrap-around into account.
+                 */
+                delta = rd_idx - prev_idx;
+                if (rd_idx < prev_idx)
+                        delta += ITS_CMD_QUEUE_SZ;
 
-                /* Wrapped case */
-                if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
+                linear_idx += delta;
+                if (linear_idx >= to_idx)
                         break;
 
                 count--;
                 if (!count) {
-                        pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
-                                           from_idx, to_idx, rd_idx);
+                        pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
+                                           to_idx, linear_idx);
                         return -1;
                 }
+                prev_idx = rd_idx;
                 cpu_relax();
                 udelay(1);
         }
@@ -782,6 +793,7 @@ void name(struct its_node *its, \
         struct its_cmd_block *cmd, *sync_cmd, *next_cmd;        \
         synctype *sync_obj;                                     \
         unsigned long flags;                                    \
+        u64 rd_idx;                                             \
                                                                 \
         raw_spin_lock_irqsave(&its->lock, flags);               \
                                                                 \
@@ -803,10 +815,11 @@ void name(struct its_node *its, \
         }                                                       \
                                                                 \
 post:                                                           \
+        rd_idx = readl_relaxed(its->base + GITS_CREADR);        \
         next_cmd = its_post_commands(its);                      \
         raw_spin_unlock_irqrestore(&its->lock, flags);          \
                                                                 \
-        if (its_wait_for_range_completion(its, cmd, next_cmd))  \
+        if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
                 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
 }

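Note on the ITS queue fix above: the old code compared raw ring offsets and needed separate "direct" and "wrapped" branches, which still mis-judged progress once GITS_CREADR lapped the command queue. The rewrite snapshots the read pointer under the command lock and then accumulates its progress into a monotonically increasing index, so a single comparison works in both cases. A stand-alone sketch of that arithmetic (user-space C; QUEUE_SZ is a hypothetical stand-in for ITS_CMD_QUEUE_SZ, and this models one polling observation, not the driver loop):

    #include <stdio.h>
    #include <stdint.h>

    #define QUEUE_SZ 4096 /* hypothetical stand-in for ITS_CMD_QUEUE_SZ */

    /* Returns 1 once the read pointer has advanced past to_idx. */
    static int reached(uint64_t prev_idx, uint64_t rd_idx, uint64_t to_idx)
    {
            uint64_t linear_idx = prev_idx;
            int64_t delta;

            /* Linearize the target if the writer wrapped around. */
            if (to_idx < prev_idx)
                    to_idx += QUEUE_SZ;

            /* Progress of the read pointer, wrap-around included. */
            delta = (int64_t)rd_idx - (int64_t)prev_idx;
            if (rd_idx < prev_idx)
                    delta += QUEUE_SZ;

            linear_idx += delta;
            return linear_idx >= to_idx;
    }

    int main(void)
    {
            /* Writer wrapped: started at 4064, queued up to offset 32. */
            printf("%d\n", reached(4064, 16, 32));  /* 0: not there yet */
            printf("%d\n", reached(4064, 48, 32));  /* 1: consumed past it */
            return 0;
    }
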
@@ -563,7 +563,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
                 gfp = GFP_NOIO;
         }
         argv = kmalloc_array(new_size, sizeof(*argv), gfp);
-        if (argv) {
+        if (argv && old_argv) {
                 memcpy(argv, old_argv, *size * sizeof(*argv));
                 *size = new_size;
         }

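The extra guard matters because realloc_argv() is also used for the initial allocation, where old_argv is NULL and *size is 0; memcpy() from a NULL pointer is undefined behavior in C even when the length is zero. A minimal user-space sketch of the same grow pattern (plain C, hypothetical names, not the kernel helper itself):

    #include <stdlib.h>
    #include <string.h>

    /* Grow a pointer array; tolerates old == NULL on first use. */
    static char **grow_argv(size_t *size, char **old)
    {
            size_t new_size = *size ? *size * 2 : 8;
            char **argv = malloc(new_size * sizeof(*argv));

            if (!argv)
                    return NULL;
            if (old) {
                    memcpy(argv, old, *size * sizeof(*argv));
                    free(old);      /* the kernel caller frees separately */
            }
            *size = new_size;
            return argv;
    }
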
@@ -236,8 +236,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
                 BUG();
         }
 
-        DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str,
-                block);
+        DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
+                    type_str, block);
 
         if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS)
                 DMERR("%s: reached maximum errors", v->data_dev->name);

@@ -895,7 +895,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
                          u64 *data)
 {
         struct be_adapter *adapter = netdev_priv(netdev);
-        int status;
+        int status, cnt;
         u8 link_status = 0;
 
         if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
@@ -906,6 +906,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
 
         memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
+        /* check link status before offline tests */
+        link_status = netif_carrier_ok(netdev);
+
         if (test->flags & ETH_TEST_FL_OFFLINE) {
                 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
                         test->flags |= ETH_TEST_FL_FAILED;
@@ -926,13 +929,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
                         test->flags |= ETH_TEST_FL_FAILED;
         }
 
-        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
-        if (status) {
-                test->flags |= ETH_TEST_FL_FAILED;
-                data[4] = -1;
-        } else if (!link_status) {
+        /* link status was down prior to test */
+        if (!link_status) {
                 test->flags |= ETH_TEST_FL_FAILED;
                 data[4] = 1;
+                return;
+        }
+
+        for (cnt = 10; cnt; cnt--) {
+                status = be_cmd_link_status_query(adapter, NULL, &link_status,
+                                                  0);
+                if (status) {
+                        test->flags |= ETH_TEST_FL_FAILED;
+                        data[4] = -1;
+                        break;
+                }
+
+                if (link_status)
+                        break;
+
+                msleep_interruptible(500);
         }
 }
 

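The shape of this fix: offline self-tests tear the link down, and it needs time to renegotiate afterwards, so the driver now records carrier state before the tests and then polls firmware up to ten times at 500 ms intervals instead of failing on the first query. A user-space model of that bounded retry loop (query_link() is hypothetical, not the driver API):

    #include <stdio.h>

    /* 1 = link up, 0 = still down, -1 = query error (hypothetical). */
    static int query_link(int attempt)
    {
            return attempt >= 3;    /* pretend the link returns on try 3 */
    }

    int main(void)
    {
            int status = 0, cnt;

            for (cnt = 10; cnt; cnt--) {
                    status = query_link(10 - cnt);
                    if (status != 0)
                            break;
                    /* driver sleeps 500 ms here (msleep_interruptible) */
            }
            printf(status == 1 ? "link up\n" : "link test failed\n");
            return 0;
    }
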
@@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter)
         e1000_configure_msix(adapter);
         e1000_irq_enable(adapter);
 
-        netif_start_queue(adapter->netdev);
+        /* Tx queue started by watchdog timer when link is up */
 
         e1000e_trigger_lsc(adapter);
 }
@@ -4584,6 +4584,7 @@ int e1000e_open(struct net_device *netdev)
         pm_runtime_get_sync(&pdev->dev);
 
         netif_carrier_off(netdev);
+        netif_stop_queue(netdev);
 
         /* allocate transmit descriptors */
         err = e1000e_setup_tx_resources(adapter->tx_ring);
@@ -4644,7 +4645,6 @@ int e1000e_open(struct net_device *netdev)
         e1000_irq_enable(adapter);
 
         adapter->tx_hang_recheck = false;
-        netif_start_queue(netdev);
 
         hw->mac.get_link_status = true;
         pm_runtime_put(&pdev->dev);
@@ -5266,6 +5266,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                         if (phy->ops.cfg_on_link_up)
                                 phy->ops.cfg_on_link_up(hw);
 
+                        netif_wake_queue(netdev);
                         netif_carrier_on(netdev);
 
                         if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -5279,6 +5280,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                         /* Link status message must follow this format */
                         pr_info("%s NIC Link is Down\n", adapter->netdev->name);
                         netif_carrier_off(netdev);
+                        netif_stop_queue(netdev);
                         if (!test_bit(__E1000_DOWN, &adapter->state))
                                 mod_timer(&adapter->phy_info_timer,
                                           round_jiffies(jiffies + 2 * HZ));
@@ -5286,13 +5288,8 @@ static void e1000_watchdog_task(struct work_struct *work)
                         /* 8000ES2LAN requires a Rx packet buffer work-around
                          * on link down event; reset the controller to flush
                          * the Rx packet buffer.
-                         *
-                         * If the link is lost the controller stops DMA, but
-                         * if there is queued Tx work it cannot be done. So
-                         * reset the controller to flush the Tx packet buffers.
                          */
-                        if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
-                            e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
+                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                 adapter->flags |= FLAG_RESTART_NOW;
                         else
                                 pm_schedule_suspend(netdev->dev.parent,
@@ -5315,6 +5312,14 @@ static void e1000_watchdog_task(struct work_struct *work)
                 adapter->gotc_old = adapter->stats.gotc;
         spin_unlock(&adapter->stats64_lock);
 
+        /* If the link is lost the controller stops DMA, but
+         * if there is queued Tx work it cannot be done. So
+         * reset the controller to flush the Tx packet buffers.
+         */
+        if (!netif_carrier_ok(netdev) &&
+            (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+                adapter->flags |= FLAG_RESTART_NOW;
+
         /* If reset is necessary, do it outside of interrupt context. */
         if (adapter->flags & FLAG_RESTART_NOW) {
                 schedule_work(&adapter->reset_task);

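The net effect of these e1000e hunks: the Tx queue is now started and stopped strictly in step with carrier state (the watchdog wakes it on link-up and stops it on link-down), and the "queued Tx work but no link" check moved out of the link-down branch so it fires on every watchdog pass. A compact model of that per-tick decision (plain C, illustrative flags only, not the driver's state machine):

    #include <stdio.h>

    #define FLAG_RX_NEEDS_RESTART   0x1
    #define FLAG_RESTART_NOW        0x2

    /* Model: one watchdog tick deciding queue state and reset need. */
    static unsigned watchdog(int link_up, int tx_pending, unsigned flags)
    {
            if (link_up) {
                    /* netif_wake_queue() + netif_carrier_on() */
            } else {
                    /* netif_stop_queue() + netif_carrier_off() */
                    if (flags & FLAG_RX_NEEDS_RESTART)
                            flags |= FLAG_RESTART_NOW;
            }
            if (!link_up && tx_pending)     /* DMA stopped, work queued */
                    flags |= FLAG_RESTART_NOW;
            return flags;
    }

    int main(void)
    {
            printf("%#x\n", watchdog(0, 1, 0)); /* 0x2: reset scheduled */
            printf("%#x\n", watchdog(1, 1, 0)); /* 0: link up, keep going */
            return 0;
    }
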
@@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev)
         sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
         /* Enable all known interrupts by setting the interrupt mask. */
-        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
         sw32(cr, RxENA | sr32(cr));
         sw32(ier, IE);
 
@@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
         sw32(txdp, sis_priv->tx_ring_dma);
 
         /* Enable all known interrupts by setting the interrupt mask. */
-        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
 }
 
 /**
@@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                 spin_unlock_irqrestore(&sis_priv->lock, flags);
                 return NETDEV_TX_OK;
         }
-        sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
+        sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len);
         sw32(cr, TxENA | sr32(cr));
 
         sis_priv->cur_tx ++;
@@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
         do {
                 status = sr32(isr);
 
-                if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
+                if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0)
                         /* nothing intresting happened */
                         break;
                 handled = 1;
@@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
                         /* Rx interrupt */
                         sis900_rx(net_dev);
 
-                if (status & (TxURN | TxERR | TxIDLE))
+                if (status & (TxURN | TxERR | TxIDLE | TxDESC))
                         /* Tx interrupt */
                         sis900_finish_xmit(net_dev);
 
@@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev)
 
                 if (tx_status & OWN) {
                         /* The packet is not transmitted yet (owned by hardware) !
-                         * Note: the interrupt is generated only when Tx Machine
-                         * is idle, so this is an almost impossible case */
+                         * Note: this is an almost impossible condition
+                         * in case of TxDESC ('descriptor interrupt') */
                         break;
                 }
 
@@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
         sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
         /* Enable all known interrupts by setting the interrupt mask. */
-        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
+        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC);
         sw32(cr, RxENA | sr32(cr));
         sw32(ier, IE);
 

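Why TxDESC and INTR: previously the driver only reclaimed Tx descriptors on TxIDLE/TxERR, so a completed packet could sit in the ring until the Tx machine went idle. Setting INTR in each descriptor and unmasking TxDESC makes the NIC raise a per-descriptor completion interrupt. A toy completion-ring walk mirroring the OWN-bit handshake (user-space C, simplified; constants are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define OWN  0x80000000u  /* descriptor still owned by the NIC */
    #define RING 4

    int main(void)
    {
            uint32_t cmdsts[RING] = { 100, 60, OWN | 40, OWN | 0 };
            unsigned dirty = 0, cur = 3;

            /* Reclaim every descriptor the hardware has handed back. */
            while (dirty != cur && !(cmdsts[dirty % RING] & OWN)) {
                    printf("tx complete: %u bytes\n",
                           (unsigned)cmdsts[dirty % RING]);
                    dirty++;
            }
            return 0;
    }
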
@@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>");
 MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE));
+MODULE_SOFTDEP("pre: arc4");
 MODULE_VERSION("1.0.2");
 
 static unsigned int

@@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d)
         void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
                                                 eint->regs->mask_set);
 
+        eint->cur_mask[d->hwirq >> 5] &= ~mask;
+
         writel(mask, reg);
 }
 
@@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d)
         void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
                                                 eint->regs->mask_clr);
 
+        eint->cur_mask[d->hwirq >> 5] |= mask;
+
         writel(mask, reg);
 
         if (eint->dual_edge[d->hwirq])
@@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
         }
 }
 
-static void mtk_eint_chip_read_mask(const struct mtk_eint *eint,
-                                    void __iomem *base, u32 *buf)
-{
-        int port;
-        void __iomem *reg;
-
-        for (port = 0; port < eint->hw->ports; port++) {
-                reg = base + eint->regs->mask + (port << 2);
-                buf[port] = ~readl_relaxed(reg);
-                /* Mask is 0 when irq is enabled, and 1 when disabled. */
-        }
-}
-
 static int mtk_eint_irq_request_resources(struct irq_data *d)
 {
         struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
@@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
         struct irq_chip *chip = irq_desc_get_chip(desc);
         struct mtk_eint *eint = irq_desc_get_handler_data(desc);
         unsigned int status, eint_num;
-        int offset, index, virq;
+        int offset, mask_offset, index, virq;
         void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
         int dual_edge, start_level, curr_level;
 
@@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
                 status = readl(reg);
                 while (status) {
                         offset = __ffs(status);
+                        mask_offset = eint_num >> 5;
                         index = eint_num + offset;
                         virq = irq_find_mapping(eint->domain, index);
                         status &= ~BIT(offset);
 
+                        /*
+                         * If we get an interrupt on pin that was only required
+                         * for wake (but no real interrupt requested), mask the
+                         * interrupt (as would mtk_eint_resume do anyway later
+                         * in the resume sequence).
+                         */
+                        if (eint->wake_mask[mask_offset] & BIT(offset) &&
+                            !(eint->cur_mask[mask_offset] & BIT(offset))) {
+                                writel_relaxed(BIT(offset), reg -
+                                        eint->regs->stat +
+                                        eint->regs->mask_set);
+                        }
+
                         dual_edge = eint->dual_edge[index];
                         if (dual_edge) {
                                 /*
@@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc)
 
 int mtk_eint_do_suspend(struct mtk_eint *eint)
 {
-        mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask);
         mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);
 
         return 0;

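Design note: instead of reading the hardware mask registers back at suspend time (the deleted mtk_eint_chip_read_mask()), the driver now keeps cur_mask as a software shadow that the mask/unmask ops maintain, so it always reflects what the kernel requested rather than whatever transient state the hardware was in. A minimal shadow-bitmap sketch (plain C; a set bit means "enabled", matching the driver's convention):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t cur_mask[2];   /* 1 bit per irq, 32 irqs per word */

    static void irq_mask(unsigned hwirq)
    {
            cur_mask[hwirq >> 5] &= ~(1u << (hwirq & 31));
            /* followed by the register write that masks the line */
    }

    static void irq_unmask(unsigned hwirq)
    {
            cur_mask[hwirq >> 5] |= 1u << (hwirq & 31);
            /* followed by the register write that unmasks the line */
    }

    int main(void)
    {
            irq_unmask(3);
            irq_unmask(40);
            irq_mask(3);
            /* prints 00000000 00000100 */
            printf("%08x %08x\n", cur_mask[0], cur_mask[1]);
            return 0;
    }
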
@@ -889,6 +889,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
         if (ret < 0)
                 goto fail;
 
+        ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
+        if (ret < 0)
+                goto fail;
+
         mcp->irq_controller =
                 device_property_read_bool(dev, "interrupt-controller");
         if (mcp->irq && mcp->irq_controller) {
@@ -930,10 +934,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                 goto fail;
         }
 
-        ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp);
-        if (ret < 0)
-                goto fail;
-
         if (one_regmap_config) {
                 mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL,
                                 "mcp23xxx-pinctrl.%d", raw_chip_address);

@@ -151,6 +151,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
                         return -ENOMEM;
                 }
                 irq_ptr_qs[i] = q;
+                INIT_LIST_HEAD(&q->entry);
         }
         return 0;
 }
@@ -179,6 +180,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
         q->mask = 1 << (31 - i);
         q->nr = i;
         q->handler = handler;
+        INIT_LIST_HEAD(&q->entry);
 }
 
 static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,

@@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
         mutex_lock(&tiq_list_lock);
         list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
         mutex_unlock(&tiq_list_lock);
-        xchg(irq_ptr->dsci, 1 << 7);
 }
 
 void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
@@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
         struct qdio_q *q;
 
         q = irq_ptr->input_qs[0];
-        /* if establish triggered an error */
-        if (!q || !q->entry.prev || !q->entry.next)
+        if (!q)
                 return;
 
         mutex_lock(&tiq_list_lock);
         list_del_rcu(&q->entry);
         mutex_unlock(&tiq_list_lock);
         synchronize_rcu();
+        INIT_LIST_HEAD(&q->entry);
 }
 
 static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)

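Why the INIT_LIST_HEAD calls: the old code tried to infer "is this queue still on tiq_list?" from q->entry.prev/next, but list_del_rcu() deliberately poisons those pointers, so the test was unreliable. Initializing the node at setup and re-initializing it after removal keeps the node in a well-defined "not on any list" state. A user-space sketch of the same idiom with a minimal doubly linked list:

    #include <stdio.h>

    struct node { struct node *prev, *next; };

    static void node_init(struct node *n) { n->prev = n->next = n; }

    static void node_del(struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            node_init(n);   /* re-init: provably "not on a list" */
    }

    static int on_list(const struct node *n) { return n->next != n; }

    int main(void)
    {
            struct node head, a;

            node_init(&head);
            a.next = head.next;     /* insert a after head */
            a.prev = &head;
            head.next->prev = &a;
            head.next = &a;

            printf("%d\n", on_list(&a));    /* 1 */
            node_del(&a);
            printf("%d\n", on_list(&a));    /* 0 */
            return 0;
    }
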
@@ -276,9 +276,9 @@ static void afs_break_one_callback(struct afs_server *server,
                 struct afs_super_info *as = AFS_FS_S(cbi->sb);
                 struct afs_volume *volume = as->volume;
 
-                write_lock(&volume->cb_break_lock);
+                write_lock(&volume->cb_v_break_lock);
                 volume->cb_v_break++;
-                write_unlock(&volume->cb_break_lock);
+                write_unlock(&volume->cb_v_break_lock);
         } else {
                 data.volume = NULL;
                 data.fid = *fid;
@@ -477,7 +477,7 @@ struct afs_volume {
         unsigned int            servers_seq;    /* Incremented each time ->servers changes */
 
         unsigned                cb_v_break;     /* Break-everything counter. */
-        rwlock_t                cb_break_lock;
+        rwlock_t                cb_v_break_lock;
 
         afs_voltype_t           type;           /* type of volume */
         short                   error;
@@ -47,6 +47,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
         atomic_set(&volume->usage, 1);
         INIT_LIST_HEAD(&volume->proc_link);
         rwlock_init(&volume->servers_lock);
+        rwlock_init(&volume->cb_v_break_lock);
         memcpy(volume->name, vldb->name, vldb->name_len + 1);
 
         slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask);

@@ -515,7 +515,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
         inc_page_count(fio->sbi, is_read_io(fio->op) ?
                         __read_io_type(page): WB_DATA_TYPE(fio->page));
 
-        __f2fs_submit_read_bio(fio->sbi, bio, fio->type);
+        if (is_read_io(fio->op))
+                __f2fs_submit_read_bio(fio->sbi, bio, fio->type);
+        else
+                __submit_bio(fio->sbi, bio, fio->type);
         return 0;
 }
 

@@ -29,6 +29,7 @@
 #include "gc.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
 {
@@ -218,6 +219,15 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 
         trace_f2fs_sync_file_enter(inode);
 
+        if (trace_android_fs_fsync_start_enabled()) {
+                char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+                path = android_fstrace_get_pathname(pathbuf,
+                                        MAX_TRACE_PATHBUF_LEN, inode);
+                trace_android_fs_fsync_start(inode,
+                                current->pid, path, current->comm);
+        }
+
         if (S_ISDIR(inode->i_mode))
                 goto go_write;
 
@@ -323,6 +333,8 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
 out:
         trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
         f2fs_trace_ios(NULL, 1);
+        trace_android_fs_fsync_end(inode, start, end - start);
+
         return ret;
 }
 

@@ -179,7 +179,7 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
 
         old_cred = ovl_override_creds(inode->i_sb);
         ret = vfs_llseek(real.file, offset, whence);
-        revert_creds(old_cred);
+        ovl_revert_creds(old_cred);
 
         file->f_pos = real.file->f_pos;
         inode_unlock(inode);

@@ -178,6 +178,7 @@ enum cpuhp_state {
         CPUHP_AP_WORKQUEUE_ONLINE,
         CPUHP_AP_RCUTREE_ONLINE,
         CPUHP_AP_NOTIFY_PERF_ONLINE,
+        CPUHP_AP_BASE_CACHEINFO_ONLINE,
         CPUHP_AP_ONLINE_DYN,
         CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,
         CPUHP_AP_X86_HPET_ONLINE,

@@ -118,7 +118,8 @@
 #define DIV_ROUND_DOWN_ULL(ll, d) \
         ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; })
 
-#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d))
+#define DIV_ROUND_UP_ULL(ll, d) \
+        DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d))
 
 #if BITS_PER_LONG == 32
 # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d)

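Why the cast matters: in the old macro the addition (ll) + (d) - 1 was evaluated in the type of ll, so a 32-bit value near UINT_MAX wrapped before the 64-bit division ever saw it. Casting to unsigned long long first performs the addition in 64 bits. A stand-alone demonstration (user-space C; do_div() replaced by plain division for portability):

    #include <stdio.h>

    #define DIV_ROUND_UP_BAD(ll, d)  (((ll) + (d) - 1) / (d))
    #define DIV_ROUND_UP_FIXED(ll, d) \
            (((unsigned long long)(ll) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ll = 4294967295u;  /* UINT_MAX */
            unsigned int d = 16;

            /* ll + 15 wraps to 14 in 32-bit arithmetic: result 0. */
            printf("bad:   %llu\n",
                   (unsigned long long)DIV_ROUND_UP_BAD(ll, d));
            /* promoted to 64 bits first: correct 268435456. */
            printf("fixed: %llu\n", DIV_ROUND_UP_FIXED(ll, d));
            return 0;
    }
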
@@ -25,6 +25,15 @@ DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
         TP_PROTO(struct inode *inode, loff_t offset, int bytes),
         TP_ARGS(inode, offset, bytes));
 
+DEFINE_EVENT(android_fs_fsync_start_template, android_fs_fsync_start,
+        TP_PROTO(struct inode *inode,
+                 pid_t pid, char *pathname, char *command),
+        TP_ARGS(inode, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_fsync_end,
+        TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+        TP_ARGS(inode, offset, bytes));
+
 #endif /* _TRACE_ANDROID_FS_H */
 
 /* This part must be outside protection */

@@ -61,4 +61,38 @@ DECLARE_EVENT_CLASS(android_fs_data_end_template,
                   __entry->offset, __entry->bytes)
 );
 
+DECLARE_EVENT_CLASS(android_fs_fsync_start_template,
+        TP_PROTO(struct inode *inode,
+                 pid_t pid, char *pathname, char *command),
+        TP_ARGS(inode, pid, pathname, command),
+        TP_STRUCT__entry(
+                __string(pathbuf, pathname);
+                __field(loff_t, i_size);
+                __string(cmdline, command);
+                __field(pid_t, pid);
+                __field(ino_t, ino);
+        ),
+        TP_fast_assign(
+                {
+                        /*
+                         * Replace the spaces in filenames and cmdlines
+                         * because this screws up the tooling that parses
+                         * the traces.
+                         */
+                        __assign_str(pathbuf, pathname);
+                        (void)strreplace(__get_str(pathbuf), ' ', '_');
+                        __entry->i_size = i_size_read(inode);
+                        __assign_str(cmdline, command);
+                        (void)strreplace(__get_str(cmdline), ' ', '_');
+                        __entry->pid = pid;
+                        __entry->ino = inode->i_ino;
+                }
+        ),
+        TP_printk("entry_name %s, cmdline %s,"
+                  " pid %d, i_size %llu, ino %lu",
+                  __get_str(pathbuf),
+                  __get_str(cmdline), __entry->pid, __entry->i_size,
+                  (unsigned long) __entry->ino)
+);
+
 #endif /* _TRACE_ANDROID_FS_TEMPLATE_H */

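The tracepoint replaces spaces in paths and command names because most trace parsers treat whitespace as a field separator. strreplace() is the kernel helper it uses; the equivalent in portable C:

    #include <stdio.h>

    /* Replace every 'old' with 'new' in-place (cf. kernel strreplace). */
    static char *str_replace_char(char *s, char old, char new)
    {
            char *p;

            for (p = s; *p; p++)
                    if (*p == old)
                            *p = new;
            return p;   /* strreplace() also returns the terminator */
    }

    int main(void)
    {
            char path[] = "/sdcard/My Documents/a b.txt";

            str_replace_char(path, ' ', '_');
            printf("%s\n", path);   /* /sdcard/My_Documents/a_b.txt */
            return 0;
    }
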
@@ -29,7 +29,7 @@
 
 #include <linux/types.h>
 #include <linux/magic.h>
-
+#include <asm/byteorder.h>
 
 #define NILFS_INODE_BMAP_SIZE   7
 
@@ -533,19 +533,19 @@ enum {
 static inline void                                                      \
 nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp)                \
 {                                                                       \
-        cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) |          \
+        cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) |      \
                                    (1UL << NILFS_CHECKPOINT_##flag));   \
 }                                                                       \
 static inline void                                                      \
 nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp)              \
 {                                                                       \
-        cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) &          \
+        cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) &      \
                                    ~(1UL << NILFS_CHECKPOINT_##flag));  \
 }                                                                       \
 static inline int                                                       \
 nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp)              \
 {                                                                       \
-        return !!(le32_to_cpu(cp->cp_flags) &                           \
+        return !!(__le32_to_cpu(cp->cp_flags) &                         \
                   (1UL << NILFS_CHECKPOINT_##flag));                    \
 }
 
@@ -595,20 +595,20 @@ enum {
 static inline void                                                      \
 nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su)          \
 {                                                                       \
-        su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) |          \
+        su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) |      \
                                    (1UL << NILFS_SEGMENT_USAGE_##flag));\
 }                                                                       \
 static inline void                                                      \
 nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su)        \
 {                                                                       \
         su->su_flags =                                                  \
-                cpu_to_le32(le32_to_cpu(su->su_flags) &                 \
+                __cpu_to_le32(__le32_to_cpu(su->su_flags) &             \
                             ~(1UL << NILFS_SEGMENT_USAGE_##flag));      \
 }                                                                       \
 static inline int                                                       \
 nilfs_segment_usage_##name(const struct nilfs_segment_usage *su)        \
 {                                                                       \
-        return !!(le32_to_cpu(su->su_flags) &                           \
+        return !!(__le32_to_cpu(su->su_flags) &                         \
                   (1UL << NILFS_SEGMENT_USAGE_##flag));                 \
 }
 
@@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error)
 static inline void
 nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
 {
-        su->su_lastmod = cpu_to_le64(0);
-        su->su_nblocks = cpu_to_le32(0);
-        su->su_flags = cpu_to_le32(0);
+        su->su_lastmod = __cpu_to_le64(0);
+        su->su_nblocks = __cpu_to_le32(0);
+        su->su_flags = __cpu_to_le32(0);
 }
 
 static inline int
 nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
 {
-        return !le32_to_cpu(su->su_flags);
+        return !__le32_to_cpu(su->su_flags);
 }
 
 /**

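Background for the nilfs2 change: cpu_to_le32()/le32_to_cpu() live in the kernel-internal byteorder headers and are not visible to user space, so a uapi header that uses them breaks any ordinary application build that includes it. The double-underscore variants are exported through <asm/byteorder.h>. A minimal user-space consumer (assumes Linux uapi headers are installed):

    /* Compile on a Linux system with kernel uapi headers available. */
    #include <stdio.h>
    #include <asm/byteorder.h>

    int main(void)
    {
            __le32 on_disk = __cpu_to_le32(0x12345678);

            /* Round-trips regardless of host endianness. */
            printf("0x%08x\n", (unsigned)__le32_to_cpu(on_disk));
            return 0;
    }
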
@@ -2008,6 +2008,9 @@ static ssize_t write_cpuhp_fail(struct device *dev,
         if (ret)
                 return ret;
 
+        if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
+                return -EINVAL;
+
         /*
          * Cannot fail STARTING/DYING callbacks.
          */

@@ -6079,7 +6079,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user,
         if (user_mode(regs)) {
                 regs_user->abi = perf_reg_abi(current);
                 regs_user->regs = regs;
-        } else if (current->mm) {
+        } else if (!(current->flags & PF_KTHREAD)) {
                 perf_get_regs_user(regs_user, regs, regs_user_copy);
         } else {
                 regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;

@@ -90,7 +90,7 @@ unsigned long probe_irq_on(void)
                         /* It triggered already - consider it spurious. */
                         if (!(desc->istate & IRQS_WAITING)) {
                                 desc->istate &= ~IRQS_AUTODETECT;
-                                irq_shutdown(desc);
+                                irq_shutdown_and_deactivate(desc);
                         } else
                                 if (i < 32)
                                         mask |= 1 << i;
@@ -127,7 +127,7 @@ unsigned int probe_irq_mask(unsigned long val)
                         mask |= 1 << i;
 
                 desc->istate &= ~IRQS_AUTODETECT;
-                irq_shutdown(desc);
+                irq_shutdown_and_deactivate(desc);
         }
         raw_spin_unlock_irq(&desc->lock);
 }
@@ -169,7 +169,7 @@ int probe_irq_off(unsigned long val)
                                 nr_of_irqs++;
                         }
                         desc->istate &= ~IRQS_AUTODETECT;
-                        irq_shutdown(desc);
+                        irq_shutdown_and_deactivate(desc);
                 }
         raw_spin_unlock_irq(&desc->lock);
 }

@@ -314,6 +314,12 @@ void irq_shutdown(struct irq_desc *desc)
                 }
                 irq_state_clr_started(desc);
         }
+}
+
+
+void irq_shutdown_and_deactivate(struct irq_desc *desc)
+{
+        irq_shutdown(desc);
         /*
          * This must be called even if the interrupt was never started up,
          * because the activation can happen before the interrupt is

@@ -124,7 +124,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
          */
         if (irqd_affinity_is_managed(d)) {
                 irqd_set_managed_shutdown(d);
-                irq_shutdown(desc);
+                irq_shutdown_and_deactivate(desc);
                 return false;
         }
 

@@ -80,6 +80,7 @@ extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
 
 extern void irq_shutdown(struct irq_desc *desc);
+extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
 extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
@@ -94,6 +95,10 @@ static inline void irq_mark_irq(unsigned int irq) { }
 extern void irq_mark_irq(unsigned int irq);
 #endif
 
+extern int __irq_get_irqchip_state(struct irq_data *data,
+                                   enum irqchip_irq_state which,
+                                   bool *state);
+
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 
 irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);

@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
@@ -34,8 +35,9 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-static void __synchronize_hardirq(struct irq_desc *desc)
+static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
 {
+        struct irq_data *irqd = irq_desc_get_irq_data(desc);
         bool inprogress;
 
         do {
@@ -51,6 +53,20 @@ static void __synchronize_hardirq(struct irq_desc *desc)
                 /* Ok, that indicated we're done: double-check carefully. */
                 raw_spin_lock_irqsave(&desc->lock, flags);
                 inprogress = irqd_irq_inprogress(&desc->irq_data);
+
+                /*
+                 * If requested and supported, check at the chip whether it
+                 * is in flight at the hardware level, i.e. already pending
+                 * in a CPU and waiting for service and acknowledge.
+                 */
+                if (!inprogress && sync_chip) {
+                        /*
+                         * Ignore the return code. inprogress is only updated
+                         * when the chip supports it.
+                         */
+                        __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
+                                                &inprogress);
+                }
                 raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                 /* Oops, that failed? */

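The idea behind sync_chip: an interrupt can already be latched in a CPU without any handler having started, so after the software in-progress flag clears, the code optionally asks the irqchip (IRQCHIP_STATE_ACTIVE) whether the line is still in flight. A user-space model of that "poll the flag, then optionally ask the hardware" loop (hypothetical callback, not the kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    /* Hypothetical chip hook: reports an interrupt latched in hardware. */
    static bool chip_active(int *hw_pending)
    {
            if (*hw_pending) {
                    (*hw_pending)--;        /* drains one step per poll */
                    return true;
            }
            return false;
    }

    static void sync_hardirq(bool in_progress, int hw_pending, bool sync_chip)
    {
            int spins = 0;

            for (;;) {
                    bool busy = in_progress;

                    if (!busy && sync_chip)
                            busy = chip_active(&hw_pending);
                    if (!busy)
                            break;
                    in_progress = false;    /* handler finishes after one spin */
                    spins++;
            }
            printf("synced after %d spin(s)\n", spins);
    }

    int main(void)
    {
            sync_hardirq(true, 2, true);    /* waits for soft + hw state */
            sync_hardirq(true, 2, false);   /* old behaviour: soft only */
            return 0;
    }
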
@@ -73,13 +89,18 @@ static void __synchronize_hardirq(struct irq_desc *desc)
  *      Returns: false if a threaded handler is active.
  *
  *      This function may be called - with care - from IRQ context.
+ *
+ *      It does not check whether there is an interrupt in flight at the
+ *      hardware level, but not serviced yet, as this might deadlock when
+ *      called with interrupts disabled and the target CPU of the interrupt
+ *      is the current CPU.
  */
 bool synchronize_hardirq(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
         if (desc) {
-                __synchronize_hardirq(desc);
+                __synchronize_hardirq(desc, false);
                 return !atomic_read(&desc->threads_active);
         }
 
@@ -95,14 +116,19 @@ EXPORT_SYMBOL(synchronize_hardirq);
  *      to complete before returning. If you use this function while
  *      holding a resource the IRQ handler may need you will deadlock.
  *
- *      This function may be called - with care - from IRQ context.
+ *      Can only be called from preemptible code as it might sleep when
+ *      an interrupt thread is associated to @irq.
+ *
+ *      It optionally makes sure (when the irq chip supports that method)
+ *      that the interrupt is not pending in any CPU and waiting for
+ *      service.
  */
 void synchronize_irq(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
         if (desc) {
-                __synchronize_hardirq(desc);
+                __synchronize_hardirq(desc, true);
                 /*
                  * We made sure that no hardirq handler is
                  * running. Now verify that no threaded handlers are
@@ -1619,6 +1645,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
         /* If this was the last handler, shut down the IRQ line: */
         if (!desc->action) {
                 irq_settings_clr_disable_unlazy(desc);
+                /* Only shutdown. Deactivate after synchronize_hardirq() */
                 irq_shutdown(desc);
         }
 
@@ -1647,8 +1674,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
         unregister_handler_proc(irq, action);
 
-        /* Make sure it's not being used on another CPU: */
-        synchronize_hardirq(irq);
+        /*
+         * Make sure it's not being used on another CPU and if the chip
+         * supports it also make sure that there is no (not yet serviced)
+         * interrupt in flight at the hardware level.
+         */
+        __synchronize_hardirq(desc, true);
 
 #ifdef CONFIG_DEBUG_SHIRQ
         /*
@@ -1688,6 +1719,14 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
          * require it to deallocate resources over the slow bus.
          */
         chip_bus_lock(desc);
+        /*
+         * There is no interrupt on the fly anymore. Deactivate it
+         * completely.
+         */
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        irq_domain_deactivate_irq(&desc->irq_data);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+
         irq_release_resources(desc);
         chip_bus_sync_unlock(desc);
         irq_remove_timings(desc);
@@ -2173,6 +2212,28 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 }
 EXPORT_SYMBOL_GPL(__request_percpu_irq);
 
+int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
+                            bool *state)
+{
+        struct irq_chip *chip;
+        int err = -EINVAL;
+
+        do {
+                chip = irq_data_get_irq_chip(data);
+                if (chip->irq_get_irqchip_state)
+                        break;
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+                data = data->parent_data;
+#else
+                data = NULL;
+#endif
+        } while (data);
+
+        if (data)
+                err = chip->irq_get_irqchip_state(data, which, state);
+        return err;
+}
+
 /**
  *      irq_get_irqchip_state - returns the irqchip state of a interrupt.
  *      @irq: Interrupt line that is forwarded to a VM
@@ -2191,7 +2252,6 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 {
         struct irq_desc *desc;
         struct irq_data *data;
-        struct irq_chip *chip;
         unsigned long flags;
         int err = -EINVAL;
 
@@ -2201,19 +2261,7 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
 
         data = irq_desc_get_irq_data(desc);
 
-        do {
-                chip = irq_data_get_irq_chip(data);
-                if (chip->irq_get_irqchip_state)
-                        break;
-#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-                data = data->parent_data;
-#else
-                data = NULL;
-#endif
-        } while (data);
-
-        if (data)
-                err = chip->irq_get_irqchip_state(data, which, state);
+        err = __irq_get_irqchip_state(data, which, state);
 
         irq_put_desc_busunlock(desc, flags);
         return err;

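The refactor above pulls the "walk up the irq_data hierarchy until some level implements the callback" loop into __irq_get_irqchip_state() so the synchronize paths and the public getter share it. The delegation pattern in isolation (plain C, hypothetical types):

    #include <stdio.h>
    #include <stddef.h>

    struct chip { int (*get_state)(void); };
    struct irqd { struct irqd *parent; const struct chip *chip; };

    static int leaf_get(void) { return 42; }

    /* Find the first level whose chip implements get_state. */
    static int get_state(struct irqd *d, int *state)
    {
            while (d && !d->chip->get_state)
                    d = d->parent;
            if (!d)
                    return -1;      /* no level supports it: -EINVAL */
            *state = d->chip->get_state();
            return 0;
    }

    int main(void)
    {
            const struct chip top = { NULL }, bottom = { leaf_get };
            struct irqd root = { NULL, &bottom };
            struct irqd child = { &root, &top };
            int state = 0;

            if (!get_state(&child, &state))
                    printf("state = %d\n", state);  /* state = 42 */
            return 0;
    }
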