Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
 "The interrupt subsystem delivers this time:

   - Refactoring of the GIC-V3 driver to prepare for the GIC-V4 support

   - Initial GIC-V4 support

   - Consolidation of the FSL MSI support

   - Utilize the effective affinity interface in various ARM irqchip
     drivers

   - Yet another interrupt chip driver (UniPhier AIDET)

   - Bulk conversion of the irq chip driver to use %pOF

   - The usual small fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (77 commits)
  irqchip/ls-scfg-msi: Add MSI affinity support
  irqchip/ls-scfg-msi: Add LS1043a v1.1 MSI support
  irqchip/ls-scfg-msi: Add LS1046a MSI support
  arm64: dts: ls1046a: Add MSI dts node
  arm64: dts: ls1043a: Share all MSIs
  arm: dts: ls1021a: Share all MSIs
  arm64: dts: ls1043a: Fix typo of MSI compatible string
  arm: dts: ls1021a: Fix typo of MSI compatible string
  irqchip/ls-scfg-msi: Fix typo of MSI compatible strings
  irqchip/irq-bcm7120-l2: Use correct I/O accessors for irq_fwd_mask
  irqchip/mmp: Make mmp_intc_conf const
  irqchip/gic: Make irq_chip const
  irqchip/gic-v3: Advertise GICv4 support to KVM
  irqchip/gic-v4: Enable low-level GICv4 operations
  irqchip/gic-v4: Add some basic documentation
  irqchip/gic-v4: Add VLPI configuration interface
  irqchip/gic-v4: Add VPE command interface
  irqchip/gic-v4: Add per-VM VPE domain creation
  irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs
  irqchip/gic-v3-its: Allow doorbell interrupts to be injected/cleared
  ...
commit 93cc1228b4
57 changed files with 3182 additions and 293 deletions
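As background for the "effective affinity" conversions in the diffs below: each converted driver selects GENERIC_IRQ_EFFECTIVE_AFF_MASK in Kconfig and records the CPU it actually routed the interrupt to. The following is an illustrative sketch only, not code from this series; the my_chip_set_affinity name and the hardware-programming step are placeholders.

#include <linux/cpumask.h>
#include <linux/irq.h>

/*
 * Illustrative sketch: an irq_chip .irq_set_affinity callback that routes
 * the interrupt to one CPU and then records that CPU as the effective
 * affinity, so the core (and /proc/irq/<n>/effective_affinity) reflects
 * where the IRQ really fires.
 */
static int my_chip_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program the controller's routing registers for 'cpu' here ... */

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
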
@@ -4,8 +4,10 @@ Required properties:
- compatible: should be "fsl,<soc-name>-msi" to identify
	Layerscape PCIe MSI controller block such as:
  "fsl,1s1021a-msi"
  "fsl,1s1043a-msi"
  "fsl,ls1021a-msi"
  "fsl,ls1043a-msi"
  "fsl,ls1046a-msi"
  "fsl,ls1043a-v1.1-msi"
- msi-controller: indicates that this is a PCIe MSI controller node
- reg: physical base address of the controller and length of memory mapped.
- interrupts: an interrupt to the parent interrupt controller.

@@ -23,7 +25,7 @@ MSI controller node
Examples:

	msi1: msi-controller@1571000 {
		compatible = "fsl,1s1043a-msi";
		compatible = "fsl,ls1043a-msi";
		reg = <0x0 0x1571000 0x0 0x8>,
		msi-controller;
		interrupts = <0 116 0x4>;

@@ -0,0 +1,32 @@
UniPhier AIDET

UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC (Generic
Interrupt Controller). GIC itself can handle only high level and rising edge
interrupts. The AIDET provides logic inverter to support low level and falling
edge interrupts.

Required properties:
- compatible: Should be one of the following:
    "socionext,uniphier-ld4-aidet"  - for LD4 SoC
    "socionext,uniphier-pro4-aidet" - for Pro4 SoC
    "socionext,uniphier-sld8-aidet" - for sLD8 SoC
    "socionext,uniphier-pro5-aidet" - for Pro5 SoC
    "socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC
    "socionext,uniphier-ld11-aidet" - for LD11 SoC
    "socionext,uniphier-ld20-aidet" - for LD20 SoC
    "socionext,uniphier-pxs3-aidet" - for PXs3 SoC
- reg: Specifies offset and length of the register set for the device.
- interrupt-controller: Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an interrupt
  source. The value should be 2. The first cell defines the interrupt number
  (corresponds to the SPI interrupt number of GIC). The second cell specifies
  the trigger type as defined in interrupts.txt in this directory.

Example:

	aidet: aidet@5fc20000 {
		compatible = "socionext,uniphier-pro4-aidet";
		reg = <0x5fc20000 0x200>;
		interrupt-controller;
		#interrupt-cells = <2>;
	};

@@ -312,6 +312,7 @@ IRQ
devm_irq_alloc_descs_from()
devm_irq_alloc_generic_chip()
devm_irq_setup_generic_chip()
devm_irq_sim_init()

LED
devm_led_classdev_register()

@@ -1993,6 +1993,7 @@ F: arch/arm64/boot/dts/socionext/
F: drivers/bus/uniphier-system-bus.c
F: drivers/clk/uniphier/
F: drivers/i2c/busses/i2c-uniphier*
F: drivers/irqchip/irq-uniphier-aidet.c
F: drivers/pinctrl/uniphier/
F: drivers/reset/reset-uniphier.c
F: drivers/tty/serial/8250/8250_uniphier.c

@@ -129,14 +129,14 @@
};

msi1: msi-controller@1570e00 {
	compatible = "fsl,1s1021a-msi";
	compatible = "fsl,ls1021a-msi";
	reg = <0x0 0x1570e00 0x0 0x8>;
	msi-controller;
	interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
};

msi2: msi-controller@1570e08 {
	compatible = "fsl,1s1021a-msi";
	compatible = "fsl,ls1021a-msi";
	reg = <0x0 0x1570e08 0x0 0x8>;
	msi-controller;
	interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;

@@ -699,7 +699,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
	  0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
msi-parent = <&msi1>;
msi-parent = <&msi1>, <&msi2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,

@@ -722,7 +722,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
	  0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
msi-parent = <&msi2>;
msi-parent = <&msi1>, <&msi2>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,

@@ -275,6 +275,12 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
#define gicr_read_pendbaser(c)		__gic_readq_nonatomic(c)
#define gicr_write_pendbaser(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GICR_xLPIR - only the lower bits are significant
 */
#define gic_read_lpir(c)		readl_relaxed(c)
#define gic_write_lpir(v, c)		writel_relaxed(lower_32_bits(v), c)

/*
 * GITS_TYPER is an ID register and doesn't need atomicity.
 */

@@ -291,5 +297,33 @@ static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 */
#define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GITS_VPROPBASER - hi and lo bits may be accessed independently.
 */
#define gits_write_vpropbaser(v, c)	__gic_writeq_nonatomic(v, c)

/*
 * GITS_VPENDBASER - the Valid bit must be cleared before changing
 * anything else.
 */
static inline void gits_write_vpendbaser(u64 val, void * __iomem addr)
{
	u32 tmp;

	tmp = readl_relaxed(addr + 4);
	if (tmp & (GICR_VPENDBASER_Valid >> 32)) {
		tmp &= ~(GICR_VPENDBASER_Valid >> 32);
		writel_relaxed(tmp, addr + 4);
	}

	/*
	 * Use the fact that __gic_writeq_nonatomic writes the second
	 * half of the 64bit quantity after the first.
	 */
	__gic_writeq_nonatomic(val, addr);
}

#define gits_read_vpendbaser(c)		__gic_readq_nonatomic(c)

#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */

@@ -39,6 +39,7 @@ config ARCH_HIP04
	select HAVE_ARM_ARCH_TIMER
	select MCPM if SMP
	select MCPM_QUAD_CLUSTER if SMP
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
	help
	  Support for Hisilicon HiP04 SoC family

@@ -653,21 +653,21 @@
};

msi1: msi-controller1@1571000 {
	compatible = "fsl,1s1043a-msi";
	compatible = "fsl,ls1043a-msi";
	reg = <0x0 0x1571000 0x0 0x8>;
	msi-controller;
	interrupts = <0 116 0x4>;
};

msi2: msi-controller2@1572000 {
	compatible = "fsl,1s1043a-msi";
	compatible = "fsl,ls1043a-msi";
	reg = <0x0 0x1572000 0x0 0x8>;
	msi-controller;
	interrupts = <0 126 0x4>;
};

msi3: msi-controller3@1573000 {
	compatible = "fsl,1s1043a-msi";
	compatible = "fsl,ls1043a-msi";
	reg = <0x0 0x1573000 0x0 0x8>;
	msi-controller;
	interrupts = <0 160 0x4>;

@@ -689,7 +689,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */
	  0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
msi-parent = <&msi1>;
msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 110 0x4>,

@@ -714,7 +714,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */
	  0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
msi-parent = <&msi2>;
msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 120 0x4>,

@@ -739,7 +739,7 @@
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */
	  0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */
msi-parent = <&msi3>;
msi-parent = <&msi1>, <&msi2>, <&msi3>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,

@@ -630,6 +630,37 @@
	interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&clockgen 4 1>;
};

msi1: msi-controller@1580000 {
	compatible = "fsl,ls1046a-msi";
	msi-controller;
	reg = <0x0 0x1580000 0x0 0x10000>;
	interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
};

msi2: msi-controller@1590000 {
	compatible = "fsl,ls1046a-msi";
	msi-controller;
	reg = <0x0 0x1590000 0x0 0x10000>;
	interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
};

msi3: msi-controller@15a0000 {
	compatible = "fsl,ls1046a-msi";
	msi-controller;
	reg = <0x0 0x15a0000 0x0 0x10000>;
	interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
		     <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
};

};

reserved-memory {

@@ -116,6 +116,8 @@ static inline void gic_write_bpr1(u32 val)

#define gic_read_typer(c)		readq_relaxed(c)
#define gic_write_irouter(v, c)		writeq_relaxed(v, c)
#define gic_read_lpir(c)		readq_relaxed(c)
#define gic_write_lpir(v, c)		writeq_relaxed(v, c)

#define gic_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

@@ -133,5 +135,10 @@ static inline void gic_write_bpr1(u32 val)
#define gicr_write_pendbaser(v, c)	writeq_relaxed(v, c)
#define gicr_read_pendbaser(c)		readq_relaxed(c)

#define gits_write_vpropbaser(v, c)	writeq_relaxed(v, c)

#define gits_write_vpendbaser(v, c)	writeq_relaxed(v, c)
#define gits_read_vpendbaser(c)		readq_relaxed(c)

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */

@@ -26,6 +26,7 @@ config METAG
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_UNDERSCORE_SYMBOL_PREFIX
	select IRQ_DOMAIN
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK
	select MODULES_USE_ELF_RELA
	select OF
	select OF_EARLY_FLATTREE

@@ -7,6 +7,7 @@ config ARM_GIC
	select IRQ_DOMAIN
	select IRQ_DOMAIN_HIERARCHY
	select MULTI_IRQ_HANDLER
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config ARM_GIC_PM
	bool

@@ -34,6 +35,7 @@ config ARM_GIC_V3
	select MULTI_IRQ_HANDLER
	select IRQ_DOMAIN_HIERARCHY
	select PARTITION_PERCPU
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config ARM_GIC_V3_ITS
	bool

@@ -64,6 +66,7 @@ config ARMADA_370_XP_IRQ
	bool
	select GENERIC_IRQ_CHIP
	select PCI_MSI if PCI
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config ALPINE_MSI
	bool

@@ -93,11 +96,13 @@ config BCM6345_L1_IRQ
	bool
	select GENERIC_IRQ_CHIP
	select IRQ_DOMAIN
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config BCM7038_L1_IRQ
	bool
	select GENERIC_IRQ_CHIP
	select IRQ_DOMAIN
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config BCM7120_L2_IRQ
	bool

@@ -136,6 +141,7 @@ config IRQ_MIPS_CPU
	select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING
	select IRQ_DOMAIN
	select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config CLPS711X_IRQCHIP
	bool

@@ -217,6 +223,7 @@ config VERSATILE_FPGA_IRQ_NR
config XTENSA_MX
	bool
	select IRQ_DOMAIN
	select GENERIC_IRQ_EFFECTIVE_AFF_MASK

config XILINX_INTC
	bool

@@ -306,3 +313,11 @@ config QCOM_IRQ_COMBINER
	help
	  Say yes here to add support for the IRQ combiner devices embedded
	  in Qualcomm Technologies chips.

config IRQ_UNIPHIER_AIDET
	bool "UniPhier AIDET support" if COMPILE_TEST
	depends on ARCH_UNIPHIER || COMPILE_TEST
	default ARCH_UNIPHIER
	select IRQ_DOMAIN_HIERARCHY
	help
	  Support for the UniPhier AIDET (ARM Interrupt Detector).

@@ -28,7 +28,7 @@ obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o

@@ -78,3 +78,4 @@ obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o

@@ -203,7 +203,7 @@ static struct irq_chip armada_370_xp_msi_irq_chip = {

static struct msi_domain_info armada_370_xp_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_MULTI_PCI_MSI),
		  MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.chip	= &armada_370_xp_msi_irq_chip,
};

@@ -330,6 +330,8 @@ static int armada_xp_set_affinity(struct irq_data *d,
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
#endif

@@ -363,6 +365,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	}
	irq_set_probe(virq);

@@ -147,13 +147,12 @@ static int __init armctrl_of_init(struct device_node *node,

	base = of_iomap(node, 0);
	if (!base)
		panic("%s: unable to map IC registers\n",
		      node->full_name);
		panic("%pOF: unable to map IC registers\n", node);

	intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0),
					    &armctrl_ops, NULL);
	if (!intc.domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);
		panic("%pOF: unable to create IRQ domain\n", node);

	for (b = 0; b < NR_BANKS; b++) {
		intc.pending[b] = base + reg_pending[b];

@@ -173,8 +172,8 @@ static int __init armctrl_of_init(struct device_node *node,
		int parent_irq = irq_of_parse_and_map(node, 0);

		if (!parent_irq) {
			panic("%s: unable to get parent interrupt.\n",
			      node->full_name);
			panic("%pOF: unable to get parent interrupt.\n",
			      node);
		}
		irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq);
	} else {

@@ -282,8 +282,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
{
	intc.base = of_iomap(node, 0);
	if (!intc.base) {
		panic("%s: unable to map local interrupt registers\n",
		      node->full_name);
		panic("%pOF: unable to map local interrupt registers\n", node);
	}

	bcm2835_init_local_timer_frequency();

@@ -292,7 +291,7 @@ static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node,
					    &bcm2836_arm_irqchip_intc_ops,
					    NULL);
	if (!intc.domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);
		panic("%pOF: unable to create IRQ domain\n", node);

	bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ,
					 &bcm2836_arm_irqchip_timer);

@@ -231,6 +231,8 @@ static int bcm6345_l1_set_affinity(struct irq_data *d,
	}
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));

	return IRQ_SET_MASK_OK_NOCOPY;
}

@@ -291,6 +293,7 @@ static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
	irq_set_chip_and_handler(virq,
				 &bcm6345_l1_irq_chip, handle_percpu_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}

@@ -212,6 +212,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
	__bcm7038_l1_unmask(d, first_cpu);

	raw_spin_unlock_irqrestore(&intc->lock, flags);
	irq_data_update_effective_affinity(d, cpumask_of(first_cpu));

	return 0;
}

@@ -299,6 +301,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
{
	irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return 0;
}

@@ -250,12 +250,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
	if (ret < 0)
		goto out_free_l1_data;

	for (idx = 0; idx < data->n_words; idx++) {
		__raw_writel(data->irq_fwd_mask[idx],
			     data->pair_base[idx] +
			     data->en_offset[idx]);
	}

	for (irq = 0; irq < data->num_parent_irqs; irq++) {
		ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask);
		if (ret)

@@ -297,6 +291,10 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
		gc->reg_base = data->pair_base[idx];
		ct->regs.mask = data->en_offset[idx];

		/* gc->reg_base is defined and so is gc->writel */
		irq_reg_writel(gc, data->irq_fwd_mask[idx],
			       data->en_offset[idx]);

		ct->chip.irq_mask = irq_gc_mask_clr_bit;
		ct->chip.irq_unmask = irq_gc_mask_set_bit;
		ct->chip.irq_ack = irq_gc_noop;

@@ -341,13 +341,13 @@ static int __init irqcrossbar_init(struct device_node *node,
	int err;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

@@ -360,7 +360,7 @@ static int __init irqcrossbar_init(struct device_node *node,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%s: failed to allocated domain\n", node->full_name);
		pr_err("%pOF: failed to allocated domain\n", node);
		return -ENOMEM;
	}

@@ -78,7 +78,7 @@ static int __init digicolor_of_init(struct device_node *node,

	reg_base = of_iomap(node, 0);
	if (!reg_base) {
		pr_err("%s: unable to map IC registers\n", node->full_name);
		pr_err("%pOF: unable to map IC registers\n", node);
		return -ENXIO;
	}

@@ -88,7 +88,7 @@ static int __init digicolor_of_init(struct device_node *node,

	ucregs = syscon_regmap_lookup_by_phandle(node, "syscon");
	if (IS_ERR(ucregs)) {
		pr_err("%s: unable to map UC registers\n", node->full_name);
		pr_err("%pOF: unable to map UC registers\n", node);
		return PTR_ERR(ucregs);
	}
	/* channel 1, regular IRQs */

@@ -97,7 +97,7 @@ static int __init digicolor_of_init(struct device_node *node,
	digicolor_irq_domain =
		irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL);
	if (!digicolor_irq_domain) {
		pr_err("%s: unable to create IRQ domain\n", node->full_name);
		pr_err("%pOF: unable to create IRQ domain\n", node);
		return -ENOMEM;
	}

@@ -105,7 +105,7 @@ static int __init digicolor_of_init(struct device_node *node,
					     "digicolor_irq", handle_level_irq,
					     clr, 0, 0);
	if (ret) {
		pr_err("%s: unable to allocate IRQ gc\n", node->full_name);
		pr_err("%pOF: unable to allocate IRQ gc\n", node);
		return ret;
	}

@@ -79,24 +79,24 @@ static int __init dw_apb_ictl_init(struct device_node *np,
	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		pr_err("%pOF: unable to parse irq\n", np);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		pr_err("%pOF: unable to get resource\n", np);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		pr_err("%pOF: unable to request mem region\n", np);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		pr_err("%pOF: unable to map resource\n", np);
		ret = -ENOMEM;
		goto err_release;
	}

@@ -123,7 +123,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		pr_err("%pOF: unable to add irq domain\n", np);
		ret = -ENOMEM;
		goto err_unmap;
	}

@@ -132,7 +132,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
					     handle_level_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		pr_err("%pOF: unable to alloc irq domain gc\n", np);
		goto err_unmap;
	}

@@ -138,7 +138,7 @@ static int __init its_pci_of_msi_init(void)
		if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
			continue;

		pr_info("PCI/MSI: %s domain created\n", np->full_name);
		pr_info("PCI/MSI: %pOF domain created\n", np);
	}

	return 0;

(File diff suppressed because it is too large.)

@@ -1,5 +1,5 @@
/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify

@@ -423,24 +423,14 @@ static void __init gic_dist_init(void)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_populate_rdist(void)
static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int ret = -ENODEV;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;

@@ -452,15 +442,9 @@ static int gic_populate_rdist(void)

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
					smp_processor_id(), mpidr, i,
					&gic_data_rdist()->phys_base);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;
			}

			if (gic_data.redist_regions[i].single_redist)
				break;

@@ -475,12 +459,71 @@ static int gic_populate_rdist(void)
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_vlpi_properties(struct redist_region *region,
					void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
	gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS);

	return 1;
}

static void gic_update_vlpi_properties(void)
{
	gic_iterate_rdists(__gic_update_vlpi_properties);
	pr_info("%sVLPI support, %sdirect LPI support\n",
		!gic_data.rdists.has_vlpis ? "no " : "",
		!gic_data.rdists.has_direct_lpi ? "no " : "");
}

static void gic_cpu_sys_reg_init(void)
{
	/*

@@ -677,6 +720,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	else
		gic_dist_wait_for_rwp();

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else

@@ -775,6 +820,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {

@@ -953,6 +999,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;

@@ -961,6 +1009,8 @@ static int __init gic_init_bases(void __iomem *dist_base,

	set_handle_irq(gic_handle_irq);

	gic_update_vlpi_properties();

	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_init(handle, &gic_data.rdists, gic_data.domain);

@@ -1067,7 +1117,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
			if (WARN_ON(cpu == -1))
				continue;

			pr_cont("%s[%d] ", cpu_node->full_name, cpu);
			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
		}

@@ -1122,6 +1172,7 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

@@ -1135,15 +1186,13 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

@@ -1163,8 +1212,7 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}

@@ -1418,6 +1466,7 @@ static void __init gic_acpi_setup_kvm_info(void)
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_set_kvm_info(&gic_v3_kvm_info);
}

drivers/irqchip/irq-gic-v4.c (new file, 225 lines)

@@ -0,0 +1,225 @@
/*
 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/sched.h>

#include <linux/irqchip/arm-gic-v4.h>

/*
 * WARNING: The blurb below assumes that you understand the
 * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets
 * translated into GICv4 commands. So it effectively targets at most
 * two individuals. You know who you are.
 *
 * The core GICv4 code is designed to *avoid* exposing too much of the
 * core GIC code (that would in turn leak into the hypervisor code),
 * and instead provide a hypervisor agnostic interface to the HW (of
 * course, the astute reader will quickly realize that hypervisor
 * agnostic actually means KVM-specific - what were you thinking?).
 *
 * In order to achieve a modicum of isolation, we try to hide most of
 * the GICv4 "stuff" behind normal irqchip operations:
 *
 * - Any guest-visible VLPI is backed by a Linux interrupt (and a
 *   physical LPI which gets unmapped when the guest maps the
 *   VLPI). This allows the same DevID/EventID pair to be either
 *   mapped to the LPI (host) or the VLPI (guest). Note that this is
 *   exclusive, and you cannot have both.
 *
 * - Enabling/disabling a VLPI is done by issuing mask/unmask calls.
 *
 * - Guest INT/CLEAR commands are implemented through
 *   irq_set_irqchip_state().
 *
 * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or
 *   issuing an INV after changing a priority) gets shoved into the
 *   irq_set_vcpu_affinity() method. While this is quite horrible
 *   (let's face it, this is the irqchip version of an ioctl), it
 *   confines the crap to a single location. And map/unmap really is
 *   about setting the affinity of a VLPI to a vcpu, so only INV is
 *   majorly out of place. So there.
 *
 * A number of commands are simply not provided by this interface, as
 * they do not make direct sense. For example, MAPD is purely local to
 * the virtual ITS (because it references a virtual device, and the
 * physical ITS is still very much in charge of the physical
 * device). Same goes for things like MAPC (the physical ITS deals
 * with the actual vPE affinity, and not the braindead concept of
 * collection). SYNC is not provided either, as each and every command
 * is followed by a VSYNC. This could be relaxed in the future, should
 * this be seen as a bottleneck (yes, this means *never*).
 *
 * But handling VLPIs is only one side of the job of the GICv4
 * code. The other (darker) side is to take care of the doorbell
 * interrupts which are delivered when a VLPI targeting a non-running
 * vcpu is being made pending.
 *
 * The choice made here is that each vcpu (VPE in old northern GICv4
 * dialect) gets a single doorbell LPI, no matter how many interrupts
 * are targeting it. This has a nice property, which is that the
 * interrupt becomes a handle for the VPE, and that the hypervisor
 * code can manipulate it through the normal interrupt API:
 *
 * - VMs (or rather the VM abstraction that matters to the GIC)
 *   contain an irq domain where each interrupt maps to a VPE. In
 *   turn, this domain sits on top of the normal LPI allocator, and a
 *   specially crafted irq_chip implementation.
 *
 * - mask/unmask do what is expected on the doorbell interrupt.
 *
 * - irq_set_affinity is used to move a VPE from one redistributor to
 *   another.
 *
 * - irq_set_vcpu_affinity once again gets hijacked for the purpose of
 *   creating a new sub-API, namely scheduling/descheduling a VPE
 *   (which involves programming GICR_V{PROP,PEND}BASER) and
 *   performing INVALL operations.
 */

static struct irq_domain *gic_domain;
static const struct irq_domain_ops *vpe_domain_ops;

int its_alloc_vcpu_irqs(struct its_vm *vm)
{
	int vpe_base_irq, i;

	vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe",
						      task_pid_nr(current));
	if (!vm->fwnode)
		goto err;

	vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes,
						 vm->fwnode, vpe_domain_ops,
						 vm);
	if (!vm->domain)
		goto err;

	for (i = 0; i < vm->nr_vpes; i++) {
		vm->vpes[i]->its_vm = vm;
		vm->vpes[i]->idai = true;
	}

	vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
					       NUMA_NO_NODE, vm,
					       false, NULL);
	if (vpe_base_irq <= 0)
		goto err;

	for (i = 0; i < vm->nr_vpes; i++)
		vm->vpes[i]->irq = vpe_base_irq + i;

	return 0;

err:
	if (vm->domain)
		irq_domain_remove(vm->domain);
	if (vm->fwnode)
		irq_domain_free_fwnode(vm->fwnode);

	return -ENOMEM;
}

void its_free_vcpu_irqs(struct its_vm *vm)
{
	irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes);
	irq_domain_remove(vm->domain);
	irq_domain_free_fwnode(vm->fwnode);
}

static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
{
	return irq_set_vcpu_affinity(vpe->irq, info);
}

int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
	struct its_cmd_info info;

	WARN_ON(preemptible());

	info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;

	return its_send_vpe_cmd(vpe, &info);
}

int its_invall_vpe(struct its_vpe *vpe)
{
	struct its_cmd_info info = {
		.cmd_type = INVALL_VPE,
	};

	return its_send_vpe_cmd(vpe, &info);
}

int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type = MAP_VLPI,
		.map      = map,
	};

	/*
	 * The host will never see that interrupt firing again, so it
	 * is vital that we don't do any lazy masking.
	 */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	return irq_set_vcpu_affinity(irq, &info);
}

int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
	struct its_cmd_info info = {
		.cmd_type = GET_VLPI,
		.map      = map,
	};

	return irq_set_vcpu_affinity(irq, &info);
}

int its_unmap_vlpi(int irq)
{
	irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return irq_set_vcpu_affinity(irq, NULL);
}

int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
	struct its_cmd_info info = {
		.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
		.config   = config,
	};

	return irq_set_vcpu_affinity(irq, &info);
}

int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops)
{
	if (domain) {
		pr_info("ITS: Enabling GICv4 support\n");
		gic_domain = domain;
		vpe_domain_ops = ops;
		return 0;
	}

	pr_err("ITS: No GICv4 VPE domain allocated\n");
	return -ENODEV;
}

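To make the shape of this new interface concrete, here is a rough, illustrative sketch of how a hypervisor backend might drive it. It is not taken from KVM: the example_enable_v4_for_vm function and its error handling are invented for illustration, and only the struct fields that irq-gic-v4.c itself touches are assumed.

#include <linux/irqchip/arm-gic-v4.h>
#include <linux/preempt.h>

/*
 * Illustrative only: allocate the per-VPE doorbell interrupts for a VM,
 * make the first vcpu's VPE resident, and forward a host LPI to the
 * guest as a VLPI. Error handling is simplified for the example.
 */
static int example_enable_v4_for_vm(struct its_vm *vm, int host_irq,
				    struct its_vlpi_map *map)
{
	int ret;

	/* One doorbell LPI per VPE; vm->nr_vpes and vm->vpes[] are filled in by the caller. */
	ret = its_alloc_vcpu_irqs(vm);
	if (ret)
		return ret;

	/* Scheduling a VPE must be done with preemption disabled. */
	preempt_disable();
	ret = its_schedule_vpe(vm->vpes[0], true);
	preempt_enable();
	if (ret)
		goto out_free;

	/* Hand the physical LPI over to the guest as a VLPI. */
	ret = its_map_vlpi(host_irq, map);
	if (ret)
		goto out_free;

	return 0;

out_free:
	its_free_vcpu_irqs(vm);
	return ret;
}
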
@@ -344,6 +344,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	writel_relaxed(val | bit, reg);
	gic_unlock_irqrestore(flags);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

@@ -413,7 +415,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
static const struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,

@@ -969,6 +971,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	return 0;
}

@@ -165,6 +165,8 @@ static int hip04_irq_set_affinity(struct irq_data *d,
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
#endif

@@ -312,6 +314,7 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
		irq_set_chip_and_handler(irq, &hip04_irq_chip,
					 handle_fasteoi_irq);
		irq_set_probe(irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;

@@ -214,13 +214,13 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
	int i;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to get parent domain\n", node->full_name);
		pr_err("%pOF: unable to get parent domain\n", node);
		return -ENXIO;
	}

@@ -191,7 +191,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,

	irqc->base = of_iomap(node, 0);
	if (!irqc->base) {
		pr_err("%s: unable to map registers\n", node->full_name);
		pr_err("%pOF: unable to map registers\n", node);
		kfree(irqc);
		return -EINVAL;
	}

@@ -17,13 +17,32 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>

#define MSI_MAX_IRQS		32
#define MSI_IBS_SHIFT		3
#define MSIR			4
#define MSI_IRQS_PER_MSIR	32
#define MSI_MSIR_OFFSET		4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

struct ls_scfg_msi_cfg {
	u32 ibs_shift; /* Shift of interrupt bit select */
	u32 msir_irqs; /* The irq number per MSIR */
	u32 msir_base; /* The base address of MSIR */
};

struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;
	unsigned int index;
	unsigned int gic_irq;
	unsigned int bit_start;
	unsigned int bit_end;
	unsigned int srs; /* Shared interrupt register select */
	void __iomem *reg;
};

struct ls_scfg_msi {
	spinlock_t		lock;

@@ -32,8 +51,11 @@ struct ls_scfg_msi {
	struct irq_domain	*msi_domain;
	void __iomem		*regs;
	phys_addr_t		msiir_addr;
	int			irq;
	DECLARE_BITMAP(used, MSI_MAX_IRQS);
	struct ls_scfg_msi_cfg	*cfg;
	u32			msir_num;
	struct ls_scfg_msir	*msir;
	u32			irqs_num;
	unsigned long		*used;
};

static struct irq_chip ls_scfg_msi_irq_chip = {

@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_msi_domain_info = {
	.chip	= &ls_scfg_msi_irq_chip,
};

static int msi_affinity_flag = 1;

static int __init early_parse_ls_scfg_msi(char *p)
{
	if (p && strncmp(p, "no-affinity", 11) == 0)
		msi_affinity_flag = 0;
	else
		msi_affinity_flag = 1;

	return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq << MSI_IBS_SHIFT;
	msg->data = data->hwirq;

	if (msi_affinity_flag)
		msg->data |= cpumask_first(data->common->affinity);
}

static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
	u32 cpu;

	if (!msi_affinity_flag)
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= msi_data->msir_num)
		return -EINVAL;

	if (msi_data->msir[cpu].gic_irq <= 0) {
		pr_warn("cannot bind the irq to cpu%d\n", cpu);
		return -EINVAL;
	}

	cpumask_copy(irq_data->common->affinity, mask);

	return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {

@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
	if (pos < MSI_MAX_IRQS)
	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
	if (pos < msi_data->irqs_num)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;

@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= MSI_MAX_IRQS) {
	if (pos < 0 || pos >= msi_data->irqs_num) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_scfg_msi_domain_ops = {

static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, virq;
	int pos, size, virq, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msi_data->regs + MSIR);
	for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
		virq = irq_find_mapping(msi_data->parent, (31 - pos));
	val = ioread32be(msir->reg);

	pos = msir->bit_start;
	size = msir->bit_end + 1;

	for_each_set_bit_from(pos, &val, size) {
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		virq = irq_find_mapping(msi_data->parent, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 MSI_MAX_IRQS,
						 msi_data->irqs_num,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {

@@ -164,16 +230,117 @@ static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
	return 0;
}

static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		msir->bit_start = 32 - ((msir->index + 1) *
				  MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		msir->bit_start = 0;
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate MSIR interrupt to the cpu */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0; /* This value is determined by the CPU */
	} else
		msir->srs = index;

	/* Release the hwirqs corresponding to this MSIR */
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}

static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
	struct ls_scfg_msi *msi_data = msir->msi_data;
	int i, hwirq;

	if (msir->gic_irq > 0)
		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
		bitmap_set(msi_data->used, hwirq, 1);
	}

	return 0;
}

static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
	/* The following two misspelled compatibles are obsolete */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},

	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int ret;
	int i, ret;

	match = of_match_device(ls_scfg_msi_id, &pdev->dev);
	if (!match)
		return -ENODEV;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(msi_data->regs)) {

@@ -182,23 +349,48 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
	}
	msi_data->msiir_addr = res->start;

	msi_data->irq = platform_get_irq(pdev, 0);
	if (msi_data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get MSI irq\n");
		return -ENODEV;
	}

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_kcalloc(&pdev->dev,
				      BITS_TO_LONGS(msi_data->irqs_num),
				      sizeof(*msi_data->used),
				      GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;
	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in ls1_msi_setup_hwirq()
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);

	msi_data->msir_num = of_irq_count(pdev->dev.of_node);

	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi_data->irq,
					 ls_scfg_msi_irq_handler,
					 msi_data);

	platform_set_drvdata(pdev, msi_data);

	return 0;

@@ -207,8 +399,10 @@ static int ls_scfg_msi_probe(struct platform_device *pdev)
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
	int i;

	irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

@@ -218,12 +412,6 @@ static int ls_scfg_msi_remove(struct platform_device *pdev)
	return 0;
}

static const struct of_device_id ls_scfg_msi_id[] = {
	{ .compatible = "fsl,1s1021a-msi", },
	{ .compatible = "fsl,1s1043a-msi", },
	{},
};

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",

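The hwirq numbering in the reworked driver packs the interrupt bit select (IBS) above the shared-register select (SRS). The helpers below are a small illustrative sketch, not part of the driver (the example_* names are made up), showing the encoding that ls_scfg_msi_irq_handler reverses when it turns a set MSIR bit back into a hwirq.

/*
 * Illustrative only: how a Layerscape SCFG MSI hwirq is composed from the
 * interrupt bit select (IBS) and shared-register select (SRS) fields.
 * ibs_shift is 3 on LS1021A and 2 on LS1043A/LS1046A (see the cfg tables).
 */
static inline unsigned int example_ls_scfg_encode_hwirq(unsigned int ibs,
							unsigned int srs,
							unsigned int ibs_shift)
{
	return (ibs << ibs_shift) | srs;
}

/*
 * The MSI message data carries the hwirq directly; when affinity support
 * is enabled, the target CPU index is OR'ed into the SRS bits, which is
 * why each MSIR register is bound to one CPU in ls_scfg_msi_setup_hwirq().
 */
static inline unsigned int example_ls_scfg_msg_data(unsigned int hwirq,
						    unsigned int cpu,
						    bool affinity_enabled)
{
	return affinity_enabled ? (hwirq | cpu) : hwirq;
}
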
@@ -518,6 +518,8 @@ static int meta_intc_set_affinity(struct irq_data *data,

	metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return 0;
}
#else

@@ -578,6 +580,8 @@ static int meta_intc_map(struct irq_domain *d, unsigned int irq,
	else
		irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
					 handle_edge_irq);

	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return 0;
}

@@ -445,24 +445,27 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;
	int		i, cpu;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	cpu = cpumask_first(&tmp);

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
	gic_map_to_vpe(irq, mips_cm_vp_id(cpu));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;

@@ -716,6 +719,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
	if (err)
		return err;

	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	return gic_shared_irq_domain_map(d, virq, hwirq, 0);
}

@@ -181,13 +181,13 @@ const struct irq_domain_ops mmp_irq_domain_ops = {
	.xlate = mmp_irq_domain_xlate,
};

static struct mmp_intc_conf mmp_conf = {
static const struct mmp_intc_conf mmp_conf = {
	.conf_enable	= 0x51,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,
};

static struct mmp_intc_conf mmp2_conf = {
static const struct mmp_intc_conf mmp2_conf = {
	.conf_enable	= 0x20,
	.conf_disable	= 0x0,
	.conf_mask	= 0x7f,

@@ -178,8 +178,7 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
		chip_data->intpol_words[i] = size / 4;
		chip_data->intpol_bases[i] = of_iomap(node, i);
		if (ret || !chip_data->intpol_bases[i]) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			pr_err("%pOF: couldn't map region %d\n", node, i);
			ret = -ENODEV;
			goto out_free_intpol;
		}

@@ -179,7 +179,7 @@ static void __init icoll_add_domain(struct device_node *np,
					     &icoll_irq_domain_ops, NULL);

	if (!icoll_domain)
		panic("%s: unable to create irq domain", np->full_name);
		panic("%pOF: unable to create irq domain", np);
}

static void __iomem * __init icoll_init_iobase(struct device_node *np)

@@ -188,7 +188,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)

	icoll_base = of_io_request_and_map(np, 0, np->name);
	if (IS_ERR(icoll_base))
		panic("%s: unable to map resource", np->full_name);
		panic("%pOF: unable to map resource", np);
	return icoll_base;
}

@@ -140,7 +140,7 @@ static int __init stm32_exti_init(struct device_node *node,

	base = of_iomap(node, 0);
	if (!base) {
		pr_err("%s: Unable to map registers\n", node->full_name);
		pr_err("%pOF: Unable to map registers\n", node);
		return -ENOMEM;
	}

@@ -149,7 +149,7 @@ static int __init stm32_exti_init(struct device_node *node,
	nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
	writel_relaxed(0, base + EXTI_RTSR);

	pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti);
	pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);

	domain = irq_domain_add_linear(node, nr_exti,
				       &irq_exti_domain_ops, NULL);

@@ -163,8 +163,8 @@ static int __init stm32_exti_init(struct device_node *node,
	ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
					     handle_edge_irq, clr, 0, 0);
	if (ret) {
		pr_err("%s: Could not allocate generic interrupt chip.\n",
			node->full_name);
		pr_err("%pOF: Could not allocate generic interrupt chip.\n",
			node);
		goto out_free_domain;
	}

@@ -97,8 +97,8 @@ static int __init sun4i_of_init(struct device_node *node,
{
	sun4i_irq_base = of_iomap(node, 0);
	if (!sun4i_irq_base)
		panic("%s: unable to map IC registers\n",
		      node->full_name);
		panic("%pOF: unable to map IC registers\n",
		      node);

	/* Disable all interrupts */
	writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));

@@ -124,7 +124,7 @@ static int __init sun4i_of_init(struct device_node *node,
	sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
						 &sun4i_irq_ops, NULL);
	if (!sun4i_irq_domain)
		panic("%s: unable to create IRQ domain\n", node->full_name);
		panic("%pOF: unable to create IRQ domain\n", node);

	set_handle_irq(sun4i_handle_irq);

@@ -291,13 +291,13 @@ static int __init tegra_ictlr_init(struct device_node *node,
	int err;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

@@ -329,29 +329,29 @@ static int __init tegra_ictlr_init(struct device_node *node,
	}

	if (!num_ictlrs) {
		pr_err("%s: no valid regions, giving up\n", node->full_name);
		pr_err("%pOF: no valid regions, giving up\n", node);
		err = -ENOMEM;
		goto out_free;
	}

	WARN(num_ictlrs != soc->num_ictlrs,
	     "%s: Found %u interrupt controllers in DT; expected %u.\n",
	     node->full_name, num_ictlrs, soc->num_ictlrs);
	     "%pOF: Found %u interrupt controllers in DT; expected %u.\n",
	     node, num_ictlrs, soc->num_ictlrs);


	domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32,
					  node, &tegra_ictlr_domain_ops,
					  lic);
	if (!domain) {
		pr_err("%s: failed to allocated domain\n", node->full_name);
		pr_err("%pOF: failed to allocated domain\n", node);
		err = -ENOMEM;
		goto out_unmap;
	}

	tegra_ictlr_syscore_init();

	pr_info("%s: %d interrupts forwarded to %s\n",
		node->full_name, num_ictlrs * 32, parent->full_name);
	pr_info("%pOF: %d interrupts forwarded to %pOF\n",
		node, num_ictlrs * 32, parent);

	return 0;

drivers/irqchip/irq-uniphier-aidet.c (new file, 261 lines)
@@ -0,0 +1,261 @@
/*
 * Driver for UniPhier AIDET (ARM Interrupt Detector)
 *
 * Copyright (C) 2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#define UNIPHIER_AIDET_NR_IRQS		256

#define UNIPHIER_AIDET_DETCONF		0x04	/* inverter register base */

struct uniphier_aidet_priv {
	struct irq_domain *domain;
	void __iomem *reg_base;
	spinlock_t lock;
	u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32];
};

static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv,
				      unsigned int reg, u32 mask, u32 val)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&priv->lock, flags);
	tmp = readl_relaxed(priv->reg_base + reg);
	tmp &= ~mask;
	tmp |= mask & val;
	writel_relaxed(tmp, priv->reg_base + reg);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv,
					  unsigned long index, unsigned int val)
{
	unsigned int reg;
	u32 mask;

	reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4;
	mask = BIT(index % 32);

	uniphier_aidet_reg_update(priv, reg, mask, val ? mask : 0);
}

static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct uniphier_aidet_priv *priv = data->chip_data;
	unsigned int val;

	/* enable inverter for active low triggers */
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		val = 0;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		val = 1;
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		val = 1;
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		return -EINVAL;
	}

	uniphier_aidet_detconf_update(priv, data->hwirq, val);

	return irq_chip_set_type_parent(data, type);
}

static struct irq_chip uniphier_aidet_irq_chip = {
	.name = "AIDET",
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_set_type = uniphier_aidet_irq_set_type,
};

static int uniphier_aidet_domain_translate(struct irq_domain *domain,
					   struct irq_fwspec *fwspec,
					   unsigned long *out_hwirq,
					   unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;

	*out_hwirq = fwspec->param[0];
	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int uniphier_aidet_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *arg)
{
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	if (nr_irqs != 1)
		return -EINVAL;

	ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		break;
	case IRQ_TYPE_EDGE_FALLING:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		return -EINVAL;
	}

	if (hwirq >= UNIPHIER_AIDET_NR_IRQS)
		return -ENXIO;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					    &uniphier_aidet_irq_chip,
					    domain->host_data);
	if (ret)
		return ret;

	/* parent is GIC */
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 3;
	parent_fwspec.param[0] = 0;		/* SPI */
	parent_fwspec.param[1] = hwirq;
	parent_fwspec.param[2] = type;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}

static const struct irq_domain_ops uniphier_aidet_domain_ops = {
	.alloc = uniphier_aidet_domain_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = uniphier_aidet_domain_translate,
};

static int uniphier_aidet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *parent_np;
	struct irq_domain *parent_domain;
	struct uniphier_aidet_priv *priv;
	struct resource *res;

	parent_np = of_irq_find_parent(dev->of_node);
	if (!parent_np)
		return -ENXIO;

	parent_domain = irq_find_host(parent_np);
	of_node_put(parent_np);
	if (!parent_domain)
		return -EPROBE_DEFER;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	spin_lock_init(&priv->lock);

	priv->domain = irq_domain_create_hierarchy(
					parent_domain, 0,
					UNIPHIER_AIDET_NR_IRQS,
					of_node_to_fwnode(dev->of_node),
					&uniphier_aidet_domain_ops, priv);
	if (!priv->domain)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	return 0;
}

static int __maybe_unused uniphier_aidet_suspend(struct device *dev)
{
	struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
		priv->saved_vals[i] = readl_relaxed(
			priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);

	return 0;
}

static int __maybe_unused uniphier_aidet_resume(struct device *dev)
{
	struct uniphier_aidet_priv *priv = dev_get_drvdata(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++)
		writel_relaxed(priv->saved_vals[i],
			       priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4);

	return 0;
}

static const struct dev_pm_ops uniphier_aidet_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend,
				      uniphier_aidet_resume)
};

static const struct of_device_id uniphier_aidet_match[] = {
	{ .compatible = "socionext,uniphier-ld4-aidet" },
	{ .compatible = "socionext,uniphier-pro4-aidet" },
	{ .compatible = "socionext,uniphier-sld8-aidet" },
	{ .compatible = "socionext,uniphier-pro5-aidet" },
	{ .compatible = "socionext,uniphier-pxs2-aidet" },
	{ .compatible = "socionext,uniphier-ld11-aidet" },
	{ .compatible = "socionext,uniphier-ld20-aidet" },
	{ .compatible = "socionext,uniphier-pxs3-aidet" },
	{ /* sentinel */ }
};

static struct platform_driver uniphier_aidet_driver = {
	.probe = uniphier_aidet_probe,
	.driver = {
		.name = "uniphier-aidet",
		.of_match_table = uniphier_aidet_match,
		.pm = &uniphier_aidet_pm_ops,
	},
};
builtin_platform_driver(uniphier_aidet_driver);

@@ -186,8 +186,8 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
	if (irqc->intr_mask >> nr_irq)
		pr_warn("irq-xilinx: mismatch in kind-of-intr param\n");

	pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n",
		intc->full_name, nr_irq, irqc->intr_mask);
	pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n",
		intc, nr_irq, irqc->intr_mask);


	/*

@@ -32,6 +32,7 @@ static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return xtensa_irq_map(d, irq, hw);
}

@@ -121,9 +122,12 @@ static int xtensa_mx_irq_retrigger(struct irq_data *d)
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
		const struct cpumask *dest, bool force)
{
	unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask);
	int cpu = cpumask_any_and(dest, cpu_online_mask);
	unsigned mask = 1u << cpu;

	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return 0;

}

@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
extern int irq_chip_pm_get(struct irq_data *data);
extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);

@@ -781,7 +783,10 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
	if (!cpumask_empty(d->common->effective_affinity))
		return d->common->effective_affinity;

	return d->common->affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						      const struct cpumask *m)

include/linux/irq_sim.h (new file, 44 lines)
@@ -0,0 +1,44 @@
#ifndef _LINUX_IRQ_SIM_H
#define _LINUX_IRQ_SIM_H
/*
 * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/irq_work.h>
#include <linux/device.h>

/*
 * Provides a framework for allocating simulated interrupts which can be
 * requested like normal irqs and enqueued from process context.
 */

struct irq_sim_work_ctx {
	struct irq_work work;
	int irq;
};

struct irq_sim_irq_ctx {
	int irqnum;
	bool enabled;
};

struct irq_sim {
	struct irq_sim_work_ctx work_ctx;
	int irq_base;
	unsigned int irq_count;
	struct irq_sim_irq_ctx *irqs;
};

int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
		      unsigned int num_irqs);
void irq_sim_fini(struct irq_sim *sim);
void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);

#endif /* _LINUX_IRQ_SIM_H */

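A minimal consumer of this interface might look like the sketch below. It is not part of the pull request; only the irq_sim_* calls come from the header above, and everything prefixed with example_ is a placeholder.

	#include <linux/irq_sim.h>
	#include <linux/printk.h>

	static struct irq_sim example_sim;

	static int example_sim_setup(void)
	{
		int ret;

		/* Allocate two simulated interrupt lines. */
		ret = irq_sim_init(&example_sim, 2);
		if (ret)
			return ret;

		/* The mapped irq number can be handed to request_irq() by a test. */
		pr_info("simulated irq at offset 0: %d\n",
			irq_sim_irqnum(&example_sim, 0));

		/* Enqueue line 0 from process context; the irq_work fires it later. */
		irq_sim_fire(&example_sim, 0);

		return 0;
	}

	static void example_sim_teardown(void)
	{
		irq_sim_fini(&example_sim);
	}

A devm_irq_sim_init() variant exists for managed devices, in which case the teardown call is unnecessary.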
@@ -27,6 +27,8 @@ struct gic_kvm_info {
	unsigned int maint_irq;
	/* Virtual control interface */
	struct resource vctrl;
	/* vlpi support */
	bool has_v4;
};

const struct gic_kvm_info *gic_get_kvm_info(void);

@@ -204,6 +204,7 @@

#define GICR_TYPER_PLPIS		(1U << 0)
#define GICR_TYPER_VLPIS		(1U << 1)
#define GICR_TYPER_DirectLPIS		(1U << 3)
#define GICR_TYPER_LAST			(1U << 4)

#define GIC_V3_REDIST_SIZE		0x20000

@@ -211,6 +212,69 @@
#define LPI_PROP_GROUP1			(1 << 1)
#define LPI_PROP_ENABLED		(1 << 0)

/*
 * Re-Distributor registers, offsets from VLPI_base
 */
#define GICR_VPROPBASER			0x0070

#define GICR_VPROPBASER_IDBITS_MASK	0x1f

#define GICR_VPROPBASER_SHAREABILITY_SHIFT		(10)
#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT	(7)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT	(56)

#define GICR_VPROPBASER_SHAREABILITY_MASK				\
	GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK)
#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK				\
	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK)
#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK				\
	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK)
#define GICR_VPROPBASER_CACHEABILITY_MASK				\
	GICR_VPROPBASER_INNER_CACHEABILITY_MASK

#define GICR_VPROPBASER_InnerShareable					\
	GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable)

#define GICR_VPROPBASER_nCnB	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
#define GICR_VPROPBASER_nC	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
#define GICR_VPROPBASER_RaWt	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
#define GICR_VPROPBASER_RaWb	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
#define GICR_VPROPBASER_WaWt	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
#define GICR_VPROPBASER_WaWb	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
#define GICR_VPROPBASER_RaWaWt	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
#define GICR_VPROPBASER_RaWaWb	GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)

#define GICR_VPENDBASER			0x0078

#define GICR_VPENDBASER_SHAREABILITY_SHIFT		(10)
#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT	(7)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT	(56)
#define GICR_VPENDBASER_SHAREABILITY_MASK				\
	GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK)
#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK				\
	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK)
#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK				\
	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK)
#define GICR_VPENDBASER_CACHEABILITY_MASK				\
	GICR_VPENDBASER_INNER_CACHEABILITY_MASK

#define GICR_VPENDBASER_NonShareable					\
	GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)

#define GICR_VPENDBASER_nCnB	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
#define GICR_VPENDBASER_RaWb	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
#define GICR_VPENDBASER_WaWt	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
#define GICR_VPENDBASER_WaWb	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
#define GICR_VPENDBASER_RaWaWt	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
#define GICR_VPENDBASER_RaWaWb	GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb)

#define GICR_VPENDBASER_Dirty		(1ULL << 60)
#define GICR_VPENDBASER_PendingLast	(1ULL << 61)
#define GICR_VPENDBASER_IDAI		(1ULL << 62)
#define GICR_VPENDBASER_Valid		(1ULL << 63)

/*
 * ITS registers, offsets from ITS_base
 */

@@ -234,15 +298,21 @@
#define GITS_TRANSLATER			0x10040

#define GITS_CTLR_ENABLE		(1U << 0)
#define GITS_CTLR_ImDe			(1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT	4
#define GITS_CTLR_ITS_NUMBER		(0xFU << GITS_CTLR_ITS_NUMBER_SHIFT)
#define GITS_CTLR_QUIESCENT		(1U << 31)

#define GITS_TYPER_PLPIS		(1UL << 0)
#define GITS_TYPER_VLPIS		(1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT	4
#define GITS_TYPER_ITT_ENTRY_SIZE(r)	((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_IDBITS_SHIFT		8
#define GITS_TYPER_DEVBITS_SHIFT	13
#define GITS_TYPER_DEVBITS(r)		((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA			(1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT	24
#define GITS_TYPER_VMOVP		(1ULL << 37)

#define GITS_IIDR_REV_SHIFT		12
#define GITS_IIDR_REV_MASK		(0xf << GITS_IIDR_REV_SHIFT)

@@ -341,6 +411,18 @@
#define GITS_CMD_CLEAR			0x04
#define GITS_CMD_SYNC			0x05

/*
 * GICv4 ITS specific commands
 */
#define GITS_CMD_GICv4(x)		((x) | 0x20)
#define GITS_CMD_VINVALL		GITS_CMD_GICv4(GITS_CMD_INVALL)
#define GITS_CMD_VMAPP			GITS_CMD_GICv4(GITS_CMD_MAPC)
#define GITS_CMD_VMAPTI			GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI			GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC			GITS_CMD_GICv4(GITS_CMD_SYNC)
/* VMOVP is the odd one, as it doesn't have a physical counterpart */
#define GITS_CMD_VMOVP			GITS_CMD_GICv4(2)

/*
 * ITS error numbers
 */

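Worked example of the encoding above (not part of the patch): each virtual command is the corresponding physical opcode with bit 5 set, so with GITS_CMD_SYNC being 0x05 the pair expands as shown below; the EXAMPLE_ names are hypothetical.

	#define EXAMPLE_VSYNC_OPCODE	GITS_CMD_GICv4(GITS_CMD_SYNC)	/* 0x05 | 0x20 == 0x25 */
	#define EXAMPLE_VMOVP_OPCODE	GITS_CMD_GICv4(2)		/* 0x02 | 0x20 == 0x22, no physical MOVP */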
@@ -487,6 +569,8 @@ struct rdists {
	struct page *prop_page;
	int id_bits;
	u64 flags;
	bool has_vlpis;
	bool has_direct_lpi;
};

struct irq_domain;

include/linux/irqchip/arm-gic-v4.h (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
 * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H
#define __LINUX_IRQCHIP_ARM_GIC_V4_H

struct its_vpe;

/* Embedded in kvm.arch */
struct its_vm {
	struct fwnode_handle	*fwnode;
	struct irq_domain	*domain;
	struct page		*vprop_page;
	struct its_vpe		**vpes;
	int			nr_vpes;
	irq_hw_number_t		db_lpi_base;
	unsigned long		*db_bitmap;
	int			nr_db_lpis;
};

/* Embedded in kvm_vcpu.arch */
struct its_vpe {
	struct page		*vpt_page;
	struct its_vm		*its_vm;
	/* Doorbell interrupt */
	int			irq;
	irq_hw_number_t		vpe_db_lpi;
	/* VPE proxy mapping */
	int			vpe_proxy_event;
	/*
	 * This collection ID is used to indirect the target
	 * redistributor for this VPE. The ID itself isn't involved in
	 * programming of the ITS.
	 */
	u16			col_idx;
	/* Unique (system-wide) VPE identifier */
	u16			vpe_id;
	/* Implementation Defined Area Invalid */
	bool			idai;
	/* Pending VLPIs on schedule out? */
	bool			pending_last;
};

/*
 * struct its_vlpi_map: structure describing the mapping of a
 * VLPI. Only to be interpreted in the context of a physical interrupt
 * it complements. To be used as the vcpu_info passed to
 * irq_set_vcpu_affinity().
 *
 * @vm:		Pointer to the GICv4 notion of a VM
 * @vpe:	Pointer to the GICv4 notion of a virtual CPU (VPE)
 * @vintid:	Virtual LPI number
 * @db_enabled:	Is the VPE doorbell to be generated?
 */
struct its_vlpi_map {
	struct its_vm		*vm;
	struct its_vpe		*vpe;
	u32			vintid;
	bool			db_enabled;
};

enum its_vcpu_info_cmd_type {
	MAP_VLPI,
	GET_VLPI,
	PROP_UPDATE_VLPI,
	PROP_UPDATE_AND_INV_VLPI,
	SCHEDULE_VPE,
	DESCHEDULE_VPE,
	INVALL_VPE,
};

struct its_cmd_info {
	enum its_vcpu_info_cmd_type	cmd_type;
	union {
		struct its_vlpi_map	*map;
		u8			config;
	};
};

int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
int its_schedule_vpe(struct its_vpe *vpe, bool on);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);

int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);

#endif

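As a rough illustration of how the interface above is meant to be driven (not part of the patch): its_map_vlpi() takes the Linux irq of the physical LPI backing a virtual one plus an its_vlpi_map describing the target VM and VPE. The functions and fields used below come from this header; example_forward_vlpi(), example_unforward_vlpi() and their arguments are hypothetical, and the vm/vpe objects are assumed to have been set up by the hypervisor side beforehand.

	static int example_forward_vlpi(struct its_vm *vm, struct its_vpe *vpe,
					int host_irq, u32 vintid)
	{
		struct its_vlpi_map map = {
			.vm		= vm,		/* GICv4 notion of the VM */
			.vpe		= vpe,		/* target virtual CPU */
			.vintid		= vintid,	/* virtual LPI number */
			.db_enabled	= true,		/* raise the doorbell if the VPE is not resident */
		};

		return its_map_vlpi(host_irq, &map);
	}

	/* Teardown is the reverse operation. */
	static void example_unforward_vlpi(int host_irq)
	{
		its_unmap_vlpi(host_irq);
	}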
@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
extern void irq_domain_free_irqs_top(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs);

extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);

extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
					unsigned int irq_base,
					unsigned int nr_irqs, void *arg);

@@ -63,11 +63,20 @@ config GENERIC_IRQ_CHIP
config IRQ_DOMAIN
	bool

# Support for simulated interrupts
config IRQ_SIM
	bool
	select IRQ_WORK

# Support for hierarchical irq domains
config IRQ_DOMAIN_HIERARCHY
	bool
	select IRQ_DOMAIN

# Support for hierarchical fasteoi+edge and fasteoi+level handlers
config IRQ_FASTEOI_HIERARCHY_HANDLERS
	bool

# Generic IRQ IPI support
config GENERIC_IRQ_IPI
	bool

@@ -4,6 +4,7 @@ obj-$(CONFIG_IRQ_TIMINGS) += timings.o
obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
obj-$(CONFIG_IRQ_SIM) += irq_sim.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o

@@ -1098,6 +1098,112 @@ void irq_cpu_offline(void)
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 * stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 * stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

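A hedged sketch of how a stacked irqchip driver could pick the new flow handlers from its irq_domain ->alloc() callback (not part of the patch): example_edge_chip and example_domain_alloc are placeholders, the hwirq derivation is elided, and the driver's Kconfig symbol would also have to select IRQ_FASTEOI_HIERARCHY_HANDLERS.

	static struct irq_chip example_edge_chip = {
		.name		= "example",
		.irq_ack	= irq_chip_ack_parent,
		.irq_eoi	= irq_chip_eoi_parent,
		.irq_mask	= irq_chip_mask_parent,
		.irq_unmask	= irq_chip_unmask_parent,
	};

	static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
					unsigned int nr_irqs, void *arg)
	{
		irq_hw_number_t hwirq = 0;	/* a real driver derives this from arg */

		/* Edge interrupts use the ack variant; level would use handle_fasteoi_mask_irq. */
		irq_domain_set_info(domain, virq, hwirq, &example_edge_chip, NULL,
				    handle_fasteoi_ack_irq, NULL, NULL);

		return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	}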
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)

@@ -1111,6 +1217,7 @@ void irq_chip_enable_parent(struct irq_data *data)
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if

@@ -1125,6 +1232,7 @@ void irq_chip_disable_parent(struct irq_data *data)
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt

@@ -1187,6 +1295,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt

@@ -5,6 +5,7 @@
 */
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/uaccess.h>

#include "internals.h"

@@ -171,8 +172,55 @@ static int irq_debug_open(struct inode *inode, struct file *file)
	return single_open(file, irq_debug_show, inode->i_private);
}

static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct irq_desc *desc = file_inode(file)->i_private;
	char buf[8] = { 0, };
	size_t size;

	size = min(sizeof(buf) - 1, count);
	if (copy_from_user(buf, user_buf, size))
		return -EFAULT;

	if (!strncmp(buf, "trigger", size)) {
		unsigned long flags;
		int err;

		/* Try the HW interface first */
		err = irq_set_irqchip_state(irq_desc_get_irq(desc),
					    IRQCHIP_STATE_PENDING, true);
		if (!err)
			return count;

		/*
		 * Otherwise, try to inject via the resend interface,
		 * which may or may not succeed.
		 */
		chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, flags);

		if (irq_settings_is_level(desc)) {
			/* Can't do level, sorry */
			err = -EINVAL;
		} else {
			desc->istate |= IRQS_PENDING;
			check_irq_resend(desc);
			err = 0;
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
		chip_bus_sync_unlock(desc);

		return err ? err : count;
	}

	return count;
}

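For reference, the in-kernel equivalent of writing "trigger" to the per-interrupt debugfs file is the irqchip-state call the handler tries first; a hypothetical caller, sketched under that assumption, would do:

	static int example_inject(unsigned int irq)
	{
		/* Ask the irqchip to latch the interrupt as pending. */
		return irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
	}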
static const struct file_operations dfs_irq_ops = {
	.open		= irq_debug_open,
	.write		= irq_debug_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,

@@ -186,7 +234,7 @@ void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
		return;

	sprintf(name, "%d", irq);
	desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc,
	desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
						 &dfs_irq_ops);
}

|
@ -151,7 +151,7 @@ static inline void chip_bus_sync_unlock(struct irq_desc *desc)
|
|||
#define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU)
|
||||
|
||||
#define for_each_action_of_desc(desc, act) \
|
||||
for (act = desc->act; act; act = act->next)
|
||||
for (act = desc->action; act; act = act->next)
|
||||
|
||||
struct irq_desc *
|
||||
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
|
||||
|
|
kernel/irq/irq_sim.c (new file, 164 lines)
@@ -0,0 +1,164 @@
/*
 * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/irq_sim.h>
#include <linux/irq.h>

struct irq_sim_devres {
	struct irq_sim *sim;
};

static void irq_sim_irqmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = false;
}

static void irq_sim_irqunmask(struct irq_data *data)
{
	struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data);

	irq_ctx->enabled = true;
}

static struct irq_chip irq_sim_irqchip = {
	.name		= "irq_sim",
	.irq_mask	= irq_sim_irqmask,
	.irq_unmask	= irq_sim_irqunmask,
};

static void irq_sim_handle_irq(struct irq_work *work)
{
	struct irq_sim_work_ctx *work_ctx;

	work_ctx = container_of(work, struct irq_sim_work_ctx, work);
	handle_simple_irq(irq_to_desc(work_ctx->irq));
}

/**
 * irq_sim_init - Initialize the interrupt simulator: allocate a range of
 *                dummy interrupts.
 *
 * @sim:        The interrupt simulator object to initialize.
 * @num_irqs:   Number of interrupts to allocate
 *
 * Returns 0 on success and a negative error number on failure.
 */
int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
{
	int i;

	sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL);
	if (!sim->irqs)
		return -ENOMEM;

	sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0);
	if (sim->irq_base < 0) {
		kfree(sim->irqs);
		return sim->irq_base;
	}

	for (i = 0; i < num_irqs; i++) {
		sim->irqs[i].irqnum = sim->irq_base + i;
		sim->irqs[i].enabled = false;
		irq_set_chip(sim->irq_base + i, &irq_sim_irqchip);
		irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]);
		irq_set_handler(sim->irq_base + i, &handle_simple_irq);
		irq_modify_status(sim->irq_base + i,
				  IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
	}

	init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq);
	sim->irq_count = num_irqs;

	return 0;
}
EXPORT_SYMBOL_GPL(irq_sim_init);

/**
 * irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt
 *                descriptors and allocated memory.
 *
 * @sim:        The interrupt simulator to tear down.
 */
void irq_sim_fini(struct irq_sim *sim)
{
	irq_work_sync(&sim->work_ctx.work);
	irq_free_descs(sim->irq_base, sim->irq_count);
	kfree(sim->irqs);
}
EXPORT_SYMBOL_GPL(irq_sim_fini);

static void devm_irq_sim_release(struct device *dev, void *res)
{
	struct irq_sim_devres *this = res;

	irq_sim_fini(this->sim);
}

/**
 * irq_sim_init - Initialize the interrupt simulator for a managed device.
 *
 * @dev:        Device to initialize the simulator object for.
 * @sim:        The interrupt simulator object to initialize.
 * @num_irqs:   Number of interrupts to allocate
 *
 * Returns 0 on success and a negative error number on failure.
 */
int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
		      unsigned int num_irqs)
{
	struct irq_sim_devres *dr;
	int rv;

	dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	rv = irq_sim_init(sim, num_irqs);
	if (rv) {
		devres_free(dr);
		return rv;
	}

	dr->sim = sim;
	devres_add(dev, dr);

	return 0;
}
EXPORT_SYMBOL_GPL(devm_irq_sim_init);

/**
 * irq_sim_fire - Enqueue an interrupt.
 *
 * @sim:        The interrupt simulator object.
 * @offset:     Offset of the simulated interrupt which should be fired.
 */
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
	if (sim->irqs[offset].enabled) {
		sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
		irq_work_queue(&sim->work_ctx.work);
	}
}
EXPORT_SYMBOL_GPL(irq_sim_fire);

/**
 * irq_sim_irqnum - Get the allocated number of a dummy interrupt.
 *
 * @sim:        The interrupt simulator object.
 * @offset:     Offset of the simulated interrupt for which to retrieve
 *              the number.
 */
int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset)
{
	return sim->irqs[offset].irqnum;
}
EXPORT_SYMBOL_GPL(irq_sim_irqnum);

@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_clear_mapping(struct irq_domain *domain,
				     irq_hw_number_t hwirq)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
}

static void irq_domain_set_mapping(struct irq_domain *domain,
				   irq_hw_number_t hwirq,
				   struct irq_data *irq_data)
{
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = irq_data->irq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
}

void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);

@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = 0;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&domain->revmap_tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
	}
	irq_domain_clear_mapping(domain, hwirq);
}

int irq_domain_associate(struct irq_domain *domain, unsigned int virq,

@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
	}

	domain->mapcount++;
	if (hwirq < domain->revmap_size) {
		domain->linear_revmap[hwirq] = virq;
	} else {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount++;
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = virq;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_tree, hwirq, data);
			mutex_unlock(&revmap_trees_mutex);
		}
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)

@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		if (hwirq < domain->revmap_size) {
			domain->linear_revmap[hwirq] = 0;
		} else {
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
		}
		irq_domain_clear_mapping(domain, hwirq);
	}
}

@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
					   unsigned int irq_base,
					   unsigned int nr_irqs)
{
	domain->ops->free(domain, irq_base, nr_irqs);
	if (domain->ops->free)
		domain->ops->free(domain, irq_base, nr_irqs);
}

int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,

@@ -1448,6 +1449,175 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
	return ret;
}

/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
	void **slot;

	if (d->hwirq < d->domain->revmap_size)
		return; /* Not using radix tree. */

	/* Fix up the revmap. */
	mutex_lock(&revmap_trees_mutex);
	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
	if (slot)
		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
	mutex_unlock(&revmap_trees_mutex);
}

/**
 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
 * @domain:	Domain to push.
 * @virq:	Irq to push the domain in to.
 * @arg:	Passed to the irq_domain_ops alloc() function.
 *
 * For an already existing irqdomain hierarchy, as might be obtained
 * via a call to pci_enable_msix(), add an additional domain to the
 * head of the processing chain.  Must be called before request_irq()
 * has been called.
 */
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
	struct irq_data *child_irq_data;
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_desc *desc;
	int rv = 0;

	/*
	 * Check that no action has been set, which indicates the virq
	 * is in a state where this function doesn't have to deal with
	 * races between interrupt handling and maintaining the
	 * hierarchy.  This will catch gross misuse.  Attempting to
	 * make the check race free would require holding locks across
	 * calls to struct irq_domain_ops->alloc(), which could lead
	 * to deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	if (domain->parent != root_irq_data->domain)
		return -EINVAL;

	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
				      irq_data_get_node(root_irq_data));
	if (!child_irq_data)
		return -ENOMEM;

	mutex_lock(&irq_domain_mutex);

	/* Copy the original irq_data. */
	*child_irq_data = *root_irq_data;

	/*
	 * Overwrite the root_irq_data, which is embedded in struct
	 * irq_desc, with values for this domain.
	 */
	root_irq_data->parent_data = child_irq_data;
	root_irq_data->domain = domain;
	root_irq_data->mask = 0;
	root_irq_data->hwirq = 0;
	root_irq_data->chip = NULL;
	root_irq_data->chip_data = NULL;

	/* May (probably does) set hwirq, chip, etc. */
	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
	if (rv) {
		/* Restore the original irq_data. */
		*root_irq_data = *child_irq_data;
		goto error;
	}

	irq_domain_fix_revmap(child_irq_data);
	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);

error:
	mutex_unlock(&irq_domain_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);

/**
 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
 * @domain:	Domain to remove.
 * @virq:	Irq to remove the domain from.
 *
 * Undo the effects of a call to irq_domain_push_irq().  Must be
 * called either before request_irq() or after free_irq().
 */
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_data *child_irq_data;
	struct irq_data *tmp_irq_data;
	struct irq_desc *desc;

	/*
	 * Check that no action is set, which indicates the virq is in
	 * a state where this function doesn't have to deal with races
	 * between interrupt handling and maintaining the hierarchy.
	 * This will catch gross misuse.  Attempting to make the check
	 * race free would require holding locks across calls to
	 * struct irq_domain_ops->free(), which could lead to
	 * deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	tmp_irq_data = irq_domain_get_irq_data(domain, virq);

	/* We can only "pop" if this domain is at the top of the list */
	if (WARN_ON(root_irq_data != tmp_irq_data))
		return -EINVAL;

	if (WARN_ON(root_irq_data->domain != domain))
		return -EINVAL;

	child_irq_data = root_irq_data->parent_data;
	if (WARN_ON(!child_irq_data))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);

	root_irq_data->parent_data = NULL;

	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
	irq_domain_free_irqs_hierarchy(domain, virq, 1);

	/* Restore the original irq_data. */
	*root_irq_data = *child_irq_data;

	irq_domain_fix_revmap(root_irq_data);

	mutex_unlock(&irq_domain_mutex);

	kfree(child_irq_data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);

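A hedged sketch of the intended calling pattern (not part of the patch): a driver that already owns an allocated interrupt pushes its own hierarchical domain on top of it before the interrupt is requested, and pops it again on teardown. example_domain, example_alloc_arg and the wrapper names stand in for whatever the stacking driver actually uses.

	static int example_stack(struct irq_domain *example_domain, int virq,
				 void *example_alloc_arg)
	{
		/* Must run before request_irq() on virq. */
		return irq_domain_push_irq(example_domain, virq, example_alloc_arg);
	}

	static void example_unstack(struct irq_domain *example_domain, int virq)
	{
		/* Only legal before request_irq() or after free_irq(). */
		irq_domain_pop_irq(example_domain, virq);
	}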
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq:	base IRQ number

@@ -400,8 +400,18 @@ int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

@@ -61,12 +61,12 @@ static int show_irq_affinity(int type, struct seq_file *m)
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = desc->irq_common_data.effective_affinity;
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#else
		return -EINVAL;
#endif
	};
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST: