kernel-fxtec-pro1x/arch/powerpc/platforms/cell/axon_msi.c
Arnd Bergmann 23e0e8afaf powerpc/cell/axon-msi: Fix MSI after kexec
Commit d015fe995 'powerpc/cell/axon-msi: Retry on missing interrupt'
has turned a rare failure to kexec on QS22 into a reproducible
error, which we have now analysed.

The problem is that after a kexec, the MSIC hardware still points
into the middle of the old ring buffer.  We set up the ring buffer
during reboot, but not the offset into it.  On older kernels this
caused a storm of thousands of spurious interrupts after a kexec,
most of which were silently dropped.

With the new code, we time out on each interrupt while waiting for
it to become valid.  Because the software read offset starts at zero
while the hardware is still writing from its old position, the handler
never finds a valid entry; as more interrupts come in and also time
out, this goes on indefinitely, which eventually leads to a hard crash.
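
For reference, this is the loop in axon_msi_cascade() (shown in full in
the file below) that produces the hang, condensed to its essentials:

        while (msic->read_offset != write_offset && retry < 100) {
                msi = le32_to_cpu(msic->fifo_virt[idx]) & 0xFFFF;
                if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
                        generic_handle_irq(msi);
                } else {
                        udelay(1);      /* entry looks stale, wait and retry */
                        retry++;
                        continue;
                }
                ...
        }

        if (retry)
                printk(KERN_WARNING "axon_msi: irq timed out\n");

With a stale write offset left over from the previous kernel, every
entry looks stale, so each interrupt burns through all 100 retries
before giving up.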

The solution in this commit is to read the current offset back from
the MSIC when reinitializing it, so the driver starts consuming the
FIFO from wherever the hardware will actually write.  With this
change, MSIs work correctly after a kexec, as expected.
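
Concretely, the change is in axon_msi_probe(): instead of leaving the
software read offset at zero, it is now seeded from the MSIC's write
offset register, i.e. these lines in the file below:

        msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
                                & MSIC_FIFO_SIZE_MASK;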

Reported-by: Dirk Herrendoerfer <d.herrendoerfer@de.ibm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2008-12-16 13:48:18 +11:00

/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>

#include <asm/dcr.h>
#include <asm/machdep.h>
#include <asm/prom.h>

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG 0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG 0x3
#define MSIC_BASE_ADDR_LO_REG 0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG 0x5
#define MSIC_WRITE_OFFSET_REG 0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE 0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002
#define MSIC_CTRL_IRQ_ENABLE 0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE 0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT 16
#define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
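 * For the 64KB FIFO used here (n = 16), that means writing (16 - 15) = 1,
 * which lands in the control register as 0x100.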
 */
#define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE 0x10

struct axon_msic {
        struct irq_host *irq_host;
        __le32 *fifo_virt;
        dma_addr_t fifo_phys;
        dcr_host_t dcr_host;
        u32 read_offset;
#ifdef DEBUG
        u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
                                        struct axon_msic *msic) { }
#endif

static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
        pr_debug("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);

        dcr_write(msic->dcr_host, dcr_n, val);
}

static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
{
        struct axon_msic *msic = get_irq_data(irq);
        u32 write_offset, msi;
        int idx;
        int retry = 0;

        write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
        pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);

        /* write_offset doesn't wrap properly, so we have to mask it */
        write_offset &= MSIC_FIFO_SIZE_MASK;

        while (msic->read_offset != write_offset && retry < 100) {
                idx = msic->read_offset / sizeof(__le32);
                msi = le32_to_cpu(msic->fifo_virt[idx]);
                msi &= 0xFFFF;

                pr_debug("axon_msi: woff %x roff %x msi %x\n",
                         write_offset, msic->read_offset, msi);

                if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
                        generic_handle_irq(msi);
                        msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
                } else {
                        /*
                         * Reading the MSIC_WRITE_OFFSET_REG does not
                         * reliably flush the outstanding DMA to the
                         * FIFO buffer. Here we were reading stale
                         * data, so we need to retry.
                         */
                        udelay(1);
                        retry++;
                        pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
                        continue;
                }

                if (retry) {
                        pr_debug("axon_msi: late irq 0x%x, retry %d\n",
                                 msi, retry);
                        retry = 0;
                }

                msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
                msic->read_offset &= MSIC_FIFO_SIZE_MASK;
        }

        if (retry) {
                printk(KERN_WARNING "axon_msi: irq timed out\n");

                msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
                msic->read_offset &= MSIC_FIFO_SIZE_MASK;
        }

        desc->chip->eoi(irq);
}

static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
        struct irq_host *irq_host;
        struct device_node *dn, *tmp;
        const phandle *ph;
        struct axon_msic *msic = NULL;

        dn = of_node_get(pci_device_to_OF_node(dev));
        if (!dn) {
                dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
                return NULL;
        }

        for (; dn; dn = of_get_next_parent(dn)) {
                ph = of_get_property(dn, "msi-translator", NULL);
                if (ph)
                        break;
        }

        if (!ph) {
                dev_dbg(&dev->dev,
                        "axon_msi: no msi-translator property found\n");
                goto out_error;
        }

        tmp = dn;
        dn = of_find_node_by_phandle(*ph);
        of_node_put(tmp);
        if (!dn) {
                dev_dbg(&dev->dev,
                        "axon_msi: msi-translator doesn't point to a node\n");
                goto out_error;
        }

        irq_host = irq_find_host(dn);
        if (!irq_host) {
                dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",
                        dn->full_name);
                goto out_error;
        }

        msic = irq_host->host_data;

out_error:
        of_node_put(dn);

        return msic;
}

static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
        if (!find_msi_translator(dev))
                return -ENODEV;

        return 0;
}

static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
        struct device_node *dn;
        struct msi_desc *entry;
        int len;
        const u32 *prop;

        dn = of_node_get(pci_device_to_OF_node(dev));
        if (!dn) {
                dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
                return -ENODEV;
        }

        entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

        for (; dn; dn = of_get_next_parent(dn)) {
                if (entry->msi_attrib.is_64) {
                        prop = of_get_property(dn, "msi-address-64", &len);
                        if (prop)
                                break;
                }

                prop = of_get_property(dn, "msi-address-32", &len);
                if (prop)
                        break;
        }

        if (!prop) {
                dev_dbg(&dev->dev,
                        "axon_msi: no msi-address-(32|64) properties found\n");
                return -ENOENT;
        }

        switch (len) {
        case 8:
                msg->address_hi = prop[0];
                msg->address_lo = prop[1];
                break;
        case 4:
                msg->address_hi = 0;
                msg->address_lo = prop[0];
                break;
        default:
                dev_dbg(&dev->dev,
                        "axon_msi: malformed msi-address-(32|64) property\n");
                of_node_put(dn);
                return -EINVAL;
        }

        of_node_put(dn);

        return 0;
}

static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
        unsigned int virq, rc;
        struct msi_desc *entry;
        struct msi_msg msg;
        struct axon_msic *msic;

        msic = find_msi_translator(dev);
        if (!msic)
                return -ENODEV;

        rc = setup_msi_msg_address(dev, &msg);
        if (rc)
                return rc;

        /* We rely on being able to stash a virq in a u16 */
        BUILD_BUG_ON(NR_IRQS > 65536);

        list_for_each_entry(entry, &dev->msi_list, list) {
                virq = irq_create_direct_mapping(msic->irq_host);
                if (virq == NO_IRQ) {
                        dev_warn(&dev->dev,
                                 "axon_msi: virq allocation failed!\n");
                        return -1;
                }
                dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

                set_irq_msi(virq, entry);
                msg.data = virq;
                write_msi_msg(virq, &msg);
        }

        return 0;
}

static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
        struct msi_desc *entry;

        dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

        list_for_each_entry(entry, &dev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;

                set_irq_msi(entry->irq, NULL);
                irq_dispose_mapping(entry->irq);
        }
}

static struct irq_chip msic_irq_chip = {
        .mask           = mask_msi_irq,
        .unmask         = unmask_msi_irq,
        .shutdown       = unmask_msi_irq,
        .typename       = "AXON-MSI",
};

static int msic_host_map(struct irq_host *h, unsigned int virq,
                         irq_hw_number_t hw)
{
        set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

        return 0;
}

static struct irq_host_ops msic_host_ops = {
        .map = msic_host_map,
};

static int axon_msi_shutdown(struct of_device *device)
{
        struct axon_msic *msic = device->dev.platform_data;
        u32 tmp;

        pr_debug("axon_msi: disabling %s\n",
                 msic->irq_host->of_node->full_name);
        tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
        tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
        msic_dcr_write(msic, MSIC_CTRL_REG, tmp);

        return 0;
}

static int axon_msi_probe(struct of_device *device,
                          const struct of_device_id *device_id)
{
        struct device_node *dn = device->node;
        struct axon_msic *msic;
        unsigned int virq;
        int dcr_base, dcr_len;

        pr_debug("axon_msi: setting up dn %s\n", dn->full_name);

        msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
        if (!msic) {
                printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",
                       dn->full_name);
                goto out;
        }

        dcr_base = dcr_resource_start(dn, 0);
        dcr_len = dcr_resource_len(dn, 0);

        if (dcr_base == 0 || dcr_len == 0) {
                printk(KERN_ERR
                       "axon_msi: couldn't parse dcr properties on %s\n",
                       dn->full_name);
                goto out;
        }

        msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
        if (!DCR_MAP_OK(msic->dcr_host)) {
                printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",
                       dn->full_name);
                goto out_free_msic;
        }

        msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
                                             &msic->fifo_phys, GFP_KERNEL);
        if (!msic->fifo_virt) {
                printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",
                       dn->full_name);
                goto out_free_msic;
        }

        virq = irq_of_parse_and_map(dn, 0);
        if (virq == NO_IRQ) {
                printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",
                       dn->full_name);
                goto out_free_fifo;
        }
        memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

        msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
                                        NR_IRQS, &msic_host_ops, 0);
        if (!msic->irq_host) {
                printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
                       dn->full_name);
                goto out_free_fifo;
        }

        msic->irq_host->host_data = msic;

        set_irq_data(virq, msic);
        set_irq_chained_handler(virq, axon_msi_cascade);
        pr_debug("axon_msi: irq 0x%x setup for axon_msi\n", virq);

        /* Enable the MSIC hardware */
        msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
        msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
                       msic->fifo_phys & 0xFFFFFFFF);
        msic_dcr_write(msic, MSIC_CTRL_REG,
                       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
                       MSIC_CTRL_FIFO_SIZE);
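
        /*
         * After a kexec the MSIC may still point into the middle of the old
         * ring buffer, so read back its current write offset rather than
         * assuming the FIFO starts out empty.
         */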
        msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
                                & MSIC_FIFO_SIZE_MASK;

        device->dev.platform_data = msic;

        ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
        ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
        ppc_md.msi_check_device = axon_msi_check_device;

        axon_msi_debug_setup(dn, msic);

        printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

        return 0;

out_free_fifo:
        dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
                          msic->fifo_phys);
out_free_msic:
        kfree(msic);
out:
        return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
        {
                .compatible     = "ibm,axon-msic"
        },
        {}
};

static struct of_platform_driver axon_msi_driver = {
        .match_table    = axon_msi_device_id,
        .probe          = axon_msi_probe,
        .shutdown       = axon_msi_shutdown,
        .driver         = {
                .name   = "axon-msi"
        },
};

static int __init axon_msi_init(void)
{
        return of_register_platform_driver(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);

#ifdef DEBUG
static int msic_set(void *data, u64 val)
{
        struct axon_msic *msic = data;
        out_le32(msic->trigger, val);
        return 0;
}

static int msic_get(void *data, u64 *val)
{
        *val = 0;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
        char name[8];
        u64 addr;

        addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
        if (addr == OF_BAD_ADDR) {
                pr_debug("axon_msi: couldn't translate reg property\n");
                return;
        }

        msic->trigger = ioremap(addr, 0x4);
        if (!msic->trigger) {
                pr_debug("axon_msi: ioremap failed\n");
                return;
        }

        snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

        if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
                                 msic, &fops_msic)) {
                pr_debug("axon_msi: debugfs_create_file failed!\n");
                return;
        }
}
#endif /* DEBUG */