[SPARC64]: Sun4v interrupt handling.
Sun4v has 4 interrupt queues: cpu, device, resumable errors, and non-resumable errors. A set of head/tail offset pointers helps maintain a work queue in physical memory. The entries are 64 bytes in size. Each queue is allocated and then registered with the hypervisor as we bring cpus up. The two error queues each get a kernel-side buffer that we use to quickly empty the main interrupt queue before we call up to C code to log the event and possibly take evasive action.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5b0c0572fc (parent ac29c11d4c)
6 changed files with 568 additions and 11 deletions
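All four handlers in the new sun4v_ivec.S below follow the drain pattern the description above outlines: read the queue's head and tail byte offsets, consume 64-byte entries until head catches up with tail, then publish the new head. The following is a minimal C sketch of that pattern, not the kernel code; the type and helper names are purely illustrative.

#include <stdint.h>

#define QUEUE_BYTES  8192   /* one page, as allocated by get_zeroed_page() */
#define ENTRY_BYTES  64     /* fixed sun4v queue entry size */

struct queue_view {                /* illustrative stand-in for one per-cpu queue */
        uint8_t  *base;            /* virtual view of the queue page */
        uint64_t  head;            /* byte offset, read via ASI_QUEUE in the real code */
        uint64_t  tail;            /* byte offset, advanced by the hypervisor */
};

/* Drain everything between head and tail, 64 bytes at a time, then publish
 * the new head.  This mirrors what the sun4v_*_mondo handlers do in assembly
 * against the hypervisor-maintained head/tail registers.
 */
static void drain_queue(struct queue_view *q, void (*handle)(const void *entry))
{
        while (q->head != q->tail) {
                handle(q->base + q->head);
                q->head = (q->head + ENTRY_BYTES) & (QUEUE_BYTES - 1);
        }
        /* Real code: stxa the new head to the INTRQ_*_HEAD register, membar #Sync. */
}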
@@ -511,13 +511,14 @@ setup_tba:
sparc64_boot_end:

#include "systbls.S"
#include "ktlb.S"
#include "tsb.S"
#include "etrap.S"
#include "rtrap.S"
#include "winfixup.S"
#include "entry.S"
#include "sun4v_tlb_miss.S"
#include "sun4v_ivec.S"

/*
 * The following skip makes sure the trap table in ttable.S is aligned
@@ -888,7 +888,19 @@ static void __cpuinit init_one_mondo(unsigned long *pa_ptr, unsigned long type)
        }
}

/* Allocate and init the mondo queues for this cpu. */
static void __cpuinit init_one_kbuf(unsigned long *pa_ptr)
{
        unsigned long page = get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(page);
}

/* Allocate and init the mondo and error queues for this cpu. */
void __cpuinit sun4v_init_mondo_queues(void)
{
        int cpu = hard_smp_processor_id();

@@ -897,7 +909,9 @@ void __cpuinit sun4v_init_mondo_queues(void)
        init_one_mondo(&tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
        init_one_mondo(&tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
        init_one_mondo(&tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
        init_one_kbuf(&tb->resum_kernel_buf_pa);
        init_one_mondo(&tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
        init_one_kbuf(&tb->nonresum_kernel_buf_pa);
}

/* Only invoked on boot processor. */
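Each queue, and each kernel-side error buffer, is a single zeroed page. On sparc64 a page is 8192 bytes and sun4v queue entries are 64 bytes, which is where the 8192 - 1 masks in the handlers below come from. A small standalone sketch of that arithmetic; the constant names are illustrative, the kernel open-codes the values.

#include <assert.h>

#define SUN4V_QUEUE_PAGE_BYTES  8192    /* one sparc64 page from get_zeroed_page() */
#define SUN4V_QUEUE_ENTRY_BYTES 64      /* fixed entry size for all four queues */
#define SUN4V_QUEUE_ENTRIES     (SUN4V_QUEUE_PAGE_BYTES / SUN4V_QUEUE_ENTRY_BYTES)

int main(void)
{
        /* 128 entries per queue; head/tail are byte offsets wrapped with & (8192 - 1). */
        assert(SUN4V_QUEUE_ENTRIES == 128);
        assert((SUN4V_QUEUE_PAGE_BYTES & (SUN4V_QUEUE_PAGE_BYTES - 1)) == 0);
        return 0;
}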
arch/sparc64/kernel/sun4v_ivec.S (new file, 349 lines)
@@ -0,0 +1,349 @@
/* sun4v_ivec.S: Sun4v interrupt vector handling.
 *
 * Copyright (C) 2006 <davem@davemloft.net>
 */

#include <asm/cpudata.h>
#include <asm/intr_queue.h>

        .text
        .align 32

sun4v_cpu_mondo:
        /* Head offset in %g2, tail offset in %g4.
         * If they are the same, no work.
         */
        mov INTRQ_CPU_MONDO_HEAD, %g2
        ldxa [%g2] ASI_QUEUE, %g2
        mov INTRQ_CPU_MONDO_TAIL, %g4
        ldxa [%g4] ASI_QUEUE, %g4
        cmp %g2, %g4
        be,pn %xcc, sun4v_cpu_mondo_queue_empty
         nop

        /* Get &trap_block[smp_processor_id()] into %g3. */
        __GET_CPUID(%g1)
        sethi %hi(trap_block), %g3
        sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7
        or %g3, %lo(trap_block), %g3
        add %g3, %g7, %g3

        /* Get CPU mondo queue base phys address into %g7. */
        ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7

        /* Now get the cross-call arguments and handler PC, same
         * layout as sun4u:
         *
         * 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
         *                  high half is context arg to MMU flushes, into %g5
         * 2nd 64-bit word: 64-bit arg, load into %g1
         * 3rd 64-bit word: 64-bit arg, load into %g7
         */
        ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
        add %g2, 0x8, %g2
        srlx %g3, 32, %g5
        ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
        add %g2, 0x8, %g2
        srl %g3, 0, %g3
        ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
        add %g2, 0x40 - 0x8 - 0x8, %g2

        /* Update queue head pointer. */
        sethi %hi(8192 - 1), %g4
        or %g4, %lo(8192 - 1), %g4
        and %g2, %g4, %g2

        mov INTRQ_CPU_MONDO_HEAD, %g4
        stxa %g2, [%g4] ASI_QUEUE
        membar #Sync

        jmpl %g3, %g0
         nop

sun4v_cpu_mondo_queue_empty:
        retry

sun4v_dev_mondo:
        /* Head offset in %g2, tail offset in %g4. */
        mov INTRQ_DEVICE_MONDO_HEAD, %g2
        ldxa [%g2] ASI_QUEUE, %g2
        mov INTRQ_DEVICE_MONDO_TAIL, %g4
        ldxa [%g4] ASI_QUEUE, %g4
        cmp %g2, %g4
        be,pn %xcc, sun4v_dev_mondo_queue_empty
         nop

        /* Get &trap_block[smp_processor_id()] into %g3. */
        __GET_CPUID(%g1)
        sethi %hi(trap_block), %g3
        sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7
        or %g3, %lo(trap_block), %g3
        add %g3, %g7, %g3

        /* Get DEV mondo queue base phys address into %g5. */
        ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5

        /* Load IVEC into %g3. */
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        add %g2, 0x40, %g2

        /* XXX There can be a full 64-byte block of data here.
         * XXX This is how we can get at MSI vector data.
         * XXX Currently we do not capture this, but when we do we'll
         * XXX need to add a 64-byte storage area in the struct ino_bucket
         * XXX or the struct irq_desc.
         */

        /* Update queue head pointer, this frees up some registers. */
        sethi %hi(8192 - 1), %g4
        or %g4, %lo(8192 - 1), %g4
        and %g2, %g4, %g2

        mov INTRQ_DEVICE_MONDO_HEAD, %g4
        stxa %g2, [%g4] ASI_QUEUE
        membar #Sync

        /* Get &__irq_work[smp_processor_id()] into %g1. */
        sethi %hi(__irq_work), %g4
        sllx %g1, 6, %g1
        or %g4, %lo(__irq_work), %g4
        add %g4, %g1, %g1

        /* Get &ivector_table[IVEC] into %g4. */
        sethi %hi(ivector_table), %g4
        sllx %g3, 5, %g3
        or %g4, %lo(ivector_table), %g4
        add %g4, %g3, %g4

        /* Load IRQ %pil into %g5. */
        ldub [%g4 + 0x04], %g5

        /* Insert ivector_table[] entry into __irq_work[] queue. */
        sllx %g5, 2, %g3
        lduw [%g1 + %g3], %g2   /* g2 = irq_work(cpu, pil) */
        stw %g2, [%g4 + 0x00]   /* bucket->irq_chain = g2 */
        stw %g4, [%g1 + %g3]    /* irq_work(cpu, pil) = bucket */

        /* Signal the interrupt by setting (1 << pil) in %softint. */
        mov 1, %g2
        sllx %g2, %g5, %g2
        wr %g2, 0x0, %set_softint

sun4v_dev_mondo_queue_empty:
        retry

sun4v_res_mondo:
        /* Head offset in %g2, tail offset in %g4. */
        mov INTRQ_RESUM_MONDO_HEAD, %g2
        ldxa [%g2] ASI_QUEUE, %g2
        mov INTRQ_RESUM_MONDO_TAIL, %g4
        ldxa [%g4] ASI_QUEUE, %g4
        cmp %g2, %g4
        be,pn %xcc, sun4v_res_mondo_queue_empty
         nop

        /* Get &trap_block[smp_processor_id()] into %g3. */
        __GET_CPUID(%g1)
        sethi %hi(trap_block), %g3
        sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7
        or %g3, %lo(trap_block), %g3
        add %g3, %g7, %g3

        /* Get RES mondo queue base phys address into %g5. */
        ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5

        /* Get RES kernel buffer base phys address into %g7. */
        ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7

        /* If the first word is non-zero, queue is full. */
        ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
        brnz,pn %g1, sun4v_res_mondo_queue_full
         nop

        /* Remember this entry's offset in %g1. */
        mov %g2, %g1

        /* Copy 64-byte queue entry into kernel buffer. */
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2

        /* Update queue head pointer. */
        sethi %hi(8192 - 1), %g4
        or %g4, %lo(8192 - 1), %g4
        and %g2, %g4, %g2

        mov INTRQ_RESUM_MONDO_HEAD, %g4
        stxa %g2, [%g4] ASI_QUEUE
        membar #Sync

        /* Disable interrupts and save register state so we can call
         * C code. The etrap handling will leave %g4 in %l4 for us
         * when it's done.
         */
        rdpr %pil, %g2
        wrpr %g0, 15, %pil
        mov %g1, %g4
        ba,pt %xcc, etrap_irq
         rd %pc, %g7

        /* Log the event. */
        add %sp, PTREGS_OFF, %o0
        call sun4v_resum_error
         mov %l4, %o1

        /* Return from trap. */
        ba,pt %xcc, rtrap_irq
         nop

sun4v_res_mondo_queue_empty:
        retry

sun4v_res_mondo_queue_full:
        /* The queue is full, consolidate our damage by setting
         * the head equal to the tail. We'll just trap again otherwise.
         * Call C code to log the event.
         */
        mov INTRQ_RESUM_MONDO_HEAD, %g2
        stxa %g4, [%g2] ASI_QUEUE
        membar #Sync

        rdpr %pil, %g2
        wrpr %g0, 15, %pil
        ba,pt %xcc, etrap_irq
         rd %pc, %g7

        call sun4v_resum_overflow
         add %sp, PTREGS_OFF, %o0

        ba,pt %xcc, rtrap_irq
         nop

sun4v_nonres_mondo:
        /* Head offset in %g2, tail offset in %g4. */
        mov INTRQ_NONRESUM_MONDO_HEAD, %g2
        ldxa [%g2] ASI_QUEUE, %g2
        mov INTRQ_NONRESUM_MONDO_TAIL, %g4
        ldxa [%g4] ASI_QUEUE, %g4
        cmp %g2, %g4
        be,pn %xcc, sun4v_nonres_mondo_queue_empty
         nop

        /* Get &trap_block[smp_processor_id()] into %g3. */
        __GET_CPUID(%g1)
        sethi %hi(trap_block), %g3
        sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7
        or %g3, %lo(trap_block), %g3
        add %g3, %g7, %g3

        /* Get RES mondo queue base phys address into %g5. */
        ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5

        /* Get RES kernel buffer base phys address into %g7. */
        ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7

        /* If the first word is non-zero, queue is full. */
        ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
        brnz,pn %g1, sun4v_nonres_mondo_queue_full
         nop

        /* Remember this entry's offset in %g1. */
        mov %g2, %g1

        /* Copy 64-byte queue entry into kernel buffer. */
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2
        ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
        stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
        add %g2, 0x08, %g2

        /* Update queue head pointer. */
        sethi %hi(8192 - 1), %g4
        or %g4, %lo(8192 - 1), %g4
        and %g2, %g4, %g2

        mov INTRQ_NONRESUM_MONDO_HEAD, %g4
        stxa %g2, [%g4] ASI_QUEUE
        membar #Sync

        /* Disable interrupts and save register state so we can call
         * C code. The etrap handling will leave %g4 in %l4 for us
         * when it's done.
         */
        rdpr %pil, %g2
        wrpr %g0, 15, %pil
        mov %g1, %g4
        ba,pt %xcc, etrap_irq
         rd %pc, %g7

        /* Log the event. */
        add %sp, PTREGS_OFF, %o0
        call sun4v_nonresum_error
         mov %l4, %o1

        /* Return from trap. */
        ba,pt %xcc, rtrap_irq
         nop

sun4v_nonres_mondo_queue_empty:
        retry

sun4v_nonres_mondo_queue_full:
        /* The queue is full, consolidate our damage by setting
         * the head equal to the tail. We'll just trap again otherwise.
         * Call C code to log the event.
         */
        mov INTRQ_NONRESUM_MONDO_HEAD, %g2
        stxa %g4, [%g2] ASI_QUEUE
        membar #Sync

        rdpr %pil, %g2
        wrpr %g0, 15, %pil
        ba,pt %xcc, etrap_irq
         rd %pc, %g7

        call sun4v_nonresum_overflow
         add %sp, PTREGS_OFF, %o0

        ba,pt %xcc, rtrap_irq
         nop
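The cpu-mondo handler above unpacks each 64-byte entry exactly as its comment describes. A C picture of that layout follows; the struct is illustrative only, since the handler reads the words with ldxa from physical memory rather than through any C type.

#include <stdint.h>

/* Illustrative view of one 64-byte cpu-mondo queue entry, matching the
 * comment in sun4v_cpu_mondo: same layout as the sun4u cross-call data.
 */
struct cpu_mondo_entry {
        uint64_t pc_and_ctx;    /* low 32 bits: handler PC (-> %g3),
                                 * high 32 bits: MMU context arg (-> %g5) */
        uint64_t arg0;          /* -> %g1 */
        uint64_t arg1;          /* -> %g7 */
        uint64_t unused[5];     /* pads the entry out to 64 bytes */
};

_Static_assert(sizeof(struct cpu_mondo_entry) == 64, "queue entries are 64 bytes");

/* How the handler splits the first word (done with srlx/srl in assembly). */
static inline void split_first_word(uint64_t w, uint32_t *pc, uint32_t *ctx)
{
        *pc  = (uint32_t)w;             /* srl  %g3, 0, %g3  */
        *ctx = (uint32_t)(w >> 32);     /* srlx %g3, 32, %g5 */
}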
@@ -1668,6 +1668,186 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
               regs->tpc);
}

struct sun4v_error_entry {
        u64 err_handle;
        u64 err_stick;

        u32 err_type;
#define SUN4V_ERR_TYPE_UNDEFINED 0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES 1
#define SUN4V_ERR_TYPE_PRECISE_NONRES 2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES 3
#define SUN4V_ERR_TYPE_WARNING_RES 4

        u32 err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001
#define SUN4V_ERR_ATTRS_MEMORY 0x00000002
#define SUN4V_ERR_ATTRS_PIO 0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000

        u64 err_raddr;
        u32 err_size;
        u16 err_cpu;
        u16 err_pad;
};

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u32 type)
{
        switch (type) {
        case SUN4V_ERR_TYPE_UNDEFINED:
                return "undefined";
        case SUN4V_ERR_TYPE_UNCORRECTED_RES:
                return "uncorrected resumable";
        case SUN4V_ERR_TYPE_PRECISE_NONRES:
                return "precise nonresumable";
        case SUN4V_ERR_TYPE_DEFERRED_NONRES:
                return "deferred nonresumable";
        case SUN4V_ERR_TYPE_WARNING_RES:
                return "warning resumable";
        default:
                return "unknown";
        };
}

static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
        int cnt;

        printk("%s: Reporting on cpu %d\n", pfx, cpu);
        printk("%s: err_handle[%lx] err_stick[%lx] err_type[%08x:%s]\n",
               pfx,
               ent->err_handle, ent->err_stick,
               ent->err_type,
               sun4v_err_type_to_str(ent->err_type));
        printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
               pfx,
               ent->err_attrs,
               ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
                "processor" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
                "memory" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
                "pio" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
                "integer-regs" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
                "fpu-regs" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
                "user" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
                "privileged" : ""),
               ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
                "queue-full" : ""));
        printk("%s: err_raddr[%016lx] err_size[%u] err_cpu[%u]\n",
               pfx,
               ent->err_raddr, ent->err_size, ent->err_cpu);

        if ((cnt = atomic_read(ocnt)) != 0) {
                atomic_set(ocnt, 0);
                wmb();
                printk("%s: Queue overflowed %d times.\n",
                       pfx, cnt);
        }
}

/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
        struct sun4v_error_entry *ent, local_copy;
        struct trap_per_cpu *tb;
        unsigned long paddr;
        int cpu;

        cpu = get_cpu();

        tb = &trap_block[cpu];
        paddr = tb->resum_kernel_buf_pa + offset;
        ent = __va(paddr);

        memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

        /* We have a local copy now, so release the entry. */
        ent->err_handle = 0;
        wmb();

        put_cpu();

        sun4v_log_error(&local_copy, cpu,
                        KERN_ERR "RESUMABLE ERROR",
                        &sun4v_resum_oflow_cnt);
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
        atomic_inc(&sun4v_resum_oflow_cnt);
}

/* We run with %pil set to 15 and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
        struct sun4v_error_entry *ent, local_copy;
        struct trap_per_cpu *tb;
        unsigned long paddr;
        int cpu;

        cpu = get_cpu();

        tb = &trap_block[cpu];
        paddr = tb->nonresum_kernel_buf_pa + offset;
        ent = __va(paddr);

        memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

        /* We have a local copy now, so release the entry. */
        ent->err_handle = 0;
        wmb();

        put_cpu();

#ifdef CONFIG_PCI
        /* Check for the special PCI poke sequence. */
        if (pci_poke_in_progress && pci_poke_cpu == cpu) {
                pci_poke_faulted = 1;
                regs->tpc += 4;
                regs->tnpc = regs->tpc + 4;
                return;
        }
#endif

        sun4v_log_error(&local_copy, cpu,
                        KERN_EMERG "NON-RESUMABLE ERROR",
                        &sun4v_nonresum_oflow_cnt);

        panic("Non-resumable error.");
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
        /* XXX Actually even this can make not that much sense. Perhaps
         * XXX we should just pull the plug and panic directly from here?
         */
        atomic_inc(&sun4v_nonresum_oflow_cnt);
}

void do_fpe_common(struct pt_regs *regs)
{
        if (regs->tstate & TSTATE_PRIV) {
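A detail that spans the assembly and the C code above: the first 64-bit word of a kernel-buffer slot (err_handle) effectively doubles as an in-use flag. The trap handler refuses to copy a new queue entry while it is non-zero, and sun4v_resum_error()/sun4v_nonresum_error() clear it once they hold a local copy. A standalone model of that hand-off, with illustrative names, not the kernel code:

#include <stdint.h>
#include <string.h>

/* 64-byte slot whose first word is zero when the slot is free. */
struct slot { uint64_t err_handle; uint64_t rest[7]; };

static int producer_claim(struct slot *s, const struct slot *incoming)
{
        if (s->err_handle != 0)
                return 0;               /* "queue full" path in the assembly */
        memcpy(s, incoming, sizeof(*s));
        return 1;
}

static void consumer_release(struct slot *s, struct slot *local_copy)
{
        *local_copy = *s;               /* log from the copy later, at low PIL */
        s->err_handle = 0;              /* slot may now be reused */
}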
@@ -2190,8 +2370,12 @@ void __init trap_init(void)
             offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
            (TRAP_PER_CPU_RESUM_MONDO_PA !=
             offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
            (TRAP_PER_CPU_RESUM_KBUF_PA !=
             offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
            (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
             offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
            (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
             offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
            (TRAP_PER_CPU_FAULT_INFO !=
             offsetof(struct trap_per_cpu, fault_info)))
                trap_per_cpu_offsets_are_bolixed_dave();
@@ -88,7 +88,10 @@ tl0_dcpe: BTRAP(0x71) /* D-cache Parity Error on Cheetah+ */
tl0_icpe: BTRAP(0x72) /* I-cache Parity Error on Cheetah+ */
tl0_resv073: BTRAP(0x73) BTRAP(0x74) BTRAP(0x75)
tl0_resv076: BTRAP(0x76) BTRAP(0x77) BTRAP(0x78) BTRAP(0x79) BTRAP(0x7a) BTRAP(0x7b)
tl0_resv07c: BTRAP(0x7c) BTRAP(0x7d) BTRAP(0x7e) BTRAP(0x7f)
tl0_cpu_mondo: TRAP_NOSAVE(sun4v_cpu_mondo)
tl0_dev_mondo: TRAP_NOSAVE(sun4v_dev_mondo)
tl0_res_mondo: TRAP_NOSAVE(sun4v_res_mondo)
tl0_nres_mondo: TRAP_NOSAVE(sun4v_nonres_mondo)
tl0_s0n: SPILL_0_NORMAL
tl0_s1n: SPILL_1_NORMAL
tl0_s2n: SPILL_2_NORMAL
@@ -53,16 +53,17 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 */
struct thread_info;
struct trap_per_cpu {
/* D-cache line 1: Basic thread information */
/* D-cache line 1: Basic thread information, cpu and device mondo queues */
        struct thread_info *thread;
        unsigned long pgd_paddr;
        unsigned long __pad1[2];

/* D-cache line 2: Sun4V Mondo Queue pointers */
        unsigned long cpu_mondo_pa;
        unsigned long dev_mondo_pa;

/* D-cache line 2: Error Mondo Queue and kernel buffer pointers */
        unsigned long resum_mondo_pa;
        unsigned long resum_kernel_buf_pa;
        unsigned long nonresum_mondo_pa;
        unsigned long nonresum_kernel_buf_pa;

/* Dcache lines 3 and 4: Hypervisor Fault Status */
        struct hv_fault_status fault_info;
@@ -100,10 +101,12 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,

#define TRAP_PER_CPU_THREAD 0x00
#define TRAP_PER_CPU_PGD_PADDR 0x08
#define TRAP_PER_CPU_CPU_MONDO_PA 0x20
#define TRAP_PER_CPU_DEV_MONDO_PA 0x28
#define TRAP_PER_CPU_RESUM_MONDO_PA 0x30
#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x38
#define TRAP_PER_CPU_CPU_MONDO_PA 0x10
#define TRAP_PER_CPU_DEV_MONDO_PA 0x18
#define TRAP_PER_CPU_RESUM_MONDO_PA 0x20
#define TRAP_PER_CPU_RESUM_KBUF_PA 0x28
#define TRAP_PER_CPU_NONRESUM_MONDO_PA 0x30
#define TRAP_PER_CPU_NONRESUM_KBUF_PA 0x38
#define TRAP_PER_CPU_FAULT_INFO 0x40

#define TRAP_BLOCK_SZ_SHIFT 7
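The handlers index this table as trap_block + (cpuid << TRAP_BLOCK_SZ_SHIFT), so each per-cpu entry must be exactly 1 << 7 = 128 bytes and the TRAP_PER_CPU_* constants must track the struct layout (trap_init() verifies the offsets at boot, as shown above). A compile-time sketch of those invariants follows, using an illustrative stand-in for the real struct (the fault_info field here is just a 64-byte placeholder for struct hv_fault_status):

#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for struct trap_per_cpu with the post-patch layout. */
struct trap_per_cpu_model {
        uint64_t thread_ptr;              /* 0x00: struct thread_info * in the real struct */
        uint64_t pgd_paddr;               /* 0x08 */
        uint64_t cpu_mondo_pa;            /* 0x10 */
        uint64_t dev_mondo_pa;            /* 0x18 */
        uint64_t resum_mondo_pa;          /* 0x20 */
        uint64_t resum_kernel_buf_pa;     /* 0x28 */
        uint64_t nonresum_mondo_pa;       /* 0x30 */
        uint64_t nonresum_kernel_buf_pa;  /* 0x38 */
        uint64_t fault_info[8];           /* 0x40: placeholder for struct hv_fault_status */
} __attribute__((aligned(64)));

#define TRAP_BLOCK_SZ_SHIFT 7

/* sllx %g1, TRAP_BLOCK_SZ_SHIFT, %g7 in the handlers assumes all of this: */
_Static_assert(offsetof(struct trap_per_cpu_model, cpu_mondo_pa) == 0x10, "CPU_MONDO_PA");
_Static_assert(offsetof(struct trap_per_cpu_model, nonresum_kernel_buf_pa) == 0x38, "NONRESUM_KBUF_PA");
_Static_assert(sizeof(struct trap_per_cpu_model) == (1 << TRAP_BLOCK_SZ_SHIFT), "entry stride");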
@@ -188,6 +191,9 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,

#else

#define __GET_CPUID(REG) \
        mov 0, REG;

/* Uniprocessor versions, we know the cpuid is zero. */
#define TRAP_LOAD_PGD_PHYS(DEST, TMP) \
        sethi %hi(trap_block), DEST; \