Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/paulus/ppc64-2.6
commit 12829dcb10
18 changed files with 292 additions and 531 deletions
@@ -52,7 +52,7 @@ obj-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.o, $(section)))
src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))

hostprogs-y := piggy addnote addRamDisk
hostprogs-y := addnote addRamDisk
targets += zImage zImage.initrd imagesize.c \
$(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
$(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
@@ -78,9 +78,6 @@ addsection = $(CROSS32OBJCOPY) $(1) \
quiet_cmd_addnote = ADDNOTE $@
cmd_addnote = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(obj-boot) && $(obj)/addnote $@

quiet_cmd_piggy = PIGGY $@
cmd_piggy = $(obj)/piggyback $(@:.o=) < $< | $(CROSS32AS) -o $@

$(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE
$(call if_changed,gzip)

@@ -17,7 +17,6 @@

extern void *finddevice(const char *);
extern int getprop(void *, const char *, void *, int);
extern void printk(char *fmt, ...);
extern void printf(const char *fmt, ...);
extern int sprintf(char *buf, const char *fmt, ...);
void gunzip(void *, int, unsigned char *, int *);
@@ -147,10 +146,10 @@ void start(unsigned long a1, unsigned long a2, void *promptr)
}
a1 = initrd.addr;
a2 = initrd.size;
printf("initial ramdisk moving 0x%lx <- 0x%lx (%lx bytes)\n\r",
printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r",
initrd.addr, (unsigned long)_initrd_start, initrd.size);
memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size);
printf("initrd head: 0x%lx\n\r", *((u32 *)initrd.addr));
printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr));
}

/* Eventually gunzip the kernel */
@@ -201,9 +200,6 @@ void start(unsigned long a1, unsigned long a2, void *promptr)

flush_cache((void *)vmlinux.addr, vmlinux.size);

if (a1)
printf("initrd head: 0x%lx\n\r", *((u32 *)initrd.addr));

kernel_entry = (kernel_entry_t)vmlinux.addr;
#ifdef DEBUG
printf( "kernel:\n\r"

@@ -1,43 +0,0 @@
/*
* Copyright (C) Cort Dougan 1999.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Generate a note section as per the CHRP specification.
*
*/

#include <stdio.h>

#define PL(x) printf("%c%c%c%c", ((x)>>24)&0xff, ((x)>>16)&0xff, ((x)>>8)&0xff, (x)&0xff );

int main(void)
{
/* header */
/* namesz */
PL(strlen("PowerPC")+1);
/* descrsz */
PL(6*4);
/* type */
PL(0x1275);
/* name */
printf("PowerPC"); printf("%c", 0);

/* descriptor */
/* real-mode */
PL(0xffffffff);
/* real-base */
PL(0x00c00000);
/* real-size */
PL(0xffffffff);
/* virt-base */
PL(0xffffffff);
/* virt-size */
PL(0xffffffff);
/* load-base */
PL(0x4000);
return 0;
}

@@ -1,83 +0,0 @@
/*
* Copyright 2001 IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>

extern long ce_exec_config[];

int main(int argc, char *argv[])
{
int i, cnt, pos, len;
unsigned int cksum, val;
unsigned char *lp;
unsigned char buf[8192];
char *varname;
if (argc != 2)
{
fprintf(stderr, "usage: %s name <in-file >out-file\n",
argv[0]);
exit(1);
}

varname = strrchr(argv[1], '/');
if (varname)
varname++;
else
varname = argv[1];

fprintf(stdout, "#\n");
fprintf(stdout, "# Miscellaneous data structures:\n");
fprintf(stdout, "# WARNING - this file is automatically generated!\n");
fprintf(stdout, "#\n");
fprintf(stdout, "\n");
fprintf(stdout, "\t.data\n");
fprintf(stdout, "\t.globl %s_data\n", varname);
fprintf(stdout, "%s_data:\n", varname);
pos = 0;
cksum = 0;
while ((len = read(0, buf, sizeof(buf))) > 0)
{
cnt = 0;
lp = (unsigned char *)buf;
len = (len + 3) & ~3; /* Round up to longwords */
for (i = 0; i < len; i += 4)
{
if (cnt == 0)
{
fprintf(stdout, "\t.long\t");
}
fprintf(stdout, "0x%02X%02X%02X%02X", lp[0], lp[1], lp[2], lp[3]);
val = *(unsigned long *)lp;
cksum ^= val;
lp += 4;
if (++cnt == 4)
{
cnt = 0;
fprintf(stdout, " # %x \n", pos+i-12);
fflush(stdout);
} else
{
fprintf(stdout, ",");
}
}
if (cnt)
{
fprintf(stdout, "0\n");
}
pos += len;
}
fprintf(stdout, "\t.globl %s_len\n", varname);
fprintf(stdout, "%s_len:\t.long\t0x%x\n", varname, pos);
fflush(stdout);
fclose(stdout);
fprintf(stderr, "cksum = %x\n", cksum);
exit(0);
}

@@ -40,7 +40,7 @@ void *finddevice(const char *name);
int getprop(void *phandle, const char *name, void *buf, int buflen);
void chrpboot(int a1, int a2, void *prom); /* in main.c */

void printk(char *fmt, ...);
int printf(char *fmt, ...);

/* there is no convenient header to get this from... -- paulus */
extern unsigned long strlen(const char *);
@@ -220,7 +220,7 @@ readchar(void)
case 1:
return ch;
case -1:
printk("read(stdin) returned -1\r\n");
printf("read(stdin) returned -1\r\n");
return -1;
}
}
@@ -627,18 +627,6 @@ int sprintf(char * buf, const char *fmt, ...)

static char sprint_buf[1024];

void
printk(char *fmt, ...)
{
va_list args;
int n;

va_start(args, fmt);
n = vsprintf(sprint_buf, fmt, args);
va_end(args);
write(stdout, sprint_buf, n);
}

int
printf(char *fmt, ...)
{

@@ -11,119 +11,118 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

static __inline__ int set_inUse( struct ItLpQueue * lpQueue )
{
int t;
u32 * inUseP = &(lpQueue->xInUseWord);
/*
* The LpQueue is used to pass event data from the hypervisor to
* the partition. This is where I/O interrupt events are communicated.
*
* It is written to by the hypervisor so cannot end up in the BSS.
*/
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

__asm__ __volatile__("\n\
1: lwarx %0,0,%2 \n\
cmpwi 0,%0,0 \n\
li %0,0 \n\
bne- 2f \n\
addi %0,%0,1 \n\
stwcx. %0,0,%2 \n\
bne- 1b \n\
2: eieio"
: "=&r" (t), "=m" (lpQueue->xInUseWord)
: "r" (inUseP), "m" (lpQueue->xInUseWord)
: "cc");
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

return t;
}

static __inline__ void clear_inUse( struct ItLpQueue * lpQueue )
{
lpQueue->xInUseWord = 0;
}
static char *event_types[HvLpEvent_Type_NumTypes] = {
"Hypervisor",
"Machine Facilities",
"Session Manager",
"SPD I/O",
"Virtual Bus",
"PCI I/O",
"RIO I/O",
"Virtual Lan",
"Virtual I/O"
};

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
unsigned long ItLpQueueInProcess = 0;

struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue )
static struct HvLpEvent * get_next_hvlpevent(void)
{
struct HvLpEvent * nextLpEvent =
(struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
if ( nextLpEvent->xFlags.xValid ) {
struct HvLpEvent * event;
event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

if (event->xFlags.xValid) {
/* rmb() needed only for weakly consistent machines (regatta) */
rmb();
/* Set pointer to next potential event */
lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 +
LpEventAlign ) /
LpEventAlign ) *
LpEventAlign;
hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
LpEventAlign) / LpEventAlign) * LpEventAlign;

/* Wrap to beginning if no room at end */
if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr)
lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr;
if (hvlpevent_queue.xSlicCurEventPtr >
hvlpevent_queue.xSlicLastValidEventPtr) {
hvlpevent_queue.xSlicCurEventPtr =
hvlpevent_queue.xSlicEventStackPtr;
}
} else {
event = NULL;
}
else
nextLpEvent = NULL;

return nextLpEvent;
return event;
}

int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
int retval = 0;
struct HvLpEvent * nextLpEvent;
if ( lpQueue ) {
nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
}
return retval;
struct HvLpEvent *next_event;

if (smp_processor_id() >= spread_lpevents)
return 0;

next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

return next_event->xFlags.xValid |
hvlpevent_queue.xPlicOverflowIntPending;
}

void ItLpQueue_clearValid( struct HvLpEvent * event )
static void hvlpevent_clear_valid(struct HvLpEvent * event)
{
/* Clear the valid bit of the event
* Also clear bits within this event that might
* look like valid bits (on 64-byte boundaries)
*/
unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) /
LpEventAlign ) - 1;
switch ( extra ) {
case 3:
((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0;
case 2:
((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0;
case 1:
((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0;
case 0:
;
/* Tell the Hypervisor that we're done with this event.
* Also clear bits within this event that might look like valid bits.
* ie. on 64-byte boundaries.
*/
struct HvLpEvent *tmp;
unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
LpEventAlign) - 1;

switch (extra) {
case 3:
tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
tmp->xFlags.xValid = 0;
case 2:
tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
tmp->xFlags.xValid = 0;
case 1:
tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
tmp->xFlags.xValid = 0;
}

mb();

event->xFlags.xValid = 0;
}

unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
void process_hvlpevents(struct pt_regs *regs)
{
unsigned numIntsProcessed = 0;
struct HvLpEvent * nextLpEvent;
struct HvLpEvent * event;

/* If we have recursed, just return */
if ( !set_inUse( lpQueue ) )
return 0;

if (ItLpQueueInProcess == 0)
ItLpQueueInProcess = 1;
else
BUG();
if (!spin_trylock(&hvlpevent_queue.lock))
return;

for (;;) {
nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue );
if ( nextLpEvent ) {
/* Count events to return to caller
* and count processed events in lpQueue
*/
++numIntsProcessed;
lpQueue->xLpIntCount++;
event = get_next_hvlpevent();
if (event) {
/* Call appropriate handler here, passing
* a pointer to the LpEvent. The handler
* must make a copy of the LpEvent if it
@@ -136,31 +135,128 @@ unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs )
* only be delivered with types that we have
* registered for, so no type check is necessary
* here!
*/
if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
lpQueue->xLpIntCountByType[nextLpEvent->xType]++;
if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
lpEventHandler[nextLpEvent->xType] )
lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
*/
if (event->xType < HvLpEvent_Type_NumTypes)
__get_cpu_var(hvlpevent_counts)[event->xType]++;
if (event->xType < HvLpEvent_Type_NumTypes &&
lpEventHandler[event->xType])
lpEventHandler[event->xType](event, regs);
else
printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType );
printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );

ItLpQueue_clearValid( nextLpEvent );
} else if ( lpQueue->xPlicOverflowIntPending )
hvlpevent_clear_valid(event);
} else if (hvlpevent_queue.xPlicOverflowIntPending)
/*
* No more valid events. If overflow events are
* pending process them
*/
HvCallEvent_getOverflowLpEvents( lpQueue->xIndex);
HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
else
break;
}

ItLpQueueInProcess = 0;
mb();
clear_inUse( lpQueue );

get_paca()->lpevent_count += numIntsProcessed;

return numIntsProcessed;
spin_unlock(&hvlpevent_queue.lock);
}

static int set_spread_lpevents(char *str)
{
unsigned long val = simple_strtoul(str, NULL, 0);

/*
* The parameter is the number of processors to share in processing
* lp events.
*/
if (( val > 0) && (val <= NR_CPUS)) {
spread_lpevents = val;
printk("lpevent processing spread over %ld processors\n", val);
} else {
printk("invalid spread_lpevents %ld\n", val);
}

return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

void setup_hvlpevent_queue(void)
{
void *eventStack;

/*
* Allocate a page for the Event Stack. The Hypervisor needs the
* absolute real address, so we subtract out the KERNELBASE and add
* in the absolute real address of the kernel load area.
*/
eventStack = alloc_bootmem_pages(LpEventStackSize);
memset(eventStack, 0, LpEventStackSize);

/* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
(LpEventStackSize - LpEventMaxSize);
hvlpevent_queue.xIndex = 0;
}

static int proc_lpevents_show(struct seq_file *m, void *v)
{
int cpu, i;
unsigned long sum;
static unsigned long cpu_totals[NR_CPUS];

/* FIXME: do we care that there's no locking here? */
sum = 0;
for_each_online_cpu(cpu) {
cpu_totals[cpu] = 0;
for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
}
sum += cpu_totals[cpu];
}

seq_printf(m, "LpEventQueue 0\n");
seq_printf(m, " events processed:\t%lu\n", sum);

for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
sum = 0;
for_each_online_cpu(cpu) {
sum += per_cpu(hvlpevent_counts, cpu)[i];
}

seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
}

seq_printf(m, "\n events processed by processor:\n");

for_each_online_cpu(cpu) {
seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
}

return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
.open = proc_lpevents_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};

static int __init proc_lpevents_init(void)
{
struct proc_dir_entry *e;

e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
if (e)
e->proc_fops = &proc_lpevents_operations;

return 0;
}
__initcall(proc_lpevents_init);

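The hunks that follow convert the callers of the old ItLpQueue_* interface to the new entry points defined above. As an illustrative sketch only (the helper name is invented for this example, and it assumes <asm/iSeries/ItLpQueue.h> is included), the pattern that do_IRQ(), timer_interrupt() and mf_get_boot_rtc() switch to is:

	/* Illustrative only: poll-and-process idiom adopted by the callers
	 * in later hunks; the iSeries idle loop uses the same
	 * hvlpevent_is_pending() test before process_iSeries_events(). */
	static void example_poll_hvlpevents(struct pt_regs *regs)
	{
		if (hvlpevent_is_pending())
			process_hvlpevents(regs);
	}
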
@@ -28,13 +28,6 @@
#include <asm/iSeries/IoHriProcessorVpd.h>
#include <asm/iSeries/ItSpCommArea.h>

/* The LpQueue is used to pass event data from the hypervisor to
* the partition. This is where I/O interrupt events are communicated.
*/

/* May be filled in by the hypervisor so cannot end up in the BSS */
struct ItLpQueue xItLpQueue __attribute__((__section__(".data")));


/* The HvReleaseData is the root of the information shared between
* the hypervisor and Linux.
@@ -200,7 +193,7 @@ struct ItVpdAreas itVpdAreas = {
0,0,0, /* 13 - 15 */
sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
0,0,0,0,0,0, /* 17 - 22 */
sizeof(struct ItLpQueue),/* 23 length of Lp Queue */
sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
0,0 /* 24 - 25 */
},
.xSlicVpdAdrs = { /* VPD addresses */
@@ -218,7 +211,7 @@ struct ItVpdAreas itVpdAreas = {
0,0,0, /* 13 - 15 */
&xIoHriProcessorVpd, /* 16 Proc Vpd */
0,0,0,0,0,0, /* 17 - 22 */
&xItLpQueue, /* 23 Lp Queue */
&hvlpevent_queue, /* 23 Lp Queue */
0,0
}
};

@@ -40,50 +40,6 @@ static int __init iseries_proc_create(void)
}
core_initcall(iseries_proc_create);

static char *event_types[9] = {
"Hypervisor\t\t",
"Machine Facilities\t",
"Session Manager\t",
"SPD I/O\t\t",
"Virtual Bus\t\t",
"PCI I/O\t\t",
"RIO I/O\t\t",
"Virtual Lan\t\t",
"Virtual I/O\t\t"
};

static int proc_lpevents_show(struct seq_file *m, void *v)
{
unsigned int i;

seq_printf(m, "LpEventQueue 0\n");
seq_printf(m, " events processed:\t%lu\n",
(unsigned long)xItLpQueue.xLpIntCount);

for (i = 0; i < 9; ++i)
seq_printf(m, " %s %10lu\n", event_types[i],
(unsigned long)xItLpQueue.xLpIntCountByType[i]);

seq_printf(m, "\n events processed by processor:\n");

for_each_online_cpu(i)
seq_printf(m, " CPU%02d %10u\n", i, paca[i].lpevent_count);

return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
.open = proc_lpevents_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};

static unsigned long startTitan = 0;
static unsigned long startTb = 0;

@@ -148,10 +104,6 @@ static int __init iseries_proc_init(void)
{
struct proc_dir_entry *e;

e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
if (e)
e->proc_fops = &proc_lpevents_operations;

e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
if (e)
e->proc_fops = &proc_titantod_operations;

@@ -24,7 +24,6 @@
#include <linux/smp.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
@@ -676,7 +675,6 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr)
*/
static void __init iSeries_setup_arch(void)
{
void *eventStack;
unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;

/* Add an eye catcher and the systemcfg layout version number */
@@ -685,24 +683,7 @@ static void __init iSeries_setup_arch(void)
systemcfg->version.minor = SYSTEMCFG_MINOR;

/* Setup the Lp Event Queue */

/* Allocate a page for the Event Stack
* The hypervisor wants the absolute real address, so
* we subtract out the KERNELBASE and add in the
* absolute real address of the kernel load area
*/
eventStack = alloc_bootmem_pages(LpEventStackSize);
memset(eventStack, 0, LpEventStackSize);

/* Invoke the hypervisor to initialize the event stack */
HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

/* Initialize fields in our Lp Event Queue */
xItLpQueue.xSlicEventStackPtr = (char *)eventStack;
xItLpQueue.xSlicCurEventPtr = (char *)eventStack;
xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack +
(LpEventStackSize - LpEventMaxSize);
xItLpQueue.xIndex = 0;
setup_hvlpevent_queue();

/* Compute processor frequency */
procFreqHz = ((1UL << 34) * 1000000) /
@@ -853,28 +834,6 @@ static int __init iSeries_src_init(void)

late_initcall(iSeries_src_init);

static int set_spread_lpevents(char *str)
{
unsigned long i;
unsigned long val = simple_strtoul(str, NULL, 0);

/*
* The parameter is the number of processors to share in processing
* lp events.
*/
if (( val > 0) && (val <= NR_CPUS)) {
for (i = 1; i < val; ++i)
paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;

printk("lpevent processing spread over %ld processors\n", val);
} else {
printk("invalid spread_lpevents %ld\n", val);
}

return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

#ifndef CONFIG_PCI
void __init iSeries_init_IRQ(void) { }
#endif

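The set_spread_lpevents() handler and its __setup("spread_lpevents=", ...) hook removed in the hunk above are re-added, in simplified form, in the hvlpevent hunk earlier in this merge; the boot parameter itself is unchanged and is still given on the kernel command line, for example (illustrative value only):

	spread_lpevents=2
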
@@ -88,7 +88,7 @@ static int iSeries_idle(void)

while (1) {
if (lpaca->lppaca.shared_proc) {
if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
if (hvlpevent_is_pending())
process_iSeries_events();
if (!need_resched())
yield_shared_processor();
@@ -100,7 +100,7 @@ static int iSeries_idle(void)

while (!need_resched()) {
HMT_medium();
if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
if (hvlpevent_is_pending())
process_iSeries_events();
HMT_low();
}

@@ -66,7 +66,6 @@ EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
int __irq_offset_value;
int ppc_spurious_interrupts;
unsigned long lpevent_count;
u64 ppc64_interrupt_controller;

int show_interrupts(struct seq_file *p, void *v)
@@ -269,7 +268,6 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
void do_IRQ(struct pt_regs *regs)
{
struct paca_struct *lpaca;
struct ItLpQueue *lpq;

irq_enter();

@@ -295,9 +293,8 @@ void do_IRQ(struct pt_regs *regs)
iSeries_smp_message_recv(regs);
}
#endif /* CONFIG_SMP */
lpq = lpaca->lpqueue_ptr;
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpevent_count += ItLpQueue_process(lpq, regs);
if (hvlpevent_is_pending())
process_hvlpevents(regs);

irq_exit();

@@ -801,10 +801,8 @@ int mf_get_boot_rtc(struct rtc_time *tm)
return rc;
/* We need to poll here as we are not yet taking interrupts */
while (rtc_data.busy) {
extern unsigned long lpevent_count;
struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpevent_count += ItLpQueue_process(lpq, NULL);
if (hvlpevent_is_pending())
process_hvlpevents(NULL);
}
return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}

@@ -338,9 +338,8 @@ static int nvram_remove_os_partition(void)

*/
static int nvram_create_os_partition(void)
{
struct list_head * p;
struct nvram_partition *part = NULL;
struct nvram_partition *new_part = NULL;
struct nvram_partition *part;
struct nvram_partition *new_part;
struct nvram_partition *free_part = NULL;
int seq_init[2] = { 0, 0 };
loff_t tmp_index;
@@ -349,8 +348,7 @@ static int nvram_create_os_partition(void)

/* Find a free partition that will give us the maximum needed size
If can't find one that will give us the minimum size needed */
list_for_each(p, &nvram_part->partition) {
part = list_entry(p, struct nvram_partition, partition);
list_for_each_entry(part, &nvram_part->partition, partition) {
if (part->header.signature != NVRAM_SIG_FREE)
continue;

@@ -42,21 +42,7 @@ extern unsigned long __toc_start;
* processors. The processor VPD array needs one entry per physical
* processor (not thread).
*/
#ifdef CONFIG_PPC_ISERIES
#define EXTRA_INITS(number, lpq) \
.lppaca_ptr = &paca[number].lppaca, \
.lpqueue_ptr = (lpq), /* &xItLpQueue, */ \
.reg_save_ptr = &paca[number].reg_save, \
.reg_save = { \
.xDesc = 0xd397d9e2, /* "LpRS" */ \
.xSize = sizeof(struct ItLpRegSave) \
},
#else
#define EXTRA_INITS(number, lpq)
#endif

#define PACAINITDATA(number,start,lpq,asrr,asrv) \
{ \
#define PACA_INIT_COMMON(number, start, asrr, asrv) \
.lock_token = 0x8000, \
.paca_index = (number), /* Paca Index */ \
.default_decr = 0x00ff0000, /* Initial Decr */ \
@@ -74,147 +60,79 @@ extern unsigned long __toc_start;
.end_of_quantum = 0xfffffffffffffffful, \
.slb_count = 64, \
}, \
EXTRA_INITS((number), (lpq)) \

#ifdef CONFIG_PPC_ISERIES
#define PACA_INIT_ISERIES(number) \
.lppaca_ptr = &paca[number].lppaca, \
.reg_save_ptr = &paca[number].reg_save, \
.reg_save = { \
.xDesc = 0xd397d9e2, /* "LpRS" */ \
.xSize = sizeof(struct ItLpRegSave) \
}

#define PACA_INIT(number) \
{ \
PACA_INIT_COMMON(number, 0, 0, 0) \
PACA_INIT_ISERIES(number) \
}

#define BOOTCPU_PACA_INIT(number) \
{ \
PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \
PACA_INIT_ISERIES(number) \
}

struct paca_struct paca[] = {
#ifdef CONFIG_PPC_ISERIES
PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR),
#else
PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR),
#define PACA_INIT(number) \
{ \
PACA_INIT_COMMON(number, 0, 0, 0) \
}

#define BOOTCPU_PACA_INIT(number) \
{ \
PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \
}
#endif

struct paca_struct paca[] = {
BOOTCPU_PACA_INIT(0),
#if NR_CPUS > 1
PACAINITDATA( 1, 0, NULL, 0, 0),
PACAINITDATA( 2, 0, NULL, 0, 0),
PACAINITDATA( 3, 0, NULL, 0, 0),
PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3),
#if NR_CPUS > 4
PACAINITDATA( 4, 0, NULL, 0, 0),
PACAINITDATA( 5, 0, NULL, 0, 0),
PACAINITDATA( 6, 0, NULL, 0, 0),
PACAINITDATA( 7, 0, NULL, 0, 0),
PACA_INIT( 4), PACA_INIT( 5), PACA_INIT( 6), PACA_INIT( 7),
#if NR_CPUS > 8
PACAINITDATA( 8, 0, NULL, 0, 0),
PACAINITDATA( 9, 0, NULL, 0, 0),
PACAINITDATA(10, 0, NULL, 0, 0),
PACAINITDATA(11, 0, NULL, 0, 0),
PACAINITDATA(12, 0, NULL, 0, 0),
PACAINITDATA(13, 0, NULL, 0, 0),
PACAINITDATA(14, 0, NULL, 0, 0),
PACAINITDATA(15, 0, NULL, 0, 0),
PACAINITDATA(16, 0, NULL, 0, 0),
PACAINITDATA(17, 0, NULL, 0, 0),
PACAINITDATA(18, 0, NULL, 0, 0),
PACAINITDATA(19, 0, NULL, 0, 0),
PACAINITDATA(20, 0, NULL, 0, 0),
PACAINITDATA(21, 0, NULL, 0, 0),
PACAINITDATA(22, 0, NULL, 0, 0),
PACAINITDATA(23, 0, NULL, 0, 0),
PACAINITDATA(24, 0, NULL, 0, 0),
PACAINITDATA(25, 0, NULL, 0, 0),
PACAINITDATA(26, 0, NULL, 0, 0),
PACAINITDATA(27, 0, NULL, 0, 0),
PACAINITDATA(28, 0, NULL, 0, 0),
PACAINITDATA(29, 0, NULL, 0, 0),
PACAINITDATA(30, 0, NULL, 0, 0),
PACAINITDATA(31, 0, NULL, 0, 0),
PACA_INIT( 8), PACA_INIT( 9), PACA_INIT( 10), PACA_INIT( 11),
PACA_INIT( 12), PACA_INIT( 13), PACA_INIT( 14), PACA_INIT( 15),
PACA_INIT( 16), PACA_INIT( 17), PACA_INIT( 18), PACA_INIT( 19),
PACA_INIT( 20), PACA_INIT( 21), PACA_INIT( 22), PACA_INIT( 23),
PACA_INIT( 24), PACA_INIT( 25), PACA_INIT( 26), PACA_INIT( 27),
PACA_INIT( 28), PACA_INIT( 29), PACA_INIT( 30), PACA_INIT( 31),
#if NR_CPUS > 32
PACAINITDATA(32, 0, NULL, 0, 0),
PACAINITDATA(33, 0, NULL, 0, 0),
PACAINITDATA(34, 0, NULL, 0, 0),
PACAINITDATA(35, 0, NULL, 0, 0),
PACAINITDATA(36, 0, NULL, 0, 0),
PACAINITDATA(37, 0, NULL, 0, 0),
PACAINITDATA(38, 0, NULL, 0, 0),
PACAINITDATA(39, 0, NULL, 0, 0),
PACAINITDATA(40, 0, NULL, 0, 0),
PACAINITDATA(41, 0, NULL, 0, 0),
PACAINITDATA(42, 0, NULL, 0, 0),
PACAINITDATA(43, 0, NULL, 0, 0),
PACAINITDATA(44, 0, NULL, 0, 0),
PACAINITDATA(45, 0, NULL, 0, 0),
PACAINITDATA(46, 0, NULL, 0, 0),
PACAINITDATA(47, 0, NULL, 0, 0),
PACAINITDATA(48, 0, NULL, 0, 0),
PACAINITDATA(49, 0, NULL, 0, 0),
PACAINITDATA(50, 0, NULL, 0, 0),
PACAINITDATA(51, 0, NULL, 0, 0),
PACAINITDATA(52, 0, NULL, 0, 0),
PACAINITDATA(53, 0, NULL, 0, 0),
PACAINITDATA(54, 0, NULL, 0, 0),
PACAINITDATA(55, 0, NULL, 0, 0),
PACAINITDATA(56, 0, NULL, 0, 0),
PACAINITDATA(57, 0, NULL, 0, 0),
PACAINITDATA(58, 0, NULL, 0, 0),
PACAINITDATA(59, 0, NULL, 0, 0),
PACAINITDATA(60, 0, NULL, 0, 0),
PACAINITDATA(61, 0, NULL, 0, 0),
PACAINITDATA(62, 0, NULL, 0, 0),
PACAINITDATA(63, 0, NULL, 0, 0),
PACA_INIT( 32), PACA_INIT( 33), PACA_INIT( 34), PACA_INIT( 35),
PACA_INIT( 36), PACA_INIT( 37), PACA_INIT( 38), PACA_INIT( 39),
PACA_INIT( 40), PACA_INIT( 41), PACA_INIT( 42), PACA_INIT( 43),
PACA_INIT( 44), PACA_INIT( 45), PACA_INIT( 46), PACA_INIT( 47),
PACA_INIT( 48), PACA_INIT( 49), PACA_INIT( 50), PACA_INIT( 51),
PACA_INIT( 52), PACA_INIT( 53), PACA_INIT( 54), PACA_INIT( 55),
PACA_INIT( 56), PACA_INIT( 57), PACA_INIT( 58), PACA_INIT( 59),
PACA_INIT( 60), PACA_INIT( 61), PACA_INIT( 62), PACA_INIT( 63),
#if NR_CPUS > 64
PACAINITDATA(64, 0, NULL, 0, 0),
PACAINITDATA(65, 0, NULL, 0, 0),
PACAINITDATA(66, 0, NULL, 0, 0),
PACAINITDATA(67, 0, NULL, 0, 0),
PACAINITDATA(68, 0, NULL, 0, 0),
PACAINITDATA(69, 0, NULL, 0, 0),
PACAINITDATA(70, 0, NULL, 0, 0),
PACAINITDATA(71, 0, NULL, 0, 0),
PACAINITDATA(72, 0, NULL, 0, 0),
PACAINITDATA(73, 0, NULL, 0, 0),
PACAINITDATA(74, 0, NULL, 0, 0),
PACAINITDATA(75, 0, NULL, 0, 0),
PACAINITDATA(76, 0, NULL, 0, 0),
PACAINITDATA(77, 0, NULL, 0, 0),
PACAINITDATA(78, 0, NULL, 0, 0),
PACAINITDATA(79, 0, NULL, 0, 0),
PACAINITDATA(80, 0, NULL, 0, 0),
PACAINITDATA(81, 0, NULL, 0, 0),
PACAINITDATA(82, 0, NULL, 0, 0),
PACAINITDATA(83, 0, NULL, 0, 0),
PACAINITDATA(84, 0, NULL, 0, 0),
PACAINITDATA(85, 0, NULL, 0, 0),
PACAINITDATA(86, 0, NULL, 0, 0),
PACAINITDATA(87, 0, NULL, 0, 0),
PACAINITDATA(88, 0, NULL, 0, 0),
PACAINITDATA(89, 0, NULL, 0, 0),
PACAINITDATA(90, 0, NULL, 0, 0),
PACAINITDATA(91, 0, NULL, 0, 0),
PACAINITDATA(92, 0, NULL, 0, 0),
PACAINITDATA(93, 0, NULL, 0, 0),
PACAINITDATA(94, 0, NULL, 0, 0),
PACAINITDATA(95, 0, NULL, 0, 0),
PACAINITDATA(96, 0, NULL, 0, 0),
PACAINITDATA(97, 0, NULL, 0, 0),
PACAINITDATA(98, 0, NULL, 0, 0),
PACAINITDATA(99, 0, NULL, 0, 0),
PACAINITDATA(100, 0, NULL, 0, 0),
PACAINITDATA(101, 0, NULL, 0, 0),
PACAINITDATA(102, 0, NULL, 0, 0),
PACAINITDATA(103, 0, NULL, 0, 0),
PACAINITDATA(104, 0, NULL, 0, 0),
PACAINITDATA(105, 0, NULL, 0, 0),
PACAINITDATA(106, 0, NULL, 0, 0),
PACAINITDATA(107, 0, NULL, 0, 0),
PACAINITDATA(108, 0, NULL, 0, 0),
PACAINITDATA(109, 0, NULL, 0, 0),
PACAINITDATA(110, 0, NULL, 0, 0),
PACAINITDATA(111, 0, NULL, 0, 0),
PACAINITDATA(112, 0, NULL, 0, 0),
PACAINITDATA(113, 0, NULL, 0, 0),
PACAINITDATA(114, 0, NULL, 0, 0),
PACAINITDATA(115, 0, NULL, 0, 0),
PACAINITDATA(116, 0, NULL, 0, 0),
PACAINITDATA(117, 0, NULL, 0, 0),
PACAINITDATA(118, 0, NULL, 0, 0),
PACAINITDATA(119, 0, NULL, 0, 0),
PACAINITDATA(120, 0, NULL, 0, 0),
PACAINITDATA(121, 0, NULL, 0, 0),
PACAINITDATA(122, 0, NULL, 0, 0),
PACAINITDATA(123, 0, NULL, 0, 0),
PACAINITDATA(124, 0, NULL, 0, 0),
PACAINITDATA(125, 0, NULL, 0, 0),
PACAINITDATA(126, 0, NULL, 0, 0),
PACAINITDATA(127, 0, NULL, 0, 0),
PACA_INIT( 64), PACA_INIT( 65), PACA_INIT( 66), PACA_INIT( 67),
PACA_INIT( 68), PACA_INIT( 69), PACA_INIT( 70), PACA_INIT( 71),
PACA_INIT( 72), PACA_INIT( 73), PACA_INIT( 74), PACA_INIT( 75),
PACA_INIT( 76), PACA_INIT( 77), PACA_INIT( 78), PACA_INIT( 79),
PACA_INIT( 80), PACA_INIT( 81), PACA_INIT( 82), PACA_INIT( 83),
PACA_INIT( 84), PACA_INIT( 85), PACA_INIT( 86), PACA_INIT( 87),
PACA_INIT( 88), PACA_INIT( 89), PACA_INIT( 90), PACA_INIT( 91),
PACA_INIT( 92), PACA_INIT( 93), PACA_INIT( 94), PACA_INIT( 95),
PACA_INIT( 96), PACA_INIT( 97), PACA_INIT( 98), PACA_INIT( 99),
PACA_INIT(100), PACA_INIT(101), PACA_INIT(102), PACA_INIT(103),
PACA_INIT(104), PACA_INIT(105), PACA_INIT(106), PACA_INIT(107),
PACA_INIT(108), PACA_INIT(109), PACA_INIT(110), PACA_INIT(111),
PACA_INIT(112), PACA_INIT(113), PACA_INIT(114), PACA_INIT(115),
PACA_INIT(116), PACA_INIT(117), PACA_INIT(118), PACA_INIT(119),
PACA_INIT(120), PACA_INIT(121), PACA_INIT(122), PACA_INIT(123),
PACA_INIT(124), PACA_INIT(125), PACA_INIT(126), PACA_INIT(127),
#endif
#endif
#endif

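For illustration only, a rough sketch of what the new initializer expands to for a secondary CPU when CONFIG_PPC_ISERIES is set, abbreviated to the fields named in the macros above:

	/* Approximate expansion of PACA_INIT(1); the PACA_INIT_COMMON
	 * fields beyond the first three are elided. */
	{
		.lock_token = 0x8000,
		.paca_index = 1,
		.default_decr = 0x00ff0000,
		/* ... remaining PACA_INIT_COMMON fields ... */
		.lppaca_ptr = &paca[1].lppaca,
		.reg_save_ptr = &paca[1].reg_save,
		.reg_save = {
			.xDesc = 0xd397d9e2,	/* "LpRS" */
			.xSize = sizeof(struct ItLpRegSave)
		}
	},
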
@@ -400,7 +400,12 @@ static int __init topology_init(void)
struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
parent = &node_devices[cpu_to_node(cpu)];
/* The node to which a cpu belongs can't be known
* until the cpu is made present.
*/
parent = NULL;
if (cpu_present(cpu))
parent = &node_devices[cpu_to_node(cpu)];
#endif
/*
* For now, we just see if the system supports making

@@ -99,7 +99,6 @@ unsigned long tb_to_ns_shift;
struct gettimeofday_struct do_gtod;

extern unsigned long wall_jiffies;
extern unsigned long lpevent_count;
extern int smp_tb_synchronized;

extern struct timezone sys_tz;
@@ -367,11 +366,8 @@ int timer_interrupt(struct pt_regs * regs)
set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
{
struct ItLpQueue *lpq = lpaca->lpqueue_ptr;
if (lpq && ItLpQueue_isLpIntPending(lpq))
lpevent_count += ItLpQueue_process(lpq, regs);
}
if (hvlpevent_is_pending())
process_hvlpevents(regs);
#endif

/* collect purr register values often, for accurate calculations */

@@ -41,7 +41,7 @@ struct HvLpEvent;
#define LpEventMaxSize 256
#define LpEventAlign 64

struct ItLpQueue {
struct hvlpevent_queue {
/*
* The xSlicCurEventPtr is the pointer to the next event stack entry
* that will become valid. The OS must peek at this entry to determine
@@ -69,16 +69,13 @@ struct ItLpQueue {
char *xSlicEventStackPtr; // 0x20
u8 xIndex; // 0x28 unique sequential index.
u8 xSlicRsvd[3]; // 0x29-2b
u32 xInUseWord; // 0x2C
u64 xLpIntCount; // 0x30 Total Lp Int msgs processed
u64 xLpIntCountByType[9]; // 0x38-0x7F Event counts by type
spinlock_t lock;
};

extern struct ItLpQueue xItLpQueue;
extern struct hvlpevent_queue hvlpevent_queue;

extern struct HvLpEvent *ItLpQueue_getNextLpEvent(struct ItLpQueue *);
extern int ItLpQueue_isLpIntPending(struct ItLpQueue *);
extern unsigned ItLpQueue_process(struct ItLpQueue *, struct pt_regs *);
extern void ItLpQueue_clearValid(struct HvLpEvent *);
extern int hvlpevent_is_pending(void);
extern void process_hvlpevents(struct pt_regs *);
extern void setup_hvlpevent_queue(void);

#endif /* _ITLPQUEUE_H */

@@ -20,7 +20,6 @@
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/iSeries/ItLpRegSave.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/mmu.h>

register struct paca_struct *local_paca asm("r13");
@@ -62,7 +61,6 @@ struct paca_struct {
u16 paca_index; /* Logical processor number */

u32 default_decr; /* Default decrementer value */
struct ItLpQueue *lpqueue_ptr; /* LpQueue handled by this CPU */
u64 kernel_toc; /* Kernel TOC address */
u64 stab_real; /* Absolute address of segment table */
u64 stab_addr; /* Virtual address of segment table */
@@ -91,7 +89,6 @@ struct paca_struct {
u64 next_jiffy_update_tb; /* TB value for next jiffy update */
u64 saved_r1; /* r1 save for RTAS calls */
u64 saved_msr; /* MSR saved here by enter_rtas */
u32 lpevent_count; /* lpevents processed */
u8 proc_enabled; /* irq soft-enable flag */

/* not yet used */
Reference in a new issue