powerpc: Remove FW_FEATURE ISERIES from arch code

This is no longer selectable, so just remove all the dependent code.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Authored by Stephen Rothwell on 2012-03-15 18:18:00 +00:00, committed by Benjamin Herrenschmidt
parent ec86b45af4
commit f5339277eb
18 changed files with 26 additions and 367 deletions
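
Almost every hunk below follows the same pattern: with the iSeries platform gone, a branch guarded by firmware_has_feature(FW_FEATURE_ISERIES) can never be taken, so only the generic path is kept. A minimal illustrative sketch of the shape of the change (the helper names here are made up, not taken from any one file in this series):

    /* Before: a run-time check selected the iSeries-only path. */
    if (firmware_has_feature(FW_FEATURE_ISERIES))
            do_iseries_variant();   /* dead once iSeries cannot be selected */
    else
            do_generic_variant();

    /* After: only the generic path remains. */
    do_generic_variant();

Because the series also deletes the FW_FEATURE_ISERIES definition itself (second file below), every remaining reference has to go in the same change or the build would break.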

View file

@@ -17,7 +17,6 @@
 #include <asm/types.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/firmware.h>
 
 struct mschunks_map {
 	unsigned long num_chunks;
@@ -46,30 +45,12 @@ static inline unsigned long addr_to_chunk(unsigned long addr)
 
 static inline unsigned long phys_to_abs(unsigned long pa)
 {
-	unsigned long chunk;
-
-	/* This is a no-op on non-iSeries */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		return pa;
-
-	chunk = addr_to_chunk(pa);
-	if (chunk < mschunks_map.num_chunks)
-		chunk = mschunks_map.mapping[chunk];
-
-	return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
+	return pa;
 }
 
 /* Convenience macros */
 #define virt_to_abs(va) phys_to_abs(__pa(va))
 #define abs_to_virt(aa) __va(aa)
 
-/*
- * Converts Virtual Address to Real Address for
- * Legacy iSeries Hypervisor calls
- */
-#define iseries_hv_addr(virtaddr)	\
-	(0x8000000000000000UL | virt_to_abs(virtaddr))
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ABS_ADDR_H */

View file

@@ -41,7 +41,6 @@
 #define FW_FEATURE_XDABR	ASM_CONST(0x0000000000040000)
 #define FW_FEATURE_MULTITCE	ASM_CONST(0x0000000000080000)
 #define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
-#define FW_FEATURE_ISERIES	ASM_CONST(0x0000000000200000)
 #define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
@@ -65,8 +64,6 @@ enum {
 		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
 		FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
-	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
-	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
 	FW_FEATURE_POWERNV_ALWAYS = 0,
 	FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
@@ -79,9 +76,6 @@ enum {
 #ifdef CONFIG_PPC_PSERIES
 	FW_FEATURE_PSERIES_POSSIBLE |
 #endif
-#ifdef CONFIG_PPC_ISERIES
-	FW_FEATURE_ISERIES_POSSIBLE |
-#endif
 #ifdef CONFIG_PPC_POWERNV
 	FW_FEATURE_POWERNV_POSSIBLE |
 #endif
@@ -99,9 +93,6 @@ enum {
 #ifdef CONFIG_PPC_PSERIES
 	FW_FEATURE_PSERIES_ALWAYS &
 #endif
-#ifdef CONFIG_PPC_ISERIES
-	FW_FEATURE_ISERIES_ALWAYS &
-#endif
 #ifdef CONFIG_PPC_POWERNV
 	FW_FEATURE_POWERNV_ALWAYS &
 #endif
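
Dropping FW_FEATURE_ISERIES from both the _POSSIBLE and _ALWAYS masks is what lets the remaining firmware_has_feature() call sites disappear cleanly: the helper checks a feature bit against those compile-time masks before it ever looks at the run-time feature word, roughly as in the sketch below (paraphrased, not the exact header text), so a bit that is in no platform's _POSSIBLE mask constant-folds to false.

    static inline unsigned long firmware_has_feature(unsigned long feature)
    {
            return (FW_FEATURE_ALWAYS & feature) ||
                    (FW_FEATURE_POSSIBLE & feature & powerpc_firmware_features);
    }

FW_FEATURE_POSSIBLE and FW_FEATURE_ALWAYS are built from the per-platform values in the two #ifdef blocks shown above, so removing the iSeries entries there matters as much as removing the bit definition itself.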

View file

@@ -18,11 +18,6 @@
 #include <linux/percpu.h>
 
 #include <asm/processor.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
-#endif
 
 /* time.c */
 extern unsigned long tb_ticks_per_jiffy;
@@ -166,15 +161,6 @@ static inline void set_dec(int val)
 #else
 #ifndef CONFIG_BOOKE
 	--val;
 #endif
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
-	    get_lppaca()->shared_proc) {
-		get_lppaca()->virtual_decr = val;
-		if (get_dec() > val)
-			HvCall_setVirtualDecr();
-		return;
-	}
-#endif
 	mtspr(SPRN_DEC, val);
 #endif /* not 40x or 8xx_CPU6 */
@@ -217,7 +203,6 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 #endif
 
 extern void secondary_cpu_time_init(void);
-extern void iSeries_time_init_early(void);
 
 DECLARE_PER_CPU(u64, decrementers_next_tb);

View file

@@ -211,11 +211,6 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 * External interrupt events on non-iseries will have caused
 	 * interrupts to be hard-disabled, so there is no problem, we
 	 * cannot have preempted.
-	 *
-	 * That leaves us with EEs on iSeries or decrementer interrupts,
-	 * which I decided to safely ignore. The preemption would have
-	 * itself been the result of an interrupt, upon which return we
-	 * will have checked for pending events on the old CPU.
 	 */
 	irq_happened = get_irq_happened();
 	if (!irq_happened)
@@ -458,15 +453,6 @@ void do_IRQ(struct pt_regs *regs)
 	irq_exit();
 	set_irq_regs(old_regs);
 
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
-			get_lppaca()->int_dword.fields.decr_int) {
-		get_lppaca()->int_dword.fields.decr_int = 0;
-		/* Signal a fake decrementer interrupt */
-		timer_interrupt(regs);
-	}
-#endif
-
 	trace_irq_exit(regs);
 }

View file

@@ -29,7 +29,6 @@
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
-#include <asm/firmware.h>
 
 unsigned long isa_io_base;	/* NULL if no ISA bus */
 EXPORT_SYMBOL(isa_io_base);
@@ -261,8 +260,6 @@ static struct notifier_block isa_bridge_notifier = {
  */
 static int __init isa_bridge_init(void)
 {
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return 0;
 	bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
 	return 0;
 }

View file

@@ -26,7 +26,6 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <asm/uaccess.h>
-#include <asm/iseries/hv_lp_config.h>
 #include <asm/lppaca.h>
 #include <asm/hvcall.h>
 #include <asm/firmware.h>
@@ -55,80 +54,14 @@ static unsigned long get_purr(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		if (firmware_has_feature(FW_FEATURE_ISERIES))
-			sum_purr += lppaca_of(cpu).emulated_time_base;
-		else {
-			struct cpu_usage *cu;
+		struct cpu_usage *cu;
 
-			cu = &per_cpu(cpu_usage_array, cpu);
-			sum_purr += cu->current_tb;
-		}
+		cu = &per_cpu(cpu_usage_array, cpu);
+		sum_purr += cu->current_tb;
 	}
 	return sum_purr;
 }
 
-#ifdef CONFIG_PPC_ISERIES
-
-/*
- * Methods used to fetch LPAR data when running on an iSeries platform.
- */
-static int iseries_lparcfg_data(struct seq_file *m, void *v)
-{
-	unsigned long pool_id;
-	int shared, entitled_capacity, max_entitled_capacity;
-	int processors, max_processors;
-	unsigned long purr = get_purr();
-
-	shared = (int)(local_paca->lppaca_ptr->shared_proc);
-
-	seq_printf(m, "system_active_processors=%d\n",
-		   (int)HvLpConfig_getSystemPhysicalProcessors());
-
-	seq_printf(m, "system_potential_processors=%d\n",
-		   (int)HvLpConfig_getSystemPhysicalProcessors());
-
-	processors = (int)HvLpConfig_getPhysicalProcessors();
-	seq_printf(m, "partition_active_processors=%d\n", processors);
-
-	max_processors = (int)HvLpConfig_getMaxPhysicalProcessors();
-	seq_printf(m, "partition_potential_processors=%d\n", max_processors);
-
-	if (shared) {
-		entitled_capacity = HvLpConfig_getSharedProcUnits();
-		max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits();
-	} else {
-		entitled_capacity = processors * 100;
-		max_entitled_capacity = max_processors * 100;
-	}
-	seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity);
-
-	seq_printf(m, "partition_max_entitled_capacity=%d\n",
-		   max_entitled_capacity);
-
-	if (shared) {
-		pool_id = HvLpConfig_getSharedPoolIndex();
-		seq_printf(m, "pool=%d\n", (int)pool_id);
-		seq_printf(m, "pool_capacity=%d\n",
-			   (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) *
-				 100));
-		seq_printf(m, "purr=%ld\n", purr);
-	}
-
-	seq_printf(m, "shared_processor_mode=%d\n", shared);
-
-	return 0;
-}
-
-#else				/* CONFIG_PPC_ISERIES */
-
-static int iseries_lparcfg_data(struct seq_file *m, void *v)
-{
-	return 0;
-}
-
-#endif				/* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_PSERIES
 /*
  * Methods used to fetch LPAR data when running on a pSeries platform.
  */
@@ -648,8 +581,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 	u8 new_weight, *new_weight_ptr = &new_weight;
 	ssize_t retval;
 
-	if (!firmware_has_feature(FW_FEATURE_SPLPAR) ||
-	    firmware_has_feature(FW_FEATURE_ISERIES))
+	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 		return -EINVAL;
 
 	if (count > kbuf_sz)
@@ -709,21 +641,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 	return retval;
 }
 
-#else				/* CONFIG_PPC_PSERIES */
-
-static int pseries_lparcfg_data(struct seq_file *m, void *v)
-{
-	return 0;
-}
-
-static ssize_t lparcfg_write(struct file *file, const char __user * buf,
-			     size_t count, loff_t * off)
-{
-	return -EINVAL;
-}
-
-#endif				/* CONFIG_PPC_PSERIES */
-
 static int lparcfg_data(struct seq_file *m, void *v)
 {
 	struct device_node *rootdn;
@@ -738,19 +655,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	rootdn = of_find_node_by_path("/");
 	if (rootdn) {
 		tmp = of_get_property(rootdn, "model", NULL);
-		if (tmp) {
+		if (tmp)
 			model = tmp;
-			/* Skip "IBM," - see platforms/iseries/dt.c */
-			if (firmware_has_feature(FW_FEATURE_ISERIES))
-				model += 4;
-		}
 		tmp = of_get_property(rootdn, "system-id", NULL);
-		if (tmp) {
+		if (tmp)
 			system_id = tmp;
-			/* Skip "IBM," - see platforms/iseries/dt.c */
-			if (firmware_has_feature(FW_FEATURE_ISERIES))
-				system_id += 4;
-		}
 		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
 					NULL);
 		if (lp_index_ptr)
@@ -761,8 +670,6 @@ static int lparcfg_data(struct seq_file *m, void *v)
 	seq_printf(m, "system_type=%s\n", model);
 	seq_printf(m, "partition_id=%d\n", (int)lp_index);
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return iseries_lparcfg_data(m, v);
-
 	return pseries_lparcfg_data(m, v);
 }
@@ -786,8 +693,7 @@ static int __init lparcfg_init(void)
 	umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
 
 	/* Allow writing if we have FW_FEATURE_SPLPAR */
-	if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
-			!firmware_has_feature(FW_FEATURE_ISERIES))
+	if (firmware_has_feature(FW_FEATURE_SPLPAR))
 		mode |= S_IWUSR;
 
 	ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);

View file

@@ -11,13 +11,10 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 
-#include <asm/firmware.h>
 #include <asm/lppaca.h>
 #include <asm/paca.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
-#include <asm/iseries/lpar_map.h>
-#include <asm/iseries/hv_types.h>
 #include <asm/kexec.h>
 
 /* This symbol is provided by the linker - let it fill in the paca
@@ -30,8 +27,8 @@ extern unsigned long __toc_start;
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary. The vpa_init/register_vpa call
  * is now known to fail if the lppaca structure crosses a page
- * boundary. The lppaca is also used on legacy iSeries and POWER5
- * pSeries boxes. The lppaca is 640 bytes long, and cannot readily
+ * boundary. The lppaca is also used on POWER5 pSeries boxes.
+ * The lppaca is 640 bytes long, and cannot readily
  * change since the hypervisor knows its layout, so a 1kB alignment
  * will suffice to ensure that it doesn't cross a page boundary.
  */
@@ -183,12 +180,9 @@ void __init allocate_pacas(void)
 	/*
 	 * We can't take SLB misses on the paca, and we want to access them
 	 * in real mode, so allocate them within the RMA and also within
-	 * the first segment. On iSeries they must be within the area mapped
-	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
+	 * the first segment.
 	 */
 	limit = min(0x10000000ULL, ppc64_rma_size);
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		limit = min(limit, HvPagesToMap * HVPAGESIZE);
-
 	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
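
The 1kB-alignment comment kept above rests on simple arithmetic: a 640-byte object placed at a 1kB-aligned address occupies offsets 0-639 of that 1kB block, and since 4kB pages are made of aligned 1kB blocks it can never straddle a page boundary. Purely as an illustration of how such a constraint is expressed in C (a made-up type name, not the kernel's actual lppaca declaration, which lives in asm/lppaca.h):

    /* A 640-byte object forced onto a 1kB (0x400) boundary always fits
     * entirely inside one 1kB block, hence inside one 4kB page. */
    struct lppaca_like {
            unsigned char data[640];
    } __attribute__((__aligned__(0x400)));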

View file

@@ -38,7 +38,6 @@
 #include <asm/byteorder.h>
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
-#include <asm/firmware.h>
 #include <asm/eeh.h>
 
 static DEFINE_SPINLOCK(hose_spinlock);
@@ -219,20 +218,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
 	struct of_irq oirq;
 	unsigned int virq;
 
-	/* The current device-tree that iSeries generates from the HV
-	 * PCI informations doesn't contain proper interrupt routing,
-	 * and all the fallback would do is print out crap, so we
-	 * don't attempt to resolve the interrupts here at all, some
-	 * iSeries specific fixup does it.
-	 *
-	 * In the long run, we will hopefully fix the generated device-tree
-	 * instead.
-	 */
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return -1;
-#endif
-
 	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
 
 #ifdef DEBUG

View file

@@ -12,7 +12,6 @@
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
-#include <asm/firmware.h>
 #include <asm/hvcall.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
@@ -341,8 +340,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
 	int i, nattrs;
 
 #ifdef CONFIG_PPC64
-	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
-			cpu_has_feature(CPU_FTR_SMT))
+	if (cpu_has_feature(CPU_FTR_SMT))
 		device_create_file(s, &dev_attr_smt_snooze_delay);
 #endif
 
@@ -414,8 +412,7 @@ static void unregister_cpu_online(unsigned int cpu)
 	BUG_ON(!c->hotpluggable);
 
 #ifdef CONFIG_PPC64
-	if (!firmware_has_feature(FW_FEATURE_ISERIES) &&
-			cpu_has_feature(CPU_FTR_SMT))
+	if (cpu_has_feature(CPU_FTR_SMT))
 		device_remove_file(s, &dev_attr_smt_snooze_delay);
 #endif
 

View file

@@ -17,8 +17,7 @@
  *
  * TODO (not necessarily in this file):
  * - improve precision and reproducibility of timebase frequency
- * measurement at boot time. (for iSeries, we calibrate the timebase
- * against the Titan chip's clock.)
+ * measurement at boot time.
  * - for astronomical applications: add a new function to get
  * non ambiguous timestamps even around leap seconds. This needs
  * a new timestamp format and a good name.
@@ -70,10 +69,6 @@
 #include <asm/vdso_datapage.h>
 #include <asm/firmware.h>
 #include <asm/cputime.h>
-#ifdef CONFIG_PPC_ISERIES
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/iseries/hv_call_xm.h>
-#endif
 
 /* powerpc clocksource/clockevent code */
 
@@ -117,14 +112,6 @@ static struct clock_event_device decrementer_clockevent = {
 DEFINE_PER_CPU(u64, decrementers_next_tb);
 static DEFINE_PER_CPU(struct clock_event_device, decrementers);
 
-#ifdef CONFIG_PPC_ISERIES
-static unsigned long __initdata iSeries_recal_titan;
-static signed long __initdata iSeries_recal_tb;
-
-/* Forward declaration is only needed for iSereis compiles */
-static void __init clocksource_init(void);
-#endif
-
 #define XSEC_PER_SEC (1024*1024)
 
 #ifdef CONFIG_PPC64
@@ -423,74 +410,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-#ifdef CONFIG_PPC_ISERIES
-
-/*
- * This function recalibrates the timebase based on the 49-bit time-of-day
- * value in the Titan chip. The Titan is much more accurate than the value
- * returned by the service processor for the timebase frequency.
- */
-
-static int __init iSeries_tb_recal(void)
-{
-	unsigned long titan, tb;
-
-	/* Make sure we only run on iSeries */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		return -ENODEV;
-
-	tb = get_tb();
-	titan = HvCallXm_loadTod();
-	if ( iSeries_recal_titan ) {
-		unsigned long tb_ticks = tb - iSeries_recal_tb;
-		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
-		unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
-		unsigned long new_tb_ticks_per_jiffy =
-			DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
-		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
-		char sign = '+';
-		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
-		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
-
-		if ( tick_diff < 0 ) {
-			tick_diff = -tick_diff;
-			sign = '-';
-		}
-		if ( tick_diff ) {
-			if ( tick_diff < tb_ticks_per_jiffy/25 ) {
-				printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
-						new_tb_ticks_per_jiffy, sign, tick_diff );
-				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
-				tb_ticks_per_sec = new_tb_ticks_per_sec;
-				calc_cputime_factors();
-				vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-				setup_cputime_one_jiffy();
-			}
-			else {
-				printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
-					"    new tb_ticks_per_jiffy = %lu\n"
-					"    old tb_ticks_per_jiffy = %lu\n",
-					new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
-			}
-		}
-	}
-	iSeries_recal_titan = titan;
-	iSeries_recal_tb = tb;
-
-	/* Called here as now we know accurate values for the timebase */
-	clocksource_init();
-	return 0;
-}
-late_initcall(iSeries_tb_recal);
-
-/* Called from platform early init */
-void __init iSeries_time_init_early(void)
-{
-	iSeries_recal_tb = get_tb();
-	iSeries_recal_titan = HvCallXm_loadTod();
-}
-#endif /* CONFIG_PPC_ISERIES */
-
 #ifdef CONFIG_IRQ_WORK
 
 /*
@@ -546,16 +465,6 @@ void arch_irq_work_raise(void)
 
 #endif /* CONFIG_IRQ_WORK */
 
-/*
- * For iSeries shared processors, we have to let the hypervisor
- * set the hardware decrementer. We set a virtual decrementer
- * in the lppaca and call the hypervisor if the virtual
- * decrementer is less than the current value in the hardware
- * decrementer. (almost always the new decrementer value will
- * be greater than the current hardware decementer so the hypervisor
- * call will not be needed)
- */
-
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
@@ -599,20 +508,10 @@ void timer_interrupt(struct pt_regs * regs)
 		irq_work_run();
 	}
 
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		get_lppaca()->int_dword.fields.decr_int = 0;
-#endif
-
 	*next_tb = ~(u64)0;
 	if (evt->event_handler)
 		evt->event_handler(evt);
 
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
-		process_hvlpevents();
-#endif
-
 #ifdef CONFIG_PPC64
 	/* collect purr register values often, for accurate calculations */
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
@@ -984,9 +883,8 @@ void __init time_init(void)
 	 */
 	start_cpu_decrementer();
 
-	/* Register the clocksource, if we're not running on iSeries */
-	if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		clocksource_init();
+	/* Register the clocksource */
+	clocksource_init();
 
 	init_decrementer_clockevent();
 }

View file

@@ -19,11 +19,9 @@
 #include <linux/smp.h>
 
 /* waiting for a spinlock... */
-#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+#if defined(CONFIG_PPC_SPLPAR)
 #include <asm/hvcall.h>
-#include <asm/iseries/hv_call.h>
 #include <asm/smp.h>
-#include <asm/firmware.h>
 
 void __spin_yield(arch_spinlock_t *lock)
 {
@@ -40,14 +38,8 @@ void __spin_yield(arch_spinlock_t *lock)
 	rmb();
 	if (lock->slock != lock_value)
 		return;		/* something has changed */
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
-			((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
-	else
-		plpar_hcall_norets(H_CONFER,
-			get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+	plpar_hcall_norets(H_CONFER,
+		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 
 /*
@@ -71,14 +63,8 @@ void __rw_yield(arch_rwlock_t *rw)
 	rmb();
 	if (rw->lock != lock_value)
 		return;		/* something has changed */
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
-			((u64)holder_cpu << 32) | yield_count);
-#ifdef CONFIG_PPC_SPLPAR
-	else
-		plpar_hcall_norets(H_CONFER,
-			get_hard_smp_processor_id(holder_cpu), yield_count);
-#endif
+	plpar_hcall_norets(H_CONFER,
+		get_hard_smp_processor_id(holder_cpu), yield_count);
 }
 
 #endif
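
For context on why __spin_yield() exists at all: on a shared-processor LPAR the virtual CPU holding a lock may itself have been preempted by the hypervisor, so a waiter is better off conferring its remaining time slice to the holder (the H_CONFER hcall) than spinning. The arch spinlock slow path calls it roughly along these lines (a sketch of the asm/spinlock.h pattern, not a verbatim quote):

    while (!arch_spin_trylock(lock)) {
            HMT_low();              /* drop SMT thread priority while waiting */
            if (SHARED_PROCESSOR)   /* only yield on shared-processor LPARs */
                    __spin_yield(lock);
            HMT_medium();
    }

With the iSeries HvCall2 yield gone, the H_CONFER path is the only implementation left, and the inner CONFIG_PPC_SPLPAR guard becomes redundant because the whole file section is already compiled only for SPLPAR.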

View file

@@ -56,6 +56,7 @@
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
 #include <asm/fadump.h>
+#include <asm/firmware.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -756,12 +757,9 @@ void __init early_init_mmu(void)
 	 */
 	htab_initialize();
 
-	/* Initialize stab / SLB management except on iSeries
-	 */
+	/* Initialize stab / SLB management */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
-	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
-		stab_initialize(get_paca()->stab_real);
 }
 
 #ifdef CONFIG_SMP
@@ -772,8 +770,7 @@ void __cpuinit early_init_mmu_secondary(void)
 		mtspr(SPRN_SDR1, _SDR1);
 
 	/* Initialize STAB/SLB. We use a virtual address as it works
-	 * in real mode on pSeries and we want a virtual address on
-	 * iSeries anyway
+	 * in real mode on pSeries.
 	 */
 	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();

View file

@@ -21,7 +21,6 @@
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
-#include <asm/firmware.h>
 #include <linux/compiler.h>
 #include <asm/udbg.h>
 #include <asm/code-patching.h>
@@ -307,11 +306,6 @@ void slb_initialize(void)
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
 
-	/* On iSeries the bolted entries have already been set up by
-	 * the hypervisor from the lparMap data in head.S */
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-
 	lflags = SLB_VSID_KERNEL | linear_llp;
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;

View file

@@ -21,8 +21,6 @@
 #include <asm/cputable.h>
 #include <asm/prom.h>
 #include <asm/abs_addr.h>
-#include <asm/firmware.h>
-#include <asm/iseries/hv_call.h>
 
 struct stab_entry {
 	unsigned long esid_data;
@@ -285,12 +283,5 @@ void stab_initialize(unsigned long stab)
 
 	/* Set ASR */
 	stabreal = get_paca()->stab_real | 0x1ul;
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		HvCall1(HvCallBaseSetASR, stabreal);
-		return;
-	}
-#endif /* CONFIG_PPC_ISERIES */
-
 	mtspr(SPRN_ASR, stabreal);
 }

View file

@@ -195,9 +195,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 	if (!cur_cpu_spec->oprofile_cpu_type)
 		return -ENODEV;
 
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return -ENODEV;
-
 	switch (cur_cpu_spec->oprofile_type) {
 #ifdef CONFIG_PPC_BOOK3S_64
 #ifdef CONFIG_OPROFILE_CELL

View file

@@ -31,6 +31,7 @@
 #include <asm/iommu.h>
 #include <asm/tce.h>
 #include <asm/abs_addr.h>
+#include <asm/firmware.h>
 
 #include "powernv.h"
 #include "pci.h"

View file

@@ -41,6 +41,7 @@
 #include <asm/udbg.h>
 #include <asm/smp.h>
 #include <asm/trace.h>
+#include <asm/firmware.h>
 
 #include "plpar_wrappers.h"
 #include "pseries.h"

View file

@@ -39,7 +39,6 @@
 #include <asm/irq_regs.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
-#include <asm/firmware.h>
 #include <asm/setjmp.h>
 #include <asm/reg.h>
 
@@ -1635,25 +1634,6 @@ static void super_regs(void)
 	       mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
 	printf("sp   = "REG"  sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
 	printf("toc  = "REG"  dar  = "REG"\n", toc, mfspr(SPRN_DAR));
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
-		struct paca_struct *ptrPaca;
-		struct lppaca *ptrLpPaca;
-
-		/* Dump out relevant Paca data areas. */
-		printf("Paca: \n");
-		ptrPaca = local_paca;
-
-		printf("  Local Processor Control Area (LpPaca): \n");
-		ptrLpPaca = ptrPaca->lppaca_ptr;
-		printf("    Saved Srr0=%.16lx  Saved Srr1=%.16lx \n",
-		       ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
-		printf("    Saved Gpr3=%.16lx  Saved Gpr4=%.16lx \n",
-		       ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
-		printf("    Saved Gpr5=%.16lx \n",
-		       ptrLpPaca->gpr5_dword.saved_gpr5);
-	}
-#endif
 
 	return;
 }
@@ -2856,10 +2836,6 @@ static void dump_tlb_book3e(void)
 
 static void xmon_init(int enable)
 {
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return;
-#endif
 	if (enable) {
 		__debugger = xmon;
 		__debugger_ipi = xmon_ipi;
@@ -2896,10 +2872,6 @@ static struct sysrq_key_op sysrq_xmon_op = {
 
 static int __init setup_xmon_sysrq(void)
 {
-#ifdef CONFIG_PPC_ISERIES
-	if (firmware_has_feature(FW_FEATURE_ISERIES))
-		return 0;
-#endif
 	register_sysrq_key('x', &sysrq_xmon_op);
 	return 0;
 }