/*
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002,2006  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/mca.h>
#include <linux/nmi.h>

#include <asm/i8253.h>
#include <asm/hpet.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/timer.h>
/*
 * The jiffies tick counter, starting at INITIAL_JIFFIES.
 * NOTE(review): __section_jiffies presumably places this in a special
 * section so it can be mapped read-only for user-visible time code
 * (vsyscall/vgtod) — confirm against the linker script.
 */
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
/*
 * Return the program counter to credit for a profiling tick.
 *
 * If the tick landed inside one of the kernel's lock functions, the
 * lock implementation would soak up all the profile hits, so try to
 * report the lock function's caller instead.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of flags from PUSHF
	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		/* With frame pointers, the caller's return address sits
		   one word above the saved frame pointer. */
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		/* No frame pointer: peek at the top two stack slots.  A
		   saved EFLAGS word has bits 22+ clear, while a kernel
		   return address does not, so the first slot with high
		   bits set is taken to be the caller's address. */
		unsigned long *sp = (unsigned long *)regs->sp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
|
2008-12-17 10:47:21 -07:00
|
|
|
static irqreturn_t timer_interrupt(int irq, void *dev_id)
|
2007-10-12 15:04:07 -06:00
|
|
|
{
|
2008-12-12 16:52:26 -07:00
|
|
|
inc_irq_stat(irq0_irqs);
|
2007-10-12 15:04:07 -06:00
|
|
|
|
2007-10-12 15:04:07 -06:00
|
|
|
global_clock_event->event_handler(global_clock_event);
|
|
|
|
|
2008-07-21 15:42:52 -06:00
|
|
|
#ifdef CONFIG_MCA
|
|
|
|
if (MCA_bus) {
|
|
|
|
u8 irq_v = inb_p(0x61); /* read the current state */
|
|
|
|
outb_p(irq_v|0x80, 0x61); /* reset the IRQ */
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2007-10-12 15:04:07 -06:00
|
|
|
return IRQ_HANDLED;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
/* calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency */
#define TICK_COUNT 100000000

/*
 * Measure the CPU core frequency by counting un-halted core cycles
 * (via an AMD K7-family performance counter) against TSC ticks over a
 * TICK_COUNT-tick window, then scaling by the known tsc_khz.
 * Returns the estimated CPU frequency in kHz.
 */
unsigned long __init calibrate_cpu(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	/* Find a free performance counter (0-3) we can borrow. */
	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		/* All counters busy: temporarily commandeer counter 3,
		   saving its event-select and count for restoration. */
		WARN(1, KERN_WARNING "Warning: AMD perfctrs busy ... "
		     "cpu_khz value may be incorrect.\n");
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	/* Keep interrupts off so nothing perturbs the measurement loop. */
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	/* Enable bit (22), count in OS+user modes (3 << 16), event 0x76
	   — presumably "CPU clocks not halted"; confirm against the AMD
	   BKDG for this family. */
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	/* Spin until TICK_COUNT TSC ticks have elapsed, sampling the
	   cycle counter as we go. */
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		/* Restore counter 3's saved count and event select
		   (select zeroed first so the restore is not counting). */
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}

	/* core cycles per TSC tick, scaled by tsc_khz => CPU kHz. */
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
|
|
|
|
static struct irqaction irq0 = {
|
2008-07-11 12:25:13 -06:00
|
|
|
.handler = timer_interrupt,
|
2007-10-12 15:04:06 -06:00
|
|
|
.flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
|
2007-05-08 01:35:28 -06:00
|
|
|
.mask = CPU_MASK_NONE,
|
2007-07-21 09:10:18 -06:00
|
|
|
.name = "timer"
|
2005-04-16 16:20:36 -06:00
|
|
|
};
|
|
|
|
|
2008-01-30 05:31:10 -07:00
|
|
|
void __init hpet_time_init(void)
|
2006-09-26 02:52:28 -06:00
|
|
|
{
|
2007-10-12 15:04:07 -06:00
|
|
|
if (!hpet_enable())
|
|
|
|
setup_pit_timer();
|
2005-06-23 01:08:36 -06:00
|
|
|
|
2008-07-11 11:21:29 -06:00
|
|
|
irq0.mask = cpumask_of_cpu(0);
|
2007-10-12 15:04:07 -06:00
|
|
|
setup_irq(0, &irq0);
|
2008-01-30 05:31:10 -07:00
|
|
|
}
|
2005-04-16 16:20:36 -06:00
|
|
|
/*
 * Early time initialization: calibrate the TSC now and defer the
 * interrupt-timer setup to late_time_init, via whichever init routine
 * choose_time_init() selects.
 */
void __init time_init(void)
{
	tsc_init();

	late_time_init = choose_time_init();
}