[MIPS] MT: Improved multithreading support.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

parent 2600990e64
commit 41c594ab65

51 changed files with 3720 additions and 176 deletions
@@ -1447,6 +1447,10 @@ choice
	prompt "MIPS MT options"
	depends on MIPS_MT

config MIPS_MT_SMTC
	bool "SMTC: Use all TCs on all VPEs for SMP"
	select SMP

config MIPS_MT_SMP
	bool "Use 1 TC on each available VPE for SMP"
	select SMP

@@ -1613,7 +1617,7 @@ source "mm/Kconfig"

config SMP
	bool "Multi-Processing support"
	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
	---help---
	  This enables support for systems with more than one CPU. If you have
	  a system with only one CPU, like most personal computers, say N. If

@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o

obj-$(CONFIG_SMP) += smp.o

obj-$(CONFIG_MIPS_MT_SMP) += smp_mt.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o

obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o

@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
	offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
	offset("#define PT_STATUS ", struct pt_regs, cp0_status);
	offset("#define PT_CAUSE ", struct pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
	offset("#define PT_TCSTATUS ", struct pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
	size("#define PT_SIZE ", struct pt_regs);
	linefeed;
}

@@ -17,6 +17,9 @@
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif

#ifdef CONFIG_PREEMPT
	.macro	preempt_stop

@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
	bnez	t0, syscall_exit_work

FEXPORT(restore_all)		# restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
	/* Detect and execute deferred IPI "interrupts" */
	move	a0,sp
	jal	deferred_smtc_ipi
	/* Re-arm any temporarily masked interrupts not explicitly "acked" */
	mfc0	v0, CP0_TCSTATUS
	ori	v1, v0, TCSTATUS_IXMT
	mtc0	v1, CP0_TCSTATUS
	andi	v0, TCSTATUS_IXMT
	ehb
	mfc0	t0, CP0_TCCONTEXT
	DMT	9				# dmt t1
	jal	mips_ihb
	mfc0	t2, CP0_STATUS
	andi	t3, t0, 0xff00
	or	t2, t2, t3
	mtc0	t2, CP0_STATUS
	ehb
	andi	t1, t1, VPECONTROL_TE
	beqz	t1, 1f
	EMT
1:
	mfc0	v1, CP0_TCSTATUS
	/* We set IXMT above, XOR should clear it here */
	xori	v1, v1, TCSTATUS_IXMT
	or	v1, v0, v1
	mtc0	v1, CP0_TCSTATUS
	ehb
	xor	t0, t0, t3
	mtc0	t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC */
	.set	noat
	RESTORE_TEMP
	RESTORE_AT

@@ -283,11 +283,33 @@
 */

3:
#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-write of Status must be atomic */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	ehb
	DMT	9				# dmt t1
	jal	mips_ihb
	nop
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	t0, CP0_STATUS
	ori	t0, 0x1f
	xori	t0, 0x1f
	mtc0	t0, CP0_STATUS

#ifdef CONFIG_MIPS_MT_SMTC
	andi	t1, t1, VPECONTROL_TE
	beqz	t1, 9f
	nop
	EMT					# emt
9:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_L	v0, GDB_FR_STATUS(sp)
	LONG_L	v1, GDB_FR_EPC(sp)
	mtc0	v0, CP0_STATUS

@@ -140,6 +140,7 @@
#include <asm/system.h>
#include <asm/gdb-stub.h>
#include <asm/inst.h>
#include <asm/smp.h>

/*
 * external low-level support routines

@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
	local_irq_restore(flags);
}

/*
 * GDB stub needs to call kgdb_wait on all processors with interrupts
 * disabled, so it uses its own special variant.
 */
static int kgdb_smp_call_kgdb_wait(void)
{
#ifdef CONFIG_SMP
	struct call_data_struct data;
	int i, cpus = num_online_cpus() - 1;
	int cpu = smp_processor_id();

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	if (!cpus)
		return 0;

	if (spin_is_locked(&smp_call_lock)) {
		/*
		 * Some other processor is trying to make us do something
		 * but we're not going to respond... give up
		 */
		return -1;
	}

	/*
	 * We will continue here, accepting the fact that
	 * the kernel may deadlock if another CPU attempts
	 * to call smp_call_function now...
	 */

	data.func = kgdb_wait;
	data.info = NULL;
	atomic_set(&data.started, 0);
	data.wait = 0;

	spin_lock(&smp_call_lock);
	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i) && i != cpu)
			core_send_ipi(i, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	call_data = NULL;
	spin_unlock(&smp_call_lock);
#endif

	return 0;
}

/*
 * This function does all command processing for interfacing to gdb. It

@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
	/*
	 * force other cpus to enter kgdb
	 */
	smp_call_function(kgdb_wait, NULL, 0, 0);
	kgdb_smp_call_kgdb_wait();

	/*
	 * If we're in breakpoint() increment the PC

@@ -12,6 +12,7 @@
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>

@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
EXPORT(except_vec_vi_mori)
	ori	a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
EXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	j	except_vec_vi_handler

@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwise unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1
	mfc0	t2, CP0_TCCONTEXT
	or	t0, t0, t2
	mtc0	t0, CP0_TCCONTEXT
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS
	ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
	move	a0, sp
	jalr	v0

@@ -18,6 +18,7 @@
#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/mipsregs.h>

@@ -82,12 +83,33 @@
 */
	.macro	setup_c0_status set clr
	.set	push
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * For SMTC, we need to set privilege and disable interrupts only for
	 * the current TC, using the TCStatus register.
	 */
	mfc0	t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
	li	t1, ST0_CU0 | 0x08001c00
	or	t0, t1
	/* Clear TKSU, leave IXMT */
	xori	t0, 0x00001800
	mtc0	t0, CP0_TCSTATUS
	ehb
	/* We need to leave the global IE bit set, but clear EXL... */
	mfc0	t0, CP0_STATUS
	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
	xor	t0, ST0_EXL | ST0_ERL | \clr
	mtc0	t0, CP0_STATUS
#else
	mfc0	t0, CP0_STATUS
	or	t0, ST0_CU0|\set|0x1f|\clr
	xor	t0, 0x1f|\clr
	mtc0	t0, CP0_STATUS
	.set	noreorder
	sll	zero,3				# ehb
#endif
	.set	pop
	.endm

@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp)		# kernel entry point

	ARC64_TWIDDLE_PC

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
	 * We still need to enable interrupts globally in Status,
	 * and clear EXL/ERL.
	 *
	 * TCContext is used to track interrupt levels under
	 * service in SMTC kernel. Clear for boot TC before
	 * allowing any interrupts.
	 */
	mtc0	zero, CP0_TCCONTEXT

	mfc0	t0, CP0_STATUS
	ori	t0, t0, 0xff1f
	xori	t0, t0, 0x001e
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */

	PTR_LA	t0, __bss_start		# clear .bss
	LONG_S	zero, (t0)
	PTR_LA	t1, __bss_stop - LONGSIZE

@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp)		# kernel entry point
 * function after setting up the stack and gp registers.
 */
NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Read-modify-writes of Status must be atomic, and this
	 * is one case where CLI is invoked without EXL being
	 * necessarily set. The CLI and setup_c0_status will
	 * in fact be redundant for all but the first TC of
	 * each VPE being booted.
	 */
	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
	jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
	setup_c0_status_sec
	smp_slave_setup
#ifdef CONFIG_MIPS_MT_SMTC
	andi	t2, t2, VPECONTROL_TE
	beqz	t2, 2f
	EMT		# emt
2:
#endif /* CONFIG_MIPS_MT_SMTC */
	j	start_secondary
	END(smp_bootstrap)
#endif /* CONFIG_SMP */

@@ -187,6 +187,10 @@ void mask_and_ack_8259A(unsigned int irq)
		outb(cached_21,0x21);
		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
	}
#ifdef CONFIG_MIPS_MT_SMTC
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
	spin_unlock_irqrestore(&i8259A_lock, flags);
	return;

@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
	mask_msc_irq(irq);
	if (!cpu_has_veic)
		MSCIC_WRITE(MSC01_IC_EOI, 0);
#ifdef CONFIG_MIPS_MT_SMTC
	/* This actually needs to be a call into platform code */
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*

@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	if (irq_hwmask[irq] & ST0_IM)
		set_c0_status(irq_hwmask[irq] & ST0_IM);
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*

@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)

atomic_t irq_err_count;

#ifdef CONFIG_MIPS_MT_SMTC
/*
 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 * in this table.
 */
unsigned long irq_hwmask[NR_IRQS];
#endif /* CONFIG_MIPS_MT_SMTC */

#undef do_IRQ

/*

@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	irq_enter();

	__DO_IRQ_SMTC_HOOK();
	__do_IRQ(irq, regs);

	irq_exit();

@@ -129,6 +139,9 @@ void __init init_IRQ(void)
		irq_desc[i].depth = 1;
		irq_desc[i].handler = &no_irq_type;
		spin_lock_init(&irq_desc[i].lock);
#ifdef CONFIG_MIPS_MT_SMTC
		irq_hwmask[i] = 0;
#endif /* CONFIG_MIPS_MT_SMTC */
	}

	arch_init_irq();

arch/mips/kernel/mips-mt.c (new file, 449 lines)
@@ -0,0 +1,449 @@
/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>

/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */

cpumask_t mt_fpu_cpumask;

#ifdef CONFIG_MIPS_MT_FPAFF

#include <linux/cpu.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

unsigned long mt_fpemul_threshold = 0;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline task_t *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}


/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
			unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	task_t *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		unlock_cpu_hotplug();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held. We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
			!capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	if( (p->thread.mflags & MF_FPUBOUND)
			&& cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed(p, effective_mask);
	} else {
		p->thread.mflags &= ~MF_FPUBOUND;
		retval = set_cpus_allowed(p, new_mask);
	}


out_unlock:
	put_task_struct(p);
	unlock_cpu_hotplug();
	return retval;
}

/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
			unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	int retval;
	task_t *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;

	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
	read_unlock(&tasklist_lock);
	unlock_cpu_hotplug();
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}

#endif /* CONFIG_MIPS_MT_FPAFF */

/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk(" MVPControl Passed: %08lx\n", mvpctl);
	printk(" MVPControl Read: %08lx\n", vpflags);
	printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for(i = 0; i < nvpe; i++) {
		for(tc = 0; tc < ntc; tc++) {
			settc(tc);
			if((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk(" VPE %d\n", i);
				printk(" VPEControl : %08lx\n", read_vpe_c0_vpecontrol());
				printk(" VPEConf0 : %08lx\n", read_vpe_c0_vpeconf0());
				printk(" VPE%d.Status : %08lx\n",
					i, read_vpe_c0_status());
				printk(" VPE%d.EPC : %08lx\n", i, read_vpe_c0_epc());
				printk(" VPE%d.Cause : %08lx\n", i, read_vpe_c0_cause());
				printk(" VPE%d.Config7 : %08lx\n",
					i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}
	}
	printk("-- per-TC State --\n");
	for(tc = 0; tc < ntc; tc++) {
		settc(tc);
		if(read_tc_c0_tcbind() == read_c0_tcbind()) {
			/* Are we dumping ourself? */
			haltval = 0; /* Then we're not halted, and mustn't be */
			tcstatval = flags; /* And pre-dump TCStatus is flags */
			printk(" TC %d (current TC with VPE EPC above)\n", tc);
		} else {
			haltval = read_tc_c0_tchalt();
			write_tc_c0_tchalt(1);
			tcstatval = read_tc_c0_tcstatus();
			printk(" TC %d\n", tc);
		}
		printk(" TCStatus : %08lx\n", tcstatval);
		printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
		printk(" TCRestart : %08lx\n", read_tc_c0_tcrestart());
		printk(" TCHalt : %08lx\n", haltval);
		printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
		if (!haltval)
			write_tc_c0_tchalt(0);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("===========================\n");
	evpe(vpflags);
	local_irq_restore(flags);
}

static int mt_opt_norps = 0;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7 = 0;
static int mt_opt_config7 = -1;

static int __init rps_disable(char *s)
{
	mt_opt_norps = 1;
	return 1;
}
__setup("norps", rps_disable);

static int __init rpsctl_set(char *str)
{
	get_option(&str, &mt_opt_rpsctl);
	return 1;
}
__setup("rpsctl=", rpsctl_set);

static int __init nblsu_set(char *str)
{
	get_option(&str, &mt_opt_nblsu);
	return 1;
}
__setup("nblsu=", nblsu_set);

static int __init config7_set(char *str)
{
	get_option(&str, &mt_opt_config7);
	mt_opt_forceconfig7 = 1;
	return 1;
}
__setup("config7=", config7_set);

/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush = 0;
int mt_protdflush = 0;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;

static int __init set_protiflush(char *s)
{
	mt_protiflush = 1;
	return 1;
}
__setup("protiflush", set_protiflush);

static int __init set_protdflush(char *s)
{
	mt_protdflush = 1;
	return 1;
}
__setup("protdflush", set_protdflush);

static int __init niflush(char *s)
{
	get_option(&s, &mt_n_iflushes);
	return 1;
}
__setup("niflush=", niflush);

static int __init ndflush(char *s)
{
	get_option(&s, &mt_n_dflushes);
	return 1;
}
__setup("ndflush=", ndflush);
#ifdef CONFIG_MIPS_MT_FPAFF
static int fpaff_threshold = -1;

static int __init fpaff_thresh(char *str)
{
	get_option(&str, &fpaff_threshold);
	return 1;
}

__setup("fpaff=", fpaff_thresh);
#endif /* CONFIG_MIPS_MT_FPAFF */

static unsigned int itc_base = 0;

static int __init set_itc_base(char *str)
{
	get_option(&str, &itc_base);
	return 1;
}

__setup("itcbase=", set_itc_base);

void mips_mt_set_cpuoptions(void)
{
	unsigned int oconfig7 = read_c0_config7();
	unsigned int nconfig7 = oconfig7;

	if (mt_opt_norps) {
		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
	}
	if (mt_opt_rpsctl >= 0) {
		printk("34K return prediction stack override set to %d.\n",
			mt_opt_rpsctl);
		if (mt_opt_rpsctl)
			nconfig7 |= (1 << 2);
		else
			nconfig7 &= ~(1 << 2);
	}
	if (mt_opt_nblsu >= 0) {
		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
		if (mt_opt_nblsu)
			nconfig7 |= (1 << 5);
		else
			nconfig7 &= ~(1 << 5);
	}
	if (mt_opt_forceconfig7) {
		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
		nconfig7 = mt_opt_config7;
	}
	if (oconfig7 != nconfig7) {
		__asm__ __volatile("sync");
		write_c0_config7(nconfig7);
		ehb ();
		printk("Config7: 0x%08x\n", read_c0_config7());
	}

	/* Report Cache management debug options */
	if (mt_protiflush)
		printk("I-cache flushes single-threaded\n");
	if (mt_protdflush)
		printk("D-cache flushes single-threaded\n");
	if (mt_n_iflushes != 1)
		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
	if (mt_n_dflushes != 1)
		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* FPU Use Factor empirically derived from experiments on 34K */
#define FPUSEFACTOR 333

	if (fpaff_threshold >= 0) {
		mt_fpemul_threshold = fpaff_threshold;
	} else {
		mt_fpemul_threshold =
			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
	}
	printk("FPU Affinity set after %ld emulations\n",
		mt_fpemul_threshold);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (itc_base != 0) {
		/*
		 * Configure ITC mapping. This code is very
		 * specific to the 34K core family, which uses
		 * a special mode bit ("ITC") in the ErrCtl
		 * register to enable access to ITC control
		 * registers via cache "tag" operations.
		 */
		unsigned long ectlval;
		unsigned long itcblkgrn;

		/* ErrCtl register is known as "ecc" to Linux */
		ectlval = read_c0_ecc();
		write_c0_ecc(ectlval | (0x1 << 26));
		ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
		/* Read "cache tag" for Dcache pseudo-index 8 */
		cache_op(Index_Load_Tag_D, INDEX_8);
		ehb();
		itcblkgrn = read_c0_dtaglo();
		itcblkgrn &= 0xfffe0000;
		/* Set for 128 byte pitch of ITC cells */
		itcblkgrn |= 0x00000c00;
		/* Stage in Tag register */
		write_c0_dtaglo(itcblkgrn);
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_8);
		/* Now set base address, and turn ITC on with 0x1 bit */
		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
		ehb();
		/* Write out to ITU with CACHE op */
		cache_op(Index_Store_Tag_D, INDEX_0);
		write_c0_ecc(ectlval);
		ehb();
		printk("Mapped %ld ITC cells starting at 0x%08x\n",
			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
	}
}

/*
 * Function to protect cache flushes from concurrent execution
 * depends on MP software model chosen.
 */

void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_lockdown(void);

	smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_cflush_release(void);

	smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
	/* FILL IN VSMP and AP/SP VERSIONS HERE */
}

@@ -41,6 +41,10 @@
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
extern void smtc_idle_loop_hook(void);
#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * The idle thread. There's no useful work to be done, so just try to conserve

@@ -51,9 +55,13 @@ ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched())
		while (!need_resched()) {
#ifdef CONFIG_MIPS_MT_SMTC
			smtc_idle_loop_hook();
#endif /* CONFIG_MIPS_MT_SMTC */
			if (cpu_wait)
				(*cpu_wait)();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();

@@ -248,10 +248,20 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
		break;
	case FPC_EIR: {	/* implementation / version register */
		unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
		unsigned int irqflags;
		unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

		if (!cpu_has_fpu)
			break;

#ifdef CONFIG_MIPS_MT_SMTC
		/* Read-modify-write of Status must be atomic */
		local_irq_save(irqflags);
		mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */

		preempt_disable();
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();

@@ -266,6 +276,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
			write_c0_status(flags);
		}
#ifdef CONFIG_MIPS_MT_SMTC
		emt(mtflags);
		local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
		preempt_enable();
		break;
	}

@@ -173,12 +173,22 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
		break;
	case FPC_EIR: {	/* implementation / version register */
		unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
		unsigned int irqflags;
		unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

		if (!cpu_has_fpu) {
			tmp = 0;
			break;
		}

#ifdef CONFIG_MIPS_MT_SMTC
		/* Read-modify-write of Status must be atomic */
		local_irq_save(irqflags);
		mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */

		preempt_disable();
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();

@@ -193,6 +203,10 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
			write_c0_status(flags);
		}
#ifdef CONFIG_MIPS_MT_SMTC
		emt(mtflags);
		local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
		preempt_enable();
		break;
	}

@@ -88,7 +88,18 @@

	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
	set_saved_sp	t0, t1, t2

#ifdef CONFIG_MIPS_MT_SMTC
	/* Read-modify-writes of Status must be atomic on a VPE */
	mfc0	t2, CP0_TCSTATUS
	ori	t1, t2, TCSTATUS_IXMT
	mtc0	t1, CP0_TCSTATUS
	andi	t2, t2, TCSTATUS_IXMT
	ehb
	DMT	8				# dmt	t0
	move	t1,ra
	jal	mips_ihb
	move	ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	t1, CP0_STATUS		/* Do we really need this? */
	li	a3, 0xff01
	and	t1, a3

@@ -97,6 +108,18 @@
	and	a2, a3
	or	a2, t1
	mtc0	a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	ehb
	andi	t0, t0, VPECONTROL_TE
	beqz	t0, 1f
	emt
1:
	mfc0	t1, CP0_TCSTATUS
	xori	t1, t1, TCSTATUS_IXMT
	or	t1, t1, t2
	mtc0	t1, CP0_TCSTATUS
	ehb
#endif /* CONFIG_MIPS_MT_SMTC */
	move	v0, a0
	jr	ra
	END(resume)

@@ -131,10 +154,19 @@ LEAF(_restore_fp)
#define FPU_DEFAULT  0x00000000

LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
	mfc0	t0, CP0_TCSTATUS
	/* Bit position is the same for Status, TCStatus */
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU1
	or	t0, t1
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	fpu_enable_hazard

	li	t1, FPU_DEFAULT

@@ -1,8 +1,4 @@
/*
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
 *
 * Elizabeth Clarke (beth@mips.com)
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.

@@ -16,6 +12,10 @@
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
 * Elizabeth Clarke (beth@mips.com)
 * Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 */
#include <linux/kernel.h>
#include <linux/sched.h>

@@ -24,6 +24,7 @@
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>

@@ -33,8 +34,8 @@
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/cacheflush.h>
#include <asm/mips-boards/maltaint.h>
#include <asm/mips_mt.h>
#include <asm/mips-boards/maltaint.h>	/* This is f*cking wrong */

#define MIPS_CPU_IPI_RESCHED_IRQ 0
#define MIPS_CPU_IPI_CALL_IRQ 1

@@ -66,6 +67,7 @@ void __init sanitize_tlb_entries(void)
	if (!cpu_has_mipsmt)
		return;

	/* Enable VPC */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	back_to_back_c0_hazard();

@@ -106,12 +108,12 @@ void __init sanitize_tlb_entries(void)

static void ipi_resched_dispatch (struct pt_regs *regs)
{
	do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
}

static void ipi_call_dispatch (struct pt_regs *regs)
{
	do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
}

irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)

@@ -155,6 +157,8 @@ void plat_smp_setup(void)
	dvpe();
	dmt();

	mips_mt_set_cpuoptions();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

@@ -189,11 +193,13 @@ void plat_smp_setup(void)

		if (i != 0) {
			write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
			write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);

			/* set config to be the same as vpe0, particularly kseg0 coherency alg */
			write_vpe_c0_config( read_c0_config());

			/* make sure there are no software interrupts pending */
			write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));

			/* Propagate Config7 */
			write_vpe_c0_config7(read_c0_config7());
		}

@@ -233,16 +239,16 @@ void plat_smp_setup(void)
	/* We'll wait until starting the secondaries before starting MVPE */

	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
	/* set up ipi interrupts */
	if (cpu_has_vint) {
		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
	}
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;

@@ -287,7 +293,8 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
	/* global pointer */
	write_tc_gpr_gp((unsigned long)gp);

	flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
	flush_icache_range((unsigned long)gp,
			(unsigned long)(gp + sizeof(struct thread_info)));

	/* finally out of configuration and into chaos */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

@@ -38,6 +38,10 @@
#include <asm/mmu_context.h>
#include <asm/smp.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */

@@ -85,6 +89,10 @@ asmlinkage void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();

@@ -179,11 +187,13 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}


void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;

arch/mips/kernel/smtc-asm.S (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
 * Assembly Language Functions for MIPS MT SMTC support
 */

/*
 * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC
 * is set.
 */

#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>

/*
 * "Software Interrupt" linkage.
 *
 * This is invoked when an "Interrupt" is sent from one TC to another,
 * where the TC to be interrupted is halted, has its Restart address
 * and Status values saved by the "remote control" thread, then modified
 * to cause execution to begin here, in kernel mode. This code then
 * disguises the TC state as that of an exception and transfers
 * control to the general exception or vectored interrupt handler.
 */
	.set	noreorder

/*
 The __smtc_ipi_vector would use k0 and k1 as temporaries and
 1) Set EXL (this is per-VPE, so this can't be done by proxy!)
 2) Restore the K/CU and IXMT bits to the pre "exception" state
    (EXL means no interrupts and access to the kernel map).
 3) Set EPC to be the saved value of TCRestart.
 4) Jump to the exception handler entry point passed by the sender.

 CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
 */

/*
 * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
 * state of pre-halt thread, then save everything and call
 * through some function pointer to imaginary_exception, which
 * will parse a register value or memory message queue to
 * deliver things like interprocessor interrupts. On return
 * from that function, jump to the global ret_from_irq code
 * to invoke the scheduler and return as appropriate.
 */

#define PT_PADSLOT4 (PT_R0-8)
#define PT_PADSLOT5 (PT_R0-4)

	.text
	.align 5
FEXPORT(__smtc_ipi_vector)
	.set	noat
	/* Disable thread scheduling to make Status update atomic */
	DMT	27					# dmt	k1
	ehb
	/* Set EXL */
	mfc0	k0,CP0_STATUS
	ori	k0,k0,ST0_EXL
	mtc0	k0,CP0_STATUS
	ehb
	/* Thread scheduling now inhibited by EXL. Restore TE state. */
	andi	k1,k1,VPECONTROL_TE
	beqz	k1,1f
	emt
1:
	/*
	 * The IPI sender has put some information on the anticipated
	 * kernel stack frame. If we were in user mode, this will be
	 * built above the saved kernel SP. If we were already in the
	 * kernel, it will be built above the current CPU SP.
	 *
	 * Were we in kernel mode, as indicated by CU0?
	 */
	sll	k1,k0,3
	.set	noreorder
	bltz	k1,2f
	move	k1,sp
	.set	reorder
	/*
	 * If previously in user mode, set CU0 and use kernel stack.
	 */
	li	k1,ST0_CU0
	or	k1,k1,k0
	mtc0	k1,CP0_STATUS
	ehb
	get_saved_sp
	/* Interrupting TC will have pre-set values in slots in the new frame */
2:	subu	k1,k1,PT_SIZE
	/* Load TCStatus Value */
	lw	k0,PT_TCSTATUS(k1)
	/* Write it to TCStatus to restore CU/KSU/IXMT state */
	mtc0	k0,$2,1
	ehb
	lw	k0,PT_EPC(k1)
	mtc0	k0,CP0_EPC
	/* Save all will redundantly recompute the SP, but use it for now */
	SAVE_ALL
	CLI
	move	a0,sp
	/* Function to be invoked passed stack pad slot 5 */
	lw	t0,PT_PADSLOT5(sp)
	/* Argument from sender passed in stack pad slot 4 */
	lw	a1,PT_PADSLOT4(sp)
	jalr	t0
	nop
	j	ret_from_irq
	nop

/*
 * Called from idle loop to provoke processing of queued IPIs
 * First IPI message in queue passed as argument.
 */

LEAF(self_ipi)
	/* Before anything else, block interrupts */
	mfc0	t0,CP0_TCSTATUS
	ori	t1,t0,TCSTATUS_IXMT
	mtc0	t1,CP0_TCSTATUS
	ehb
	/* We know we're in kernel mode, so prepare stack frame */
	subu	t1,sp,PT_SIZE
	sw	ra,PT_EPC(t1)
	sw	a0,PT_PADSLOT4(t1)
	la	t2,ipi_decode
	sw	t2,PT_PADSLOT5(t1)
	/* Save pre-disable value of TCStatus */
	sw	t0,PT_TCSTATUS(t1)
	j	__smtc_ipi_vector
	nop
END(self_ipi)

arch/mips/kernel/smtc-proc.c (new file, 93 lines)
@@ -0,0 +1,93 @@
/*
 * /proc hooks for SMTC kernel
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>

#include <asm/smtc_proc.h>

/*
 * /proc diagnostic and statistics hooks
 */

/*
 * Statistics gathered
 */
unsigned long selfipis[NR_CPUS];

struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];

static struct proc_dir_entry *smtc_stats;

atomic_t smtc_fpu_recoveries;

static int proc_read_smtc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int totalen = 0;
	int len;
	int i;
	extern unsigned long ebase;

	len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
	totalen += len;
	page += len;
	len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
	totalen += len;
	page += len;
	len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
	totalen += len;
	page += len;
	len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
	totalen += len;
	page += len;
	for (i=0; i < NR_CPUS; i++) {
		len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
		totalen += len;
		page += len;
	}
	len = sprintf(page, "Self-IPIs by CPU:\n");
	totalen += len;
	page += len;
	for(i = 0; i < NR_CPUS; i++) {
		len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
		totalen += len;
		page += len;
	}
	len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
			atomic_read(&smtc_fpu_recoveries));
	totalen += len;
	page += len;

	return totalen;
}

void init_smtc_stats(void)
{
	int i;

	for (i=0; i<NR_CPUS; i++) {
		smtc_cpu_stats[i].timerints = 0;
		smtc_cpu_stats[i].selfipis = 0;
	}

	atomic_set(&smtc_fpu_recoveries, 0);

	smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
			proc_read_smtc, NULL);
}

arch/mips/kernel/smtc.c (new file, 1322 lines)
File diff suppressed because it is too large
@@ -116,8 +116,7 @@ static void c0_timer_ack(void)
	write_c0_compare(expirelo);

	/* Check to see if we have missed any timer interrupts. */
	count = read_c0_count();
	if ((count - expirelo) < 0x7fffffff) {
	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
		/* missed_timer_count++; */
		expirelo = count + cycles_per_jiffy;
		write_c0_compare(expirelo);

@@ -280,9 +280,16 @@ static DEFINE_SPINLOCK(die_lock);
NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
{
	static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
	mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	spin_unlock_irq(&die_lock);

@@ -757,6 +764,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)

	case 2:
	case 3:
		die_if_kernel("do_cpu invoked from kernel context!", regs);
		break;
	}

@@ -794,6 +802,36 @@ asmlinkage void do_mcheck(struct pt_regs *regs)

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_ERR "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_ERR "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_ERR "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_ERR "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_ERR "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_ERR "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);

@@ -929,7 +967,15 @@ void ejtag_exception_handler(struct pt_regs *regs)
 */
void nmi_exception_handler(struct pt_regs *regs)
{
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long dvpret = dvpe();
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
	mips_mt_regdump(dvpret);
#else
	bust_spinlocks(1);
	printk("NMI taken!!!!\n");
#endif /* CONFIG_MIPS_MT_SMTC */
	die("NMI", regs);
	while(1) ;
}

@@ -1007,7 +1053,7 @@ int mips_srs_alloc(void)
	return set;
}

void mips_srs_free (int set)
void mips_srs_free(int set)
{
	struct shadow_registers *sr = &shadow_registers;

@@ -1027,8 +1073,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	}
	else
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = (unsigned long) addr;

@@ -1040,8 +1085,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt (n, srs);
	}
	else if (cpu_has_vint) {
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (mips_srs_max() > 1)
			change_c0_srsmap (0xf << n*4, srs << n*4);

@@ -1055,6 +1099,15 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)

		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * We need to provide the SMTC vectored interrupt handler
		 * not only with the address of the handler, but with the
		 * Status.IM bit to be masked before going there.
		 */
		extern char except_vec_vi_mori;
		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
		const int handler_len = &except_vec_vi_end - &except_vec_vi;
		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

@@ -1068,6 +1121,12 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
		}

		memcpy (b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
		if (n > 7)
			printk("Vector index %d exceeds SMTC maximum\n", n);
		w = (u32 *)(b + mori_offset);
		*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
		w = (u32 *)(b + lui_offset);
		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
		w = (u32 *)(b + ori_offset);

@@ -1090,7 +1149,7 @@ static void *set_vi_srs_handler(int n, void *addr, int srs)
	return (void *)old_handler;
}

void *set_vi_handler (int n, void *addr)
void *set_vi_handler(int n, void *addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

@@ -1108,8 +1167,29 @@ extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext *sc)
{
	return cpu_has_fpu
		? _save_fp_context(sc)
		: fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext *sc)
{
	return cpu_has_fpu
		? _restore_fp_context(sc)
		: fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
	/* For now just do the cpu_has_fpu check when the functions are invoked */
	save_fp_context = smp_save_fp_context;
	restore_fp_context = smp_restore_fp_context;
#else
	if (cpu_has_fpu) {
		save_fp_context = _save_fp_context;
		restore_fp_context = _restore_fp_context;

@@ -1117,6 +1197,7 @@ static inline void signal_init(void)
		save_fp_context = fpu_emulator_save_context;
		restore_fp_context = fpu_emulator_restore_context;
	}
#endif
}

#ifdef CONFIG_MIPS32_COMPAT

@@ -1153,6 +1234,20 @@ void __init per_cpu_trap_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
	int secondaryTC = 0;
	int bootTC = (cpu == 0);

	/*
	 * Only do per_cpu_trap_init() for first TC of each VPE.
	 * Note that this hack assumes that the SMTC init code
	 * assigns TCs consecutively and in ascending order.
	 */

	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
		secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing

@@ -1175,6 +1270,10 @@ void __init per_cpu_trap_init(void)
	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
#endif

#ifdef CONFIG_MIPS_MT_SMTC
	if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

	/*
	 * Interrupt handling.
	 */

@@ -1191,6 +1290,9 @@ void __init per_cpu_trap_init(void)
		} else
			set_c0_cause(CAUSEF_IV);
	}
#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */

	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
	TLBMISS_HANDLER_SETUP();

@@ -1200,8 +1302,14 @@ void __init per_cpu_trap_init(void)
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	cpu_cache_init();
	tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
		cpu_cache_init();
		tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
	}
#endif /* CONFIG_MIPS_MT_SMTC */
}

/* Install CPU exception handler */

@@ -2,7 +2,7 @@
#include <asm/asm-offsets.h>
#include <asm-generic/vmlinux.lds.h>

#undef mips /* CPP really sucks for this job */
#undef mips
#define mips mips
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)

@@ -220,7 +220,6 @@ void __init kgdb_config (void)
			generic_putDebugChar (*s++);
		}

		kgdb_enabled = 1;
		/* Breakpoint is invoked after interrupts are initialised */
	}
}

@@ -30,6 +30,7 @@
#include <linux/mc146818rtc.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/ptrace.h>
#include <asm/hardirq.h>
#include <asm/irq.h>

@@ -50,16 +51,23 @@ unsigned long cpu_khz;
static char display_string[] = " LINUX ON ATLAS ";
#endif
#if defined(CONFIG_MIPS_MALTA)
#if defined(CONFIG_MIPS_MT_SMTC)
static char display_string[] = " SMTC LINUX ON MALTA ";
#else
static char display_string[] = " LINUX ON MALTA ";
#endif /* CONFIG_MIPS_MT_SMTC */
#endif
#if defined(CONFIG_MIPS_SEAD)
static char display_string[] = " LINUX ON SEAD ";
#endif
static unsigned int display_count = 0;
static unsigned int display_count;
#define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)

static unsigned int timer_tick_count=0;
#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)

static unsigned int timer_tick_count;
static int mips_cpu_timer_irq;
extern void smtc_timer_broadcast(int);

static inline void scroll_display_message(void)
{

@@ -75,15 +83,55 @@ static void mips_timer_dispatch (struct pt_regs *regs)
	do_IRQ (mips_cpu_timer_irq, regs);
}

/*
 * Redeclare until I get around to mopping up the timer code insanity on MIPS.
 */
extern int null_perf_irq(struct pt_regs *regs);

extern int (*perf_irq)(struct pt_regs *regs);

irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int r2 = cpu_has_mips_r2;
	int cpu = smp_processor_id();
	int r2 = cpu_has_mips_r2;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * In an SMTC system, one Count/Compare set exists per VPE.
	 * Which TC within a VPE gets the interrupt is essentially
	 * random - we only know that it shouldn't be one with
	 * IXMT set. Whichever TC gets the interrupt needs to
	 * send special interprocessor interrupts to the other
	 * TCs to make sure that they schedule, etc.
	 *
	 * That code is specific to the SMTC kernel, not to
	 * a particular platform, so it's invoked from
	 * the general MIPS timer_interrupt routine.
	 */

	/*
	 * DVPE is necessary so long as cross-VPE interrupts
	 * are done via read-modify-write of Cause register.
	 */
	int vpflags = dvpe();
	write_c0_compare (read_c0_count() - 1);
	clear_c0_cause(CPUCTR_IMASKBIT);
	evpe(vpflags);

	if (cpu_data[cpu].vpe_id == 0) {
		timer_interrupt(irq, dev_id, regs);
		scroll_display_message();
	} else
		write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ));
	smtc_timer_broadcast(cpu_data[cpu].vpe_id);

	if (cpu != 0)
		/*
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt(irq, dev_id, regs);

#else /* CONFIG_MIPS_MT_SMTC */
	if (cpu == 0) {
		/*
		 * CPU 0 handles the global timer interrupt job and process

@@ -107,12 +155,14 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
		 * More support needs to be added to kernel/time for
		 * counter/timer interrupts on multiple CPUs
		 */
		write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ));
		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));

		/*
		 * other CPUs should do profiling and process accounting
		 * Other CPUs should do profiling and process accounting
		 */
		local_timer_interrupt (irq, dev_id, regs);
		local_timer_interrupt(irq, dev_id, regs);
	}
#endif /* CONFIG_MIPS_MT_SMTC */

out:
	return IRQ_HANDLED;

@@ -126,7 +176,7 @@ static unsigned int __init estimate_cpu_frequency(void)
	unsigned int prid = read_c0_prid() & 0xffff00;
	unsigned int count;

#ifdef CONFIG_MIPS_SEAD
#if defined(CONFIG_MIPS_SEAD) || defined(CONFIG_MIPS_SIM)
	/*
	 * The SEAD board doesn't have a real time clock, so we can't
	 * really calculate the timer frequency

@@ -211,7 +261,11 @@ void __init mips_timer_setup(struct irqaction *irq)

	/* we are using the cpu counter for timer interrupts */
	irq->handler = mips_timer_interrupt;	/* we use our own handler */
#ifdef CONFIG_MIPS_MT_SMTC
	setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
#else
	setup_irq(mips_cpu_timer_irq, irq);
#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_SMP
	/* irq_desc(riptor) is a global resource, when the interrupt overlaps
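The CPUCTR_IMASKBIT value introduced above maps a CPU interrupt line to its Status/Cause interrupt-mask bit: IM0/IP0 lives at bit 8 of those registers, so the mask is 0x100 shifted left by the line number. A minimal standalone sketch of the arithmetic, assuming a hypothetical line number of 7 for the CP0 counter interrupt (the real value comes from the board header):

#include <assert.h>

/* Hypothetical value for illustration only. */
#define MIPSCPU_INT_CPUCTR 7

/* IM/IP bits occupy bits 15:8 of Status/Cause; IM0 is bit 8 (0x100). */
#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)

int main(void)
{
	/* Interrupt line 7 corresponds to IP7, i.e. bit 15. */
	assert(CPUCTR_IMASKBIT == 0x8000);
	return 0;
}

This is the same mask that setup_irq_smtc() and clear_c0_cause() are handed in the hunks above.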
@@ -20,3 +20,4 @@
#

obj-y := malta_int.o malta_setup.o
obj-$(CONFIG_SMP) += malta_smp.o

@@ -118,8 +118,9 @@ static void malta_hw0_irqdispatch(struct pt_regs *regs)
	int irq;

	irq = get_int();
	if (irq < 0)
	if (irq < 0) {
		return;  /* interrupt has already been cleared */
	}

	do_IRQ(MALTA_INT_BASE+irq, regs);
}

@@ -324,9 +325,15 @@ void __init arch_init_irq(void)
	else if (cpu_has_vint) {
		set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
		set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);

#ifdef CONFIG_MIPS_MT_SMTC
		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq,
			(0x100 << MIPSCPU_INT_I8259A));
		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI,
			&corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
#else /* Not SMTC */
		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
#endif /* CONFIG_MIPS_MT_SMTC */
	}
	else {
		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);

128	arch/mips/mips-boards/malta/malta_smp.c	Normal file

@@ -0,0 +1,128 @@
/*
 * Malta Platform-specific hooks for SMP operation
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/smtc_ipi.h>
#endif /* CONFIG_MIPS_MT_SMTC */

/* VPE/SMP Prototype implements platform interfaces directly */
#if !defined(CONFIG_MIPS_MT_SMP)

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */

void core_send_ipi(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
 * Detect available CPUs/VPEs/TCs and populate phys_cpu_present_map
 */

void __init prom_build_cpu_map(void)
{
	int nextslot;

	/*
	 * As of November, 2004, MIPSsim only simulates one core
	 * at a time. However, that core may be a MIPS MT core
	 * with multiple virtual processors and thread contexts.
	 */

	if (read_c0_config3() & (1<<2)) {
		nextslot = mipsmt_build_cpu_map(1);
	}
}

/*
 * Platform "CPU" startup hook
 */

void prom_boot_secondary(int cpu, struct task_struct *idle)
{
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_boot_secondary(cpu, idle);
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
 * Post-config but pre-boot cleanup entry point
 */

void prom_init_secondary(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_init_secondary(void);
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(STATUSF_IP2);
		set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
				| STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
				| STATUSF_IP7);
	}

	smtc_init_secondary();
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */

void plat_smp_setup(void)
{
	if (read_c0_config3() & (1<<2))
		mipsmt_build_cpu_map(0);
}

void __init plat_prepare_cpus(unsigned int max_cpus)
{
	if (read_c0_config3() & (1<<2))
		mipsmt_prepare_cpus();
}

/*
 * SMP initialization finalization entry point
 */

void prom_smp_finish(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_smp_finish();
#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
 * Hook for after all CPUs are online
 */

void prom_cpus_done(void)
{
}

#endif /* CONFIG_MIPS_MT_SMP */

@@ -1,59 +0,0 @@
/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Kernel command line creation using the prom monitor (YAMON) argc/argv.
 */
#include <linux/init.h>
#include <linux/string.h>

#include <asm/bootinfo.h>

extern int prom_argc;
extern int *_prom_argv;

/*
 * YAMON (32-bit PROM) passes arguments and environment as 32-bit pointers.
 * This macro takes care of sign extension.
 */
#define prom_argv(index) ((char *)(((int *)(int)_prom_argv)[(index)]))

char arcs_cmdline[CL_SIZE];

char * __init prom_getcmdline(void)
{
	return &(arcs_cmdline[0]);
}


void __init prom_init_cmdline(void)
{
	char *cp;
	int actr;

	actr = 1; /* Always ignore argv[0] */

	cp = &(arcs_cmdline[0]);
	while(actr < prom_argc) {
		strcpy(cp, prom_argv(actr));
		cp += strlen(prom_argv(actr));
		*cp++ = ' ';
		actr++;
	}
	if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
		--cp;
	*cp = '\0';
}

@@ -26,8 +26,10 @@ char * __init prom_getcmdline(void)
	return arcs_cmdline;
}


void __init prom_init_cmdline(void)
{
	/* nothing to do */
	char *cp;
	cp = arcs_cmdline;
	/* Get boot line from environment? */
	*cp = '\0';
}

@@ -44,8 +44,6 @@
void core_send_ipi(int cpu, unsigned int action)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_send_ipi(int, int, unsigned int);

	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
#endif /* CONFIG_MIPS_MT_SMTC */
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */

@@ -59,15 +57,8 @@ void core_send_ipi(int cpu, unsigned int action)
void __init prom_build_cpu_map(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	extern int mipsmt_build_cpu_map(int startslot);
	int nextslot;

	cpus_clear(phys_cpu_present_map);

	/* Register the boot CPU */

	smp_prepare_boot_cpu();

	/*
	 * As of November, 2004, MIPSsim only simulates one core
	 * at a time. However, that core may be a MIPS MT core

@@ -87,8 +78,6 @@ void __init prom_build_cpu_map(void)
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
#ifdef CONFIG_MIPS_MT_SMTC
	extern void smtc_boot_secondary(int cpu, struct task_struct *t);

	smtc_boot_secondary(cpu, idle);
#endif /* CONFIG_MIPS_MT_SMTC */
}

@@ -113,7 +102,6 @@ void prom_init_secondary(void)
void prom_prepare_cpus(unsigned int max_cpus)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void mipsmt_prepare_cpus(int c);
	/*
	 * As noted above, we can assume a single CPU for now
	 * but it may be multithreaded.

@@ -132,8 +120,6 @@ void prom_prepare_cpus(unsigned int max_cpus)
void prom_smp_finish(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_smp_finish(void);

	smtc_smp_finish();
#endif /* CONFIG_MIPS_MT_SMTC */
}

@@ -157,7 +157,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "

@@ -188,11 +187,20 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	else
	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
#if 0
	printk("do_page_fault() #3: sending SIGBUS to %s for "
	       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
	       tsk->comm,
	       write ? "write access to" : "read access from",
	       field, address,
	       field, (unsigned long) regs->cp0_epc,
	       field, (unsigned long) regs->regs[31]);
#endif
	tsk->thread.cp0_badvaddr = address;
	info.si_signo = SIGBUS;
	info.si_errno = 0;

@@ -201,7 +209,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
	force_sig_info(SIGBUS, &info, tsk);

	return;

vmalloc_fault:
	{
	/*

@@ -32,13 +32,35 @@ extern void build_tlb_refill_handler(void);
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);

@@ -57,7 +79,7 @@ void local_flush_tlb_all(void)
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush

@@ -87,6 +109,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		local_irq_save(flags);

@@ -120,7 +143,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
		EXIT_CRITICAL(flags);
	}
}

@@ -129,9 +152,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	local_irq_save(flags);
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

@@ -162,7 +185,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)

@@ -175,7 +198,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();

@@ -194,7 +217,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)

	finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
		EXIT_CRITICAL(flags);
	}
}

@@ -207,7 +230,7 @@ void local_flush_tlb_one(unsigned long page)
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);

@@ -226,7 +249,7 @@ void local_flush_tlb_one(unsigned long page)
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}

/*

@@ -249,7 +272,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);

@@ -277,7 +300,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}

#if 0

@@ -291,7 +314,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);

@@ -310,7 +333,7 @@ static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}
#endif

@@ -322,7 +345,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

@@ -342,7 +365,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
}

/*

@@ -362,7 +385,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

@@ -386,10 +409,11 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	EXIT_CRITICAL(flags);
	return ret;
}

extern void __init sanitize_tlb_entries(void);
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;

@@ -402,6 +426,14 @@ static void __init probe_tlb(unsigned long config)
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If TLB is shared in SMTC system, total size already
	 * has been calculated and written into cpu_data tlbsize
	 */
	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))

@@ -410,6 +442,15 @@ static void __init probe_tlb(unsigned long config)
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}

static int __initdata ntlb = 0;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

@@ -432,5 +473,15 @@ void __init tlb_init(void)

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk ("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}
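The `size = (size + 1) >> 1` halving in the range-flush hunks above reflects that an R4000-style TLB entry maps an even/odd pair of virtual pages (EntryLo0 and EntryLo1 under a single VPN2), so N pages cost only ceil(N/2) entries. A quick standalone sketch of the arithmetic, not kernel code:

#include <assert.h>

/* Each TLB entry covers two pages, so N pages need ceil(N / 2) entries. */
static int tlb_entries_for_pages(int pages)
{
	return (pages + 1) >> 1;
}

int main(void)
{
	assert(tlb_entries_for_pages(1) == 1);	/* a lone page still burns an entry */
	assert(tlb_entries_for_pages(8) == 4);	/* even/odd pairs share entries */
	return 0;
}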
@@ -7,6 +7,16 @@
 *
 * Copyright (C) 2004,2005 by Thiemo Seufer
 * Copyright (C) 2005 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <stdarg.h>

@@ -68,6 +78,7 @@ enum fields
	BIMM = 0x040,
	JIMM = 0x080,
	FUNC = 0x100,
	SET = 0x200
};

#define OP_MASK		0x2f

@@ -86,6 +97,8 @@ enum fields
#define JIMM_SH		0
#define FUNC_MASK	0x2f
#define FUNC_SH		0
#define SET_MASK	0x7
#define SET_SH		0

enum opcode {
	insn_invalid,

@@ -129,8 +142,8 @@ static __initdata struct insn insn_table[] = {
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },

@@ -145,8 +158,8 @@ static __initdata struct insn insn_table[] = {
	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },

@@ -242,6 +255,14 @@ static __init u32 build_func(u32 arg)
	return arg & FUNC_MASK;
}

static __init u32 build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & SET_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.

@@ -273,6 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...)
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
	va_end(ap);

	**buf = op;

@@ -358,8 +380,8 @@ I_u1s2(_bgezl);
I_u1s2(_bltz);
I_u1s2(_bltzl);
I_u1u2s3(_bne);
I_u1u2(_dmfc0);
I_u1u2(_dmtc0);
I_u1u2u3(_dmfc0);
I_u1u2u3(_dmtc0);
I_u2u1s3(_daddiu);
I_u3u1u2(_daddu);
I_u2u1u3(_dsll);

@@ -376,8 +398,8 @@ I_u2s3u1(_ll);
I_u2s3u1(_lld);
I_u1s2(_lui);
I_u2s3u1(_lw);
I_u1u2(_mfc0);
I_u1u2(_mtc0);
I_u1u2u3(_mfc0);
I_u1u2u3(_mtc0);
I_u2u1u3(_ori);
I_0(_rfe);
I_u2s3u1(_sc);

@@ -451,8 +473,8 @@ L_LA(_r3000_write_probe_fail)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)

@@ -464,8 +486,8 @@ L_LA(_r3000_write_probe_fail)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)

@@ -670,14 +692,15 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0
#define C0_ENTRYLO0	2
#define C0_ENTRYLO1	3
#define C0_CONTEXT	4
#define C0_BADVADDR	8
#define C0_ENTRYHI	10
#define C0_EPC		14
#define C0_XCONTEXT	20
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)

@@ -951,12 +974,20 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
#endif
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);

@@ -1014,9 +1045,21 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
#endif
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
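The new SET field above carries the CP0 register select: `C0_TCBIND` expanding to `2, 2` means rd=2, sel=2, and the variadic `rd...` macro arguments let the same i_MFC0() call sites pass either one or two values. A hedged sketch of the encoding arithmetic that build_set() performs (the select lands in the low three bits of the instruction word):

#include <assert.h>
#include <stdint.h>

#define SET_MASK 0x7

/* Mirror of build_set(): the select field must fit in 3 bits. */
static uint32_t build_set(uint32_t arg)
{
	return arg & SET_MASK;
}

int main(void)
{
	/* C0_TCBIND is CP0 register 2, select 2; sel occupies bits 2:0. */
	assert(build_set(2) == 2);
	/* Anything wider than 3 bits gets truncated (and warned about). */
	assert(build_set(0x9) == 0x1);
	return 0;
}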
@@ -17,7 +17,26 @@
#ifdef CONFIG_64BIT
#include <asm/asmmacro-64.h>
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif

#ifdef CONFIG_MIPS_MT_SMTC
	.macro	local_irq_enable reg=t0
	mfc0	\reg, CP0_TCSTATUS
	ori	\reg, \reg, TCSTATUS_IXMT
	xori	\reg, \reg, TCSTATUS_IXMT
	mtc0	\reg, CP0_TCSTATUS
	ehb
	.endm

	.macro	local_irq_disable reg=t0
	mfc0	\reg, CP0_TCSTATUS
	ori	\reg, \reg, TCSTATUS_IXMT
	mtc0	\reg, CP0_TCSTATUS
	ehb
	.endm
#else
	.macro	local_irq_enable reg=t0
	mfc0	\reg, CP0_STATUS
	ori	\reg, \reg, 1

@@ -32,6 +51,7 @@
	mtc0	\reg, CP0_STATUS
	irq_disable_hazard
	.endm
#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_CPU_SB1
	.macro	fpu_enable_hazard

@@ -48,4 +68,31 @@
	.endm
#endif

/*
 * Temporary until all gas have MT ASE support
 */
	.macro	DMT	reg=0
	.word	(0x41600bc1 | (\reg << 16))
	.endm

	.macro	EMT	reg=0
	.word	(0x41600be1 | (\reg << 16))
	.endm

	.macro	DVPE	reg=0
	.word	(0x41600001 | (\reg << 16))
	.endm

	.macro	EVPE	reg=0
	.word	(0x41600021 | (\reg << 16))
	.endm

	.macro	MFTR	rt=0, rd=0, u=0, sel=0
	.word	(0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
	.endm

	.macro	MTTR	rt=0, rd=0, u=0, sel=0
	.word	(0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel))
	.endm

#endif /* _ASM_ASMMACRO_H */
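Since the macros above hand-assemble the MT ASE instructions as .word constants (pending assembler support), the register fields are simply OR-ed into fixed base opcodes. A small standalone check of that arithmetic; note that DMT with reg=1 produces exactly the 0x41610BC1 word that the __dmt() helper in mipsregs.h later in this patch emits for "dmt $1":

#include <assert.h>
#include <stdint.h>

/* Base opcodes copied from the asmmacro.h macros above. */
static uint32_t encode_dmt(unsigned reg) { return 0x41600bc1 | (reg << 16); }
static uint32_t encode_emt(unsigned reg) { return 0x41600be1 | (reg << 16); }

int main(void)
{
	/* "dmt $1" - matches the hand-encoded word used by __dmt(). */
	assert(encode_dmt(1) == 0x41610bc1);
	/* "emt" with the default reg=0 leaves the base opcode untouched. */
	assert(encode_emt(0) == 0x41600be1);
	return 0;
}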
@@ -73,6 +73,16 @@ struct cpuinfo_mips {
	struct cache_desc	dcache;	/* Primary D or combined I/D cache */
	struct cache_desc	scache;	/* Secondary cache */
	struct cache_desc	tcache;	/* Tertiary/split secondary cache */
#if defined(CONFIG_MIPS_MT_SMTC)
	/*
	 * In the MIPS MT "SMTC" model, each TC is considered
	 * to be a "CPU" for the purposes of scheduling, but
	 * exception resources, ASID spaces, etc, are common
	 * to all TCs within the same VPE.
	 */
	int			vpe_id;	/* Virtual Processor number */
	int			tc_id;	/* Thread Context number */
#endif /* CONFIG_MIPS_MT_SMTC */
	void			*data;	/* Additional data */
} __attribute__((aligned(SMP_CACHE_BYTES)));

@@ -284,6 +284,8 @@ do { \
#define instruction_hazard() do { } while (0)
#endif

extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */

@@ -19,7 +19,12 @@ __asm__ (
	"	.set	push				\n"
	"	.set	reorder				\n"
	"	.set	noat				\n"
#ifdef CONFIG_CPU_MIPSR2
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400			\n"
	"	xori	$1, 0x400			\n"
	"	mtc0	$1, $2, 1			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei					\n"
#else
	"	mfc0	$1,$12				\n"

@@ -62,7 +67,12 @@ __asm__ (
	"	.macro	local_irq_disable\n"
	"	.set	push				\n"
	"	.set	noat				\n"
#ifdef CONFIG_CPU_MIPSR2
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1			\n"
	"	ori	$1, 0x400			\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $2, 1			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di					\n"
#else
	"	mfc0	$1,$12				\n"

@@ -88,7 +98,11 @@ __asm__ (
	"	.macro	local_save_flags flags		\n"
	"	.set	push				\n"
	"	.set	reorder				\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1			\n"
#else
	"	mfc0	\\flags, $12			\n"
#endif
	"	.set	pop				\n"
	"	.endm					\n");

@@ -102,7 +116,13 @@ __asm__ (
	"	.set	push				\n"
	"	.set	reorder				\n"
	"	.set	noat				\n"
#ifdef CONFIG_CPU_MIPSR2
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1			\n"
	"	ori	$1, \\result, 0x400		\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $2, 1			\n"
	"	andi	\\result, \\result, 0x400	\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	di	\\result			\n"
	"	andi	\\result, 1			\n"
#else

@@ -128,7 +148,14 @@ __asm__ (
	"	.set	push				\n"
	"	.set	noreorder			\n"
	"	.set	noat				\n"
#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1			\n"
	"	andi	\\flags, 0x400			\n"
	"	ori	$1, 0x400			\n"
	"	xori	$1, 0x400			\n"
	"	or	\\flags, $1			\n"
	"	mtc0	\\flags, $2, 1			\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */

@@ -167,11 +194,29 @@ do { \
	: "memory");						\
} while(0)

#define irqs_disabled()						\
({								\
	unsigned long flags;					\
	local_save_flags(flags);				\
	!(flags & 1);						\
})
static inline int irqs_disabled(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
	 */
	unsigned long __result;

	__asm__ __volatile__(
	"	.set	noreorder			\n"
	"	mfc0	%0, $2, 1			\n"
	"	andi	%0, 0x400			\n"
	"	slt	%0, $0, %0			\n"
	"	.set	reorder				\n"
	: "=r" (__result));

	return __result;
#else
	unsigned long flags;
	local_save_flags(flags);

	return !(flags & 1);
#endif
}

#endif /* _ASM_INTERRUPT_H */
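The ori/xori pairing that recurs throughout these SMTC interrupt paths is a branch-free way to force one bit clear: setting TCStatus.IXMT (0x400) and then toggling it leaves every other bit of the register alone, whether or not IXMT was set before. A standalone sketch of the identity:

#include <assert.h>

#define TCSTATUS_IXMT 0x400

/* (x | b) ^ b always equals x & ~b: set-then-toggle clears exactly bit b. */
static unsigned clear_ixmt(unsigned tcstatus)
{
	return (tcstatus | TCSTATUS_IXMT) ^ TCSTATUS_IXMT;
}

int main(void)
{
	assert(clear_ixmt(0x7ff) == 0x3ff);	/* IXMT was set: now clear */
	assert(clear_ixmt(0x3ff) == 0x3ff);	/* IXMT already clear: unchanged */
	return 0;
}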
@@ -11,6 +11,9 @@

#include <linux/config.h>
#include <linux/linkage.h>

#include <asm/mipsmtregs.h>

#include <irq.h>

#ifdef CONFIG_I8259

@@ -26,6 +29,23 @@ struct pt_regs;

extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);

#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Clear interrupt mask handling "backstop" if irq_hwmask
 * entry so indicates. This implies that the ack() or end()
 * functions will take over re-enabling the low-level mask.
 * Otherwise it will be done on return from exception.
 */
#define __DO_IRQ_SMTC_HOOK()					\
do {								\
	if (irq_hwmask[irq] & 0x0000ff00)			\
		write_c0_tccontext(read_c0_tccontext() &	\
			~(irq_hwmask[irq] & 0x0000ff00));	\
} while (0)
#else
#define __DO_IRQ_SMTC_HOOK() do { } while (0)
#endif

#ifdef CONFIG_PREEMPT

/*

@@ -39,6 +59,7 @@ extern asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs);
#define do_IRQ(irq, regs)					\
do {								\
	irq_enter();						\
	__DO_IRQ_SMTC_HOOK();					\
	__do_IRQ((irq), (regs));				\
	irq_exit();						\
} while (0)

@@ -48,4 +69,12 @@ do { \
extern void arch_init_irq(void);
extern void spurious_interrupt(struct pt_regs *regs);

#ifdef CONFIG_MIPS_MT_SMTC
struct irqaction;

extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
	unsigned long hwmask);
#endif /* CONFIG_MIPS_MT_SMTC */

#endif /* _ASM_IRQ_H */

15	include/asm-mips/mips_mt.h	Normal file

@@ -0,0 +1,15 @@
/*
 * Definitions and declarations for MIPS MT support
 * that are common between SMTC, VSMP, and/or AP/SP
 * kernel models.
 */
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H

extern cpumask_t mt_fpu_cpumask;
extern unsigned long mt_fpemul_threshold;

extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
extern void mips_mt_set_cpuoptions(void);

#endif /* __ASM_MIPS_MT_H */

@@ -165,7 +165,7 @@

#ifndef __ASSEMBLY__

extern void mips_mt_regdump(void);
extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);

static inline unsigned int dvpe(void)
{

@@ -282,8 +282,11 @@ static inline void ehb(void)
								\
	__asm__ __volatile__(					\
	"	.set	push				\n"	\
	"	.set	noat				\n"	\
	"	.set	mips32r2			\n"	\
	"	mftgpr	%0," #rt "			\n"	\
	"	# mftgpr $1," #rt "			\n"	\
	"	.word	0x41000820 | (" #rt " << 16)	\n"	\
	"	move	%0, $1				\n"	\
	"	.set	pop				\n"	\
	: "=r" (__res));					\
								\

@@ -295,9 +298,7 @@ static inline void ehb(void)
	unsigned long __res;					\
								\
	__asm__ __volatile__(					\
	".set noat\n\t"						\
	"mftr\t%0, " #rt ", " #u ", " #sel "\n\t"		\
	".set at\n\t"						\
	"	mftr	%0, " #rt ", " #u ", " #sel "	\n"	\
	: "=r" (__res));					\
								\
	__res;							\

@@ -861,7 +861,19 @@ do { \
#define write_c0_compare3(val)	__write_32bit_c0_register($11, 7, val)

#define read_c0_status()	__read_32bit_c0_register($12, 0)
#ifdef CONFIG_MIPS_MT_SMTC
#define write_c0_status(val)					\
do {								\
	__write_32bit_c0_register($12, 0, val);			\
	__ehb();						\
} while (0)
#else
/*
 * Legacy non-SMTC code, which may be hazardous
 * but which might not support EHB
 */
#define write_c0_status(val)	__write_32bit_c0_register($12, 0, val)
#endif /* CONFIG_MIPS_MT_SMTC */

#define read_c0_cause()		__read_32bit_c0_register($13, 0)
#define write_c0_cause(val)	__write_32bit_c0_register($13, 0, val)

@@ -1004,6 +1016,9 @@ do { \
#define read_c0_taglo()		__read_32bit_c0_register($28, 0)
#define write_c0_taglo(val)	__write_32bit_c0_register($28, 0, val)

#define read_c0_dtaglo()	__read_32bit_c0_register($28, 2)
#define write_c0_dtaglo(val)	__write_32bit_c0_register($28, 2, val)

#define read_c0_taghi()		__read_32bit_c0_register($29, 0)
#define write_c0_taghi(val)	__write_32bit_c0_register($29, 0, val)

@@ -1357,6 +1372,11 @@ static inline void tlb_write_random(void)
/*
 * Manipulate bits in a c0 register.
 */
#ifndef CONFIG_MIPS_MT_SMTC
/*
 * SMTC Linux requires shutting-down microthread scheduling
 * during CP0 register read-modify-write sequences.
 */
#define __BUILD_SET_C0(name)					\
static inline unsigned int					\
set_c0_##name(unsigned int set)					\

@@ -1395,6 +1415,119 @@ change_c0_##name(unsigned int change, unsigned int new) \
	return res;						\
}

#else /* SMTC versions that manage MT scheduling */

#include <asm/interrupt.h>

/*
 * This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
 * header file recursion.
 */
static inline unsigned int __dmt(void)
{
	int res;

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	mips32r2			\n"
	"	.set	noat				\n"
	"	.word	0x41610BC1	# dmt $1	\n"
	"	ehb					\n"
	"	move	%0, $1				\n"
	"	.set	pop				\n"
	: "=r" (res));

	instruction_hazard();

	return res;
}

#define __VPECONTROL_TE_SHIFT	15
#define __VPECONTROL_TE		(1UL << __VPECONTROL_TE_SHIFT)

#define __EMT_ENABLE		__VPECONTROL_TE

static inline void __emt(unsigned int previous)
{
	if ((previous & __EMT_ENABLE))
		__asm__ __volatile__(
		"	.set	noreorder		\n"
		"	.set	mips32r2		\n"
		"	.word	0x41600be1	# emt	\n"
		"	ehb				\n"
		"	.set	mips0			\n"
		"	.set	reorder			\n");
}

static inline void __ehb(void)
{
	__asm__ __volatile__(
	"	ehb					\n");
}

/*
 * Note that local_irq_save/restore affect TC-specific IXMT state,
 * not Status.IE as in non-SMTC kernel.
 */

#define __BUILD_SET_C0(name)					\
static inline unsigned int					\
set_c0_##name(unsigned int set)					\
{								\
	unsigned int res;					\
	unsigned int omt;					\
	unsigned int flags;					\
								\
	local_irq_save(flags);					\
	omt = __dmt();						\
	res = read_c0_##name();					\
	res |= set;						\
	write_c0_##name(res);					\
	__emt(omt);						\
	local_irq_restore(flags);				\
								\
	return res;						\
}								\
								\
static inline unsigned int					\
clear_c0_##name(unsigned int clear)				\
{								\
	unsigned int res;					\
	unsigned int omt;					\
	unsigned int flags;					\
								\
	local_irq_save(flags);					\
	omt = __dmt();						\
	res = read_c0_##name();					\
	res &= ~clear;						\
	write_c0_##name(res);					\
	__emt(omt);						\
	local_irq_restore(flags);				\
								\
	return res;						\
}								\
								\
static inline unsigned int					\
change_c0_##name(unsigned int change, unsigned int new)		\
{								\
	unsigned int res;					\
	unsigned int omt;					\
	unsigned int flags;					\
								\
	local_irq_save(flags);					\
								\
	omt = __dmt();						\
	res = read_c0_##name();					\
	res &= ~change;						\
	res |= (new & change);					\
	write_c0_##name(res);					\
	__emt(omt);						\
	local_irq_restore(flags);				\
								\
	return res;						\
}
#endif

__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
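Callers of the generated accessors don't change under SMTC; the difference is that each read-modify-write is bracketed by __dmt()/__emt(), so another TC on the same VPE cannot interleave its own CP0 update. A minimal userspace model of what the generated functions compute (the dmt/emt bracketing and the real CP0 register are stand-ins here, since there is no second TC in this toy):

#include <assert.h>

/* A plain variable stands in for the CP0 Status register. */
static unsigned int fake_c0_status = 0x1000ff00;

static unsigned int set_c0_status(unsigned int set)
{
	fake_c0_status |= set;
	return fake_c0_status;	/* the macros return the updated value */
}

static unsigned int clear_c0_status(unsigned int clear)
{
	fake_c0_status &= ~clear;
	return fake_c0_status;
}

int main(void)
{
	assert(set_c0_status(0x1) == 0x1000ff01);
	assert(clear_c0_status(0xff00) == 0x10000001);
	return 0;
}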
@@ -17,6 +17,10 @@
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */

/*
 * For the fast tlb miss handlers, we keep a per cpu array of pointers

@@ -54,6 +58,14 @@ extern unsigned long pgd_current[];
#define ASID_INC	0x1
#define ASID_MASK	0xfff

/* SMTC/34K debug hack - but maybe we'll keep it */
#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC	0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK	(smtc_asid_mask)
#define HW_ASID_MASK	0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC	0x1

@@ -76,6 +88,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)

#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{

@@ -91,6 +105,12 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm,cpu) smtc_get_new_mmu_context((mm),(cpu))

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * Initialize the context related info for a new mm_struct
 * instance.

@@ -111,14 +131,46 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
	local_irq_save(flags);
	mtflags = dvpe();
#else /* Not SMTC */
	local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the EntryHi ASID being replaced happens to be
	 * the value flagged at ASID recycling time as having
	 * an extended life, clear the bit showing it being
	 * in use by this "CPU", and if that's the last bit,
	 * free up the ASID value for use and flush any old
	 * instances of it from the TLB.
	 */
	oldasid = (read_c0_entryhi() & ASID_MASK);
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/*
	 * Tread softly on EntryHi, and so long as we support
	 * having ASID_MASK smaller than the hardware maximum,
	 * make sure no "soft" bits become "hard"...
	 */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
			| (cpu_context(cpu, next) & ASID_MASK));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_context(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*

@@ -151,12 +203,34 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	unsigned long mtflags;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	/* Unconditionally get a new ASID.  */
	get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
	/* See comments for similar code above */
	mtflags = dvpe();
	oldasid = read_c0_entryhi() & ASID_MASK;
	if(smtc_live_asid[mytlb][oldasid]) {
		smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
		if(smtc_live_asid[mytlb][oldasid] == 0)
			smtc_flush_tlb_asid(oldasid);
	}
	/* See comments for similar code above */
	write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
			(cpu_context(cpu, next) & ASID_MASK));
	ehb(); /* Make sure it propagates to TCStatus */
	evpe(mtflags);
#else
	write_c0_entryhi(cpu_context(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */

@@ -174,17 +248,49 @@ static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long oldasid;
	/* Can't use spinlock because called from TLB flush within DVPE */
	unsigned int prevvpe;
	int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);

	if (cpu_isset(cpu, mm->cpu_vm_mask))  {
		get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
		/* See comments for similar code above */
		prevvpe = dvpe();
		oldasid = (read_c0_entryhi() & ASID_MASK);
		if(smtc_live_asid[mytlb][oldasid]) {
			smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
			if(smtc_live_asid[mytlb][oldasid] == 0)
				smtc_flush_tlb_asid(oldasid);
		}
		/* See comments for similar code above */
		write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
				| cpu_asid(cpu, mm));
		ehb(); /* Make sure it propagates to TCStatus */
		evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
		write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
	} else {
		/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
		cpu_context(cpu, mm) = 0;
	}
#else /* SMTC */
		int i;

		/* SMTC shares the TLB (and ASIDs) across VPEs */
		for (i = 0; i < num_online_cpus(); i++) {
			if((smtc_status & SMTC_TLB_SHARED)
			|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
				cpu_context(i, mm) = 0;
		}
	}
#endif /* CONFIG_MIPS_MT_SMTC */
	local_irq_restore(flags);
}
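The `(cpu_context ^ asid_cache) & ASID_VERSION_MASK` staleness test in switch_mm() works because the ASID occupies the low bits and a generation counter the bits above; XOR-ing the two values is nonzero exactly when the generations differ. A toy model with an 8-bit ASID (the kernel's mask is wider, as shown above):

#include <assert.h>

/* Toy model: 8-bit ASID, generation counter in the bits above it. */
#define ASID_MASK		0xffUL
#define ASID_VERSION_MASK	(~(ASID_MASK | (ASID_MASK - 1)))

static int asid_is_stale(unsigned long context, unsigned long cache)
{
	return ((context ^ cache) & ASID_VERSION_MASK) != 0;
}

int main(void)
{
	/* Same generation (0x1xx), different ASIDs: still valid. */
	assert(!asid_is_stale(0x105, 0x1ff));
	/* Older generation: every context from it must be replaced. */
	assert(asid_is_stale(0x105, 0x203));
	return 0;
}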
@@ -12,6 +12,7 @@
#define _ASM_PROCESSOR_H

#include <linux/config.h>
#include <linux/cpumask.h>
#include <linux/threads.h>

#include <asm/cachectl.h>

@@ -107,6 +108,10 @@ struct mips_dsp_state {

#define INIT_DSP {{0,},}

#define INIT_CPUMASK { \
	{0,} \
}

typedef struct {
	unsigned long seg;
} mm_segment_t;

@@ -142,6 +147,7 @@ struct thread_struct {
#define MF_LOGADE	2	/* Log address errors to syslog */
#define MF_32BIT_REGS	4	/* also implies 16/32 fprs */
#define MF_32BIT_ADDR	8	/* 32-bit address space (o32/n32) */
#define MF_FPUBOUND	0x10	/* thread bound to FPU-full CPU set */
	unsigned long mflags;
	unsigned long irix_trampoline;  /* Wheee... */
	unsigned long irix_oldctx;

@@ -45,6 +45,10 @@ struct pt_regs {
	unsigned long cp0_badvaddr;
	unsigned long cp0_cause;
	unsigned long cp0_epc;
#ifdef CONFIG_MIPS_MT_SMTC
	unsigned long cp0_tcstatus;
	unsigned long smtc_pad;
#endif /* CONFIG_MIPS_MT_SMTC */
};

/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */

@@ -15,6 +15,7 @@
#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>

/*
 * This macro returns a properly sign-extended address suitable as base address

@@ -39,14 +40,118 @@
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if(mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if(mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if(mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if(mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT	 \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)

@@ -56,17 +161,23 @@ static inline void flush_scache_line_indexed(unsigned long addr)

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)

@@ -239,9 +350,13 @@ static inline void blast_##pfx##cache##lsize(void) \
		current_cpu_data.desc.waybit;				\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws,indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page)	\

@@ -249,10 +364,14 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start,hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \

@@ -265,9 +384,13 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
		current_cpu_data.desc.waybit;				\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws,indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)

@@ -288,12 +411,17 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
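With CONFIG_MIPS_MT, the prologue/epilogue pairs above turn every cache-line operation into a loop body: the flush is repeated mt_n_iflushes/mt_n_dflushes times as a debugging aid, optionally under the single-threading protection of BEGIN/END_MT_DPROT. A self-contained sketch of what one expansion amounts to, with stand-ins for the kernel knob and the cache instruction:

#include <stdio.h>

static int mt_n_dflushes = 2;	/* stand-in for the exported tuning knob */

static void cache_op_hit_writeback_inv_d(unsigned long addr)
{
	printf("cache op on %#lx\n", addr);	/* models the real cache op */
}

/* Model of flush_dcache_line() with the MT prologue/epilogue expanded. */
static void flush_dcache_line_model(unsigned long addr)
{
	int redundance;

	for (redundance = 0; redundance < mt_n_dflushes; redundance++)
		cache_op_hit_writeback_inv_d(addr);
}

int main(void)
{
	flush_dcache_line_model(0x80001000);	/* prints twice */
	return 0;
}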
55	include/asm-mips/smtc.h	Normal file

@@ -0,0 +1,55 @@
#ifndef _ASM_SMTC_MT_H
#define _ASM_SMTC_MT_H

/*
 * Definitions for SMTC multitasking on MIPS MT cores
 */

#include <asm/mips_mt.h>

/*
 * System-wide SMTC status information
 */

extern unsigned int smtc_status;

#define SMTC_TLB_SHARED	0x00000001
#define SMTC_MTC_ACTIVE	0x00000002

/*
 * TLB/ASID Management information
 */

#define MAX_SMTC_TLBS 2
#define MAX_SMTC_ASIDS 256
#if NR_CPUS <= 8
typedef char asiduse;
#else
#if NR_CPUS <= 16
typedef short asiduse;
#else
typedef long asiduse;
#endif
#endif

extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];

void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);

void smtc_flush_tlb_asid(unsigned long asid);
extern int mipsmt_build_cpu_map(int startslot);
extern void mipsmt_prepare_cpus(void);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);

/*
 * Sharing the TLB between multiple VPEs means that the
 * "random" index selection function is not allowed to
 * select the current value of the Index register. To
 * avoid additional TLB pressure, the Index registers
 * are "parked" with a non-Valid value.
 */

#define PARKED_INDEX	((unsigned int)0x80000000)

#endif /* _ASM_SMTC_MT_H */

118
include/asm-mips/smtc_ipi.h
Normal file
118
include/asm-mips/smtc_ipi.h
Normal file
|
@@ -0,0 +1,118 @@
/*
 * Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
 */
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H

//#define SMTC_IPI_DEBUG

#ifdef SMTC_IPI_DEBUG
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#endif /* SMTC_IPI_DEBUG */

/*
 * An IPI "message"
 */

struct smtc_ipi {
	struct smtc_ipi *flink;
	int type;
	void *arg;
	int dest;
#ifdef SMTC_IPI_DEBUG
	int sender;
	long stamp;
#endif /* SMTC_IPI_DEBUG */
};

/*
 * Defined IPI Types
 */

#define LINUX_SMP_IPI 1
#define SMTC_CLOCK_TICK 2

/*
 * A queue of IPI messages
 */

struct smtc_ipi_q {
	struct smtc_ipi *head;
	spinlock_t lock;
	struct smtc_ipi *tail;
	int depth;
};

extern struct smtc_ipi_q IPIQ[NR_CPUS];
extern struct smtc_ipi_q freeIPIq;

static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
	long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->head == NULL)
		q->head = q->tail = p;
	else
		q->tail->flink = p;
	p->flink = NULL;
	q->tail = p;
	q->depth++;
#ifdef SMTC_IPI_DEBUG
	p->sender = read_c0_tcbind();
	p->stamp = read_c0_count();
#endif /* SMTC_IPI_DEBUG */
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
{
	struct smtc_ipi *p;
	long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->head == NULL)
		p = NULL;
	else {
		p = q->head;
		q->head = q->head->flink;
		q->depth--;
		/* Arguably unnecessary, but leaves queue cleaner */
		if (q->head == NULL)
			q->tail = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return p;
}

static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
	long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->head == NULL) {
		q->head = q->tail = p;
		p->flink = NULL;
	} else {
		p->flink = q->head;
		q->head = p;
	}
	q->depth++;
	spin_unlock_irqrestore(&q->lock, flags);
}

static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
{
	long flags;
	int retval;

	spin_lock_irqsave(&q->lock, flags);
	retval = q->depth;
	spin_unlock_irqrestore(&q->lock, flags);
	return retval;
}

extern void smtc_send_ipi(int cpu, int type, unsigned int action);

#endif /* __ASM_SMTC_IPI_H */
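Together, freeIPIq and the per-CPU IPIQ[] form a message pool: a sender dequeues a free descriptor, fills it in, and FIFO-queues it at the destination with smtc_ipi_nq(); smtc_ipi_req() covers the requeue-at-head case. A hypothetical sender built only from the helpers above (the real trigger path is smtc_send_ipi(), and error handling plus the actual cross-TC interrupt are omitted):

/* Hypothetical illustration of the queue protocol. */
static void example_post_ipi(int cpu, void *payload)
{
	struct smtc_ipi *pipi = smtc_ipi_dq(&freeIPIq);

	if (pipi == NULL)
		return;			/* pool exhausted */
	pipi->type = LINUX_SMP_IPI;
	pipi->arg = payload;
	pipi->dest = cpu;
	smtc_ipi_nq(&IPIQ[cpu], pipi);	/* FIFO enqueue at the tail */
}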
include/asm-mips/smtc_proc.h (new file, 23 lines)
@@ -0,0 +1,23 @@
/*
 * Definitions for SMTC /proc entries
 * Copyright(C) 2005 MIPS Technologies Inc.
 */
#ifndef __ASM_SMTC_PROC_H
#define __ASM_SMTC_PROC_H

/*
 * per-"CPU" statistics
 */

struct smtc_cpu_proc {
	unsigned long timerints;
	unsigned long selfipis;
};

extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];

/* Count of number of recoveries of "stolen" FPU access rights on 34K */

extern atomic_t smtc_fpu_recoveries;

#endif /* __ASM_SMTC_PROC_H */
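These counters are bumped from hot interrupt paths, so the real code (arch/mips/kernel/smtc.c and smtc-proc.c) updates them with interrupts already off. A minimal, hypothetical accessor just to show the shape:

#include <linux/smp.h>	/* smp_processor_id() */

/* Hypothetical helper: credit a timer tick to the current "CPU" (TC).
 * Callers are assumed to be in interrupt context, so the slot is
 * effectively private and a plain increment suffices. */
static inline void smtc_account_timer_int(void)
{
	smtc_cpu_stats[smp_processor_id()].timerints++;
}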
include/asm-mips/stackframe.h:

@@ -14,9 +14,14 @@
#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

		.macro	SAVE_AT
		.set	push
		.set	noat
@@ -57,13 +62,30 @@
#ifdef CONFIG_SMP
		.macro	get_saved_sp	/* SMP variation */
#ifdef CONFIG_32BIT
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32
		mfc0	k0, CP0_TCBIND
		.set	mips0
		lui	k1, %hi(kernelsp)
		srl	k0, k0, 19
		/* No need to shift down and up to clear bits 0-1 */
#else
		mfc0	k0, CP0_CONTEXT
		lui	k1, %hi(kernelsp)
		srl	k0, k0, 23
#endif
		addu	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
#endif
#ifdef CONFIG_64BIT
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips64
		mfc0	k1, CP0_TCBIND
		.set	mips0
		lui	k0, %highest(kernelsp)
		dsrl	k1, 19
		/* No need to shift down and up to clear bits 0-2 */
#else
		MFC0	k1, CP0_CONTEXT
		lui	k0, %highest(kernelsp)
		dsrl	k1, 23
@@ -71,19 +93,30 @@
		dsll	k0, k0, 16
		daddiu	k0, %hi(kernelsp)
		dsll	k0, k0, 16
#endif /* CONFIG_MIPS_MT_SMTC */
		daddu	k1, k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
-#endif
+#endif /* CONFIG_64BIT */
		.endm

		.macro	set_saved_sp stackp temp temp2
#ifdef CONFIG_32BIT
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	\temp, CP0_TCBIND
		srl	\temp, 19
#else
		mfc0	\temp, CP0_CONTEXT
		srl	\temp, 23
#endif
#endif
#ifdef CONFIG_64BIT
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	\temp, CP0_TCBIND
		dsrl	\temp, 19
#else
		MFC0	\temp, CP0_CONTEXT
		dsrl	\temp, 23
#endif
#endif
		LONG_S	\stackp, kernelsp(\temp)
		.endm
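Both variants compute a byte offset into the kernelsp[] array: TCBind.CurTC sits at bit 21, so shifting right by 19 yields CurTC * sizeof(long) with the low two bits already clear (hence the "no need to shift down and up" comment), while the non-SMTC path scales the CPU number out of CP0 Context with the established shift of 23. A C rendering of the 32-bit arithmetic, with hypothetical helper names (illustrative only, no bounds are checked):

extern unsigned long kernelsp[];

static unsigned long saved_sp_from_tcbind(unsigned long tcbind)
{
	/* CurTC is at bits 28:21; >> 19 leaves CurTC * 4 */
	return *(unsigned long *)((char *)kernelsp + (tcbind >> 19));
}

static unsigned long saved_sp_from_context(unsigned long context)
{
	/* CPU number scaled the same way via the Context register */
	return *(unsigned long *)((char *)kernelsp + (context >> 23));
}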
@@ -122,10 +155,25 @@
		PTR_SUBU sp, k1, PT_SIZE
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
@@ -234,14 +282,36 @@
		.endm

#else
/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */

#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#else
#define STATMASK 0x1f
#endif
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * This may not really be necessary if ints are already
		 * inhibited here.
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		ehb
		DMT	5				# dmt a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	a0, CP0_STATUS
-		ori	a0, 0x1f
-		xori	a0, 0x1f
+		ori	a0, STATMASK
+		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1
@@ -250,6 +320,26 @@
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Only after EXL/ERL have been restored to status can we
		 * restore TCStatus.IXMT.
		 */
		LONG_L	v1, PT_TCSTATUS(sp)
		ehb
		mfc0	v0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		/* We know that TCStatus.IXMT should be set from above */
		xori	v0, v0, TCSTATUS_IXMT
		or	v0, v0, v1
		mtc0	v0, CP0_TCSTATUS
		ehb
		andi	a1, a1, VPECONTROL_TE
		beqz	a1, 1f
		emt
1:
		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
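The ordering constraint in the comment is the whole point: Status.EXL/ERL must be back in place before this TC's IXMT is dropped, or an interrupt could arrive mid-restore. The bit manipulation itself is the known-set XOR idiom; a C restatement, under the assumption that TCSTATUS_IXMT is TCStatus bit 10:

#define TCSTATUS_IXMT	0x00000400	/* assumed: TCStatus bit 10 */

/* Illustrative only: the v0/v1 dance above as C values. IXMT is known
 * to be set in 'now', so XOR clears it; the bit saved at exception
 * entry is then OR'ed back in. */
static unsigned int restore_ixmt_sketch(unsigned int now_tcstatus,
					unsigned int saved_tcstatus)
{
	return (now_tcstatus ^ TCSTATUS_IXMT) |
	       (saved_tcstatus & TCSTATUS_IXMT);
}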
@@ -302,11 +392,33 @@
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1f
		or	t0, t1
		xori	t0, 0x1f
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and disable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU, leave IXMT */
		xori	t0, 0x00001800
		mtc0	t0, CP0_TCSTATUS
		ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL | ST0_ERL
		xori	t0, ST0_EXL | ST0_ERL
		mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm
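The magic constants decode as TCStatus fields: 0x08001c00 is TMX (bit 27) plus TKSU (bits 12:11) plus IXMT (bit 10), and 0x00001800 is TKSU alone, so the or-then-xor pair forces TKSU to kernel mode while leaving IXMT, this TC's interrupt mask, set. A C restatement of the idiom, with the mask names written out as assumptions and the CU0 handling omitted:

/* Assumed field positions per the comments above; the point is the
 * idiom: OR sets every bit in the mask, XOR then clears exactly the
 * ones we want zero, regardless of their previous values. */
#define TCSTATUS_TMX	0x08000000	/* bit 27 */
#define TCSTATUS_TKSU	0x00001800	/* bits 12:11 */
#define TCSTATUS_IXMT	0x00000400	/* bit 10 */

static unsigned long cli_tcstatus_sketch(unsigned long tcstatus)
{
	tcstatus |= TCSTATUS_TMX | TCSTATUS_TKSU | TCSTATUS_IXMT;
	tcstatus ^= TCSTATUS_TKSU;	/* known set, so XOR clears it */
	return tcstatus;		/* kernel mode, this TC masked */
}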
@@ -315,11 +427,35 @@
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1f
		or	t0, t1
		xori	t0, 0x1e
		mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
		/*
		 * For SMTC, we need to set privilege
		 * and enable interrupts only for the
		 * current TC, using the TCStatus register.
		 */
		ehb
		mfc0	t0, CP0_TCSTATUS
		/* Fortunately CU 0 is in the same place in both registers */
		/* Set TCU0, TKSU (for later inversion) and IXMT */
		li	t1, ST0_CU0 | 0x08001c00
		or	t0, t1
		/* Clear TKSU *and* IXMT */
		xori	t0, 0x00001c00
		mtc0	t0, CP0_TCSTATUS
		ehb
		/* We need to leave the global IE bit set, but clear EXL... */
		mfc0	t0, CP0_STATUS
		ori	t0, ST0_EXL
		xori	t0, ST0_EXL
		mtc0	t0, CP0_STATUS
		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_enable_hazard
		.endm

@@ -328,11 +464,56 @@
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
		.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * This gets baroque in SMTC. We want to
		 * protect the non-atomic clearing of EXL
		 * with DMT/EMT, but we don't want to take
		 * an interrupt while DMT is still in effect.
		 */

		/* KMODE gets invoked from both reorder and noreorder code */
		.set	push
		.set	mips32r2
		.set	noreorder
		mfc0	v0, CP0_TCSTATUS
		andi	v1, v0, TCSTATUS_IXMT
		ori	v0, TCSTATUS_IXMT
		mtc0	v0, CP0_TCSTATUS
		ehb
		DMT	2				# dmt v0
		/*
		 * We don't know a priori if ra is "live"
		 */
		move	t0, ra
		jal	mips_ihb
		nop	/* delay slot */
		move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | 0x1e
		or	t0, t1
		xori	t0, 0x1e
		mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
		ehb
		andi	v0, v0, VPECONTROL_TE
		beqz	v0, 2f
		nop	/* delay slot */
		emt
2:
		mfc0	v0, CP0_TCSTATUS
		/* Clear IXMT, then OR in previous value */
		ori	v0, TCSTATUS_IXMT
		xori	v0, TCSTATUS_IXMT
		or	v0, v1, v0
		mtc0	v0, CP0_TCSTATUS
		/*
		 * irq_disable_hazard below should expand to EHB
		 * on 24K/34K CPUs
		 */
		.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
		irq_disable_hazard
		.endm
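The DMT/EMT bracket is the load-bearing trick in KMODE: with this TC's IXMT set and every sibling thread halted, the non-atomic Status read-modify-write cannot be torn by another TC. A C-flavoured sketch of the sequence; the CP0 accessors and the dmt()/emt() wrappers are assumptions standing in for the DMT/EMT instructions (the real code stays in assembly because it runs on the exception path):

/* Sketch only; all externs and mask values below are assumed. */
extern unsigned int read_c0_tcstatus(void);
extern void write_c0_tcstatus(unsigned int val);
extern unsigned int read_c0_status(void);
extern void write_c0_status(unsigned int val);
extern unsigned int dmt(void);	/* DMT: halt other TCs, return old VPEControl */
extern void emt(void);		/* EMT: resume them */

#define TCSTATUS_IXMT	0x00000400	/* assumed: TCStatus bit 10 */
#define VPECONTROL_TE	0x00008000	/* assumed: VPEControl bit 15 */
#define ST0_CU0		0x10000000	/* assumed: Status bit 28 */

static void kmode_sketch(void)
{
	unsigned int tcstatus = read_c0_tcstatus();
	unsigned int vpeflags;

	write_c0_tcstatus(tcstatus | TCSTATUS_IXMT);	/* mask this TC */
	vpeflags = dmt();	/* halt sibling threads (hazard barrier elided) */

	/* the protected non-atomic RMW: kernel mode, EXL/KSU clear, IE kept */
	write_c0_status((read_c0_status() | ST0_CU0 | 0x1e) ^ 0x1e);

	if (vpeflags & VPECONTROL_TE)
		emt();		/* re-enable only if threading was on before */
	/* restore the TC's original IXMT state */
	write_c0_tcstatus((read_c0_tcstatus() ^ TCSTATUS_IXMT) |
			  (tcstatus & TCSTATUS_IXMT));
}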