Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS fixes from Ralf Baechle:
 "Another round of MIPS fixes for 4.2.  No area particularly stands out,
  but we have two unpleasant ones:

   - Kernel ptes are marked with a global bit which allows the kernel to
     share kernel TLB entries between all processes.  For this to work,
     both entries of an adjacent even/odd pte pair need to have the
     global bit set.  There has been a subtle race in setting the other
     entry's global bit since ~2000, but it takes particularly
     pathological workloads that essentially do mostly vmalloc/vfree to
     trigger this.

     This pull request fixes the 64-bit case but leaves the case of
     32-bit CPUs with 64-bit ptes unsolved for now.  The unfixed cases
     affect hardware that is not available in the field yet.

   - Instruction emulation requires loading instructions from user
     space, but the current fast but simplistic approach will fail on
     pages that are PROT_EXEC but !PROT_READ.  For this reason we
     temporarily do not permit this permission combination and will map
     such pages with PROT_EXEC | PROT_READ.

  The remainder of this pull request is more or less across the field,
  and the shortlog explains the changes well."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Make set_pte() SMP safe.
  MIPS: Replace add and sub instructions in relocate_kernel.S with addiu
  MIPS: Flush RPS on kernel entry with EVA
  Revert "MIPS: BCM63xx: Provide a plat_post_dma_flush hook"
  MIPS: BMIPS: Delete unused Kconfig symbol
  MIPS: Export get_c0_perfcount_int()
  MIPS: show_stack: Fix stack trace with EVA
  MIPS: do_mcheck: Fix kernel code dump with EVA
  MIPS: SMP: Don't increment irq_count multiple times for call function IPIs
  MIPS: Partially disable RIXI support.
  MIPS: Handle page faults of executable but unreadable pages correctly.
  MIPS: Malta: Don't reinitialise RTC
  MIPS: unaligned: Fix build error on big endian R6 kernels
  MIPS: Fix sched_getaffinity with MT FPAFF enabled
  MIPS: Fix build with CONFIG_OF=y for non OF-enabled targets
  CPUFREQ: Loongson2: Fix broken build due to incorrect include.
commit 3fbdc37956
31 changed files with 130 additions and 62 deletions
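For the first issue above, the even/odd pairing comes from each MIPS TLB entry mapping two adjacent pages, so a pte and its "buddy" must agree on the global bit for kernel mappings to be shared across address spaces. The kernel's ptep_buddy() helper is essentially the following (shown here only to illustrate the pairing; the actual race fix is the set_pte() hunk further down):

/* Illustration of the even/odd pte pairing: the buddy is found by flipping
 * the lowest pte-slot bit of the pointer.  Sketch of the in-tree helper,
 * written as a function rather than the macro the kernel uses. */
static inline pte_t *ptep_buddy(pte_t *ptep)
{
	return (pte_t *)((unsigned long)ptep ^ sizeof(pte_t));
}
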
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
	select BCM7120_L2_IRQ
	select BRCMSTB_L2_IRQ
	select IRQ_MIPS_CPU
	select RAW_IRQ_ACCESSORS
	select DMA_NONCOHERENT
	select SYS_SUPPORTS_32BIT_KERNEL
	select SYS_SUPPORTS_LITTLE_ENDIAN

@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
{
	return ATH79_MISC_IRQ(5);
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{

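This and the later get_c0_perfcount_int() hunks add the export from the "MIPS: Export get_c0_perfcount_int()" patch: once the symbol is exported, kernel code that may be built as a module can resolve the platform's performance counter IRQ. A hypothetical sketch (module and function names are invented, not part of the series):

/* Hypothetical GPL module querying the perf counter IRQ; this only links
 * as a module because get_c0_perfcount_int() is now exported. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/time.h>		/* get_c0_perfcount_int() is declared here on MIPS */

static int __init perfirq_demo_init(void)
{
	pr_info("perfirq_demo: perf counter IRQ is %d\n",
		get_c0_perfcount_int());
	return 0;
}

static void __exit perfirq_demo_exit(void)
{
}

module_init(perfirq_demo_init);
module_exit(perfirq_demo_exit);
MODULE_LICENSE("GPL");
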
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
		generic_smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

@@ -1,10 +0,0 @@
#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H

#include <asm/bmips.h>

#define plat_post_dma_flush	bmips_post_dma_flush

#include <asm/mach-generic/dma-coherence.h>

#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */

@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
#ifdef CONFIG_64BIT
#define LL_INSN "lld"
#define SC_INSN "scd"
#else /* CONFIG_32BIT */
#define LL_INSN "ll"
#define SC_INSN "sc"
#endif
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		" .set push\n"
		" .set noreorder\n"
		"1: " LL_INSN " %[tmp], %[buddy]\n"
		" bnez %[tmp], 2f\n"
		" or %[tmp], %[tmp], %[global]\n"
		" " SC_INSN " %[tmp], %[buddy]\n"
		" beqz %[tmp], 1b\n"
		" nop\n"
		"2:\n"
		" .set pop"
		: [buddy] "+m" (buddy->pte),
		  [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
#endif
}

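For readers who do not parse MIPS inline asm fluently, the ll/sc loop above behaves roughly like the following C. This is a sketch only, with a hypothetical function name; the real code has to use ll/sc on the pte word itself, which is why it is written in asm. cmpxchg() stands in for the sc retry:

/* Rough C equivalent of the ll/sc loop above -- illustration only.
 * Atomically set _PAGE_GLOBAL on the buddy pte, but back off as soon as
 * the buddy is seen to be non-none (it is then expected to already be
 * global, as the comment in the real code says). */
static inline void buddy_set_global(unsigned long *buddy_word)
{
	unsigned long old;

	do {
		old = READ_ONCE(*buddy_word);	/* ll / lld */
		if (old)			/* bnez ..., 2f */
			return;
	} while (cmpxchg(buddy_word, old, old | _PAGE_GLOBAL) != old);
						/* sc / scd failed, retry */
}
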
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif

extern asmlinkage void smp_call_function_interrupt(void);

static inline void arch_send_call_function_single_ipi(int cpu)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

@@ -152,6 +152,31 @@
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped to kernel mode,
		 * even containing sensitive MMIO regions or invalid memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp

@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	cpumask_t allowed, mask;
	int retval;
	struct task_struct *p;

@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
	if (retval)
		goto out_unlock;

	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
	cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
	read_unlock(&tasklist_lock);

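A hedged reading of this fix (the shortlog only says "Fix sched_getaffinity with MT FPAFF enabled"): user_cpus_allowed reflects only what was recorded by the FPAFF-aware sched_setaffinity() path, so masking it alone can under-report; ORing in the scheduler's current cpus_allowed and then restricting to active CPUs yields a mask the task can actually run on. A toy illustration with plain bitmasks instead of cpumask_t (helper name and values invented):

/* Toy sketch: a task that never went through the FPAFF setaffinity wrapper
 * has nothing recorded in user_cpus_allowed, so the old AND could hand back
 * an empty mask, while the new OR falls back to cpus_allowed. */
static unsigned int fpaff_getaffinity_toy(void)
{
	unsigned int user_cpus_allowed = 0x0;	/* never set by the task */
	unsigned int cpus_allowed      = 0x3;	/* scheduler's current mask */
	unsigned int cpu_active        = 0x7;

	/* old: user_cpus_allowed & cpu_active          -> 0x0 (empty) */
	/* new: (user_cpus_allowed | cpus_allowed) & cpu_active -> 0x3 */
	return (user_cpus_allowed | cpus_allowed) & cpu_active;
}
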
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
	return mips_machine_name;
}

#ifdef CONFIG_OF
#ifdef CONFIG_USE_OF
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	return add_memory_region(base, size, BOOT_MEM_RAM);

@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)

process_entry:
	PTR_L		s2, (s0)
	PTR_ADD		s0, s0, SZREG
	PTR_ADDIU	s0, s0, SZREG

	/*
	 * In case of a kdump/crash kernel, the indirection page is not

@@ -61,9 +61,9 @@ copy_word:
	/* copy page word by word */
	REG_L		s5, (s2)
	REG_S		s5, (s4)
	PTR_ADD		s4, s4, SZREG
	PTR_ADD		s2, s2, SZREG
	LONG_SUB	s6, s6, 1
	PTR_ADDIU	s4, s4, SZREG
	PTR_ADDIU	s2, s2, SZREG
	LONG_ADDIU	s6, s6, -1
	beq		s6, zero, process_entry
	b		copy_word
	b		process_entry

@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
	if (action == 0)
		scheduler_ipi();
	else
		smp_call_function_interrupt();
		generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();
	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
		generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
	cpu_startup_entry(CPUHP_ONLINE);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void __irq_entry smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

static void stop_this_cpu(void *dummy)
{
	/*

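For context on the "Don't increment irq_count multiple times" patch: the wrapper deleted above bracketed the generic handler with irq_enter()/irq_exit(). When the IPI arrives through a handler registered with request_irq(), the IRQ core has already entered interrupt context, so the extra pair bumped the interrupt accounting once too often. The callers touched in this series therefore either call generic_smp_call_function_interrupt() directly (regular IRQ handlers) or open-code the irq_enter()/irq_exit() pair exactly once (mailbox/dispatch paths). A minimal sketch of the two patterns, with hypothetical function names:

/* Sketch only: the two call patterns used after this change. */
#include <linux/interrupt.h>
#include <linux/smp.h>
/* SMP_CALL_FUNCTION is the MIPS IPI action bit used in the hunks above. */

/* 1) A handler registered with request_irq(): the core has already done
 *    irq_enter(), so the generic handler is called directly. */
static irqreturn_t my_ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

/* 2) A low-level dispatch path that is not a request_irq() handler must
 *    enter and leave IRQ context itself, exactly once. */
static void my_mailbox_dispatch(unsigned int action)
{
	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}
}
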
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;

@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to access
	 * the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)

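The pattern added here (and repeated in the do_mcheck() hunks below) is the usual get_fs()/set_fs() dance: the stack walker and code dumper reach memory through the user-access helpers, so when the target is a kernel address the address limit has to be widened first and restored afterwards, which matters once EVA separates the user and kernel segments. A minimal sketch of the idiom, with a hypothetical function name:

/* Sketch of the idiom used above -- not code from the patch. */
static void dump_kernel_area_example(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* let __get_user() & friends touch kernel addresses */
	/* ... call show_stacktrace()/show_code() on kernel addresses here ... */
	set_fs(old_fs);		/* always restore the caller's limit */
}
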
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)

@@ -438,7 +438,7 @@ do { \
		: "memory");					\
} while(0)

#define StoreDW(addr, value, res) \
#define _StoreDW(addr, value, res) \
do {								\
		__asm__ __volatile__ (				\
			".set\tpush\n\t"			\

@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
{
	return ltq_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{

@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}

	if (action & SMP_ASK_C0COUNT) {
		BUG_ON(cpu != 0);

@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {

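For orientation (a sketch, not code from this patch): protection_map is indexed by the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED flag bits, so the four entries losing _PAGE_NO_READ above are exactly the exec-without-read combinations; with RIXI partially disabled they now carry read permission too, matching the pull-request text about mapping such pages as PROT_EXEC | PROT_READ.

/* Orientation only: mirrors the generic vm_get_page_prot() indexing.
 * VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8, so:
 *   4  = private PROT_EXEC-only,   6  = private PROT_WRITE|PROT_EXEC,
 *   12 = shared  PROT_EXEC-only,   14 = shared  PROT_WRITE|PROT_EXEC,
 * which are the slots changed above. */
static pgprot_t example_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
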
@@ -133,7 +133,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
#endif
				goto bad_area;
			}
			if (!(vma->vm_flags & VM_READ)) {
			if (!(vma->vm_flags & VM_READ) &&
			    exception_epc(regs) != address) {
#if 0
				pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
					  raw_smp_processor_id(),

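The extra exception_epc(regs) != address condition distinguishes data reads from instruction fetches: on an instruction fetch fault the EPC is the faulting address itself. A simplified restatement of the check (illustration only, with a hypothetical helper name; the real handler does this inline and then jumps to its bad_area label):

/* Sketch: is this fault a data read from a mapping without VM_READ?
 * Instruction fetches (EPC == faulting address) from an exec-only
 * mapping are allowed to proceed. */
static int read_fault_is_bad(struct vm_area_struct *vma,
			     struct pt_regs *regs, unsigned long address)
{
	return !(vma->vm_flags & VM_READ) &&
	       exception_epc(regs) != address;
}
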
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)

	return mips_cpu_perf_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{

@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)

static void __init init_rtc(void)
{
	/* stop the clock whilst setting it up */
	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
	unsigned char freq, ctrl;

	/* 32KHz time base */
	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
	/* Set 32KHz time base if not already set */
	freq = CMOS_READ(RTC_FREQ_SELECT);
	if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
		CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);

	/* start the clock */
	CMOS_WRITE(RTC_24H, RTC_CONTROL);
	/* Ensure SET bit is clear so RTC can run */
	ctrl = CMOS_READ(RTC_CONTROL);
	if (ctrl & RTC_SET)
		CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
}

void __init plat_time_init(void)

@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	return -1;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{

@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
	clear_c0_eimr(irq);
	ack_c0_eirr(irq);
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();
	set_c0_eimr(irq);
}

@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)

static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
{
	return gic_get_c0_perfcount_int();
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

int get_c0_fdc_int(void)
{

@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
{
	return rt_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

unsigned int get_c0_compare_int(void)
{

@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		smp_call_function_interrupt();
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		smp_call_function_interrupt();
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	} else
#endif
	{

@@ -29,8 +29,6 @@
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>

extern void smp_call_function_interrupt(void);

/*
 * These are routines for dealing with the bcm1480 smp capabilities
 * independent of board/firmware

@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}
}

@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_CALL_FUNCTION) {
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	}
}

@@ -20,7 +20,7 @@
#include <asm/clock.h>
#include <asm/idle.h>

#include <asm/mach-loongson/loongson.h>
#include <asm/mach-loongson64/loongson.h>

static uint nowait;

@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}