locking/core: Introduce cpu_relax_yield()
For spinning loops, people often use barrier() or cpu_relax(). On most architectures cpu_relax() and barrier() are the same, but on some architectures cpu_relax() can add latency. For example, on power, sparc64 and arc, cpu_relax() can shift the CPU towards other hardware threads in an SMT environment. On s390, cpu_relax() does even more: it uses a hypercall to the hypervisor to give up the timeslice. In contrast to SMT yielding, this can result in larger latencies. In some places this latency is unwanted, so another variant, "cpu_relax_lowlatency", was introduced.

Before that variant spreads to more and more places, let's reverse the logic and provide a cpu_relax_yield() that can be called in places where yielding is more important than latency. By default this is the same as cpu_relax() on all architectures.

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Noam Camus <noamc@ezchip.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1477386195-32736-2-git-send-email-borntraeger@de.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f5225b024
commit 79ab11cdb9

33 changed files with 36 additions and 3 deletions
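
To illustrate the intended use (this example is not part of the patch): a busy-wait loop in which handing the time slice back to the hypervisor or an SMT sibling matters more than wake-up latency can call cpu_relax_yield(), which by default expands to cpu_relax(). A minimal sketch, with a made-up flag and function name:

/*
 * Illustrative sketch only -- not taken from this commit.  The flag and
 * the function are hypothetical; cpu_relax_yield() is the primitive
 * introduced here, which defaults to cpu_relax() and may give up the
 * time slice on s390 (diag 0x44).
 */
#include <linux/atomic.h>
#include <asm/processor.h>

static atomic_t other_cpu_done;		/* hypothetical: set by another CPU */

static void wait_for_other_cpu(void)
{
	/* Yielding is more important than latency in this wait loop. */
	while (!atomic_read(&other_cpu_done))
		cpu_relax_yield();
}
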
@@ -58,6 +58,7 @@ unsigned long get_wchan(struct task_struct *p);
   ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #define ARCH_HAS_PREFETCH

@@ -60,6 +60,7 @@ struct task_struct;
 #ifndef CONFIG_EZNPS_MTM_EXT
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #else

@@ -67,6 +68,7 @@ struct task_struct;
 #define cpu_relax() \
 	__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() barrier()
 
 #endif

@@ -82,6 +82,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax() barrier()
 #endif
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #define task_pt_regs(p) \

@@ -149,6 +149,7 @@ static inline void cpu_relax(void)
 	asm volatile("yield" ::: "memory");
 }
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Thread switching */

@@ -92,6 +92,7 @@ extern struct avr32_cpuinfo boot_cpu_data;
 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 #define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
 

@@ -92,6 +92,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax() smp_mb()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Get the Silicon Revision of the chip */

@@ -121,6 +121,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
 #define cpu_relax() do { } while (0)
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 extern const struct seq_operations cpuinfo_op;

@@ -63,6 +63,7 @@ static inline void release_thread(struct task_struct *dead_task)
 #define init_stack (init_thread_union.stack)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);

@@ -107,6 +107,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk)->thread.frame0->sp)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* data cache prefetch */

@@ -127,6 +127,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #define HARD_RESET_NOW() ({ \

@@ -56,6 +56,7 @@ struct thread_struct {
 }
 
 #define cpu_relax() __vmyield()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /*

@@ -547,6 +547,7 @@ ia64_eoi (void)
 }
 
 #define cpu_relax() ia64_hint(ia64_hint_pause)
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 static inline int

@@ -133,6 +133,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk)->thread.sp)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* _ASM_M32R_PROCESSOR_H */

@@ -156,6 +156,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0))
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #endif

@@ -152,6 +152,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 extern void setup_priv(void);

@@ -22,6 +22,7 @@
 extern const struct seq_operations cpuinfo_op;
 
 # define cpu_relax() barrier()
+# define cpu_relax_yield() cpu_relax()
 # define cpu_relax_lowlatency() cpu_relax()
 
 #define task_pt_regs(tsk) \

@@ -389,6 +389,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /*

@@ -69,6 +69,7 @@ extern void print_cpu_info(struct mn10300_cpuinfo *);
 extern void dodgy_tsc(void);
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /*

@@ -88,6 +88,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk)->thread.kregs->sp)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* __ASSEMBLY__ */

@@ -92,6 +92,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 #define init_stack (init_thread_union.stack)
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* __ASSEMBLY__ */

@@ -309,6 +309,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /*

@@ -404,6 +404,7 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 #define cpu_relax() barrier()
 #endif
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Check that a certain kernel stack pointer is valid in task_struct p */

@@ -234,8 +234,9 @@ static inline unsigned short stap(void)
 /*
  * Give up the time slice of the virtual PU.
  */
-void cpu_relax(void);
+void cpu_relax_yield(void);
 
+#define cpu_relax() cpu_relax_yield()
 #define cpu_relax_lowlatency() barrier()
 
 #define ECAG_CACHE_ATTRIBUTE 0

@@ -53,7 +53,7 @@ void s390_update_cpu_mhz(void)
 	on_each_cpu(update_cpu_mhz, NULL, 0);
 }
 
-void notrace cpu_relax(void)
+void notrace cpu_relax_yield(void)
 {
 	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
 		diag_stat_inc(DIAG_STAT_X044);

@@ -61,7 +61,7 @@ void notrace cpu_relax(void)
 	}
 	barrier();
 }
-EXPORT_SYMBOL(cpu_relax);
+EXPORT_SYMBOL(cpu_relax_yield);
 
 /*
  * cpu_init - initializes state that is per-CPU.

@@ -24,6 +24,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define current_text_addr() ({ __label__ _l; _l: &&_l; })
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 #define release_thread(thread) do {} while (0)
 

@@ -97,6 +97,7 @@ extern struct sh_cpuinfo cpu_data[];
 
 #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);

@@ -119,6 +119,7 @@ extern struct task_struct *last_task_used_math;
 int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 extern void (*sparc_idle)(void);

@@ -216,6 +216,7 @@ unsigned long get_wchan(struct task_struct *task);
 	"nop\n\t" \
 	".previous" \
 	::: "memory")
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Prefetch support. This is tuned for UltraSPARC-III and later.

@@ -264,6 +264,7 @@ static inline void cpu_relax(void)
 	barrier();
 }
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Info on this processor (see fs/proc/cpuinfo.c) */

@@ -71,6 +71,7 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #define task_pt_regs(p) \

@@ -588,6 +588,7 @@ static __always_inline void cpu_relax(void)
 	rep_nop();
 }
 
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Stop speculative execution and prefetching of modified code. */

@@ -26,6 +26,7 @@ static inline void rep_nop(void)
 }
 
 #define cpu_relax() rep_nop()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 #define task_pt_regs(t) (&(t)->thread.regs)

@@ -206,6 +206,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax() barrier()
+#define cpu_relax_yield() cpu_relax()
 #define cpu_relax_lowlatency() cpu_relax()
 
 /* Special register access. */