softirq/core: Consolidate default local_softirq_pending() implementations
Consolidate and optimize default softirq mask API implementations.
Per-CPU operations are expected to be faster and a few architectures
already rely on them to implement local_softirq_pending() and related
accessors/mutators. Those will be migrated to the new generic code.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: James E.J. Bottomley <jejb@parisc-linux.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Link: http://lkml.kernel.org/r/1525786706-22846-6-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0f6f47bacb
commit 0fd7d86285
2 changed files with 15 additions and 5 deletions
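For context on the expected speedup: the old default accessor expanded to __IRQ_STAT(smp_processor_id(), __softirq_pending), i.e. a per_cpu() lookup with an explicit CPU-offset computation, whereas the new default uses __this_cpu_read(), which architectures can implement as a single CPU-local access (e.g. a %gs-relative move on x86). A minimal sketch of the difference; the two wrapper functions are hypothetical, only the macros appear in the diff below:

/* Old default: generic per-CPU lookup with an explicit cpu argument. */
static inline unsigned int pending_old(void)
{
	return per_cpu(irq_stat.__softirq_pending, smp_processor_id());
}

/* New default: one CPU-local access, no smp_processor_id() needed. */
static inline unsigned int pending_new(void)
{
	return __this_cpu_read(irq_stat.__softirq_pending);
}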
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -432,11 +432,25 @@ extern bool force_irqthreads;
 #define force_irqthreads	(0)
 #endif
 
+#ifndef local_softirq_pending
+
+#ifndef local_softirq_pending_ref
+#define local_softirq_pending_ref irq_stat.__softirq_pending
+#endif
+
+#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
+#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
+#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))
+
+#else /* local_softirq_pending */
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x)	(local_softirq_pending() = (x))
 #define or_softirq_pending(x)	(local_softirq_pending() |= (x))
 #endif
 
+#endif /* local_softirq_pending */
+
 /* Some architectures might implement lazy enabling/disabling of
  * interrupts. In some cases, such as stop_machine, we might want
  * to ensure that after a local_irq_disable(), interrupts have
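With the generic block above, an architecture that keeps its softirq pending bits in its own per-CPU variable only has to point local_softirq_pending_ref at that variable in its asm/hardirq.h; the accessor/mutator macros then fall out of the #ifndef. A sketch with a made-up arch variable name (nothing below is from this patch):

/* Hypothetical arch/foo/include/asm/hardirq.h */
DECLARE_PER_CPU(unsigned int, foo_softirq_pending);

/* Redirect the generic per-CPU accessors at the arch's own word;
 * local_softirq_pending(), set_softirq_pending() and
 * or_softirq_pending() then come from <linux/interrupt.h>. */
#define local_softirq_pending_ref foo_softirq_pending

Alternatively, an architecture that wants fully custom accessors defines local_softirq_pending() itself and takes the #else branch.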
--- a/include/linux/irq_cpustat.h
+++ b/include/linux/irq_cpustat.h
@@ -22,11 +22,7 @@ DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);	/* defined in asm/hardirq.h */
 #define __IRQ_STAT(cpu, member)	(per_cpu(irq_stat.member, cpu))
 #endif
 
-/* arch independent irq_stat fields */
-#define local_softirq_pending() \
-	__IRQ_STAT(smp_processor_id(), __softirq_pending)
-
-/* arch dependent irq_stat fields */
+/* arch dependent irq_stat fields */
 #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386 */
 
 #endif	/* __irq_cpustat_h */
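Callers are unaffected either way: raising a softirq with interrupts disabled still reduces to an or_softirq_pending() on the local CPU. A simplified sketch modeled on __raise_softirq_irqoff() in kernel/softirq.c (tracing omitted):

/* Mark softirq nr pending on the local CPU; caller has IRQs off. */
static inline void raise_softirq_pending(unsigned int nr)
{
	or_softirq_pending(1UL << nr);
}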