#ifndef _ASM_IA64_PERCPU_H
#define _ASM_IA64_PERCPU_H

/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE

#ifdef __ASSEMBLY__
# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */

#include <linux/threads.h>

#ifdef HAVE_MODEL_SMALL_ATTRIBUTE
# define __SMALL_ADDR_AREA __attribute__((__model__ (__small__)))
#else
# define __SMALL_ADDR_AREA
#endif

#define DECLARE_PER_CPU(type, name) \
	extern __SMALL_ADDR_AREA __typeof__(type) per_cpu__##name

/* Separate out the type, so (int[3], foo) works. */
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name
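/*
 * Illustrative sketch, not part of the original header: because the type is a
 * separate macro argument, array types work directly.  With hypothetical names,
 *
 *	DEFINE_PER_CPU(int[3], foo);
 *
 * expands (attributes aside) to
 *
 *	__typeof__(int[3]) per_cpu__foo;
 *
 * i.e. a per-CPU array of three ints placed in the .data.percpu section.
 */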
#ifdef CONFIG_SMP
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.shared_aligned"))) \
	__SMALL_ADDR_AREA __typeof__(type) per_cpu__##name \
	____cacheline_aligned_in_smp
#else
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif

/*
 * Pretty much a literal copy of asm-generic/percpu.h, except that percpu_modcopy() is an
 * external routine, to avoid include-hell.
 */
#ifdef CONFIG_SMP

extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu_offset(x) (__per_cpu_offset[x])

/* Equal to __per_cpu_offset[smp_processor_id()], but faster to access: */
DECLARE_PER_CPU(unsigned long, local_per_cpu_offset);

#define per_cpu(var, cpu) (*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
#define __get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
/*
 * __raw_get_cpu_var() is the same as __get_cpu_var() here; it lets callers avoid
 * per_cpu(var, raw_smp_processor_id()) and the CONFIG_DEBUG_PREEMPT check that
 * smp_processor_id() would trigger.
 */
#define __raw_get_cpu_var(var) (*RELOC_HIDE(&per_cpu__##var, __ia64_per_cpu_var(local_per_cpu_offset)))
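/*
 * Illustrative sketch, not part of the original header, using a hypothetical
 * per-CPU counter:
 *
 *	DEFINE_PER_CPU(unsigned long, nr_events);
 *
 *	unsigned long n = per_cpu(nr_events, cpu);	// remote CPU: base address
 *							// plus __per_cpu_offset[cpu]
 *	__get_cpu_var(nr_events)++;			// current CPU: offset comes from
 *							// local_per_cpu_offset, no table lookup
 *
 * __raw_get_cpu_var() behaves like __get_cpu_var() here; callers are expected to
 * keep the access safe against CPU migration (e.g. with preemption disabled).
 */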
extern void percpu_modcopy(void *pcpudst, const void *src, unsigned long size);
extern void setup_per_cpu_areas (void);
extern void *per_cpu_init(void);
#else /* ! SMP */
#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
#define __get_cpu_var(var) per_cpu__##var
#define __raw_get_cpu_var(var) per_cpu__##var
#define per_cpu_init() (__phys_per_cpu_start)
#endif /* SMP */
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
/*
 * Be extremely careful when taking the address of this variable!  Due to virtual
 * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
 * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
 * more efficient.
 */
#define __ia64_per_cpu_var(var) (per_cpu__##var)
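/*
 * Illustrative sketch, not part of the original header, of the caveat above,
 * using a hypothetical per-CPU variable foo:
 *
 *	DEFINE_PER_CPU(int, foo);
 *
 *	int *fast  = &__ia64_per_cpu_var(foo);
 *	int *canon = &__get_cpu_var(foo);
 *
 * "fast" goes through the virtually-remapped per-CPU area and always refers to
 * the copy of whichever CPU dereferences it, so it is NOT the same pointer as
 * "canon".  Use __ia64_per_cpu_var() only for cheap access to the local copy;
 * use __get_cpu_var()/per_cpu() when the canonical address is needed.
 */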
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_PERCPU_H */