Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
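Nearly every hunk below applies the same transformation: accesses that went through __get_cpu_var() (compute the address of this CPU's copy, then dereference it) become this_cpu_*()/__this_cpu_*() operations, which on x86 compile to a single %gs-segment-prefixed instruction. A minimal before/after sketch of that pattern, using a hypothetical per-cpu counter rather than any variable touched by this merge:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_count);	/* hypothetical example variable */

/* Before: take the address of this CPU's slot, then increment through it. */
static void demo_count_old(void)
{
	__get_cpu_var(demo_count)++;
}

/* After: one per-cpu read-modify-write; no separate address calculation. */
static void demo_count_new(void)
{
	__this_cpu_inc(demo_count);
}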
commit 72eb6a7914
62 changed files with 703 additions and 275 deletions
MAINTAINERS | 10
@@ -4653,6 +4653,16 @@ S: Maintained
 F: crypto/pcrypt.c
 F: include/crypto/pcrypt.h
 
+PER-CPU MEMORY ALLOCATOR
+M: Tejun Heo <tj@kernel.org>
+M: Christoph Lameter <cl@linux-foundation.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+S: Maintained
+F: include/linux/percpu*.h
+F: mm/percpu*.c
+F: arch/*/include/asm/percpu.h
+
 PER-TASK DELAY ACCOUNTING
 M: Balbir Singh <balbir@linux.vnet.ibm.com>
 S: Maintained

arch/x86/Kconfig

@@ -310,6 +310,9 @@ config X86_INTERNODE_CACHE_SHIFT
 config X86_CMPXCHG
 	def_bool X86_64 || (X86_32 && !M386)
 
+config CMPXCHG_LOCAL
+	def_bool X86_64 || (X86_32 && !M386)
+
 config X86_L1_CACHE_SHIFT
 	int
 	default "7" if MPENTIUM4 || MPSC

arch/x86/include/asm/hw_breakpoint.h

@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void)
 
 static inline int hw_breakpoint_active(void)
 {
-	return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+	return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
 extern void aout_dump_debugregs(struct user *dump);

arch/x86/include/asm/percpu.h

@@ -229,6 +229,125 @@ do { \
 	} \
 })
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val) \
+({ \
+	typeof(var) paro_ret__ = val; \
+	switch (sizeof(var)) { \
+	case 1: \
+		asm("xaddb %0, "__percpu_arg(1) \
+			    : "+q" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 2: \
+		asm("xaddw %0, "__percpu_arg(1) \
+			    : "+r" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 4: \
+		asm("xaddl %0, "__percpu_arg(1) \
+			    : "+r" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	case 8: \
+		asm("xaddq %0, "__percpu_arg(1) \
+			    : "+re" (paro_ret__), "+m" (var) \
+			    : : "memory"); \
+		break; \
+	default: __bad_percpu_size(); \
+	} \
+	paro_ret__ += val; \
+	paro_ret__; \
+})
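The percpu_add_return_op() macro above is what this_cpu_add_return() and the derived this_cpu_inc_return()/this_cpu_dec_return() expand to on x86: a single xadd on the per-cpu slot returns the old value, to which the addend is added back. A hedged usage sketch (the per-cpu sequence counter is hypothetical; <linux/percpu.h> is assumed, and the "inc_return minus one" idiom mirrors the var++ conversions elsewhere in this merge):

static DEFINE_PER_CPU(u32, demo_seq);	/* hypothetical */

static u32 demo_next_seq(void)
{
	/*
	 * this_cpu_inc_return() yields the post-increment value, so
	 * subtracting 1 reproduces what "__get_cpu_var(demo_seq)++"
	 * returned, in one operation that is safe against preemption
	 * and interrupts on the local CPU.
	 */
	return this_cpu_inc_return(demo_seq) - 1;
}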
+
+/*
+ * xchg is implemented using cmpxchg without a lock prefix. xchg is
+ * expensive due to the implied lock prefix. The processor cannot prefetch
+ * cachelines if xchg is used.
+ */
+#define percpu_xchg_op(var, nval) \
+({ \
+	typeof(var) pxo_ret__; \
+	typeof(var) pxo_new__ = (nval); \
+	switch (sizeof(var)) { \
+	case 1: \
+		asm("\n1:mov "__percpu_arg(1)",%%al" \
+		    "\n\tcmpxchgb %2, "__percpu_arg(1) \
+		    "\n\tjnz 1b" \
+			    : "=a" (pxo_ret__), "+m" (var) \
+			    : "q" (pxo_new__) \
+			    : "memory"); \
+		break; \
+	case 2: \
+		asm("\n1:mov "__percpu_arg(1)",%%ax" \
+		    "\n\tcmpxchgw %2, "__percpu_arg(1) \
+		    "\n\tjnz 1b" \
+			    : "=a" (pxo_ret__), "+m" (var) \
+			    : "r" (pxo_new__) \
+			    : "memory"); \
+		break; \
+	case 4: \
+		asm("\n1:mov "__percpu_arg(1)",%%eax" \
+		    "\n\tcmpxchgl %2, "__percpu_arg(1) \
+		    "\n\tjnz 1b" \
+			    : "=a" (pxo_ret__), "+m" (var) \
+			    : "r" (pxo_new__) \
+			    : "memory"); \
+		break; \
+	case 8: \
+		asm("\n1:mov "__percpu_arg(1)",%%rax" \
+		    "\n\tcmpxchgq %2, "__percpu_arg(1) \
+		    "\n\tjnz 1b" \
+			    : "=a" (pxo_ret__), "+m" (var) \
+			    : "r" (pxo_new__) \
+			    : "memory"); \
+		break; \
+	default: __bad_percpu_size(); \
+	} \
+	pxo_ret__; \
+})
+
+/*
+ * cmpxchg has no such implied lock semantics as a result it is much
+ * more efficient for cpu local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval) \
+({ \
+	typeof(var) pco_ret__; \
+	typeof(var) pco_old__ = (oval); \
+	typeof(var) pco_new__ = (nval); \
+	switch (sizeof(var)) { \
+	case 1: \
+		asm("cmpxchgb %2, "__percpu_arg(1) \
+			    : "=a" (pco_ret__), "+m" (var) \
+			    : "q" (pco_new__), "0" (pco_old__) \
+			    : "memory"); \
+		break; \
+	case 2: \
+		asm("cmpxchgw %2, "__percpu_arg(1) \
+			    : "=a" (pco_ret__), "+m" (var) \
+			    : "r" (pco_new__), "0" (pco_old__) \
+			    : "memory"); \
+		break; \
+	case 4: \
+		asm("cmpxchgl %2, "__percpu_arg(1) \
+			    : "=a" (pco_ret__), "+m" (var) \
+			    : "r" (pco_new__), "0" (pco_old__) \
+			    : "memory"); \
+		break; \
+	case 8: \
+		asm("cmpxchgq %2, "__percpu_arg(1) \
+			    : "=a" (pco_ret__), "+m" (var) \
+			    : "r" (pco_new__), "0" (pco_old__) \
+			    : "memory"); \
+		break; \
+	default: __bad_percpu_size(); \
+	} \
+	pco_ret__; \
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
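The xchg instruction always asserts LOCK, so percpu_xchg_op() above loops on an unlocked cmpxchg instead, and percpu_cmpxchg_op() omits LOCK entirely; for data that only the owning CPU touches, that is sufficient. A hedged usage sketch with a hypothetical per-cpu pointer slot (<linux/percpu.h> assumed):

static DEFINE_PER_CPU(void *, demo_slot);	/* hypothetical */

static void *demo_swap_slot(void *newp)
{
	/*
	 * Publish the new pointer and retrieve the previous one without
	 * a bus-locked xchg; only this CPU ever accesses demo_slot.
	 */
	return this_cpu_xchg(demo_slot, newp);
}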
@ -267,6 +386,12 @@ do { \
|
|||
#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
/*
|
||||
* Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
|
||||
* faster than an xchg with forced lock semantics.
|
||||
*/
|
||||
#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||
|
@ -286,6 +411,11 @@ do { \
|
|||
#define this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define this_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
|
||||
#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
|
||||
|
@ -299,6 +429,31 @@ do { \
|
|||
#define irqsafe_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define irqsafe_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define irqsafe_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define irqsafe_cpu_xchg_1(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define irqsafe_cpu_xchg_2(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define irqsafe_cpu_xchg_4(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define irqsafe_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
|
||||
#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#ifndef CONFIG_M386
|
||||
#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define __this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define __this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define __this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
|
||||
#define this_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define this_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define this_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
|
||||
#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
|
||||
#endif /* !CONFIG_M386 */
|
||||
|
||||
/*
|
||||
* Per cpu atomic 64 bit operations are only available under 64 bit.
|
||||
|
@ -311,6 +466,7 @@ do { \
|
|||
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
|
||||
|
||||
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
|
||||
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
|
||||
|
@ -318,12 +474,12 @@ do { \
|
|||
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
#define this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
|
||||
|
||||
#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
|
||||
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
|
||||
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
|
||||
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
|
||||
|
||||
#endif
|
||||
|
||||
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
|
||||
|
|
|
@ -141,10 +141,9 @@ extern __u32 cpu_caps_set[NCAPINTS];
|
|||
#ifdef CONFIG_SMP
|
||||
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
|
||||
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
|
||||
#define current_cpu_data __get_cpu_var(cpu_info)
|
||||
#else
|
||||
#define cpu_info boot_cpu_data
|
||||
#define cpu_data(cpu) boot_cpu_data
|
||||
#define current_cpu_data boot_cpu_data
|
||||
#endif
|
||||
|
||||
extern const struct seq_operations cpuinfo_op;
|
||||
|
|
|
@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
|
|||
{
|
||||
struct clock_event_device *levt = &__get_cpu_var(lapic_events);
|
||||
|
||||
if (cpu_has(¤t_cpu_data, X86_FEATURE_ARAT)) {
|
||||
if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
|
||||
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
|
||||
/* Make LAPIC timer preferrable over percpu HPET */
|
||||
lapic_clockevent.rating = 150;
|
||||
|
|
|
@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
|
|||
unsigned int irr;
|
||||
struct irq_desc *desc;
|
||||
struct irq_cfg *cfg;
|
||||
irq = __get_cpu_var(vector_irq)[vector];
|
||||
irq = __this_cpu_read(vector_irq[vector]);
|
||||
|
||||
if (irq == -1)
|
||||
continue;
|
||||
|
@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
|
|||
apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
|
||||
goto unlock;
|
||||
}
|
||||
__get_cpu_var(vector_irq)[vector] = -1;
|
||||
__this_cpu_write(vector_irq[vector], -1);
|
||||
unlock:
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
|
|
|
@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
|||
else if (!strcmp(oem_table_id, "UVX"))
|
||||
uv_system_type = UV_X2APIC;
|
||||
else if (!strcmp(oem_table_id, "UVH")) {
|
||||
__get_cpu_var(x2apic_extra_bits) =
|
||||
pnodeid << uvh_apicid.s.pnode_shift;
|
||||
__this_cpu_write(x2apic_extra_bits,
|
||||
pnodeid << uvh_apicid.s.pnode_shift);
|
||||
uv_system_type = UV_NON_UNIQUE_APIC;
|
||||
uv_set_apicid_hibit();
|
||||
return 1;
|
||||
|
@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
|
|||
unsigned int id;
|
||||
|
||||
WARN_ON(preemptible() && num_online_cpus() > 1);
|
||||
id = x | __get_cpu_var(x2apic_extra_bits);
|
||||
id = x | __this_cpu_read(x2apic_extra_bits);
|
||||
|
||||
return id;
|
||||
}
|
||||
|
@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
|
|||
|
||||
static __cpuinit void set_x2apic_extra_bits(int pnode)
|
||||
{
|
||||
__get_cpu_var(x2apic_extra_bits) = (pnode << 6);
|
||||
__this_cpu_write(x2apic_extra_bits, (pnode << 6));
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
|
|||
|
||||
bool cpu_has_amd_erratum(const int *erratum)
|
||||
{
|
||||
struct cpuinfo_x86 *cpu = ¤t_cpu_data;
|
||||
struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
|
||||
int osvw_id = *erratum++;
|
||||
u32 range;
|
||||
u32 ms;
|
||||
|
|
|
@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
|
|||
|
||||
*rc = -ENODEV;
|
||||
|
||||
if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
|
||||
if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
|
||||
return;
|
||||
|
||||
eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
|
||||
|
@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
|
|||
static void query_values_on_cpu(void *_err)
|
||||
{
|
||||
int *err = _err;
|
||||
struct powernow_k8_data *data = __get_cpu_var(powernow_data);
|
||||
struct powernow_k8_data *data = __this_cpu_read(powernow_data);
|
||||
|
||||
*err = query_current_values_with_pending_wait(data);
|
||||
}
|
||||
|
|
|
@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|||
line_size = l2.line_size;
|
||||
lines_per_tag = l2.lines_per_tag;
|
||||
/* cpu_data has errata corrections for K7 applied */
|
||||
size_in_kb = current_cpu_data.x86_cache_size;
|
||||
size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
|
||||
break;
|
||||
case 3:
|
||||
if (!l3.val)
|
||||
|
@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
|
|||
eax->split.type = types[leaf];
|
||||
eax->split.level = levels[leaf];
|
||||
eax->split.num_threads_sharing = 0;
|
||||
eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
|
||||
eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
|
||||
|
||||
|
||||
if (assoc == 0xffff)
|
||||
|
|
|
@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
|
|||
|
||||
static int msr_to_offset(u32 msr)
|
||||
{
|
||||
unsigned bank = __get_cpu_var(injectm.bank);
|
||||
unsigned bank = __this_cpu_read(injectm.bank);
|
||||
|
||||
if (msr == rip_msr)
|
||||
return offsetof(struct mce, ip);
|
||||
|
@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
|
|||
{
|
||||
u64 v;
|
||||
|
||||
if (__get_cpu_var(injectm).finished) {
|
||||
if (__this_cpu_read(injectm.finished)) {
|
||||
int offset = msr_to_offset(msr);
|
||||
|
||||
if (offset < 0)
|
||||
|
@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
|
|||
|
||||
static void mce_wrmsrl(u32 msr, u64 v)
|
||||
{
|
||||
if (__get_cpu_var(injectm).finished) {
|
||||
if (__this_cpu_read(injectm.finished)) {
|
||||
int offset = msr_to_offset(msr);
|
||||
|
||||
if (offset >= 0)
|
||||
|
@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
|
|||
|
||||
WARN_ON(smp_processor_id() != data);
|
||||
|
||||
if (mce_available(¤t_cpu_data)) {
|
||||
if (mce_available(__this_cpu_ptr(&cpu_info))) {
|
||||
machine_check_poll(MCP_TIMESTAMP,
|
||||
&__get_cpu_var(mce_poll_banks));
|
||||
}
|
||||
|
@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
|
|||
static int mce_resume(struct sys_device *dev)
|
||||
{
|
||||
__mcheck_cpu_init_generic();
|
||||
__mcheck_cpu_init_vendor(¤t_cpu_data);
|
||||
__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
|
|||
static void mce_cpu_restart(void *data)
|
||||
{
|
||||
del_timer_sync(&__get_cpu_var(mce_timer));
|
||||
if (!mce_available(¤t_cpu_data))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)))
|
||||
return;
|
||||
__mcheck_cpu_init_generic();
|
||||
__mcheck_cpu_init_timer();
|
||||
|
@ -1790,7 +1790,7 @@ static void mce_restart(void)
|
|||
/* Toggle features for corrected errors */
|
||||
static void mce_disable_ce(void *all)
|
||||
{
|
||||
if (!mce_available(¤t_cpu_data))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)))
|
||||
return;
|
||||
if (all)
|
||||
del_timer_sync(&__get_cpu_var(mce_timer));
|
||||
|
@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
|
|||
|
||||
static void mce_enable_ce(void *all)
|
||||
{
|
||||
if (!mce_available(¤t_cpu_data))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)))
|
||||
return;
|
||||
cmci_reenable();
|
||||
cmci_recheck();
|
||||
|
@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
|
|||
unsigned long action = *(unsigned long *)h;
|
||||
int i;
|
||||
|
||||
if (!mce_available(¤t_cpu_data))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)))
|
||||
return;
|
||||
|
||||
if (!(action & CPU_TASKS_FROZEN))
|
||||
|
@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
|
|||
unsigned long action = *(unsigned long *)h;
|
||||
int i;
|
||||
|
||||
if (!mce_available(¤t_cpu_data))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)))
|
||||
return;
|
||||
|
||||
if (!(action & CPU_TASKS_FROZEN))
|
||||
|
|
|
@ -130,7 +130,7 @@ void cmci_recheck(void)
|
|||
unsigned long flags;
|
||||
int banks;
|
||||
|
||||
if (!mce_available(¤t_cpu_data) || !cmci_supported(&banks))
|
||||
if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
|
||||
return;
|
||||
local_irq_save(flags);
|
||||
machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
|
||||
|
|
|
@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
|
|||
|
||||
static void x86_pmu_enable_event(struct perf_event *event)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
if (cpuc->enabled)
|
||||
if (__this_cpu_read(cpu_hw_events.enabled))
|
||||
__x86_pmu_enable_event(&event->hw,
|
||||
ARCH_PERFMON_EVENTSEL_ENABLE);
|
||||
}
|
||||
|
@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
|
|||
break;
|
||||
case DIE_NMIUNKNOWN:
|
||||
this_nmi = percpu_read(irq_stat.__nmi_count);
|
||||
if (this_nmi != __get_cpu_var(pmu_nmi).marked)
|
||||
if (this_nmi != __this_cpu_read(pmu_nmi.marked))
|
||||
/* let the kernel handle the unknown nmi */
|
||||
return NOTIFY_DONE;
|
||||
/*
|
||||
|
@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
|
|||
this_nmi = percpu_read(irq_stat.__nmi_count);
|
||||
if ((handled > 1) ||
|
||||
/* the next nmi could be a back-to-back nmi */
|
||||
((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
|
||||
(__get_cpu_var(pmu_nmi).handled > 1))) {
|
||||
((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
|
||||
(__this_cpu_read(pmu_nmi.handled) > 1))) {
|
||||
/*
|
||||
* We could have two subsequent back-to-back nmis: The
|
||||
* first handles more than one counter, the 2nd
|
||||
|
@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
|
|||
* handling more than one counter. We will mark the
|
||||
* next (3rd) and then drop it if unhandled.
|
||||
*/
|
||||
__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
|
||||
__get_cpu_var(pmu_nmi).handled = handled;
|
||||
__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
|
||||
__this_cpu_write(pmu_nmi.handled, handled);
|
||||
}
|
||||
|
||||
return NOTIFY_STOP;
|
||||
|
@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
|
|||
*/
|
||||
static void x86_pmu_start_txn(struct pmu *pmu)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
|
||||
perf_pmu_disable(pmu);
|
||||
cpuc->group_flag |= PERF_EVENT_TXN;
|
||||
cpuc->n_txn = 0;
|
||||
__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
|
||||
__this_cpu_write(cpu_hw_events.n_txn, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
|
|||
*/
|
||||
static void x86_pmu_cancel_txn(struct pmu *pmu)
|
||||
{
|
||||
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
|
||||
cpuc->group_flag &= ~PERF_EVENT_TXN;
|
||||
__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
|
||||
/*
|
||||
* Truncate the collected events.
|
||||
*/
|
||||
cpuc->n_added -= cpuc->n_txn;
|
||||
cpuc->n_events -= cpuc->n_txn;
|
||||
__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
|
||||
__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
|
||||
perf_pmu_enable(pmu);
|
||||
}
|
||||
|
||||
|
|
|
@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
|
|||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
|
||||
if (!__get_cpu_var(cpu_hw_events).enabled)
|
||||
if (!__this_cpu_read(cpu_hw_events.enabled))
|
||||
return;
|
||||
|
||||
intel_pmu_enable_bts(hwc->config);
|
||||
|
@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
|
|||
|
||||
static void intel_pmu_reset(void)
|
||||
{
|
||||
struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
|
||||
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
|
||||
unsigned long flags;
|
||||
int idx;
|
||||
|
||||
|
|
|
@ -170,9 +170,9 @@ static void ftrace_mod_code(void)
|
|||
|
||||
void ftrace_nmi_enter(void)
|
||||
{
|
||||
__get_cpu_var(save_modifying_code) = modifying_code;
|
||||
__this_cpu_write(save_modifying_code, modifying_code);
|
||||
|
||||
if (!__get_cpu_var(save_modifying_code))
|
||||
if (!__this_cpu_read(save_modifying_code))
|
||||
return;
|
||||
|
||||
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
|
||||
|
@ -186,7 +186,7 @@ void ftrace_nmi_enter(void)
|
|||
|
||||
void ftrace_nmi_exit(void)
|
||||
{
|
||||
if (!__get_cpu_var(save_modifying_code))
|
||||
if (!__this_cpu_read(save_modifying_code))
|
||||
return;
|
||||
|
||||
/* Finish all executions before clearing nmi_running */
|
||||
|
|
|
@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
|
|||
return -EBUSY;
|
||||
|
||||
set_debugreg(info->address, i);
|
||||
__get_cpu_var(cpu_debugreg[i]) = info->address;
|
||||
__this_cpu_write(cpu_debugreg[i], info->address);
|
||||
|
||||
dr7 = &__get_cpu_var(cpu_dr7);
|
||||
*dr7 |= encode_dr7(i, info->len, info->type);
|
||||
|
@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
|
|||
|
||||
void hw_breakpoint_restore(void)
|
||||
{
|
||||
set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
|
||||
set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
|
||||
set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
|
||||
set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
|
||||
set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
|
||||
set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
|
||||
set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
|
||||
set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
|
||||
set_debugreg(current->thread.debugreg6, 6);
|
||||
set_debugreg(__get_cpu_var(cpu_dr7), 7);
|
||||
set_debugreg(__this_cpu_read(cpu_dr7), 7);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
|
||||
|
||||
|
|
|
@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
|
|||
exit_idle();
|
||||
irq_enter();
|
||||
|
||||
irq = __get_cpu_var(vector_irq)[vector];
|
||||
irq = __this_cpu_read(vector_irq[vector]);
|
||||
|
||||
if (!handle_irq(irq, regs)) {
|
||||
ack_APIC_irq();
|
||||
|
@ -350,12 +350,12 @@ void fixup_irqs(void)
|
|||
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
|
||||
unsigned int irr;
|
||||
|
||||
if (__get_cpu_var(vector_irq)[vector] < 0)
|
||||
if (__this_cpu_read(vector_irq[vector]) < 0)
|
||||
continue;
|
||||
|
||||
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
|
||||
if (irr & (1 << (vector % 32))) {
|
||||
irq = __get_cpu_var(vector_irq)[vector];
|
||||
irq = __this_cpu_read(vector_irq[vector]);
|
||||
|
||||
data = irq_get_irq_data(irq);
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
|
|
@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
|
|||
u32 *isp, arg1, arg2;
|
||||
|
||||
curctx = (union irq_ctx *) current_thread_info();
|
||||
irqctx = __get_cpu_var(hardirq_ctx);
|
||||
irqctx = __this_cpu_read(hardirq_ctx);
|
||||
|
||||
/*
|
||||
* this is where we switch to the IRQ stack. However, if we are
|
||||
|
@ -166,7 +166,7 @@ asmlinkage void do_softirq(void)
|
|||
|
||||
if (local_softirq_pending()) {
|
||||
curctx = current_thread_info();
|
||||
irqctx = __get_cpu_var(softirq_ctx);
|
||||
irqctx = __this_cpu_read(softirq_ctx);
|
||||
irqctx->tinfo.task = curctx->task;
|
||||
irqctx->tinfo.previous_esp = current_stack_pointer;
|
||||
|
||||
|
|
|
@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
|||
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
|
||||
kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
|
||||
|
@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
|||
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||
struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
__this_cpu_write(current_kprobe, p);
|
||||
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
|
||||
= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
|
||||
if (is_IF_modifier(p->ainsn.insn))
|
||||
|
@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
|
|||
preempt_enable_no_resched();
|
||||
return 1;
|
||||
} else if (kprobe_running()) {
|
||||
p = __get_cpu_var(current_kprobe);
|
||||
p = __this_cpu_read(current_kprobe);
|
||||
if (p->break_handler && p->break_handler(p, regs)) {
|
||||
setup_singlestep(p, regs, kcb, 0);
|
||||
return 1;
|
||||
|
@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
|
|||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__get_cpu_var(current_kprobe) = &ri->rp->kp;
|
||||
__this_cpu_write(current_kprobe, &ri->rp->kp);
|
||||
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
ri->ret_addr = correct_ret_addr;
|
||||
ri->rp->handler(ri, regs);
|
||||
__get_cpu_var(current_kprobe) = NULL;
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
|
|||
regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
|
||||
regs->orig_ax = ~0UL;
|
||||
|
||||
__get_cpu_var(current_kprobe) = &op->kp;
|
||||
__this_cpu_write(current_kprobe, &op->kp);
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
opt_pre_handler(&op->kp, regs);
|
||||
__get_cpu_var(current_kprobe) = NULL;
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
preempt_enable_no_resched();
|
||||
}
|
||||
|
|
|
@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
|
|||
trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
|
||||
trace_cpu_idle((ax>>4)+1, smp_processor_id());
|
||||
if (!need_resched()) {
|
||||
if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
|
||||
if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
|
||||
clflush((void *)¤t_thread_info()->flags);
|
||||
|
||||
__monitor((void *)¤t_thread_info()->flags, 0, 0);
|
||||
|
@ -462,7 +462,7 @@ static void mwait_idle(void)
|
|||
if (!need_resched()) {
|
||||
trace_power_start(POWER_CSTATE, 1, smp_processor_id());
|
||||
trace_cpu_idle(1, smp_processor_id());
|
||||
if (cpu_has(¤t_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
|
||||
if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
|
||||
clflush((void *)¤t_thread_info()->flags);
|
||||
|
||||
__monitor((void *)¤t_thread_info()->flags, 0, 0);
|
||||
|
|
|
@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
|
|||
|
||||
cpumask_set_cpu(cpu, c->llc_shared_map);
|
||||
|
||||
if (current_cpu_data.x86_max_cores == 1) {
|
||||
if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
|
||||
cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
|
||||
c->booted_cores = 1;
|
||||
return;
|
||||
|
@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
|
|||
|
||||
preempt_disable();
|
||||
smp_cpu_index_default();
|
||||
current_cpu_data = boot_cpu_data;
|
||||
memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
|
||||
cpumask_copy(cpu_callin_mask, cpumask_of(0));
|
||||
mb();
|
||||
/*
|
||||
|
@ -1383,7 +1383,7 @@ void play_dead_common(void)
|
|||
|
||||
mb();
|
||||
/* Ack it */
|
||||
__get_cpu_var(cpu_state) = CPU_DEAD;
|
||||
__this_cpu_write(cpu_state, CPU_DEAD);
|
||||
|
||||
/*
|
||||
* With physical CPU hotplug, we should halt the cpu
|
||||
|
@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void)
|
|||
int i;
|
||||
void *mwait_ptr;
|
||||
|
||||
if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT))
|
||||
if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
|
||||
return;
|
||||
if (!cpu_has(¤t_cpu_data, X86_FEATURE_CLFLSH))
|
||||
if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
|
||||
return;
|
||||
if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
|
||||
if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
|
||||
return;
|
||||
|
||||
eax = CPUID_MWAIT_LEAF;
|
||||
|
@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void)
|
|||
|
||||
static inline void hlt_play_dead(void)
|
||||
{
|
||||
if (current_cpu_data.x86 >= 4)
|
||||
if (__this_cpu_read(cpu_info.x86) >= 4)
|
||||
wbinvd();
|
||||
|
||||
while (1) {
|
||||
|
|
|
@ -659,7 +659,7 @@ void restore_sched_clock_state(void)
|
|||
|
||||
local_irq_save(flags);
|
||||
|
||||
__get_cpu_var(cyc2ns_offset) = 0;
|
||||
__this_cpu_write(cyc2ns_offset, 0);
|
||||
offset = cyc2ns_suspend - sched_clock();
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
|
|
|
@ -976,7 +976,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
|
|||
if (kvm_tsc_changes_freq())
|
||||
printk_once(KERN_WARNING
|
||||
"kvm: unreliable cycle conversion on adjustable rate TSC\n");
|
||||
ret = nsec * __get_cpu_var(cpu_tsc_khz);
|
||||
ret = nsec * __this_cpu_read(cpu_tsc_khz);
|
||||
do_div(ret, USEC_PER_SEC);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1061,7 +1061,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
|
|||
local_irq_save(flags);
|
||||
kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
|
||||
kernel_ns = get_kernel_ns();
|
||||
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
|
||||
this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
|
||||
|
||||
if (unlikely(this_tsc_khz == 0)) {
|
||||
local_irq_restore(flags);
|
||||
|
@ -4427,7 +4427,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
|
|||
|
||||
static void tsc_bad(void *info)
|
||||
{
|
||||
__get_cpu_var(cpu_tsc_khz) = 0;
|
||||
__this_cpu_write(cpu_tsc_khz, 0);
|
||||
}
|
||||
|
||||
static void tsc_khz_changed(void *data)
|
||||
|
@ -4441,7 +4441,7 @@ static void tsc_khz_changed(void *data)
|
|||
khz = cpufreq_quick_get(raw_smp_processor_id());
|
||||
if (!khz)
|
||||
khz = tsc_khz;
|
||||
__get_cpu_var(cpu_tsc_khz) = khz;
|
||||
__this_cpu_write(cpu_tsc_khz, khz);
|
||||
}
|
||||
|
||||
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
||||
|
|
|
@ -121,7 +121,7 @@ inline void __const_udelay(unsigned long xloops)
|
|||
asm("mull %%edx"
|
||||
:"=d" (xloops), "=&a" (d0)
|
||||
:"1" (xloops), "0"
|
||||
(cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
|
||||
(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));
|
||||
|
||||
__delay(++xloops);
|
||||
}
|
||||
|
|
|
@ -143,7 +143,7 @@ static inline int has_mux(void)
|
|||
|
||||
inline int op_x86_phys_to_virt(int phys)
|
||||
{
|
||||
return __get_cpu_var(switch_index) + phys;
|
||||
return __this_cpu_read(switch_index) + phys;
|
||||
}
|
||||
|
||||
inline int op_x86_virt_to_phys(int virt)
|
||||
|
|
|
@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
|
|||
* counter width:
|
||||
*/
|
||||
if (!(eax.split.version_id == 0 &&
|
||||
current_cpu_data.x86 == 6 &&
|
||||
current_cpu_data.x86_model == 15)) {
|
||||
__this_cpu_read(cpu_info.x86) == 6 &&
|
||||
__this_cpu_read(cpu_info.x86_model) == 15)) {
|
||||
|
||||
if (counter_width < eax.split.bit_width)
|
||||
counter_width = eax.split.bit_width;
|
||||
|
@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void)
|
|||
eax.full = cpuid_eax(0xa);
|
||||
|
||||
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
|
||||
if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
|
||||
current_cpu_data.x86_model == 15) {
|
||||
if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
|
||||
__this_cpu_read(cpu_info.x86_model) == 15) {
|
||||
eax.split.version_id = 2;
|
||||
eax.split.num_counters = 2;
|
||||
eax.split.bit_width = 40;
|
||||
|
|
|
@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
|
|||
|
||||
preempt_disable();
|
||||
|
||||
start = __get_cpu_var(idt_desc).address;
|
||||
end = start + __get_cpu_var(idt_desc).size + 1;
|
||||
start = __this_cpu_read(idt_desc.address);
|
||||
end = start + __this_cpu_read(idt_desc.size) + 1;
|
||||
|
||||
xen_mc_flush();
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ static inline void xen_mc_batch(void)
|
|||
unsigned long flags;
|
||||
/* need to disable interrupts until this entry is complete */
|
||||
local_irq_save(flags);
|
||||
__get_cpu_var(xen_mc_irq_flags) = flags;
|
||||
__this_cpu_write(xen_mc_irq_flags, flags);
|
||||
}
|
||||
|
||||
static inline struct multicall_space xen_mc_entry(size_t args)
|
||||
|
|
|
@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
|
|||
{
|
||||
struct xen_spinlock *prev;
|
||||
|
||||
prev = __get_cpu_var(lock_spinners);
|
||||
__get_cpu_var(lock_spinners) = xl;
|
||||
prev = __this_cpu_read(lock_spinners);
|
||||
__this_cpu_write(lock_spinners, xl);
|
||||
|
||||
wmb(); /* set lock of interest before count */
|
||||
|
||||
|
@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
|
|||
asm(LOCK_PREFIX " decw %0"
|
||||
: "+m" (xl->spinners) : : "memory");
|
||||
wmb(); /* decrement count before restoring lock */
|
||||
__get_cpu_var(lock_spinners) = prev;
|
||||
__this_cpu_write(lock_spinners, prev);
|
||||
}
|
||||
|
||||
static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
|
||||
{
|
||||
struct xen_spinlock *xl = (struct xen_spinlock *)lock;
|
||||
struct xen_spinlock *prev;
|
||||
int irq = __get_cpu_var(lock_kicker_irq);
|
||||
int irq = __this_cpu_read(lock_kicker_irq);
|
||||
int ret;
|
||||
u64 start;
|
||||
|
||||
|
|
|
@ -135,24 +135,24 @@ static void do_stolen_accounting(void)
|
|||
|
||||
/* Add the appropriate number of ticks of stolen time,
|
||||
including any left-overs from last time. */
|
||||
stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
|
||||
stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
|
||||
|
||||
if (stolen < 0)
|
||||
stolen = 0;
|
||||
|
||||
ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
|
||||
__get_cpu_var(xen_residual_stolen) = stolen;
|
||||
__this_cpu_write(xen_residual_stolen, stolen);
|
||||
account_steal_ticks(ticks);
|
||||
|
||||
/* Add the appropriate number of ticks of blocked time,
|
||||
including any left-overs from last time. */
|
||||
blocked += __get_cpu_var(xen_residual_blocked);
|
||||
blocked += __this_cpu_read(xen_residual_blocked);
|
||||
|
||||
if (blocked < 0)
|
||||
blocked = 0;
|
||||
|
||||
ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
|
||||
__get_cpu_var(xen_residual_blocked) = blocked;
|
||||
__this_cpu_write(xen_residual_blocked, blocked);
|
||||
account_idle_ticks(ticks);
|
||||
}
|
||||
|
||||
|
|
|
@ -746,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
|
|||
struct acpi_processor *pr;
|
||||
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
|
||||
|
||||
pr = __get_cpu_var(processors);
|
||||
pr = __this_cpu_read(processors);
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
@ -787,7 +787,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
|
|||
s64 idle_time_ns;
|
||||
s64 idle_time;
|
||||
|
||||
pr = __get_cpu_var(processors);
|
||||
pr = __this_cpu_read(processors);
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
@ -864,7 +864,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
|
|||
s64 idle_time;
|
||||
|
||||
|
||||
pr = __get_cpu_var(processors);
|
||||
pr = __this_cpu_read(processors);
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
|
|
@ -626,7 +626,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
|
|||
preempt_disable();
|
||||
/* if over the trickle threshold, use only 1 in 4096 samples */
|
||||
if (input_pool.entropy_count > trickle_thresh &&
|
||||
(__get_cpu_var(trickle_count)++ & 0xfff))
|
||||
((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
|
||||
goto out;
|
||||
|
||||
sample.jiffies = jiffies;
|
||||
|
|
|
@ -43,9 +43,10 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
|
|||
|
||||
static inline void get_seq(__u32 *ts, int *cpu)
|
||||
{
|
||||
*ts = get_cpu_var(proc_event_counts)++;
|
||||
preempt_disable();
|
||||
*ts = __this_cpu_inc_return(proc_event_counts) -1;
|
||||
*cpu = smp_processor_id();
|
||||
put_cpu_var(proc_event_counts);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
void proc_fork_connector(struct task_struct *task)
|
||||
|
|
|
@ -49,7 +49,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
|
|||
*/
|
||||
static void cpuidle_idle_call(void)
|
||||
{
|
||||
struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
|
||||
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
|
||||
struct cpuidle_state *target_state;
|
||||
int next_state;
|
||||
|
||||
|
|
|
@ -121,7 +121,7 @@ static int gameport_measure_speed(struct gameport *gameport)
|
|||
}
|
||||
|
||||
gameport_close(gameport);
|
||||
return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
|
||||
return (this_cpu_read(cpu_info.loops_per_jiffy) *
|
||||
(unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
|
||||
|
||||
#else
|
||||
|
|
|
@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
|
|||
s390_idle_check(regs, S390_lowcore.int_clock,
|
||||
S390_lowcore.async_enter_timer);
|
||||
irq_enter();
|
||||
__get_cpu_var(s390_idle).nohz_delay = 1;
|
||||
__this_cpu_write(s390_idle.nohz_delay, 1);
|
||||
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
|
||||
/* Serve timer interrupts first. */
|
||||
clock_comparator_work();
|
||||
|
|
|
@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
|
|||
duty_cycle = new_duty_cycle;
|
||||
freq = new_freq;
|
||||
|
||||
loops_per_sec = current_cpu_data.loops_per_jiffy;
|
||||
loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
|
||||
loops_per_sec *= HZ;
|
||||
|
||||
/* How many clocks in a microsecond?, avoiding long long divide */
|
||||
|
@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
|
|||
dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
|
||||
"clk/jiffy=%ld, pulse=%ld, space=%ld, "
|
||||
"conv_us_to_clocks=%ld\n",
|
||||
freq, duty_cycle, current_cpu_data.loops_per_jiffy,
|
||||
freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
|
||||
pulse_width, space_width, conv_us_to_clocks);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -78,10 +78,10 @@ void speakup_fake_down_arrow(void)
|
|||
/* don't change CPU */
|
||||
preempt_disable();
|
||||
|
||||
__get_cpu_var(reporting_keystroke) = true;
|
||||
__this_cpu_write(reporting_keystroke, true);
|
||||
input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
|
||||
input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
|
||||
__get_cpu_var(reporting_keystroke) = false;
|
||||
__this_cpu_write(reporting_keystroke, false);
|
||||
|
||||
/* reenable preemption */
|
||||
preempt_enable();
|
||||
|
@ -95,10 +95,5 @@ void speakup_fake_down_arrow(void)
|
|||
*/
|
||||
bool speakup_fake_key_pressed(void)
|
||||
{
|
||||
bool is_pressed;
|
||||
|
||||
is_pressed = get_cpu_var(reporting_keystroke);
|
||||
put_cpu_var(reporting_keystroke);
|
||||
|
||||
return is_pressed;
|
||||
return this_cpu_read(reporting_keystroke);
|
||||
}
|
||||
|
|
|
@ -355,7 +355,7 @@ static void unmask_evtchn(int port)
|
|||
struct evtchn_unmask unmask = { .port = port };
|
||||
(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
|
||||
} else {
|
||||
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
|
||||
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
|
||||
|
||||
sync_clear_bit(port, &s->evtchn_mask[0]);
|
||||
|
||||
|
@ -1101,7 +1101,7 @@ static void __xen_evtchn_do_upcall(void)
|
|||
{
|
||||
int cpu = get_cpu();
|
||||
struct shared_info *s = HYPERVISOR_shared_info;
|
||||
struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
|
||||
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
|
||||
unsigned count;
|
||||
|
||||
do {
|
||||
|
@ -1109,7 +1109,7 @@ static void __xen_evtchn_do_upcall(void)
|
|||
|
||||
vcpu_info->evtchn_upcall_pending = 0;
|
||||
|
||||
if (__get_cpu_var(xed_nesting_count)++)
|
||||
if (__this_cpu_inc_return(xed_nesting_count) - 1)
|
||||
goto out;
|
||||
|
||||
#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
|
||||
|
@ -1141,8 +1141,8 @@ static void __xen_evtchn_do_upcall(void)
|
|||
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
count = __get_cpu_var(xed_nesting_count);
|
||||
__get_cpu_var(xed_nesting_count) = 0;
|
||||
count = __this_cpu_read(xed_nesting_count);
|
||||
__this_cpu_write(xed_nesting_count, 0);
|
||||
} while (count != 1 || vcpu_info->evtchn_upcall_pending);
|
||||
|
||||
out:
|
||||
|
|
fs/buffer.c | 37
|
@ -1270,12 +1270,10 @@ static inline void check_irqs_on(void)
|
|||
static void bh_lru_install(struct buffer_head *bh)
|
||||
{
|
||||
struct buffer_head *evictee = NULL;
|
||||
struct bh_lru *lru;
|
||||
|
||||
check_irqs_on();
|
||||
bh_lru_lock();
|
||||
lru = &__get_cpu_var(bh_lrus);
|
||||
if (lru->bhs[0] != bh) {
|
||||
if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
|
||||
struct buffer_head *bhs[BH_LRU_SIZE];
|
||||
int in;
|
||||
int out = 0;
|
||||
|
@ -1283,7 +1281,8 @@ static void bh_lru_install(struct buffer_head *bh)
|
|||
get_bh(bh);
|
||||
bhs[out++] = bh;
|
||||
for (in = 0; in < BH_LRU_SIZE; in++) {
|
||||
struct buffer_head *bh2 = lru->bhs[in];
|
||||
struct buffer_head *bh2 =
|
||||
__this_cpu_read(bh_lrus.bhs[in]);
|
||||
|
||||
if (bh2 == bh) {
|
||||
__brelse(bh2);
|
||||
|
@ -1298,7 +1297,7 @@ static void bh_lru_install(struct buffer_head *bh)
|
|||
}
|
||||
while (out < BH_LRU_SIZE)
|
||||
bhs[out++] = NULL;
|
||||
memcpy(lru->bhs, bhs, sizeof(bhs));
|
||||
memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
|
||||
}
|
||||
bh_lru_unlock();
|
||||
|
||||
|
@ -1313,23 +1312,22 @@ static struct buffer_head *
|
|||
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
|
||||
{
|
||||
struct buffer_head *ret = NULL;
|
||||
struct bh_lru *lru;
|
||||
unsigned int i;
|
||||
|
||||
check_irqs_on();
|
||||
bh_lru_lock();
|
||||
lru = &__get_cpu_var(bh_lrus);
|
||||
for (i = 0; i < BH_LRU_SIZE; i++) {
|
||||
struct buffer_head *bh = lru->bhs[i];
|
||||
struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
|
||||
|
||||
if (bh && bh->b_bdev == bdev &&
|
||||
bh->b_blocknr == block && bh->b_size == size) {
|
||||
if (i) {
|
||||
while (i) {
|
||||
lru->bhs[i] = lru->bhs[i - 1];
|
||||
__this_cpu_write(bh_lrus.bhs[i],
|
||||
__this_cpu_read(bh_lrus.bhs[i - 1]));
|
||||
i--;
|
||||
}
|
||||
lru->bhs[0] = bh;
|
||||
__this_cpu_write(bh_lrus.bhs[0], bh);
|
||||
}
|
||||
get_bh(bh);
|
||||
ret = bh;
|
||||
|
@ -3203,22 +3201,23 @@ static void recalc_bh_state(void)
|
|||
int i;
|
||||
int tot = 0;
|
||||
|
||||
if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
|
||||
if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
|
||||
return;
|
||||
__get_cpu_var(bh_accounting).ratelimit = 0;
|
||||
__this_cpu_write(bh_accounting.ratelimit, 0);
|
||||
for_each_online_cpu(i)
|
||||
tot += per_cpu(bh_accounting, i).nr;
|
||||
buffer_heads_over_limit = (tot > max_buffer_heads);
|
||||
}
|
||||
|
||||
|
||||
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
|
||||
{
|
||||
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
|
||||
if (ret) {
|
||||
INIT_LIST_HEAD(&ret->b_assoc_buffers);
|
||||
get_cpu_var(bh_accounting).nr++;
|
||||
preempt_disable();
|
||||
__this_cpu_inc(bh_accounting.nr);
|
||||
recalc_bh_state();
|
||||
put_cpu_var(bh_accounting);
|
||||
preempt_enable();
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
@ -3228,9 +3227,10 @@ void free_buffer_head(struct buffer_head *bh)
|
|||
{
|
||||
BUG_ON(!list_empty(&bh->b_assoc_buffers));
|
||||
kmem_cache_free(bh_cachep, bh);
|
||||
get_cpu_var(bh_accounting).nr--;
|
||||
preempt_disable();
|
||||
__this_cpu_dec(bh_accounting.nr);
|
||||
recalc_bh_state();
|
||||
put_cpu_var(bh_accounting);
|
||||
preempt_enable();
|
||||
}
|
||||
EXPORT_SYMBOL(free_buffer_head);
|
||||
|
||||
|
@ -3243,9 +3243,8 @@ static void buffer_exit_cpu(int cpu)
|
|||
brelse(b->bhs[i]);
|
||||
b->bhs[i] = NULL;
|
||||
}
|
||||
get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
|
||||
this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
|
||||
per_cpu(bh_accounting, cpu).nr = 0;
|
||||
put_cpu_var(bh_accounting);
|
||||
}
|
||||
|
||||
static int buffer_cpu_notify(struct notifier_block *self,
|
||||
|
|
|
@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
|
|||
|
||||
static inline struct pt_regs *get_irq_regs(void)
|
||||
{
|
||||
return __get_cpu_var(__irq_regs);
|
||||
return __this_cpu_read(__irq_regs);
|
||||
}
|
||||
|
||||
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
|
||||
{
|
||||
struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
|
||||
struct pt_regs *old_regs;
|
||||
|
||||
old_regs = *pp_regs;
|
||||
*pp_regs = new_regs;
|
||||
old_regs = __this_cpu_read(__irq_regs);
|
||||
__this_cpu_write(__irq_regs, new_regs);
|
||||
return old_regs;
|
||||
}
|
||||
|
||||
|
|
|
@ -195,15 +195,9 @@ enum {
|
|||
/*
|
||||
* io context count accounting
|
||||
*/
|
||||
#define elv_ioc_count_mod(name, __val) \
|
||||
do { \
|
||||
preempt_disable(); \
|
||||
__get_cpu_var(name) += (__val); \
|
||||
preempt_enable(); \
|
||||
} while (0)
|
||||
|
||||
#define elv_ioc_count_inc(name) elv_ioc_count_mod(name, 1)
|
||||
#define elv_ioc_count_dec(name) elv_ioc_count_mod(name, -1)
|
||||
#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
|
||||
#define elv_ioc_count_inc(name) this_cpu_inc(name)
|
||||
#define elv_ioc_count_dec(name) this_cpu_dec(name)
|
||||
|
||||
#define elv_ioc_count_read(name) \
|
||||
({ \
|
||||
|
|
|
@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx);
|
|||
|
||||
static inline int kmap_atomic_idx_push(void)
|
||||
{
|
||||
int idx = __get_cpu_var(__kmap_atomic_idx)++;
|
||||
int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
|
||||
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
WARN_ON_ONCE(in_irq() && !irqs_disabled());
|
||||
BUG_ON(idx > KM_TYPE_NR);
|
||||
|
@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void)
|
|||
|
||||
static inline int kmap_atomic_idx(void)
|
||||
{
|
||||
return __get_cpu_var(__kmap_atomic_idx) - 1;
|
||||
return __this_cpu_read(__kmap_atomic_idx) - 1;
|
||||
}
|
||||
|
||||
static inline int kmap_atomic_idx_pop(void)
|
||||
static inline void kmap_atomic_idx_pop(void)
|
||||
{
|
||||
int idx = --__get_cpu_var(__kmap_atomic_idx);
|
||||
#ifdef CONFIG_DEBUG_HIGHMEM
|
||||
int idx = __this_cpu_dec_return(__kmap_atomic_idx);
|
||||
|
||||
BUG_ON(idx < 0);
|
||||
#else
|
||||
__this_cpu_dec(__kmap_atomic_idx);
|
||||
#endif
|
||||
return idx;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
|
|||
|
||||
#ifndef CONFIG_GENERIC_HARDIRQS
|
||||
#define kstat_irqs_this_cpu(irq) \
|
||||
(kstat_this_cpu.irqs[irq])
|
||||
(this_cpu_read(kstat.irqs[irq]))
|
||||
|
||||
struct irq_desc;
|
||||
|
||||
|
|
|
@ -305,12 +305,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
|
|||
/* kprobe_running() will just return the current_kprobe on this CPU */
|
||||
static inline struct kprobe *kprobe_running(void)
|
||||
{
|
||||
return (__get_cpu_var(current_kprobe));
|
||||
return (__this_cpu_read(current_kprobe));
|
||||
}
|
||||
|
||||
static inline void reset_current_kprobe(void)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = NULL;
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
}
|
||||
|
||||
static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
|
||||
|
|
|
@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
|
|||
pscr_ret__; \
|
||||
})
|
||||
|
||||
#define __pcpu_size_call_return2(stem, variable, ...) \
|
||||
({ \
|
||||
typeof(variable) pscr2_ret__; \
|
||||
__verify_pcpu_ptr(&(variable)); \
|
||||
switch(sizeof(variable)) { \
|
||||
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
|
||||
case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
|
||||
case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
|
||||
case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
|
||||
default: \
|
||||
__bad_size_call_parameter(); break; \
|
||||
} \
|
||||
pscr2_ret__; \
|
||||
})
|
||||
|
||||
#define __pcpu_size_call(stem, variable, ...) \
|
||||
do { \
|
||||
__verify_pcpu_ptr(&(variable)); \
|
||||
|
@ -402,6 +417,89 @@ do { \
|
|||
# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
|
||||
#endif
|
||||
|
||||
#define _this_cpu_generic_add_return(pcp, val) \
|
||||
({ \
|
||||
typeof(pcp) ret__; \
|
||||
preempt_disable(); \
|
||||
__this_cpu_add(pcp, val); \
|
||||
ret__ = __this_cpu_read(pcp); \
|
||||
preempt_enable(); \
|
||||
ret__; \
|
||||
})
|
||||
|
||||
#ifndef this_cpu_add_return
|
||||
# ifndef this_cpu_add_return_1
|
||||
# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef this_cpu_add_return_2
|
||||
# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef this_cpu_add_return_4
|
||||
# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef this_cpu_add_return_8
|
||||
# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
|
||||
#endif
|
||||
|
||||
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
|
||||
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
|
||||
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
|
||||
|
||||
#define _this_cpu_generic_xchg(pcp, nval) \
|
||||
({ typeof(pcp) ret__; \
|
||||
preempt_disable(); \
|
||||
ret__ = __this_cpu_read(pcp); \
|
||||
__this_cpu_write(pcp, nval); \
|
||||
preempt_enable(); \
|
||||
ret__; \
|
||||
})
|
||||
|
||||
#ifndef this_cpu_xchg
|
||||
# ifndef this_cpu_xchg_1
|
||||
# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_xchg_2
|
||||
# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_xchg_4
|
||||
# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_xchg_8
|
||||
# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval)
|
||||
# endif
|
||||
# define this_cpu_xchg(pcp, nval) \
|
||||
__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
|
||||
#endif
|
||||
|
||||
#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \
|
||||
({ typeof(pcp) ret__; \
|
||||
preempt_disable(); \
|
||||
ret__ = __this_cpu_read(pcp); \
|
||||
if (ret__ == (oval)) \
|
||||
__this_cpu_write(pcp, nval); \
|
||||
preempt_enable(); \
|
||||
ret__; \
|
||||
})
|
||||
|
||||
#ifndef this_cpu_cmpxchg
|
||||
# ifndef this_cpu_cmpxchg_1
|
||||
# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_cmpxchg_2
|
||||
# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_cmpxchg_4
|
||||
# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
|
||||
# endif
|
||||
# ifndef this_cpu_cmpxchg_8
|
||||
# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval)
|
||||
# endif
|
||||
# define this_cpu_cmpxchg(pcp, oval, nval) \
|
||||
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Generic percpu operations that do not require preemption handling.
|
||||
* Either we do not care about races or the caller has the
|
||||
|
@ -529,11 +627,87 @@ do { \
|
|||
# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
|
||||
#endif
|
||||
|
||||
#define __this_cpu_generic_add_return(pcp, val) \
|
||||
({ \
|
||||
__this_cpu_add(pcp, val); \
|
||||
__this_cpu_read(pcp); \
|
||||
})
|
||||
|
||||
#ifndef __this_cpu_add_return
|
||||
# ifndef __this_cpu_add_return_1
|
||||
# define __this_cpu_add_return_1(pcp, val) __this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef __this_cpu_add_return_2
|
||||
# define __this_cpu_add_return_2(pcp, val) __this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef __this_cpu_add_return_4
|
||||
# define __this_cpu_add_return_4(pcp, val) __this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# ifndef __this_cpu_add_return_8
|
||||
# define __this_cpu_add_return_8(pcp, val) __this_cpu_generic_add_return(pcp, val)
|
||||
# endif
|
||||
# define __this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
|
||||
#endif
|
||||
|
||||
#define __this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(val))
|
||||
#define __this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1)
|
||||
#define __this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1)
|
||||
|
||||
#define __this_cpu_generic_xchg(pcp, nval)	\
({	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	__this_cpu_write(pcp, nval);					\
	ret__;								\
})

#ifndef __this_cpu_xchg
# ifndef __this_cpu_xchg_1
# define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_2
# define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_4
# define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# ifndef __this_cpu_xchg_8
# define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
# endif
# define __this_cpu_xchg(pcp, nval)	\
	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
#endif

#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	ret__;								\
})

#ifndef __this_cpu_cmpxchg
# ifndef __this_cpu_cmpxchg_1
# define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_2
# define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_4
# define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef __this_cpu_cmpxchg_8
# define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define __this_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
#endif

/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from other
 * processors (the protection one gets when using regular atomic operations).
 * They are guaranteed to be atomic vs. local interrupts and
 * preemption only.
 */
#define irqsafe_cpu_generic_to_op(pcp, val, op)				\

@ -620,4 +794,33 @@ do { \
# define irqsafe_cpu_xor(pcp, val)	__pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
({									\
	typeof(pcp) ret__;						\
	unsigned long flags;						\
	local_irq_save(flags);						\
	ret__ = __this_cpu_read(pcp);					\
	if (ret__ == (oval))						\
		__this_cpu_write(pcp, nval);				\
	local_irq_restore(flags);					\
	ret__;								\
})

#ifndef irqsafe_cpu_cmpxchg
# ifndef irqsafe_cpu_cmpxchg_1
# define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_2
# define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_4
# define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# ifndef irqsafe_cpu_cmpxchg_8
# define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
# endif
# define irqsafe_cpu_cmpxchg(pcp, oval, nval)	\
	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
#endif

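A hedged sketch of where the irqsafe_ variants fit (names invented; not part of the patch): a per-cpu flag that both process context and a local interrupt handler may try to claim. The operation is atomic against local interrupts and preemption but, as the comment above stresses, not against other CPUs:

	static DEFINE_PER_CPU(unsigned int, mytoken);

	/* Claim the token; returns true if this context got it. */
	static bool grab_token(void)
	{
		return irqsafe_cpu_cmpxchg(mytoken, 0, 1) == 0;
	}

	/* Release is a plain write; only the current owner is assumed to call it. */
	static void release_token(void)
	{
		this_cpu_write(mytoken, 0);
	}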
#endif /* __LINUX_PERCPU_H */

@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__get_cpu_var(process_counts)--;
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
}

@ -1285,7 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
			__this_cpu_inc(process_counts);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;

@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
	return __this_cpu_read(hrtimer_bases.hres_active);
}

/*

@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void)
 */
static void __irq_work_queue(struct irq_work *entry)
{
	struct irq_work **head, *next;
	struct irq_work *next;

	head = &get_cpu_var(irq_work_list);
	preempt_disable();

	do {
		next = *head;
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(head, next, entry) != next);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	put_cpu_var(irq_work_list);
	preempt_enable();
}

/*

@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
 */
void irq_work_run(void)
{
	struct irq_work *list, **head;
	struct irq_work *list;

	head = &__get_cpu_var(irq_work_list);
	if (*head == NULL)
	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = xchg(head, NULL);
	list = this_cpu_xchg(irq_work_list, NULL);

	while (list != NULL) {
		struct irq_work *entry = list;

@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
	__this_cpu_write(kprobe_instance, NULL);
}

/*

@ -965,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified

@ -980,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {

@ -364,8 +364,8 @@ void rcu_irq_exit(void)
	WARN_ON_ONCE(rdtp->dynticks & 0x1);

	/* If the interrupt queued a callback, get out of dyntick mode. */
	if (__get_cpu_var(rcu_sched_data).nxtlist ||
	    __get_cpu_var(rcu_bh_data).nxtlist)
	if (__this_cpu_read(rcu_sched_data.nxtlist) ||
	    __this_cpu_read(rcu_bh_data.nxtlist))
		set_need_resched();
}

@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);

@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	BUG_ON(!irqs_disabled());

	t->next = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = t;
	t->next = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, t);
	__raise_softirq_irqoff(HI_SOFTIRQ);
}

@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
	local_irq_enable();

	while (list) {

@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}

@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
	local_irq_enable();

	while (list) {

@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}

@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}

@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
@ -612,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
		fill_tgid_exit(tsk);
	}

	listeners = &__raw_get_cpu_var(listener_array);
	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
}

@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
 */
int tick_program_event(ktime_t expires, int force)
{
	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	return tick_dev_program_event(dev, expires, force);
}

@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
	int ret;

	local_irq_save(flags);
	ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
	ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
	local_irq_restore(flags);

	return ret;

@ -118,12 +118,12 @@ static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
}

void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
	__this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void)
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__get_cpu_var(watchdog_nmi_touch) == true) {
		__get_cpu_var(watchdog_nmi_touch) = false;
	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__get_cpu_var(hard_watchdog_warn) == true)
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)

@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__get_cpu_var(hard_watchdog_warn) = true;
		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__get_cpu_var(hard_watchdog_warn) = false;
	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
static void watchdog_interrupt_count(void)
{
	__get_cpu_var(hrtimer_interrupts)++;
	__this_cpu_inc(hrtimer_interrupts);
}
#else
static inline void watchdog_interrupt_count(void) { return; }

@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; }
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__get_cpu_var(softlockup_watchdog));
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__get_cpu_var(softlockup_touch_sync) = false;
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}
		__touch_watchdog();

@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/* only warn once */
		if (__get_cpu_var(soft_watchdog_warn) == true)
		if (__this_cpu_read(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",

@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__get_cpu_var(soft_watchdog_warn) = true;
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__get_cpu_var(soft_watchdog_warn) = false;
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

@ -72,18 +72,16 @@ EXPORT_SYMBOL(percpu_counter_set);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;
	s32 *pcount;

	preempt_disable();
	pcount = this_cpu_ptr(fbc->counters);
	count = *pcount + amount;
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		spin_lock(&fbc->lock);
		fbc->count += count;
		*pcount = 0;
		__this_cpu_write(*fbc->counters, 0);
		spin_unlock(&fbc->lock);
	} else {
		*pcount = count;
		__this_cpu_write(*fbc->counters, count);
	}
	preempt_enable();
}

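For context, a rough sketch of how such a counter is consumed (based on the lib/percpu_counter API of this kernel generation; the counter name is invented and error handling is omitted):

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_widgets;

	static int widgets_init(void)
	{
		return percpu_counter_init(&nr_widgets, 0);
	}

	static void widget_created(void)
	{
		percpu_counter_add(&nr_widgets, 1);	/* hot path, per-cpu */
	}

	static s64 widgets_estimate(void)
	{
		return percpu_counter_read(&nr_widgets);	/* cheap, approximate */
	}

	static void widgets_exit(void)
	{
		percpu_counter_destroy(&nr_widgets);
	}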
@ -293,12 +293,8 @@ static void *pcpu_mem_alloc(size_t size)

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
	else
		return vzalloc(size);
}

/**

@ -829,12 +829,12 @@ static void init_reap_node(int cpu)

static void next_reap_node(void)
{
	int node = __get_cpu_var(slab_reap_node);
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__get_cpu_var(slab_reap_node) = node;
	__this_cpu_write(slab_reap_node, node);
}

#else

@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
	int node = __get_cpu_var(slab_reap_node);
	int node = __this_cpu_read(slab_reap_node);

	if (l3->alien) {
		struct array_cache *ac = l3->alien[node];

149
mm/vmstat.c
@ -167,35 +167,23 @@ static void refresh_zone_stat_thresholds(void)
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);

	s8 *p = pcp->vm_stat_diff + item;
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + *p;
	x = delta + __this_cpu_read(*p);

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

/*
 * Optimized increment and decrement functions.
 *

@ -221,16 +209,17 @@ EXPORT_SYMBOL(mod_zone_page_state);
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	(*p)++;
	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

@ -242,16 +231,17 @@ EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
	s8 *p = pcp->vm_stat_diff + item;
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	(*p)--;
	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < - t)) {
		s8 overstep = t >> 1;

	if (unlikely(*p < - pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

@ -261,6 +251,92 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
}
EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *        0       No overstepping
 *        1       Overstepping half of threshold
 *        -1      Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
	enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the following
		 * will apply the threshold again and therefore bring the
		 * counter under the threshold.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
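A worked illustration of the overstep handling above, with numbers chosen purely for the example: take t = 32, overstep_mode = 1, a current per-cpu diff o = 32 and delta = 1. Then n = 33 exceeds t, os = 32 >> 1 = 16, z = n + os = 49 is folded into the global zone counter, and the per-cpu diff restarts at n = -16. The net effect (49 - 16 = 33) still equals o + delta, so the cmpxchg loop loses no updates; it only shifts where they are accounted.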

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

@ -291,6 +367,7 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif

/*
 * Update the zone counters for one cpu.