kernel-fxtec-pro1x/include/linux/kernel_stat.h
Eric Dumazet 6c9ae009b2 irq: use per_cpu kstat_irqs
Use the modern per_cpu API to increment {soft|hard}irq counters, and use
per_cpu allocation for (struct irq_desc)->kstat_irqs instead of an array.
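
For illustration only, the per-CPU counter pattern amounts to the sketch
below; demo_desc, demo_setup and demo_account_irq are made-up names standing
in for the real irq_desc paths, not the kernel code itself:

	#include <linux/percpu.h>
	#include <linux/errno.h>

	/* Hypothetical descriptor with a per-CPU counter, mirroring the
	 * new (struct irq_desc)->kstat_irqs layout. */
	struct demo_desc {
		unsigned int __percpu *kstat_irqs;
	};

	static int demo_setup(struct demo_desc *desc)
	{
		/* One counter per possible CPU, placed in each CPU's area. */
		desc->kstat_irqs = alloc_percpu(unsigned int);
		return desc->kstat_irqs ? 0 : -ENOMEM;
	}

	static void demo_account_irq(struct demo_desc *desc)
	{
		/* Bump this CPU's private copy; no shared cache line. */
		__this_cpu_inc(*desc->kstat_irqs);
	}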

This gives better SMP/NUMA locality and saves a few instructions per irq.

With small nr_cpu_ids values (8, for example), kstat_irqs was a small array
(less than L1_CACHE_BYTES), a potential source of false sharing.

In the !CONFIG_SPARSE_IRQ case, remove the huge, NUMA/cache unfriendly
kstat_irqs_all[NR_IRQS][NR_CPUS] array.
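
A per-IRQ total is then obtained by summing the per-CPU copies rather than
indexing a shared 2D array; a sketch of that read side, reusing the
hypothetical demo_desc from the sketch above (needs <linux/cpumask.h> for
for_each_possible_cpu()):

	static unsigned int demo_irq_sum(struct demo_desc *desc)
	{
		unsigned int sum = 0;
		int cpu;

		/* Walk every possible CPU's private counter. */
		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
		return sum;
	}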

Note: we still populate kstat_irqs for all possible irqs in
early_irq_init().  We could probably use on-demand allocation instead (the
code is included in alloc_descs()); the problem is that not all IRQs are
used with a prior alloc_descs() call.
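
A sketch of what the eager boot-time population amounts to, again with
made-up names and the hypothetical demo_desc above (the real code lives in
early_irq_init()/alloc_descs()):

	static int demo_early_init(struct demo_desc *descs, unsigned int count)
	{
		unsigned int i;

		/* Allocate a per-CPU counter for every possible IRQ up front. */
		for (i = 0; i < count; i++) {
			descs[i].kstat_irqs = alloc_percpu(unsigned int);
			if (!descs[i].kstat_irqs)
				return -ENOMEM;
		}
		return 0;
	}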

kstat_irqs_this_cpu() is no longer used, so remove it.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2011-01-13 17:32:31 -08:00

#ifndef _LINUX_KERNEL_STAT_H
#define _LINUX_KERNEL_STAT_H

#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/cputime.h>

/*
 * 'kernel_stat.h' contains the definitions needed for doing
 * some kernel statistics (CPU usage, context switches ...),
 * used by rstatd/perfmeter
 */

struct cpu_usage_stat {
	cputime64_t user;
	cputime64_t nice;
	cputime64_t system;
	cputime64_t softirq;
	cputime64_t irq;
	cputime64_t idle;
	cputime64_t iowait;
	cputime64_t steal;
	cputime64_t guest;
	cputime64_t guest_nice;
};

struct kernel_stat {
	struct cpu_usage_stat	cpustat;
#ifndef CONFIG_GENERIC_HARDIRQS
	unsigned int irqs[NR_IRQS];
#endif
	unsigned long irqs_sum;
	unsigned int softirqs[NR_SOFTIRQS];
};

DECLARE_PER_CPU(struct kernel_stat, kstat);

#define kstat_cpu(cpu)	per_cpu(kstat, cpu)
/* Must have preemption disabled for this to be meaningful. */
#define kstat_this_cpu	__get_cpu_var(kstat)

extern unsigned long long nr_context_switches(void);

#ifndef CONFIG_GENERIC_HARDIRQS

struct irq_desc;

static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
					    struct irq_desc *desc)
{
	__this_cpu_inc(kstat.irqs[irq]);
	__this_cpu_inc(kstat.irqs_sum);
}

static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).irqs[irq];
}

#else

#include <linux/irq.h>

extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);

#define kstat_incr_irqs_this_cpu(irqno, DESC)		\
do {							\
	__this_cpu_inc(*(DESC)->kstat_irqs);		\
	__this_cpu_inc(kstat.irqs_sum);			\
} while (0)

#endif

static inline void kstat_incr_softirqs_this_cpu(unsigned int irq)
{
	__this_cpu_inc(kstat.softirqs[irq]);
}

static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
{
	return kstat_cpu(cpu).softirqs[irq];
}

/*
 * Number of interrupts per specific IRQ source, since bootup
 */
#ifndef CONFIG_GENERIC_HARDIRQS
static inline unsigned int kstat_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);

	return sum;
}
#else
extern unsigned int kstat_irqs(unsigned int irq);
#endif

/*
 * Number of interrupts per cpu, since bootup
 */
static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
{
	return kstat_cpu(cpu).irqs_sum;
}

/*
 * Lock/unlock the current runqueue - to extract task statistics:
 */
extern unsigned long long task_delta_exec(struct task_struct *);

extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
extern void account_idle_time(cputime_t);

extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);

#endif /* _LINUX_KERNEL_STAT_H */