#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

#ifdef CONFIG_SMP
|
|
|
|
|
|
|
|
struct percpu_counter {
|
|
|
|
spinlock_t lock;
|
2006-06-23 03:05:41 -06:00
|
|
|
s64 count;
|
2007-07-16 00:39:51 -06:00
|
|
|
#ifdef CONFIG_HOTPLUG_CPU
|
|
|
|
struct list_head list; /* All percpu_counters are on a list */
|
|
|
|
#endif
|
2006-06-23 03:05:41 -06:00
|
|
|
s32 *counters;
|
2005-04-16 16:20:36 -06:00
|
|
|
};
|
|
|
|
|
|
|
|
#if NR_CPUS >= 16
|
|
|
|
#define FBC_BATCH (NR_CPUS*2)
|
|
|
|
#else
|
|
|
|
#define FBC_BATCH (NR_CPUS*4)
|
|
|
|
#endif
|
|
|
|
|
2007-10-17 00:25:45 -06:00
|
|
|
int percpu_counter_init(struct percpu_counter *fbc, s64 amount);
|
2007-10-17 00:25:46 -06:00
|
|
|
int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
|
2007-07-16 00:39:51 -06:00
|
|
|
void percpu_counter_destroy(struct percpu_counter *fbc);
|
2007-10-17 00:25:44 -06:00
|
|
|
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
|
2007-10-17 00:25:43 -06:00
|
|
|
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
|
2007-10-17 00:25:45 -06:00
|
|
|
s64 __percpu_counter_sum(struct percpu_counter *fbc);
|
2005-04-16 16:20:36 -06:00
|
|
|
|
2007-10-17 00:25:43 -06:00
|
|
|
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
|
2007-10-17 00:25:43 -06:00
|
|
|
{
|
|
|
|
__percpu_counter_add(fbc, amount, FBC_BATCH);
|
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:45 -06:00
|
|
|
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
|
|
|
|
{
|
|
|
|
s64 ret = __percpu_counter_sum(fbc);
|
|
|
|
return ret < 0 ? 0 : ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
|
|
|
|
{
|
|
|
|
return __percpu_counter_sum(fbc);
|
|
|
|
}
|
|
|
|
|
2006-06-23 03:05:41 -06:00
|
|
|
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
|
|
|
return fbc->count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It is possible for the percpu_counter_read() to return a small negative
|
|
|
|
* number for some counter which should never be negative.
|
2006-06-23 03:05:41 -06:00
|
|
|
*
|
2005-04-16 16:20:36 -06:00
|
|
|
*/
|
2006-06-23 03:05:41 -06:00
|
|
|
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2006-06-23 03:05:41 -06:00
|
|
|
s64 ret = fbc->count;
|
2005-04-16 16:20:36 -06:00
|
|
|
|
|
|
|
barrier(); /* Prevent reloads of fbc->count */
|
2006-06-23 03:05:41 -06:00
|
|
|
if (ret >= 0)
|
2005-04-16 16:20:36 -06:00
|
|
|
return ret;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
|
|
|
struct percpu_counter {
|
2006-06-23 03:05:41 -06:00
|
|
|
s64 count;
|
2005-04-16 16:20:36 -06:00
|
|
|
};
|
|
|
|
|
2007-10-17 00:25:45 -06:00
|
|
|
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
2006-06-23 03:05:41 -06:00
|
|
|
fbc->count = amount;
|
2007-10-17 00:25:45 -06:00
|
|
|
return 0;
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:46 -06:00
|
|
|
#define percpu_counter_init_irq percpu_counter_init
|
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:44 -06:00
|
|
|
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
|
|
|
|
{
|
|
|
|
fbc->count = amount;
|
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:43 -06:00
|
|
|
#define __percpu_counter_add(fbc, amount, batch) \
|
|
|
|
percpu_counter_add(fbc, amount)
|
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
static inline void
|
2007-10-17 00:25:43 -06:00
|
|
|
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
|
|
|
preempt_disable();
|
|
|
|
fbc->count += amount;
|
|
|
|
preempt_enable();
|
|
|
|
}
|
|
|
|
|
2006-06-23 03:05:41 -06:00
|
|
|
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
|
|
|
return fbc->count;
|
|
|
|
}
|
|
|
|
|
2006-06-23 03:05:41 -06:00
|
|
|
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
|
2005-04-16 16:20:36 -06:00
|
|
|
{
|
|
|
|
return fbc->count;
|
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:44 -06:00
|
|
|
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
|
2006-03-07 22:55:31 -07:00
|
|
|
{
|
|
|
|
return percpu_counter_read_positive(fbc);
|
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:45 -06:00
|
|
|
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
|
|
|
|
{
|
|
|
|
return percpu_counter_read(fbc);
|
|
|
|
}
|
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
#endif /* CONFIG_SMP */
|
|
|
|
|
|
|
|
static inline void percpu_counter_inc(struct percpu_counter *fbc)
|
|
|
|
{
|
2007-10-17 00:25:42 -06:00
|
|
|
percpu_counter_add(fbc, 1);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void percpu_counter_dec(struct percpu_counter *fbc)
|
|
|
|
{
|
2007-10-17 00:25:42 -06:00
|
|
|
percpu_counter_add(fbc, -1);
|
2005-04-16 16:20:36 -06:00
|
|
|
}
|
|
|
|
|
2007-10-17 00:25:42 -06:00
|
|
|
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
|
|
|
|
{
|
|
|
|
percpu_counter_add(fbc, -amount);
|
|
|
|
}
|
|
|
|
|
2005-04-16 16:20:36 -06:00
|
|
|
#endif /* _LINUX_PERCPU_COUNTER_H */
|