[LIB]: Drop the pcounter itself.
The knock-out: the pcounter abstraction is no longer used anywhere in the kernel. I'm not sure whether this should go via the netdev tree, but as far as I remember it was added via this one, and besides, Eric thinks that Andrew shouldn't mind this.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bdcde3d71a
commit 095d911201
3 changed files with 0 additions and 133 deletions
include/linux/pcounter.h (deleted file)
@@ -1,74 +0,0 @@
#ifndef __LINUX_PCOUNTER_H
#define __LINUX_PCOUNTER_H
/*
 * Using a dynamic percpu 'int' variable has a cost :
 * 1) Extra dereference
 * Current per_cpu_ptr() implementation uses an array per 'percpu variable'.
 * 2) memory cost of NR_CPUS*(32+sizeof(void *)) instead of num_possible_cpus()*4
 *
 * This pcounter implementation is an abstraction to be able to use
 * either a static or a dynamic per cpu variable.
 * One dynamic per cpu variable gets a fast & cheap implementation, we can
 * change pcounter implementation too.
 */
struct pcounter {
#ifdef CONFIG_SMP
        void (*add)(struct pcounter *self, int inc);
        int (*getval)(const struct pcounter *self, int cpu);
        int *per_cpu_values;
#else
        int val;
#endif
};

#ifdef CONFIG_SMP
#include <linux/percpu.h>

#define DEFINE_PCOUNTER(NAME)                                           \
static DEFINE_PER_CPU(int, NAME##_pcounter_values);                    \
static void NAME##_pcounter_add(struct pcounter *self, int val)        \
{                                                                       \
        __get_cpu_var(NAME##_pcounter_values) += val;                   \
}                                                                       \
static int NAME##_pcounter_getval(const struct pcounter *self, int cpu) \
{                                                                       \
        return per_cpu(NAME##_pcounter_values, cpu);                    \
}                                                                       \

#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER)       \
        MEMBER = {                                      \
                .add    = NAME##_pcounter_add,          \
                .getval = NAME##_pcounter_getval,       \
        }


static inline void pcounter_add(struct pcounter *self, int inc)
{
        self->add(self, inc);
}

extern int pcounter_getval(const struct pcounter *self);
extern int pcounter_alloc(struct pcounter *self);
extern void pcounter_free(struct pcounter *self);


#else /* CONFIG_SMP */

static inline void pcounter_add(struct pcounter *self, int inc)
{
        self->val += inc;
}

static inline int pcounter_getval(const struct pcounter *self)
{
        return self->val;
}

#define DEFINE_PCOUNTER(NAME)
#define PCOUNTER_MEMBER_INITIALIZER(NAME, MEMBER)
#define pcounter_alloc(self) 0
#define pcounter_free(self)

#endif /* CONFIG_SMP */

#endif /* __LINUX_PCOUNTER_H */
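The header's own comment explains the split that is being retired: DEFINE_PCOUNTER(NAME) stamps out a static per-CPU int plus dedicated NAME##_pcounter_add()/NAME##_pcounter_getval() helpers, PCOUNTER_MEMBER_INITIALIZER() wires those helpers into an embedded struct pcounter, and pcounter_add()/pcounter_getval() dispatch through the function pointers on SMP (or fall back to a plain int on UP). A minimal sketch of how a caller would have used the static variant; the foo/sockets_in_use names are illustrative only and assume the header above were still in the tree:

/* Illustrative-only user of the removed static pcounter API. */
#include <linux/pcounter.h>

struct foo_stats {
        struct pcounter sockets_in_use;
};

/* Generates a static per-CPU int plus foo_pcounter_add()/foo_pcounter_getval()
 * on SMP; expands to nothing on UP builds. */
DEFINE_PCOUNTER(foo)

/* Wires the generated helpers into the embedded counter. */
static struct foo_stats foo_stats = {
        PCOUNTER_MEMBER_INITIALIZER(foo, .sockets_in_use)
};

static void foo_sock_created(void)
{
        pcounter_add(&foo_stats.sockets_in_use, 1);        /* per-CPU add on SMP, plain int on UP */
}

static int foo_socks_in_use(void)
{
        return pcounter_getval(&foo_stats.sockets_in_use); /* sums every CPU's slot on SMP */
}

The point of the indirection was that a hot, statically declared counter avoided per_cpu_ptr()'s extra dereference, while other counters could share the generic dynamic path provided by lib/pcounter.c below.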
lib/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
-obj-$(CONFIG_SMP) += pcounter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o

 obj-$(CONFIG_SWIOTLB) += swiotlb.o
lib/pcounter.c (deleted file)
@@ -1,58 +0,0 @@
/*
 * Define default pcounter functions
 * Note that often used pcounters use dedicated functions to get a speed increase.
 * (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER)
 */

#include <linux/module.h>
#include <linux/pcounter.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

static void pcounter_dyn_add(struct pcounter *self, int inc)
{
        per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
}

static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
{
        return per_cpu_ptr(self->per_cpu_values, cpu)[0];
}

int pcounter_getval(const struct pcounter *self)
{
        int res = 0, cpu;

        for_each_possible_cpu(cpu)
                res += self->getval(self, cpu);

        return res;
}
EXPORT_SYMBOL_GPL(pcounter_getval);

int pcounter_alloc(struct pcounter *self)
{
        int rc = 0;
        if (self->add == NULL) {
                self->per_cpu_values = alloc_percpu(int);
                if (self->per_cpu_values != NULL) {
                        self->add = pcounter_dyn_add;
                        self->getval = pcounter_dyn_getval;
                } else
                        rc = 1;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(pcounter_alloc);

void pcounter_free(struct pcounter *self)
{
        if (self->per_cpu_values != NULL) {
                free_percpu(self->per_cpu_values);
                self->per_cpu_values = NULL;
                self->getval = NULL;
                self->add = NULL;
        }
}
EXPORT_SYMBOL_GPL(pcounter_free);
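lib/pcounter.c supplied the generic dynamic path: pcounter_alloc() attaches alloc_percpu(int) storage and the pcounter_dyn_add()/pcounter_dyn_getval() callbacks to a zero-initialised struct pcounter, pcounter_getval() folds the value over every possible CPU, and pcounter_free() releases the storage and clears the callbacks. A hedged sketch of that lifecycle, again with made-up names (bar_*) and assuming the file above were still being built:

/* Illustrative-only user of the removed dynamic pcounter API. */
#include <linux/errno.h>
#include <linux/pcounter.h>

static struct pcounter bar_counter;     /* .add/.getval/.per_cpu_values start out NULL */

static int bar_init(void)
{
        /* Allocates per-CPU storage and installs the default callbacks;
         * returns non-zero if alloc_percpu() fails (always 0 on UP builds). */
        if (pcounter_alloc(&bar_counter))
                return -ENOMEM;
        return 0;
}

static void bar_event(void)
{
        pcounter_add(&bar_counter, 1);          /* per-CPU add on SMP, plain int on UP */
}

static int bar_total(void)
{
        return pcounter_getval(&bar_counter);   /* sum across all possible CPUs */
}

static void bar_exit(void)
{
        pcounter_free(&bar_counter);            /* free storage, reset callbacks */
}

With no remaining callers of either variant, the commit drops the header, the implementation and the Makefile rule in one go.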