kernel-fxtec-pro1x/include/linux/proportions.h
Peter Zijlstra 90eec103b9 treewide: Remove old email address
There were still a number of references to my old Red Hat email
address in the kernel source. Remove these while keeping the
Red Hat copyright notices intact.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-11-23 09:44:58 +01:00

137 lines
3.2 KiB
C

/*
* Floating proportions
*
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* This file contains the public data structure and API definitions.
*/
#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
/*
 * Global state of the floating-proportion machinery: the period length
 * and the shared event counter that all local proportions are measured
 * against.
 */
struct prop_global {
/*
* The period over which we differentiate
*
* period = 2^shift
*/
int shift;
/*
* The total event counter aka 'time'.
*
* Treated as an unsigned long; the lower 'shift - 1' bits are the
* counter bits, the remaining upper bits the period counter.
*/
struct percpu_counter events;
};
/*
* global proportion descriptor
*
* this is needed to consistently flip prop_global structures.
*/
struct prop_descriptor {
int index; /* which pg[] entry is currently live */
struct prop_global pg[2]; /* double-buffered so a shift change can flip atomically */
struct mutex mutex; /* serialize the prop_global switch */
};
/*
 * Initialize @pd with period 2^@shift; @gfp is the allocation mask for
 * the internal percpu counters.  Returns 0 on success, presumably a
 * negative errno on failure -- NOTE(review): confirm against lib/proportions.c.
 */
int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
/* Switch @pd to a new period length of 2^@new_shift. */
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/*
* ----- PERCPU ------
*
* Per-cpu flavour of a local proportion: the event counter is itself a
* percpu_counter, suitable for frequently-updated, many-cpu users.
*/
struct prop_local_percpu {
/*
* the local events counter
*/
struct percpu_counter events;
/*
* snapshot of the last seen global state
*/
int shift;
unsigned long period;
raw_spinlock_t lock; /* protect the snapshot state */
};
/* Initialize @pl; @gfp is the allocation mask for the percpu counter. */
int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
/* Tear down the percpu counter allocated by prop_local_init_percpu(). */
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
/* Record one event for @pl against @pd.  Caller must disable IRQs
 * (see prop_inc_percpu() below for the IRQ-safe wrapper). */
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
/* Return @pl's fraction of the global events as *numerator/www*denominator. */
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
long *numerator, long *denominator);
/*
 * prop_inc_percpu - IRQ-safe wrapper around __prop_inc_percpu()
 *
 * Disables local interrupts around the counter update so an interrupt
 * handler cannot observe or corrupt the intermediate state.
 */
static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(irq_state);
}
/*
* Limit the time part in order to ensure there are some bits left for the
* cycle counter and fraction multiply.
*/
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif
/* Fixed-point fraction: PROP_FRAC_BASE represents 1.0 for the @frac
 * argument of __prop_inc_percpu_max() below. */
#define PROP_FRAC_SHIFT (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE (1UL << PROP_FRAC_SHIFT)
/* Like __prop_inc_percpu(), but presumably limits @pl's proportion to
 * @frac/PROP_FRAC_BASE -- NOTE(review): confirm against lib/proportions.c. */
void __prop_inc_percpu_max(struct prop_descriptor *pd,
struct prop_local_percpu *pl, long frac);
/*
* ----- SINGLE ------
*
* Single-counter flavour of a local proportion: a plain unsigned long
* under a raw spinlock, for users that don't need percpu counters.
*/
struct prop_local_single {
/*
* the local events counter
*/
unsigned long events;
/*
* snapshot of the last seen global state
* and a lock protecting this state
*/
unsigned long period;
int shift;
raw_spinlock_t lock; /* protect the snapshot state */
};
/* Static initializer: zeroed counters with an unlocked spinlock. */
#define INIT_PROP_LOCAL_SINGLE(name) \
{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
}
/* Runtime initializer counterpart of INIT_PROP_LOCAL_SINGLE(). */
int prop_local_init_single(struct prop_local_single *pl);
/* Tear down @pl (pairs with prop_local_init_single()). */
void prop_local_destroy_single(struct prop_local_single *pl);
/* Record one event for @pl against @pd.  Caller must disable IRQs
 * (see prop_inc_single() below for the IRQ-safe wrapper). */
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
/* Return @pl's fraction of the global events as *numerator/www*denominator. */
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
long *numerator, long *denominator);
/*
 * prop_inc_single - IRQ-safe wrapper around __prop_inc_single()
 *
 * Disables local interrupts around the counter update so an interrupt
 * handler cannot observe or corrupt the intermediate state.
 */
static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long irq_state;

	local_irq_save(irq_state);
	__prop_inc_single(pd, pl);
	local_irq_restore(irq_state);
}
#endif /* _LINUX_PROPORTIONS_H */