#ifndef _LINUX_TIMER_H
#define _LINUX_TIMER_H

#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/stddef.h>
#include <linux/debugobjects.h>
#include <linux/stringify.h>

struct tvec_base;

struct timer_list {
	/*
	 * All fields that change during normal runtime are grouped into
	 * the same cacheline.
	 */
	struct list_head entry;
	unsigned long expires;
	struct tvec_base *base;

	void (*function)(unsigned long);
	unsigned long data;

	int slack;

#ifdef CONFIG_TIMER_STATS
	int start_pid;
	void *start_site;
	char start_comm[16];
#endif
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
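
/*
 * Illustrative note: only function, expires, data and slack are meant to
 * be set by users (normally via the DEFINE_TIMER/setup_timer helpers
 * below); entry and base are managed by the timer core. A minimal sketch,
 * assuming a hypothetical handler my_handler():
 *
 *	struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_handler;
 *	my_timer.data = 0;
 *	my_timer.expires = jiffies + HZ;
 *	add_timer(&my_timer);
 */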

extern struct tvec_base boot_tvec_bases;

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting the lockdep_map key
 * (second argument) here is required, otherwise it could be initialised to
 * the copy of the lockdep_map later! We use the pointer to the string
 * "<file>:<line>" as the key and the string itself as the name of the
 * lockdep_map.
 */
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
#else
#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
#endif
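
/*
 * Illustrative expansion (assuming CONFIG_LOCKDEP): with _kn being the
 * literal "foo.c:10", the initializer above becomes
 *
 *	.lockdep_map = STATIC_LOCKDEP_MAP_INIT("foo.c:10", &"foo.c:10"),
 *
 * i.e. the string literal is the map's name and its (unique) address is
 * the lock class key, which is why the key must be fixed here rather than
 * inherited from a later copy of the lockdep_map.
 */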

/*
 * Note that all tvec_bases are at least 4-byte aligned, so the lower two
 * bits of base in timer_list are guaranteed to be zero. Use them for flags.
 *
 * A deferrable timer will work normally when the system is busy, but
 * will not cause a CPU to come out of idle just to service it; instead,
 * the timer will be serviced when the CPU eventually wakes up with a
 * subsequent non-deferrable timer.
 *
 * An irqsafe timer is executed with IRQs disabled and it is safe to wait
 * for the completion of the running instance from IRQ handlers, for
 * example, by calling del_timer_sync().
 *
 * Note: The irq disabled callback execution is a special case for
 * workqueue locking issues. It's not meant for executing random crap
 * with interrupts disabled. Abuse is monitored!
 */
#define TIMER_DEFERRABLE	0x1LU
#define TIMER_IRQSAFE		0x2LU

#define TIMER_FLAG_MASK		0x3LU
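
/*
 * Illustrative sketch (not an interface of this header): internal code
 * recovers the real base pointer and the flags roughly like
 *
 *	struct tvec_base *base = (struct tvec_base *)
 *		((unsigned long)timer->base & ~TIMER_FLAG_MASK);
 *	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
 *
 * which only works because of the alignment guarantee described above.
 */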

#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
		.entry = { .prev = TIMER_ENTRY_STATIC }, \
		.function = (_function), \
		.expires = (_expires), \
		.data = (_data), \
		.base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
		.slack = -1, \
		__TIMER_LOCKDEP_MAP_INITIALIZER( \
			__FILE__ ":" __stringify(__LINE__)) \
	}

#define TIMER_INITIALIZER(_function, _expires, _data) \
	__TIMER_INITIALIZER((_function), (_expires), (_data), 0)

#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
	__TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)

#define DEFINE_TIMER(_name, _function, _expires, _data) \
	struct timer_list _name = \
		TIMER_INITIALIZER(_function, _expires, _data)
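
/*
 * Example (illustrative, hypothetical names): a file-scope timer that is
 * statically initialized and armed later at runtime:
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		pr_info("timer fired, data=%lu\n", data);
 *	}
 *	static DEFINE_TIMER(my_timer, my_timeout, 0, 0);
 *
 * and then, somewhere in init code:
 *
 *	mod_timer(&my_timer, jiffies + HZ);
 */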

void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key);

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void init_timer_on_stack_key(struct timer_list *timer,
				    unsigned int flags, const char *name,
				    struct lock_class_key *key);
extern void destroy_timer_on_stack(struct timer_list *timer);
#else
static inline void destroy_timer_on_stack(struct timer_list *timer) { }
static inline void init_timer_on_stack_key(struct timer_list *timer,
					   unsigned int flags,
					   const char *name,
					   struct lock_class_key *key)
{
	init_timer_key(timer, flags, name, key);
}
#endif
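
/*
 * Sketch of the on-stack lifecycle (illustrative, my_fn is hypothetical):
 * an on-stack timer must be announced to the debug-objects code before
 * use and torn down before its stack frame goes away:
 *
 *	struct timer_list t;
 *
 *	setup_timer_on_stack(&t, my_fn, 0);
 *	mod_timer(&t, jiffies + HZ);
 *	...
 *	del_timer_sync(&t);
 *	destroy_timer_on_stack(&t);
 */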

#ifdef CONFIG_LOCKDEP
#define __init_timer(_timer, _flags) \
	do { \
		static struct lock_class_key __key; \
		init_timer_key((_timer), (_flags), #_timer, &__key); \
	} while (0)

#define __init_timer_on_stack(_timer, _flags) \
	do { \
		static struct lock_class_key __key; \
		init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
	} while (0)
#else
#define __init_timer(_timer, _flags) \
	init_timer_key((_timer), (_flags), NULL, NULL)
#define __init_timer_on_stack(_timer, _flags) \
	init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
#endif

#define init_timer(timer) \
	__init_timer((timer), 0)
#define init_timer_deferrable(timer) \
	__init_timer((timer), TIMER_DEFERRABLE)
#define init_timer_on_stack(timer) \
	__init_timer_on_stack((timer), 0)

#define __setup_timer(_timer, _fn, _data, _flags) \
	do { \
		__init_timer((_timer), (_flags)); \
		(_timer)->function = (_fn); \
		(_timer)->data = (_data); \
	} while (0)

#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \
	do { \
		__init_timer_on_stack((_timer), (_flags)); \
		(_timer)->function = (_fn); \
		(_timer)->data = (_data); \
	} while (0)

#define setup_timer(timer, fn, data) \
	__setup_timer((timer), (fn), (data), 0)
#define setup_timer_on_stack(timer, fn, data) \
	__setup_timer_on_stack((timer), (fn), (data), 0)
#define setup_deferrable_timer_on_stack(timer, fn, data) \
	__setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
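
/*
 * Typical driver pattern (illustrative; my_dev and my_dev_poll are
 * hypothetical): the containing object is passed through ->data so the
 * callback can recover it:
 *
 *	static void my_dev_poll(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		...
 *	}
 *
 *	setup_timer(&dev->poll_timer, my_dev_poll, (unsigned long)dev);
 *	mod_timer(&dev->poll_timer, jiffies + msecs_to_jiffies(100));
 */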

/**
 * timer_pending - is a timer pending?
 * @timer: the timer in question
 *
 * timer_pending will tell whether a given timer is currently pending
 * or not. Callers must ensure serialization wrt. other operations done
 * to this timer, e.g. interrupt contexts, or other CPUs on SMP.
 *
 * return value: 1 if the timer is pending, 0 if not.
 */
static inline int timer_pending(const struct timer_list *timer)
{
	return timer->entry.next != NULL;
}
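
/*
 * Illustrative use (dev->lock is hypothetical): because the check is not
 * serialized against the timer core, callers typically sample it under
 * their own lock:
 *
 *	spin_lock_bh(&dev->lock);
 *	if (!timer_pending(&dev->poll_timer))
 *		mod_timer(&dev->poll_timer, jiffies + HZ);
 *	spin_unlock_bh(&dev->lock);
 *
 * This avoids moving an expiry that is already scheduled; to modify a
 * timer only if it is pending, see mod_timer_pending() below.
 */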

extern void add_timer_on(struct timer_list *timer, int cpu);
extern int del_timer(struct timer_list *timer);
extern int mod_timer(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);

extern void set_timer_slack(struct timer_list *timer, int slack_hz);

#define TIMER_NOT_PINNED	0
#define TIMER_PINNED		1
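
/*
 * Sketch of a self-rearming periodic timer (illustrative, hypothetical
 * names); mod_timer() may be called from the handler itself:
 *
 *	static void my_tick(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_poll_hw(dev);
 *		mod_timer(&dev->tick_timer, jiffies + HZ);
 *	}
 */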

/*
 * The jiffies value that is added to now when there is no timer
 * in the timer wheel:
 */
#define NEXT_TIMER_MAX_DELTA	((1UL << 30) - 1)
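
/*
 * Worked example: (1UL << 30) - 1 is 1073741823 jiffies, i.e. roughly
 * 12.4 days at HZ=1000 and about 124 days at HZ=100, so "no timer armed"
 * is reported as a timeout far enough out to be harmless.
 */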

/*
 * Returns when the next timer-wheel timeout occurs (in absolute jiffies):
 * locks the timer base and compares against the given jiffies value.
 */
extern unsigned long get_next_timer_interrupt(unsigned long now);

/*
 * Timer-statistics info:
 */
#ifdef CONFIG_TIMER_STATS

extern int timer_stats_active;

#define TIMER_STATS_FLAG_DEFERRABLE	0x1

extern void init_timer_stats(void);

extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
				     void *timerf, char *comm,
				     unsigned int timer_flag);

extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
					       void *addr);

static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
	if (likely(!timer_stats_active))
		return;
	__timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
}

static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
{
	timer->start_site = NULL;
}
#else
static inline void init_timer_stats(void)
{
}

static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
{
}

static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
{
}
#endif

extern void add_timer(struct timer_list *timer);

extern int try_to_del_timer_sync(struct timer_list *timer);

#ifdef CONFIG_SMP
  extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t)	del_timer(t)
#endif
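
/*
 * Illustrative caution (dev->lock is hypothetical): del_timer_sync()
 * waits for a running handler to finish, so
 *
 *	spin_lock(&dev->lock);
 *	del_timer_sync(&dev->tick_timer);
 *	spin_unlock(&dev->lock);
 *
 * deadlocks if the handler itself takes dev->lock. Drop the lock first,
 * or use del_timer() plus your own synchronization.
 */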

#define del_singleshot_timer_sync(t) del_timer_sync(t)

extern void init_timers(void);
extern void run_local_timers(void);
struct hrtimer;
extern enum hrtimer_restart it_real_fn(struct hrtimer *);

unsigned long __round_jiffies(unsigned long j, int cpu);
unsigned long __round_jiffies_relative(unsigned long j, int cpu);
unsigned long round_jiffies(unsigned long j);
unsigned long round_jiffies_relative(unsigned long j);

unsigned long __round_jiffies_up(unsigned long j, int cpu);
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
unsigned long round_jiffies_up(unsigned long j);
unsigned long round_jiffies_up_relative(unsigned long j);
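
/*
 * Illustrative use (dev->gc_timer is hypothetical): for coarse
 * housekeeping work, rounding the expiry lets unrelated timers fire in
 * the same tick and saves wakeups; the _up variants never round below
 * the requested time:
 *
 *	mod_timer(&dev->gc_timer, round_jiffies(jiffies + 5 * HZ));
 */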

#endif