locking, timer_stats: Annotate table_lock as raw
The table_lock lock can be taken in atomic context and therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Reported-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Andreas Sundebo <kernel@sundebo.dk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
ddb6c9b58a
commit
2737c49f29
1 changed file with 3 additions and 3 deletions
|
@ -81,7 +81,7 @@ struct entry {
|
|||
/*
|
||||
* Spinlock protecting the tables - not taken during lookup:
|
||||
*/
|
||||
static DEFINE_SPINLOCK(table_lock);
|
||||
static DEFINE_RAW_SPINLOCK(table_lock);
|
||||
|
||||
/*
|
||||
* Per-CPU lookup locks for fast hash lookup:
|
||||
|
@ -188,7 +188,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
|
|||
prev = NULL;
|
||||
curr = *head;
|
||||
|
||||
spin_lock(&table_lock);
|
||||
raw_spin_lock(&table_lock);
|
||||
/*
|
||||
* Make sure we have not raced with another CPU:
|
||||
*/
|
||||
|
@ -215,7 +215,7 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
|
|||
*head = curr;
|
||||
}
|
||||
out_unlock:
|
||||
spin_unlock(&table_lock);
|
||||
raw_spin_unlock(&table_lock);
|
||||
|
||||
return curr;
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue