ftrace: fix locking
Lockdep complains because we can hold all per-CPU trace buffer locks at once — put each buffer's lock into a separate lock class. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
parent
b3806b4316
commit
d4c5a2f587
2 changed files with 4 additions and 5 deletions
|
@@ -1865,11 +1865,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 	}
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-
-		if (!cpu_isset(cpu, mask))
-			continue;
 		spin_unlock(&data->lock);
 		atomic_dec(&data->disabled);
 	}
|
@@ -2076,6 +2073,7 @@ static int trace_alloc_page(void)
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
 		spin_lock_init(&data->lock);
+		lockdep_set_class(&data->lock, &data->lock_key);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
|
@@ -2084,6 +2082,7 @@ static int trace_alloc_page(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
 		spin_lock_init(&data->lock);
+		lockdep_set_class(&data->lock, &data->lock_key);
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
|
@@ -2203,5 +2202,4 @@ __init static int tracer_alloc_buffers(void)
 	}
 	return ret;
 }
 
 fs_initcall(tracer_alloc_buffers);
|
|
@@ -56,6 +56,7 @@ struct trace_array_cpu {
 	struct list_head trace_pages;
 	atomic_t disabled;
 	spinlock_t lock;
+	struct lock_class_key lock_key;
 	cycle_t time_offset;
 
 	/* these fields get copied into max-trace: */
|
Loading…
Reference in a new issue