ring-buffer: prevent false positive warning
Impact: eliminate false WARN_ON message

If an interrupt goes off after the setting of the local variable
tail_page and before incrementing the write index of that page,
the interrupt could push the commit forward to the next page.

Later a check is made to see if interrupts pushed the buffer around
the entire ring buffer by comparing the next page to the last
committed page. This can produce a false positive if the interrupt
had pushed the commit page forward as stated above.

Thanks to Jiaying Zhang for finding this race.

Reported-by: Jiaying Zhang <jiayingz@google.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent a8ccf1d6f6
commit 98db8df777

1 changed file with 5 additions and 2 deletions
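To make the race in the changelog easier to follow, here is a minimal stand-alone sketch of the interleaving, not the kernel code: struct ring_buffer_sim, simulate_interrupt() and the integer page indices are invented for illustration, where the real ring buffer uses struct buffer_page pointers, local_add_return() and barrier().

/* Hypothetical sketch of the race: an interrupt between the two reads
 * advances the commit page, so comparing against the live commit page
 * can falsely claim the writer wrapped the whole buffer. */
#include <stdio.h>

#define NR_PAGES 4

struct ring_buffer_sim {
	int tail_page;		/* page the writer is about to append to */
	int commit_page;	/* last fully committed page */
};

/* Stand-in for an interrupt handler that commits its own events and
 * pushes the commit page forward. */
static void simulate_interrupt(struct ring_buffer_sim *rb)
{
	rb->commit_page = (rb->commit_page + 1) % NR_PAGES;
	rb->tail_page = rb->commit_page;
}

int main(void)
{
	struct ring_buffer_sim rb = { .tail_page = 0, .commit_page = 0 };

	/* The fix: snapshot the commit page before reading tail_page
	 * (barrier() in the kernel keeps these loads from being
	 * reordered by the compiler). */
	int commit_page = rb.commit_page;
	int tail_page = rb.tail_page;

	simulate_interrupt(&rb);	/* interrupt fires here */

	int next_page = (tail_page + 1) % NR_PAGES;

	/* Old check: compares against the commit page the interrupt just
	 * advanced, so it can warn even though nothing wrapped. */
	printf("old check would warn: %s\n",
	       next_page == rb.commit_page ? "yes (false positive)" : "no");

	/* New check: compares against the pre-interrupt snapshot. */
	printf("new check would warn: %s\n",
	       next_page == commit_page ? "yes" : "no");

	return 0;
}

Running the sketch prints a false positive for the old comparison and none for the new one, which is what the one-line change in the second hunk below addresses.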
@@ -962,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *head_page, *reader_page;
+	struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
 	unsigned long tail, write;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct ring_buffer_event *event;
 	unsigned long flags;
 
+	commit_page = cpu_buffer->commit_page;
+	/* we just need to protect against interrupts */
+	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 	tail = write - length;
 
@@ -993,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	 * it all the way around the buffer, bail, and warn
 	 * about it.
 	 */
-	if (unlikely(next_page == cpu_buffer->commit_page)) {
+	if (unlikely(next_page == commit_page)) {
 		WARN_ON_ONCE(1);
 		goto out_unlock;
 	}