perf: Optimize ring-buffer write by depending on control dependencies
Remove a full barrier from the ring-buffer write path by relying on a
control dependency to order a LOAD -> STORE scenario.

Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-8alv40z6ikk57jzbaobnxrjl@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 813932149e
commit c7f2e3cd6c
1 changed file with 26 additions and 16 deletions
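To illustrate the LOAD -> STORE ordering the message above relies on, here is a minimal, hypothetical userspace C11 analogue (not the kernel code and not perf's user-side code; struct ring, ring_write and BUF_SIZE are invented for the sketch). The data stores sit on the branch that depends on the tail load, so the CPU cannot make them visible before that load resolves; note that portable ISO C does not formally promise this for plain stores, the kernel leans on how real CPUs treat control dependencies.

#include <stdatomic.h>
#include <stddef.h>

#define BUF_SIZE 4096                        /* power of two, picked for the sketch */

struct ring {
        _Atomic size_t head;                 /* written by the producer (->data_head) */
        _Atomic size_t tail;                 /* written by the consumer (->data_tail) */
        unsigned char  data[BUF_SIZE];
};

/* Returns 0 if the record does not fit; data is stored only on the taken branch. */
static int ring_write(struct ring *rb, const unsigned char *rec, size_t len)
{
        size_t head = atomic_load_explicit(&rb->head, memory_order_relaxed);
        size_t tail = atomic_load_explicit(&rb->tail, memory_order_relaxed); /* LOAD ->data_tail */

        if (BUF_SIZE - (head - tail) < len)  /* branch depends on the tail load: (A) */
                return 0;

        for (size_t i = 0; i < len; i++)     /* STORE $data, only reached on the taken branch */
                rb->data[(head + i) % BUF_SIZE] = rec[i];

        /* (B): release ordering plays the role of smp_wmb() before publishing head. */
        atomic_store_explicit(&rb->head, head + len, memory_order_release);
        return 1;
}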
@@ -61,19 +61,20 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
  *
  *   kernel                              user
  *
- *   READ ->data_tail                    READ ->data_head
- *   smp_mb()   (A)                      smp_rmb()   (C)
- *   WRITE $data                         READ $data
- *   smp_wmb()  (B)                      smp_mb()    (D)
- *   STORE ->data_head                   WRITE ->data_tail
+ *   if (LOAD ->data_tail) {             LOAD ->data_head
+ *              (A)                      smp_rmb()   (C)
+ *      STORE $data                      LOAD $data
+ *      smp_wmb()  (B)                   smp_mb()    (D)
+ *      STORE ->data_head                STORE ->data_tail
+ *   }
  *
  * Where A pairs with D, and B pairs with C.
  *
- * I don't think A needs to be a full barrier because we won't in fact
- * write data until we see the store from userspace. So we simply don't
- * issue the data WRITE until we observe it. Be conservative for now.
+ * In our case (A) is a control dependency that separates the load of
+ * the ->data_tail and the stores of $data. In case ->data_tail
+ * indicates there is no room in the buffer to store $data we do not.
  *
- * OTOH, D needs to be a full barrier since it separates the data READ
+ * D needs to be a full barrier since it separates the data READ
  * from the tail WRITE.
  *
  * For B a WMB is sufficient since it separates two WRITEs, and for C
@@ -81,7 +82,7 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
  *
  * See perf_output_begin().
  */
-       smp_wmb();
+       smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
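For context, the "user" column of the diagram above could look like the following sketch of a consumer, again as hypothetical C11 reusing struct ring and BUF_SIZE from the earlier sketch rather than perf's actual mmap() consumer: an acquire load stands in for smp_rmb() (C), and an explicit full fence stands in for the smp_mb() (D) userspace SHOULD issue after reading the data and before publishing the new tail.

/* Consumer side; pairs C with B and D with A from the diagram above. */
static size_t ring_read(struct ring *rb, unsigned char *out, size_t max)
{
        size_t tail = atomic_load_explicit(&rb->tail, memory_order_relaxed);
        /* LOAD ->data_head; acquire plays the role of smp_rmb() (C). */
        size_t head = atomic_load_explicit(&rb->head, memory_order_acquire);
        size_t avail = head - tail;
        size_t len = avail < max ? avail : max;

        for (size_t i = 0; i < len; i++)     /* LOAD $data */
                out[i] = rb->data[(tail + i) % BUF_SIZE];

        /*
         * (D): a full fence before the tail store, i.e. the smp_mb()
         * userspace SHOULD issue before moving ->data_tail and thereby
         * telling the producer this region may be overwritten.
         */
        atomic_thread_fence(memory_order_seq_cst);
        atomic_store_explicit(&rb->tail, tail + len, memory_order_relaxed);
        return len;
}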
@@ -144,17 +145,26 @@ int perf_output_begin(struct perf_output_handle *handle,
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
                        goto fail;
+
+               /*
+                * The above forms a control dependency barrier separating the
+                * @tail load above from the data stores below. Since the @tail
+                * load is required to compute the branch to fail below.
+                *
+                * A, matches D; the full memory barrier userspace SHOULD issue
+                * after reading the data and before storing the new tail
+                * position.
+                *
+                * See perf_output_put_handle().
+                */
+
                head += size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        /*
-        * Separate the userpage->tail read from the data stores below.
-        * Matches the MB userspace SHOULD issue after reading the data
-        * and before storing the new tail position.
-        *
-        * See perf_output_put_handle().
+        * We rely on the implied barrier() by local_cmpxchg() to ensure
+        * none of the data stores below can be lifted up by the compiler.
         */
-       smp_mb();

        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);
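The smp_mb() dropped in the hunk above is replaced by two weaker guarantees: the control dependency (A) for the CPU, and the compiler barrier implied by the kernel's asm-based local_cmpxchg() so the data stores cannot be hoisted above the space check. A standalone, hypothetical userspace analogue of that second point (ring_reserve, write_head, data_tail and RESERVE_BUF_SIZE are invented names; atomic_signal_fence() stands in for the implied barrier()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

#define RESERVE_BUF_SIZE 4096

static _Atomic size_t write_head;            /* reservation counter, like rb->head  */
static _Atomic size_t data_tail;             /* consumer progress, like ->data_tail */

static bool ring_reserve(size_t len, size_t *slot)
{
        size_t head, tail;

        do {
                head = atomic_load_explicit(&write_head, memory_order_relaxed);
                tail = atomic_load_explicit(&data_tail, memory_order_relaxed);
                if (RESERVE_BUF_SIZE - (head - tail) < len)
                        return false;        /* no room: this branch is the control dependency */
        } while (!atomic_compare_exchange_weak_explicit(&write_head, &head, head + len,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));

        /*
         * Stand-in for the barrier() implied by local_cmpxchg(): a
         * compiler-only fence, so the caller's data stores into
         * [head, head + len) cannot be lifted above the space check.
         * No CPU-level smp_mb() is issued here any more.
         */
        atomic_signal_fence(memory_order_seq_cst);

        *slot = head;
        return true;
}

Only compiler reordering has to be prevented at this point; the CPU-level LOAD -> STORE ordering already follows from the branch, as the new comment in perf_output_begin() explains.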