kernel: optimise seqlock
Add branch annotations for the seqlock read fastpath, and introduce __read_seqcount_begin and __read_seqcount_retry functions, which can avoid the smp_rmb() if used carefully. These will be used by the store-free path walking algorithm, where performance is critical and seqlocks are in use.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
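For context, the read-side fastpath these annotations target is the usual retry loop. A minimal sketch, assuming a hypothetical pair of fields guarded by a seqcount (foo_seq, foo_x and foo_y are made-up names, not code from this commit):

    #include <linux/seqlock.h>

    /* Hypothetical data guarded by a seqcount. */
    static seqcount_t foo_seq = SEQCNT_ZERO;
    static int foo_x, foo_y;

    static void foo_read(int *x, int *y)
    {
            unsigned seq;

            do {
                    seq = read_seqcount_begin(&foo_seq);
                    *x = foo_x;
                    *y = foo_y;
                    /* with unlikely() in the retry check, the compiler keeps
                     * the retry branch off the fallthrough path, so the common
                     * no-concurrent-writer case runs straight-line */
            } while (read_seqcount_retry(&foo_seq, seq));
    }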
parent ff0c7d15f9
commit 3c22cd5709
1 changed file (include/linux/seqlock.h) with 73 additions and 7 deletions
@@ -107,7 +107,7 @@ static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
 {
         smp_rmb();
 
-        return (sl->sequence != start);
+        return unlikely(sl->sequence != start);
 }
 
 
@@ -125,14 +125,25 @@ typedef struct seqcount {
 #define SEQCNT_ZERO { 0 }
 #define seqcount_init(x)        do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
 
-/* Start of read using pointer to a sequence counter only. */
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+/**
+ * __read_seqcount_begin - begin a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 {
         unsigned ret;
 
 repeat:
         ret = s->sequence;
-        smp_rmb();
         if (unlikely(ret & 1)) {
                 cpu_relax();
                 goto repeat;
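The `ret & 1` spin in the hunk above is the reader half of the odd/even protocol: a writer holds the count odd for the duration of an update. A sketch of the (pre-existing) writer side, reusing the hypothetical names from the earlier sketch and assuming writers are serialized externally, e.g. by a spinlock:

    static void foo_write(int x, int y)
    {
            write_seqcount_begin(&foo_seq); /* count becomes odd: readers spin or retry */
            foo_x = x;
            foo_y = y;
            write_seqcount_end(&foo_seq);   /* count becomes even again, one generation later */
    }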
@@ -140,14 +151,56 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
         return ret;
 }
 
-/*
- * Test if reader processed invalid data because sequence number has changed.
+/**
+ * read_seqcount_begin - begin a seq-read critical section
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin opens a read critical section of the given seqcount.
+ * Validity of the critical section is tested by checking read_seqcount_retry
+ * function.
+ */
+static inline unsigned read_seqcount_begin(const seqcount_t *s)
+{
+        unsigned ret = __read_seqcount_begin(s);
+        smp_rmb();
+        return ret;
+}
+
+/**
+ * __read_seqcount_retry - end a seq-read critical section (without barrier)
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
+ * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
+ * provided before actually loading any of the variables that are to be
+ * protected in this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ */
+static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+{
+        return unlikely(s->sequence != start);
+}
+
+/**
+ * read_seqcount_retry - end a seq-read critical section
+ * @s: pointer to seqcount_t
+ * @start: count, from read_seqcount_begin
+ * Returns: 1 if retry is required, else 0
+ *
+ * read_seqcount_retry closes a read critical section of the given seqcount.
+ * If the critical section was invalid, it must be ignored (and typically
+ * retried).
  */
 static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 {
         smp_rmb();
 
-        return s->sequence != start;
+        return __read_seqcount_retry(s, start);
 }
 
 
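Where the barrier-free variants pay off is when one smp_rmb() can stand in for several. A sketch of the intended shape of use, again with hypothetical names (foo_seq, bar_seq, foo_val, bar_val; not code from this commit): sampling two counters with two barriers total, where the ordinary begin/retry pairs would execute four.

    static void foo_bar_read(int *a, int *b)
    {
            unsigned s1, s2;

            do {
                    s1 = __read_seqcount_begin(&foo_seq);
                    s2 = __read_seqcount_begin(&bar_seq);
                    smp_rmb();      /* caller-provided: orders both begins before the loads */
                    *a = foo_val;
                    *b = bar_val;
                    smp_rmb();      /* caller-provided: orders the loads before both retry checks */
            } while (__read_seqcount_retry(&foo_seq, s1) ||
                     __read_seqcount_retry(&bar_seq, s2));
    }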
@@ -167,6 +220,19 @@ static inline void write_seqcount_end(seqcount_t *s)
         s->sequence++;
 }
 
+/**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+ *
+ * After write_seqcount_barrier, no read-side seq operations will complete
+ * successfully and see data older than this.
+ */
+static inline void write_seqcount_barrier(seqcount_t *s)
+{
+        smp_wmb();
+        s->sequence+=2;
+}
+
 /*
  * Possible sw/hw IRQ protected versions of the interfaces.
  */
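One way write_seqcount_barrier might be used (a sketch under assumed names; safely freeing the old object, e.g. via RCU, is omitted): a writer that replaces data with a single pointer store has no window that needs an odd count, but still has to kick out readers that may have seen the old pointer.

    static struct foo *foo_ptr;     /* hypothetical pointer guarded by foo_seq */

    static void foo_replace(struct foo *new)
    {
            /* `new` must already be fully initialized and published safely */
            foo_ptr = new;
            write_seqcount_barrier(&foo_seq);
            /* smp_wmb(), then count += 2: the count stays even, but any read
             * section begun before this point now fails its retry check */
    }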