virtio: support unlocked queue poll
This adds a way to check ring empty state after enable_cb outside any
locks. Will be used by virtio_net.

Note: there's room for more optimization: caller is likely to have a
memory barrier already, which means we might be able to get rid of a
barrier here. Deferring this optimization until we do some benchmarking.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit cc229884d3 (parent 01276ed242)
2 changed files with 55 additions and 19 deletions
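For context, a minimal sketch of how a driver's receive path might split the empty-queue check across the lock boundary, in the way the message above says virtio_net will. The struct, function, and lock names (my_rx_queue, my_rx_reenable) are hypothetical and not part of this patch:

	#include <linux/spinlock.h>
	#include <linux/virtio.h>

	/* Hypothetical per-queue driver state, for illustration only. */
	struct my_rx_queue {
		struct virtqueue *vq;
		spinlock_t lock;
	};

	/* Returns true if callbacks were re-armed with the ring still empty,
	 * false if the device added buffers in the meantime (the caller then
	 * keeps processing and disables callbacks again under its lock). */
	static bool my_rx_reenable(struct my_rx_queue *rq)
	{
		unsigned last_used_idx;

		spin_lock(&rq->lock);
		/* Serialized part: re-enable callbacks, snapshot queue state. */
		last_used_idx = virtqueue_enable_cb_prepare(rq->vq);
		spin_unlock(&rq->lock);

		/* Unserialized part: virtqueue_poll() may run outside the lock. */
		return !virtqueue_poll(rq->vq, last_used_idx);
	}

If this returns false, the caller goes back to processing the ring, which mirrors what the rewritten virtqueue_enable_cb in the diff below does internally.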
drivers/virtio/virtio_ring.c
@@ -606,6 +606,55 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 }
 EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
+/**
+ * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns current queue state
+ * in an opaque unsigned value. This value should be later tested by
+ * virtqueue_poll, to detect a possible race between the driver checking for
+ * more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	u16 last_used_idx;
+
+	START_USE(vq);
+
+	/* We optimistically turn back on interrupts, then check if there was
+	 * more to do. */
+	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+	 * either clear the flags bit or point the event index at the next
+	 * entry. Always do both to keep code simple. */
+	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
+	END_USE(vq);
+	return last_used_idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
+
+/**
+ * virtqueue_poll - query pending used buffers
+ * @vq: the struct virtqueue we're talking about.
+ * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
+ *
+ * Returns "true" if there are pending used buffers in the queue.
+ *
+ * This does not need to be serialized.
+ */
+bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+
+	virtio_mb(vq->weak_barriers);
+	return (u16)last_used_idx != vq->vring.used->idx;
+}
+EXPORT_SYMBOL_GPL(virtqueue_poll);
+
 /**
  * virtqueue_enable_cb - restart callbacks after disable_cb.
  * @vq: the struct virtqueue we're talking about.
@@ -619,25 +668,8 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
  */
 bool virtqueue_enable_cb(struct virtqueue *_vq)
 {
-	struct vring_virtqueue *vq = to_vvq(_vq);
-
-	START_USE(vq);
-
-	/* We optimistically turn back on interrupts, then check if there was
-	 * more to do. */
-	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
-	 * either clear the flags bit or point the event index at the next
-	 * entry. Always do both to keep code simple. */
-	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
-	vring_used_event(&vq->vring) = vq->last_used_idx;
-	virtio_mb(vq->weak_barriers);
-	if (unlikely(more_used(vq))) {
-		END_USE(vq);
-		return false;
-	}
-
-	END_USE(vq);
-	return true;
+	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+	return !virtqueue_poll(_vq, last_used_idx);
 }
 EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
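Note that virtqueue_poll() above only reads the caller-supplied snapshot and the device-written used index (after a memory barrier), not any driver-owned ring state, which is why its kernel-doc can promise that it needs no serialization; virtqueue_enable_cb() keeps its old return semantics by simply composing the two new primitives.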
include/linux/virtio.h
@@ -70,6 +70,10 @@ void virtqueue_disable_cb(struct virtqueue *vq);
 
 bool virtqueue_enable_cb(struct virtqueue *vq);
 
+unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+
+bool virtqueue_poll(struct virtqueue *vq, unsigned);
+
 bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
 
 void *virtqueue_detach_unused_buf(struct virtqueue *vq);