Merge branch 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* 'virtio' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus: (27 commits)
  drivers/char: Eliminate use after free
  virtio: console: Accept console size along with resize control message
  virtio: console: Store each console's size in the console structure
  virtio: console: Resize console port 0 on config intr only if multiport is off
  virtio: console: Add support for nonblocking write()s
  virtio: console: Rename wait_is_over() to will_read_block()
  virtio: console: Don't always create a port 0 if using multiport
  virtio: console: Use a control message to add ports
  virtio: console: Move code around for future patches
  virtio: console: Remove config work handler
  virtio: console: Don't call hvc_remove() on unplugging console ports
  virtio: console: Return -EPIPE to hvc_console if we lost the connection
  virtio: console: Let host know of port or device add failures
  virtio: console: Add a __send_control_msg() that can send messages without a valid port
  virtio: Revert "virtio: disable multiport console support."
  virtio: add_buf_gfp
  trans_virtio: use virtqueue_xxx wrappers
  virtio-rng: use virtqueue_xxx wrappers
  virtio_ring: remove a level of indirection
  virtio_net: use virtqueue_xxx wrappers
  ...

Fix up conflicts in drivers/net/virtio_net.c due to new virtqueue_xxx
wrappers changes conflicting with some other cleanups.
commit 1756ac3d3c
10 changed files with 542 additions and 410 deletions
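Most of this merge converts callers from the vq->vq_ops indirection table to direct virtqueue_*() calls ("virtio_ring: remove a level of indirection" in the list above). A toy, self-contained user-space model of that refactoring follows; everything in it is a stand-in for illustration, not kernel code:

/* Toy model of the refactoring: an ops-table indirect call vs. a direct
 * exported function. Names mirror the diff, but this is not kernel code. */
#include <stdio.h>

struct virtqueue;			/* stand-in for the kernel struct */

/* Old shape: every call bounces through a function-pointer table. */
struct virtqueue_ops {
	void (*kick)(struct virtqueue *vq);
};

static void vring_kick(struct virtqueue *vq)
{
	printf("kick %p\n", (void *)vq);
}

static struct virtqueue_ops vring_vq_ops = { .kick = vring_kick };

/* New shape: one direct call, no table to dereference. */
static void virtqueue_kick(struct virtqueue *vq)
{
	vring_kick(vq);
}

int main(void)
{
	struct virtqueue *vq = (struct virtqueue *)0x1;	/* dummy handle */

	vring_vq_ops.kick(vq);	/* old style: vq->vq_ops->kick(vq) */
	virtqueue_kick(vq);	/* new style: direct call */
	return 0;
}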
drivers/block/virtio_blk.c
@@ -50,7 +50,7 @@ static void blk_done(struct virtqueue *vq)
 	unsigned long flags;
 
 	spin_lock_irqsave(&vblk->lock, flags);
-	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
+	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
 		int error;
 
 		switch (vbr->status) {
@@ -70,6 +70,8 @@ static void blk_done(struct virtqueue *vq)
 			vbr->req->sense_len = vbr->in_hdr.sense_len;
 			vbr->req->errors = vbr->in_hdr.errors;
 		}
+		if (blk_special_request(vbr->req))
+			vbr->req->errors = (error != 0);
 
 		__blk_end_request_all(vbr->req, error);
 		list_del(&vbr->list);
@@ -103,6 +105,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		vbr->out_hdr.sector = 0;
 		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
 		break;
+	case REQ_TYPE_SPECIAL:
+		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
+		vbr->out_hdr.sector = 0;
+		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
+		break;
 	case REQ_TYPE_LINUX_BLOCK:
 		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
 			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
@@ -151,7 +158,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -180,7 +187,7 @@ static void do_virtblk_request(struct request_queue *q)
 	}
 
 	if (issued)
-		vblk->vq->vq_ops->kick(vblk->vq);
+		virtqueue_kick(vblk->vq);
 }
 
 static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
@@ -189,12 +196,45 @@ static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
 	req->cmd[0] = REQ_LB_OP_FLUSH;
 }
 
+/* return id (s/n) string for *disk to *id_str
+ */
+static int virtblk_get_id(struct gendisk *disk, char *id_str)
+{
+	struct virtio_blk *vblk = disk->private_data;
+	struct request *req;
+	struct bio *bio;
+
+	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
+			   GFP_KERNEL);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
+	if (IS_ERR(req)) {
+		bio_put(bio);
+		return PTR_ERR(req);
+	}
+
+	req->cmd_type = REQ_TYPE_SPECIAL;
+	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
+}
+
 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 			 unsigned cmd, unsigned long data)
 {
 	struct gendisk *disk = bdev->bd_disk;
 	struct virtio_blk *vblk = disk->private_data;
 
+	if (cmd == 0x56424944) { /* 'VBID' */
+		void __user *usr_data = (void __user *)data;
+		char id_str[VIRTIO_BLK_ID_BYTES];
+		int err;
+
+		err = virtblk_get_id(disk, id_str);
+		if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
+			err = -EFAULT;
+		return err;
+	}
 	/*
 	 * Only allow the generic SCSI ioctls if the host can support it.
 	 */
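A hypothetical user-space check of the 'VBID' ioctl added above; /dev/vda is only an example path, while the 0x56424944 command value and the 20-byte length come straight from the diff:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VIRTIO_BLK_ID_BYTES 20		/* matches include/linux/virtio_blk.h below */

int main(void)
{
	char id[VIRTIO_BLK_ID_BYTES + 1];
	int fd = open("/dev/vda", O_RDONLY);	/* example device node */

	if (fd < 0)
		return 1;
	memset(id, 0, sizeof(id));
	/* 0x56424944 == 'VBID', the command virtblk_ioctl() recognizes */
	if (ioctl(fd, 0x56424944, id) == 0)
		printf("serial: %s\n", id);
	close(fd);
	return 0;
}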
drivers/char/hw_random/virtio-rng.c
@@ -32,7 +32,7 @@ static bool busy;
 static void random_recv_done(struct virtqueue *vq)
 {
 	/* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */
-	if (!vq->vq_ops->get_buf(vq, &data_avail))
+	if (!virtqueue_get_buf(vq, &data_avail))
 		return;
 
 	complete(&have_data);
@@ -46,10 +46,10 @@ static void register_buffer(u8 *buf, size_t size)
 	sg_init_one(&sg, buf, size);
 
 	/* There should always be room for one buffer. */
-	if (vq->vq_ops->add_buf(vq, &sg, 0, 1, buf) < 0)
+	if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
 		BUG();
 
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
drivers/char/virtio_console.c
(File diff suppressed because it is too large.)
drivers/net/virtio_net.c
@@ -122,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
 	struct virtnet_info *vi = svq->vdev->priv;
 
 	/* Suppress further interrupts. */
-	svq->vq_ops->disable_cb(svq);
+	virtqueue_disable_cb(svq);
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
@@ -210,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 			return -EINVAL;
 		}
 
-		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+		page = virtqueue_get_buf(vi->rvq, &len);
 		if (!page) {
 			pr_debug("%s: rx error: %d buffers missing\n",
 				 skb->dev->name, hdr->mhdr.num_buffers);
@@ -340,7 +340,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
 	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
 	if (err < 0)
 		dev_kfree_skb(skb);
 
@@ -385,7 +385,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
 	/* chain first in list head */
 	first->private = (unsigned long)list;
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
 				       first);
 	if (err < 0)
 		give_pages(vi, first);
@@ -404,7 +404,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
 	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-	err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
 	if (err < 0)
 		give_pages(vi, page);
 
@@ -433,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
-	vi->rvq->vq_ops->kick(vi->rvq);
+	virtqueue_kick(vi->rvq);
 	return !oom;
 }
 
@@ -442,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
 	struct virtnet_info *vi = rvq->vdev->priv;
 	/* Schedule NAPI, Suppress further interrupts if successful. */
 	if (napi_schedule_prep(&vi->napi)) {
-		rvq->vq_ops->disable_cb(rvq);
+		virtqueue_disable_cb(rvq);
 		__napi_schedule(&vi->napi);
 	}
 }
@@ -471,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 again:
 	while (received < budget &&
-	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+	       (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
 		receive_buf(vi->dev, buf, len);
 		--vi->num;
 		received++;
@@ -485,9 +485,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	/* Out of packets? */
 	if (received < budget) {
 		napi_complete(napi);
-		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+		if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
 		    napi_schedule_prep(napi)) {
-			vi->rvq->vq_ops->disable_cb(vi->rvq);
+			virtqueue_disable_cb(vi->rvq);
 			__napi_schedule(napi);
 			goto again;
 		}
@@ -501,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 	struct sk_buff *skb;
 	unsigned int len, tot_sgs = 0;
 
-	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+	while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
@@ -554,7 +554,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 		sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
 
 	hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
-	return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+	return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
 					0, skb);
 }
 
@@ -574,14 +574,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(capacity < 0)) {
 		netif_stop_queue(dev);
 		dev_warn(&dev->dev, "Unexpected full queue\n");
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-			vi->svq->vq_ops->disable_cb(vi->svq);
+		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+			virtqueue_disable_cb(vi->svq);
 			netif_start_queue(dev);
 			goto again;
 		}
 		return NETDEV_TX_BUSY;
 	}
-	vi->svq->vq_ops->kick(vi->svq);
+	virtqueue_kick(vi->svq);
 
 	/* Don't wait up for transmitted skbs to be freed. */
 	skb_orphan(skb);
@@ -591,12 +591,12 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * before it gets out of hand. Naturally, this wastes entries. */
 	if (capacity < 2+MAX_SKB_FRAGS) {
 		netif_stop_queue(dev);
-		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
 			/* More just got used, free them then recheck. */
 			capacity += free_old_xmit_skbs(vi);
 			if (capacity >= 2+MAX_SKB_FRAGS) {
 				netif_start_queue(dev);
-				vi->svq->vq_ops->disable_cb(vi->svq);
+				virtqueue_disable_cb(vi->svq);
 			}
 		}
 	}
@@ -641,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
 	 * now. virtnet_poll wants re-enable the queue, so we disable here.
 	 * We synchronize against interrupts via NAPI_STATE_SCHED */
 	if (napi_schedule_prep(&vi->napi)) {
-		vi->rvq->vq_ops->disable_cb(vi->rvq);
+		virtqueue_disable_cb(vi->rvq);
 		__napi_schedule(&vi->napi);
 	}
 	return 0;
@@ -678,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
 		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
 	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+	BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
 
-	vi->cvq->vq_ops->kick(vi->cvq);
+	virtqueue_kick(vi->cvq);
 
 	/*
 	 * Spin for a response, the kick causes an ioport write, trapping
 	 * into the hypervisor, so the request should be handled immediately.
 	 */
-	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+	while (!virtqueue_get_buf(vi->cvq, &tmp))
 		cpu_relax();
 
 	return status == VIRTIO_NET_OK;
@@ -1003,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
 	while (1) {
-		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+		buf = virtqueue_detach_unused_buf(vi->svq);
 		if (!buf)
 			break;
 		dev_kfree_skb(buf);
 	}
 	while (1) {
-		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+		buf = virtqueue_detach_unused_buf(vi->rvq);
 		if (!buf)
 			break;
 		if (vi->mergeable_rx_bufs || vi->big_packets)
drivers/virtio/virtio_balloon.c
@@ -75,7 +75,7 @@ static void balloon_ack(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (vb)
 		complete(&vb->acked);
 }
@@ -89,9 +89,9 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
 	init_completion(&vb->acked);
 
 	/* We should always be able to add one buffer to an empty queue. */
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 
 	/* When host has read buffer, this completes via balloon_ack */
 	wait_for_completion(&vb->acked);
@@ -204,7 +204,7 @@ static void stats_request(struct virtqueue *vq)
 	struct virtio_balloon *vb;
 	unsigned int len;
 
-	vb = vq->vq_ops->get_buf(vq, &len);
+	vb = virtqueue_get_buf(vq, &len);
 	if (!vb)
 		return;
 	vb->need_stats_update = 1;
@@ -221,9 +221,9 @@ static void stats_handle_request(struct virtio_balloon *vb)
 
 	vq = vb->stats_vq;
 	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
-	if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+	if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
 		BUG();
-	vq->vq_ops->kick(vq);
+	virtqueue_kick(vq);
 }
 
 static void virtballoon_changed(struct virtio_device *vdev)
@@ -314,10 +314,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
 		 * use it to signal us later.
 		 */
 		sg_init_one(&sg, vb->stats, sizeof vb->stats);
-		if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
-						  &sg, 1, 0, vb) < 0)
+		if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
 			BUG();
-		vb->stats_vq->vq_ops->kick(vb->stats_vq);
+		virtqueue_kick(vb->stats_vq);
 	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
drivers/virtio/virtio_ring.c
@@ -110,13 +110,14 @@ struct vring_virtqueue
 static int vring_add_indirect(struct vring_virtqueue *vq,
 			      struct scatterlist sg[],
 			      unsigned int out,
-			      unsigned int in)
+			      unsigned int in,
+			      gfp_t gfp)
 {
 	struct vring_desc *desc;
 	unsigned head;
 	int i;
 
-	desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC);
+	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
 	if (!desc)
 		return vq->vring.num;
 
@@ -155,11 +156,12 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
-static int vring_add_buf(struct virtqueue *_vq,
-			 struct scatterlist sg[],
-			 unsigned int out,
-			 unsigned int in,
-			 void *data)
+int virtqueue_add_buf_gfp(struct virtqueue *_vq,
+			  struct scatterlist sg[],
+			  unsigned int out,
+			  unsigned int in,
+			  void *data,
+			  gfp_t gfp)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i, avail, head, uninitialized_var(prev);
@@ -171,7 +173,7 @@ static int vring_add_buf(struct virtqueue *_vq,
 	/* If the host supports indirect descriptor tables, and we have multiple
 	 * buffers, then go indirect. FIXME: tune this threshold */
 	if (vq->indirect && (out + in) > 1 && vq->num_free) {
-		head = vring_add_indirect(vq, sg, out, in);
+		head = vring_add_indirect(vq, sg, out, in, gfp);
 		if (head != vq->vring.num)
 			goto add_head;
 	}
@@ -232,8 +234,9 @@ static int vring_add_buf(struct virtqueue *_vq,
 		return vq->num_free ? vq->vring.num : 0;
 	return vq->num_free;
 }
+EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
 
-static void vring_kick(struct virtqueue *_vq)
+void virtqueue_kick(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	START_USE(vq);
@@ -253,6 +256,7 @@ static void vring_kick(struct virtqueue *_vq)
 
 	END_USE(vq);
 }
+EXPORT_SYMBOL_GPL(virtqueue_kick);
 
 static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
 {
@@ -284,7 +288,7 @@ static inline bool more_used(const struct vring_virtqueue *vq)
 	return vq->last_used_idx != vq->vring.used->idx;
 }
 
-static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
+void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	void *ret;
@@ -325,15 +329,17 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
 	END_USE(vq);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(virtqueue_get_buf);
 
-static void vring_disable_cb(struct virtqueue *_vq)
+void virtqueue_disable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
 	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 }
+EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
 
-static bool vring_enable_cb(struct virtqueue *_vq)
+bool virtqueue_enable_cb(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 
@@ -351,8 +357,9 @@ static bool vring_enable_cb(struct virtqueue *_vq)
 	END_USE(vq);
 	return true;
 }
+EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
 
-static void *vring_detach_unused_buf(struct virtqueue *_vq)
+void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
 {
 	struct vring_virtqueue *vq = to_vvq(_vq);
 	unsigned int i;
@@ -375,6 +382,7 @@ static void *vring_detach_unused_buf(struct virtqueue *_vq)
 	END_USE(vq);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
 
 irqreturn_t vring_interrupt(int irq, void *_vq)
 {
@@ -396,15 +404,6 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-static struct virtqueue_ops vring_vq_ops = {
-	.add_buf = vring_add_buf,
-	.get_buf = vring_get_buf,
-	.kick = vring_kick,
-	.disable_cb = vring_disable_cb,
-	.enable_cb = vring_enable_cb,
-	.detach_unused_buf = vring_detach_unused_buf,
-};
-
 struct virtqueue *vring_new_virtqueue(unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
@@ -429,7 +428,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vring_init(&vq->vring, num, pages, vring_align);
 	vq->vq.callback = callback;
 	vq->vq.vdev = vdev;
-	vq->vq.vq_ops = &vring_vq_ops;
 	vq->vq.name = name;
 	vq->notify = notify;
 	vq->broken = false;
include/linux/virtio.h
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+#include <linux/gfp.h>
 
 /**
  * virtqueue - a queue to register buffers for sending or receiving.
@@ -14,7 +15,6 @@
  * @callback: the function to call when buffers are consumed (can be NULL).
  * @name: the name of this virtqueue (mainly for debugging)
  * @vdev: the virtio device this queue was created for.
- * @vq_ops: the operations for this virtqueue (see below).
  * @priv: a pointer for the virtqueue implementation to use.
  */
 struct virtqueue {
@@ -22,60 +22,71 @@ struct virtqueue {
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
-	struct virtqueue_ops *vq_ops;
 	void *priv;
 };
 
 /**
- * virtqueue_ops - operations for virtqueue abstraction layer
- * @add_buf: expose buffer to other end
+ * operations for virtqueue
+ * virtqueue_add_buf: expose buffer to other end
  *	vq: the struct virtqueue we're talking about.
  *	sg: the description of the buffer(s).
 	out_num: the number of sg readable by other side
 	in_num: the number of sg which are writable (after readable ones)
 	data: the token identifying the buffer.
+	gfp: how to do memory allocations (if necessary).
 	Returns remaining capacity of queue (sg segments) or a negative error.
- * @kick: update after add_buf
+ * virtqueue_kick: update after add_buf
 	vq: the struct virtqueue
 	After one or more add_buf calls, invoke this to kick the other side.
- * @get_buf: get the next used buffer
+ * virtqueue_get_buf: get the next used buffer
 	vq: the struct virtqueue we're talking about.
 	len: the length written into the buffer
 	Returns NULL or the "data" token handed to add_buf.
- * @disable_cb: disable callbacks
+ * virtqueue_disable_cb: disable callbacks
 	vq: the struct virtqueue we're talking about.
 	Note that this is not necessarily synchronous, hence unreliable and only
 	useful as an optimization.
- * @enable_cb: restart callbacks after disable_cb.
+ * virtqueue_enable_cb: restart callbacks after disable_cb.
 	vq: the struct virtqueue we're talking about.
 	This re-enables callbacks; it returns "false" if there are pending
 	buffers in the queue, to detect a possible race between the driver
 	checking for more work, and enabling callbacks.
- * @detach_unused_buf: detach first unused buffer
+ * virtqueue_detach_unused_buf: detach first unused buffer
 	vq: the struct virtqueue we're talking about.
 	Returns NULL or the "data" token handed to add_buf
  *
  * Locking rules are straightforward: the driver is responsible for
  * locking. No two operations may be invoked simultaneously, with the exception
- * of @disable_cb.
+ * of virtqueue_disable_cb.
  *
  * All operations can be called in any context.
  */
-struct virtqueue_ops {
-	int (*add_buf)(struct virtqueue *vq,
-		       struct scatterlist sg[],
-		       unsigned int out_num,
-		       unsigned int in_num,
-		       void *data);
-
-	void (*kick)(struct virtqueue *vq);
+int virtqueue_add_buf_gfp(struct virtqueue *vq,
+			  struct scatterlist sg[],
+			  unsigned int out_num,
+			  unsigned int in_num,
+			  void *data,
+			  gfp_t gfp);
 
-	void *(*get_buf)(struct virtqueue *vq, unsigned int *len);
+static inline int virtqueue_add_buf(struct virtqueue *vq,
+				    struct scatterlist sg[],
+				    unsigned int out_num,
+				    unsigned int in_num,
+				    void *data)
+{
+	return virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, GFP_ATOMIC);
+}
 
-	void (*disable_cb)(struct virtqueue *vq);
-	bool (*enable_cb)(struct virtqueue *vq);
-	void *(*detach_unused_buf)(struct virtqueue *vq);
-};
+void virtqueue_kick(struct virtqueue *vq);
+
+void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+
+void virtqueue_disable_cb(struct virtqueue *vq);
+
+bool virtqueue_enable_cb(struct virtqueue *vq);
+
+void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 
 /**
  * virtio_device - representation of a device using virtio
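The header above replaces struct virtqueue_ops with plain exported functions. A minimal, hypothetical driver-side sketch of the new calling convention; my_done, my_submit and the buffer handling are invented names for illustration, not code from this patch:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

static void my_done(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	/* Reclaim completed buffers; each return value is the "data" token
	 * that was handed to add_buf. */
	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		pr_debug("completed %u bytes for token %p\n", len, token);
}

static int my_submit(struct virtqueue *vq, void *buf, unsigned int size)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, size);
	/* One device-writable entry (out = 0, in = 1); a sleeping GFP_KERNEL
	 * allocation is now possible via virtqueue_add_buf_gfp(). */
	if (virtqueue_add_buf_gfp(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
		return -ENOSPC;
	virtqueue_kick(vq);	/* notify the other side */
	return 0;
}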
include/linux/virtio_blk.h
@@ -17,6 +17,8 @@
 #define VIRTIO_BLK_F_FLUSH	9	/* Cache flush command support */
 #define VIRTIO_BLK_F_TOPOLOGY	10	/* Topology information is available */
 
+#define VIRTIO_BLK_ID_BYTES	20	/* ID string length */
+
 struct virtio_blk_config {
 	/* The capacity (in 512-byte sectors). */
 	__u64 capacity;
@@ -67,6 +69,9 @@ struct virtio_blk_config {
 /* Cache flush command */
 #define VIRTIO_BLK_T_FLUSH	4
 
+/* Get device ID command */
+#define VIRTIO_BLK_T_GET_ID	8
+
 /* Barrier before this op. */
 #define VIRTIO_BLK_T_BARRIER	0x80000000
 
include/linux/virtio_console.h
@@ -12,14 +12,39 @@
 
 /* Feature bits */
 #define VIRTIO_CONSOLE_F_SIZE	0	/* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1	/* Does host provide multiple ports? */
+
+#define VIRTIO_CONSOLE_BAD_ID		(~(u32)0)
 
 struct virtio_console_config {
 	/* colums of the screens */
 	__u16 cols;
 	/* rows of the screens */
 	__u16 rows;
+	/* max. number of ports this device can hold */
+	__u32 max_nr_ports;
 } __attribute__((packed));
 
+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+	__u32 id;		/* Port number */
+	__u16 event;		/* The kind of control event (see below) */
+	__u16 value;		/* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_DEVICE_READY	0
+#define VIRTIO_CONSOLE_PORT_ADD		1
+#define VIRTIO_CONSOLE_PORT_REMOVE	2
+#define VIRTIO_CONSOLE_PORT_READY	3
+#define VIRTIO_CONSOLE_CONSOLE_PORT	4
+#define VIRTIO_CONSOLE_RESIZE		5
+#define VIRTIO_CONSOLE_PORT_OPEN	6
+#define VIRTIO_CONSOLE_PORT_NAME	7
+
 #ifdef __KERNEL__
 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
 #endif /* __KERNEL__ */
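Several commits in this merge drive port management through struct virtio_console_control. A hedged sketch of how a guest might queue one such message, loosely modeled on the __send_control_msg() named in the commit list; the function and variable names here (announce_device_ready, c_ovq, cpkt) are invented, and the real implementation lives in the suppressed drivers/char/virtio_console.c diff:

#include <linux/scatterlist.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>

static void announce_device_ready(struct virtqueue *c_ovq,
				  struct virtio_console_control *cpkt)
{
	struct scatterlist sg;

	cpkt->id = 0;				/* port number */
	cpkt->event = VIRTIO_CONSOLE_DEVICE_READY;
	cpkt->value = 1;			/* success */

	sg_init_one(&sg, cpkt, sizeof(*cpkt));
	/* One host-readable entry (out = 1, in = 0), then notify the host. */
	if (virtqueue_add_buf(c_ovq, &sg, 1, 0, cpkt) >= 0)
		virtqueue_kick(c_ovq);
}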
net/9p/trans_virtio.c
@@ -137,7 +137,7 @@ static void req_done(struct virtqueue *vq)
 
 	P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
 
-	while ((rc = chan->vq->vq_ops->get_buf(chan->vq, &len)) != NULL) {
+	while ((rc = virtqueue_get_buf(chan->vq, &len)) != NULL) {
 		P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
 		P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
 		req = p9_tag_lookup(chan->client, rc->tag);
@@ -209,13 +209,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 
 	req->status = REQ_STATUS_SENT;
 
-	if (chan->vq->vq_ops->add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
+	if (virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc) < 0) {
 		P9_DPRINTK(P9_DEBUG_TRANS,
 			"9p debug: virtio rpc add_buf returned failure");
 		return -EIO;
 	}
 
-	chan->vq->vq_ops->kick(chan->vq);
+	virtqueue_kick(chan->vq);
 
 	P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request kicked\n");
 	return 0;