From 492dfb489658dfe4a755fa29dd0e34e9c8bd8fb8 Mon Sep 17 00:00:00 2001
From: James Bottomley
Date: Wed, 30 Aug 2006 15:48:45 -0400
Subject: [PATCH] [SCSI] block: add support for shared tag maps

The current block queue implementation already contains most of the
machinery for shared tag maps.  The only remaining pieces are a way to
allocate and destroy a tag map independently of the queues (so that
the maps can be managed on the life cycle of the overseeing entity)

Acked-by: Jens Axboe
Signed-off-by: James Bottomley
---
 block/ll_rw_blk.c      | 109 +++++++++++++++++++++++++++++++++--------
 include/linux/blkdev.h |   2 +
 2 files changed, 90 insertions(+), 21 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ddd9253f9d55..556a3d354eab 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -847,6 +847,35 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
 
 EXPORT_SYMBOL(blk_queue_find_tag);
 
+/**
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * Tries to free the specified @bqt@. Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
+{
+	int retval;
+
+	retval = atomic_dec_and_test(&bqt->refcnt);
+	if (retval) {
+		BUG_ON(bqt->busy);
+		BUG_ON(!list_empty(&bqt->busy_list));
+
+		kfree(bqt->tag_index);
+		bqt->tag_index = NULL;
+
+		kfree(bqt->tag_map);
+		bqt->tag_map = NULL;
+
+		kfree(bqt);
+
+	}
+
+	return retval;
+}
+
 /**
  * __blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
@@ -862,23 +891,28 @@ static void __blk_queue_free_tags(request_queue_t *q)
 	if (!bqt)
 		return;
 
-	if (atomic_dec_and_test(&bqt->refcnt)) {
-		BUG_ON(bqt->busy);
-		BUG_ON(!list_empty(&bqt->busy_list));
-
-		kfree(bqt->tag_index);
-		bqt->tag_index = NULL;
-
-		kfree(bqt->tag_map);
-		bqt->tag_map = NULL;
-
-		kfree(bqt);
-	}
+	__blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
 }
 
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * For externally managed @bqt@ frees the map.  Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+	if (unlikely(!__blk_free_tags(bqt)))
+		BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
@@ -901,7 +935,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	unsigned long *tag_map;
 	int nr_ulongs;
 
-	if (depth > q->nr_requests * 2) {
+	if (q && depth > q->nr_requests * 2) {
 		depth = q->nr_requests * 2;
 		printk(KERN_ERR "%s: adjusted depth to %d\n",
 		       __FUNCTION__, depth);
@@ -927,6 +961,38 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
 	return -ENOMEM;
 }
 
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+						   int depth)
+{
+	struct blk_queue_tag *tags;
+
+	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+	if (!tags)
+		goto fail;
+
+	if (init_tag_map(q, tags, depth))
+		goto fail;
+
+	INIT_LIST_HEAD(&tags->busy_list);
+	tags->busy = 0;
+	atomic_set(&tags->refcnt, 1);
+	return tags;
+fail:
+	kfree(tags);
+	return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth: the maximum queue depth supported
+ * @tags: the tag to use
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+	return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
 /**
  * blk_queue_init_tags - initialize the queue tag info
  * @q:  the request queue for the device
@@ -941,16 +1007,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+		tags = __blk_queue_init_tags(q, depth);
+
 		if (!tags)
 			goto fail;
-
-		if (init_tag_map(q, tags, depth))
-			goto fail;
-
-		INIT_LIST_HEAD(&tags->busy_list);
-		tags->busy = 0;
-		atomic_set(&tags->refcnt, 1);
 	} else if (q->queue_tags) {
 		if ((rc = blk_queue_resize_tags(q, depth)))
 			return rc;
@@ -1001,6 +1061,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
 		return 0;
 	}
 
+	/*
+	 * Currently cannot replace a shared tag map with a new
+	 * one, so error out if this is the case
+	 */
+	if (atomic_read(&bqt->refcnt) != 1)
+		return -EBUSY;
+
 	/*
 	 * save the old state info, so we can copy it back
 	 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aafe82788b4e..427b0d61be6c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -746,6 +746,8 @@ extern void blk_queue_free_tags(request_queue_t *);
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern long blk_congestion_wait(int rw, long timeout);
+extern struct blk_queue_tag *blk_init_tags(int);
+extern void blk_free_tags(struct blk_queue_tag *);
 
 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
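
For context, a minimal usage sketch (not part of the patch) of the new interface: a hypothetical host driver allocates one tag map with blk_init_tags(), attaches it to each of its device queues through the existing blk_queue_init_tags() (which accepts an externally supplied map), and releases it with blk_free_tags() once every queue using it has been torn down. The my_host structure and my_* helpers below are illustrative only.

/* Illustrative only -- my_host and the my_* helpers are not from the patch. */
#include <linux/blkdev.h>

struct my_host {
	struct blk_queue_tag *shared_tags;	/* one tag map for all queues */
	int depth;
};

static int my_host_setup(struct my_host *host, int depth)
{
	/* Allocate a tag map owned by the host, not by any single queue. */
	host->shared_tags = blk_init_tags(depth);
	if (!host->shared_tags)
		return -ENOMEM;
	host->depth = depth;
	return 0;
}

static int my_device_add(struct my_host *host, struct request_queue *q)
{
	/* Every device queue shares the host-wide map. */
	return blk_queue_init_tags(q, host->depth, host->shared_tags);
}

static void my_host_teardown(struct my_host *host)
{
	/*
	 * Per the blk_free_tags() kernel-doc above, all queues that used
	 * the map must already have been released at this point.
	 */
	blk_free_tags(host->shared_tags);
}

Note the ordering constraint: blk_free_tags() BUG()s if the map is still referenced, so the overseeing entity must release all queues using the map before freeing it.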