block: change ->make_request_fn() and users to return a queue cookie

No functional changes in this patch, but it prepares us for returning a
more useful cookie related to the IO that was queued up.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>

parent 8e483ed134
commit dece16353e
28 changed files with 127 additions and 71 deletions
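For drivers that complete the bio themselves, the conversion below is mechanical: the handler's return type changes from void to blk_qc_t and every bare return becomes return BLK_QC_T_NONE. A minimal sketch of the resulting shape, with the hypothetical example_* names standing in for a real driver's state and I/O path (they are not from this patch):

	static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
	{
		struct example_dev *dev = q->queuedata;	/* hypothetical driver state */

		if (!dev) {
			bio_io_error(bio);
			return BLK_QC_T_NONE;	/* was: return; */
		}

		example_do_io(dev, bio);	/* hypothetical I/O helper */
		bio_endio(bio);
		return BLK_QC_T_NONE;		/* no useful cookie yet */
	}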
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
 	struct gendisk *disk;
 };
 
-static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec bvec;
@@ -77,6 +77,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 		sec += len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
@@ -103,7 +103,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static void
+static blk_qc_t
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -120,7 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_for_each_segment(vec, bio, iter) {
 		if (unlikely(phys_mem + vec.bv_len > phys_end)) {
 			bio_io_error(bio);
-			return;
+			return BLK_QC_T_NONE;
 		}
 
 		user_mem = page_address(vec.bv_page) + vec.bv_offset;
@@ -133,6 +133,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 		transfered += vec.bv_len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 /**
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
@@ -101,7 +101,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 	spin_unlock(&dev->lock);
 }
 
-static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct simdisk *dev = q->queuedata;
 	struct bio_vec bvec;
@@ -119,6 +119,7 @@ static void simdisk_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int simdisk_open(struct block_device *bdev, fmode_t mode)
diff --git a/block/blk-core.c b/block/blk-core.c
@@ -809,7 +809,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
@@ -1678,7 +1678,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1698,7 +1698,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio->bi_error = -EIO;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
@@ -1713,7 +1713,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	if (!blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
@@ -1791,6 +1791,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 /*
@@ -1996,12 +1998,13 @@ generic_make_request_checks(struct bio *bio)
  * a lower device by calling into generic_make_request recursively, which
  * means the bio should NOT be touched after the call to ->make_request_fn.
  */
-void generic_make_request(struct bio *bio)
+blk_qc_t generic_make_request(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack;
+	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
-		return;
+		goto out;
 
 	/*
 	 * We only want one ->make_request_fn to be active at a time, else
@@ -2015,7 +2018,7 @@ void generic_make_request(struct bio *bio)
 	 */
 	if (current->bio_list) {
 		bio_list_add(current->bio_list, bio);
-		return;
+		goto out;
 	}
 
 	/* following loop may be a bit non-obvious, and so deserves some
@@ -2040,7 +2043,7 @@ void generic_make_request(struct bio *bio)
 
 		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
 
-			q->make_request_fn(q, bio);
+			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
 
@@ -2053,6 +2056,9 @@ void generic_make_request(struct bio *bio)
 		}
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
+
+out:
+	return ret;
 }
 EXPORT_SYMBOL(generic_make_request);
 
@@ -2066,7 +2072,7 @@ EXPORT_SYMBOL(generic_make_request);
  * interfaces; @bio must be presetup and ready for I/O.
  *
  */
-void submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(int rw, struct bio *bio)
 {
 	bio->bi_rw |= rw;
 
@@ -2100,7 +2106,7 @@ void submit_bio(int rw, struct bio *bio)
 		}
 	}
 
-	generic_make_request(bio);
+	return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
 
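Because generic_make_request() now forwards whatever ->make_request_fn() returns, and submit_bio() forwards generic_make_request(), a submitter can capture the cookie at the top of the stack. A sketch of the calling convention, assuming a bio already set up for I/O; after this patch alone every driver still returns BLK_QC_T_NONE, so the valid branch never fires yet:

	blk_qc_t cookie = submit_bio(READ, bio);

	if (blk_qc_t_valid(cookie)) {
		/* a later patch can use the cookie to identify the queued IO */
	}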
diff --git a/block/blk-mq.c b/block/blk-mq.c
@@ -1235,7 +1235,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
  * but will attempt to bypass the hctx queueing if we can go straight to
  * hardware for SYNC IO.
  */
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1249,7 +1249,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
@@ -1257,13 +1257,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count,
 					   &same_queue_rq))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1302,11 +1302,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
-			return;
+			return BLK_QC_T_NONE;
 		if (!blk_mq_direct_issue_request(old_rq))
-			return;
+			return BLK_QC_T_NONE;
 		blk_mq_insert_request(old_rq, false, true, true);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,13 +1320,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
+	return BLK_QC_T_NONE;
 }
 
 /*
  * Single hardware queue variant. This will attempt to use any per-process
  * plug for merging and IO deferral.
 */
-static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1339,18 +1340,18 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1374,7 +1375,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		}
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 		blk_mq_put_ctx(data.ctx);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1389,6 +1390,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	blk_mq_put_ctx(data.ctx);
+	return BLK_QC_T_NONE;
 }
 
 /*
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
@@ -323,7 +323,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 	return err;
 }
 
-static void brd_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -358,9 +358,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
 io_error:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int brd_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
@@ -1448,7 +1448,7 @@ extern int proc_details;
 /* drbd_req */
 extern void do_submit(struct work_struct *ws);
 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
-extern void drbd_make_request(struct request_queue *q, struct bio *bio);
+extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
@@ -1494,7 +1494,7 @@ void do_submit(struct work_struct *ws)
 	}
 }
 
-void drbd_make_request(struct request_queue *q, struct bio *bio)
+blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct drbd_device *device = (struct drbd_device *) q->queuedata;
 	unsigned long start_jif;
@@ -1510,6 +1510,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 
 	inc_ap_bio(device);
 	__drbd_make_request(device, bio, start_jif);
+	return BLK_QC_T_NONE;
 }
 
 void request_timer_fn(unsigned long data)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
@@ -321,7 +321,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
 	return &nullb->queues[index];
 }
 
-static void null_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	struct nullb *nullb = q->queuedata;
 	struct nullb_queue *nq = nullb_to_queue(nullb);
@@ -331,6 +331,7 @@ static void null_queue_bio(struct request_queue *q, struct bio *bio)
 	cmd->bio = bio;
 
 	null_handle_cmd(cmd);
+	return BLK_QC_T_NONE;
 }
 
 static int null_rq_prep_fn(struct request_queue *q, struct request *req)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
@@ -2441,7 +2441,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
 	}
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct pktcdvd_device *pd;
 	char b[BDEVNAME_SIZE];
@@ -2467,7 +2467,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	 */
 	if (bio_data_dir(bio) == READ) {
 		pkt_make_request_read(pd, bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2499,13 +2499,12 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		pkt_make_request_write(q, split);
 	} while (split != bio);
 
-	return;
+	return BLK_QC_T_NONE;
 end_io:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
-
-
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	struct request_queue *q = pd->disk->queue;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
@@ -598,7 +598,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	return next;
 }
 
-static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct ps3_system_bus_device *dev = q->queuedata;
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -614,11 +614,13 @@ static void ps3vram_make_request(struct request_queue *q, struct bio *bio)
 	spin_unlock_irq(&priv->lock);
 
 	if (busy)
-		return;
+		return BLK_QC_T_NONE;
 
 	do {
 		bio = ps3vram_do_bio(dev, bio);
 	} while (bio);
+
+	return BLK_QC_T_NONE;
 }
 
 static int ps3vram_probe(struct ps3_system_bus_device *dev)
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
@@ -145,7 +145,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
 	}
 }
 
-static void rsxx_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct rsxx_cardinfo *card = q->queuedata;
 	struct rsxx_bio_meta *bio_meta;
@@ -199,7 +199,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (st)
 		goto queue_err;
 
-	return;
+	return BLK_QC_T_NONE;
 
 queue_err:
 	kmem_cache_free(bio_meta_pool, bio_meta);
@@ -207,6 +207,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (st)
 		bio->bi_error = st;
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 /*----------------- Device Setup -------------------*/
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
@@ -524,7 +524,7 @@ static int mm_check_plugged(struct cardinfo *card)
 	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
 }
 
-static void mm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
@@ -541,7 +541,7 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 	activate(card);
 	spin_unlock_irq(&card->lock);
 
-	return;
+	return BLK_QC_T_NONE;
 }
 
 static irqreturn_t mm_interrupt(int irq, void *__card)
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
@@ -894,7 +894,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
 /*
  * Handler function for all zram I/O requests.
  */
-static void zram_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct zram *zram = queue->queuedata;
 
@@ -911,11 +911,12 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 
 	__zram_make_request(zram, bio);
 	zram_meta_put(zram);
-	return;
+	return BLK_QC_T_NONE;
 put_zram:
 	zram_meta_put(zram);
 error:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static void zram_slot_free_notify(struct block_device *bdev,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
@@ -803,7 +803,7 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	return NVM_IO_OK;
 }
 
-static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
+static blk_qc_t rrpc_make_rq(struct request_queue *q, struct bio *bio)
 {
 	struct rrpc *rrpc = q->queuedata;
 	struct nvm_rq *rqd;
@@ -811,21 +811,21 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		rrpc_discard(rrpc, bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	rqd = mempool_alloc(rrpc->rq_pool, GFP_KERNEL);
 	if (!rqd) {
 		pr_err_ratelimited("rrpc: not able to queue bio.");
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	memset(rqd, 0, sizeof(struct nvm_rq));
 
 	err = rrpc_submit_io(rrpc, bio, rqd, NVM_IOTYPE_NONE);
 	switch (err) {
 	case NVM_IO_OK:
-		return;
+		return BLK_QC_T_NONE;
 	case NVM_IO_ERR:
 		bio_io_error(bio);
 		break;
@@ -841,6 +841,7 @@ static void rrpc_make_rq(struct request_queue *q, struct bio *bio)
 	}
 
 	mempool_free(rqd, rrpc->rq_pool);
+	return BLK_QC_T_NONE;
 }
 
 static void rrpc_requeue(struct work_struct *work)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
@@ -958,7 +958,8 @@ static void cached_dev_nodata(struct closure *cl)
 
 /* Cached devices - read & write stuff */
 
-static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t cached_dev_make_request(struct request_queue *q,
+					struct bio *bio)
 {
 	struct search *s;
 	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
@@ -997,6 +998,8 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		else
 			generic_make_request(bio);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1070,7 +1073,8 @@ static void flash_dev_nodata(struct closure *cl)
 	continue_at(cl, search_free, NULL);
 }
 
-static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t flash_dev_make_request(struct request_queue *q,
+					     struct bio *bio)
 {
 	struct search *s;
 	struct closure *cl;
@@ -1093,7 +1097,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		continue_at_nobarrier(&s->cl,
 				      flash_dev_nodata,
 				      bcache_wq);
-		return;
+		return BLK_QC_T_NONE;
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1109,6 +1113,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	continue_at(cl, search_free, NULL);
+	return BLK_QC_T_NONE;
 }
 
 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
@@ -1755,7 +1755,7 @@ static void __split_and_process_bio(struct mapped_device *md,
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
  */
-static void dm_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 {
 	int rw = bio_data_dir(bio);
 	struct mapped_device *md = q->queuedata;
@@ -1774,12 +1774,12 @@ static void dm_make_request(struct request_queue *q, struct bio *bio)
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	__split_and_process_bio(md, map, bio);
 	dm_put_live_table(md, srcu_idx);
-	return;
+	return BLK_QC_T_NONE;
 }
 
 int dm_request_based(struct mapped_device *md)
diff --git a/drivers/md/md.c b/drivers/md/md.c
@@ -250,7 +250,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
-static void md_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = q->queuedata;
@@ -262,13 +262,13 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	if (mddev->ro == 1 && unlikely(rw == WRITE)) {
 		if (bio_sectors(bio) != 0)
 			bio->bi_error = -EROFS;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 	smp_rmb(); /* Ensure implications of 'active' are visible */
 	rcu_read_lock();
@@ -302,6 +302,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
 		wake_up(&mddev->sb_wait);
+
+	return BLK_QC_T_NONE;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
@@ -161,7 +161,7 @@ static int nd_blk_do_bvec(struct nd_blk_device *blk_dev,
 	return err;
 }
 
-static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
 	struct gendisk *disk = bdev->bd_disk;
@@ -208,6 +208,7 @@ static void nd_blk_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int nd_blk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
@@ -1150,7 +1150,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
 	return ret;
 }
 
-static void btt_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct btt *btt = q->queuedata;
@@ -1198,6 +1198,7 @@ static void btt_make_request(struct request_queue *q, struct bio *bio)
 
 out:
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
@@ -64,7 +64,7 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
 	kunmap_atomic(mem);
 }
 
-static void pmem_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
 {
 	bool do_acct;
 	unsigned long start;
@@ -84,6 +84,7 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
 		wmb_pmem();
 
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int pmem_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
@@ -27,7 +27,8 @@
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static void dcssblk_make_request(struct request_queue *q, struct bio *bio);
+static blk_qc_t dcssblk_make_request(struct request_queue *q,
+						struct bio *bio);
 static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
 			 void __pmem **kaddr, unsigned long *pfn);
 
@@ -815,7 +816,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
 	up_write(&dcssblk_devices_sem);
 }
 
-static void
+static blk_qc_t
 dcssblk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct dcssblk_dev_info *dev_info;
@@ -874,9 +875,10 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
 		bytes_done += bvec.bv_len;
 	}
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
 fail:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static long
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void)
 /*
  * Block device make request function.
 */
-static void xpram_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
 {
 	xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
 	struct bio_vec bvec;
@@ -223,9 +223,10 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
 		}
 	}
 	bio_endio(bio);
-	return;
+	return BLK_QC_T_NONE;
 fail:
 	bio_io_error(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo)
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -333,7 +333,7 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 	return count;
 }
 
-static void loop_make_request(struct request_queue *q, struct bio *old_bio)
+static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
 	int rw = bio_rw(old_bio);
@@ -364,9 +364,10 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);
-	return;
+	return BLK_QC_T_NONE;
 err:
 	bio_io_error(old_bio);
+	return BLK_QC_T_NONE;
 }
 
 static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
@@ -244,4 +244,28 @@ enum rq_flag_bits {
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 #define REQ_NO_TIMEOUT		(1ULL << __REQ_NO_TIMEOUT)
 
+typedef unsigned int blk_qc_t;
+#define BLK_QC_T_NONE	-1U
+#define BLK_QC_T_SHIFT	16
+
+static inline bool blk_qc_t_valid(blk_qc_t cookie)
+{
+	return cookie != BLK_QC_T_NONE;
+}
+
+static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
+{
+	return tag | (queue_num << BLK_QC_T_SHIFT);
+}
+
+static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
+{
+	return cookie >> BLK_QC_T_SHIFT;
+}
+
+static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
+{
+	return cookie & 0xffff;
+}
+
 #endif /* __LINUX_BLK_TYPES_H */
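The new helpers above pack a driver tag and a hardware queue index into a single 32-bit cookie: the queue number occupies the bits above BLK_QC_T_SHIFT, the tag the 16 bits below it, and -1U is reserved as the "no cookie" value. A quick round trip with illustrative values:

	blk_qc_t cookie = blk_tag_to_qc_t(42, 3);	/* tag 42 on hw queue 3 */

	/* cookie == (3 << 16) | 42 == 0x3002a */
	WARN_ON(blk_qc_t_to_tag(cookie) != 42);
	WARN_ON(blk_qc_t_to_queue_num(cookie) != 3);
	WARN_ON(!blk_qc_t_valid(cookie));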
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -209,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
 struct blk_queue_ctx;
 
 typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
@@ -761,7 +761,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
+extern blk_qc_t generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
diff --git a/include/linux/fs.h b/include/linux/fs.h
@@ -2625,7 +2625,7 @@ static inline void remove_inode_hash(struct inode *inode)
 extern void inode_sb_list_add(struct inode *inode);
 
 #ifdef CONFIG_BLOCK
-extern void submit_bio(int, struct bio *);
+extern blk_qc_t submit_bio(int, struct bio *);
 extern int bdev_read_only(struct block_device *);
 #endif
 extern int set_blocksize(struct block_device *, int);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
@@ -426,7 +426,7 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
 	return ppa;
 }
 
-typedef void (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
+typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
 typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
 typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);