Merge branch 'for-4.11/next' into for-4.11/linus-merge
Signed-off-by: Jens Axboe <axboe@fb.com>
commit 818551e2b2
171 changed files with 2500 additions and 2856 deletions
block/Kconfig
@@ -49,9 +49,13 @@ config LBDAF
 	  If unsure, say Y.
 
+config BLK_SCSI_REQUEST
+	bool
+
 config BLK_DEV_BSG
 	bool "Block layer SG support v4"
 	default y
+	select BLK_SCSI_REQUEST
 	help
 	  Saying Y here will enable generic SG (SCSI generic) v4 support
 	  for any block device.
 
@@ -71,6 +75,7 @@ config BLK_DEV_BSGLIB
 	bool "Block layer SG support v4 helper lib"
 	default n
 	select BLK_DEV_BSG
+	select BLK_SCSI_REQUEST
 	help
 	  Subsystems will normally enable this if needed. Users will not
 	  normally need to manually enable this.
block/Makefile
@@ -7,10 +7,11 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
 			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
-			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
+			genhd.o partition-generic.o ioprio.o \
 			badblocks.o partitions/
 
 obj-$(CONFIG_BOUNCE)	+= bounce.o
+obj-$(CONFIG_BLK_SCSI_REQUEST)	+= scsi_ioctl.o
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_BLK_DEV_BSGLIB)	+= bsg-lib.o
 obj-$(CONFIG_BLK_CGROUP)	+= blk-cgroup.o
block/bio.c (10 changes)
@@ -1227,9 +1227,6 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 	if (!bio)
 		goto out_bmd;
 
-	if (iter->type & WRITE)
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
 	ret = 0;
 
 	if (map_data) {
@@ -1394,12 +1391,6 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 
 	kfree(pages);
 
-	/*
-	 * set data direction, and check if mapped pages need bouncing
-	 */
-	if (iter->type & WRITE)
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-
 	bio_set_flag(bio, BIO_USER_MAPPED);
 
 	/*
@@ -1590,7 +1581,6 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 		bio->bi_private = data;
 	} else {
 		bio->bi_end_io = bio_copy_kern_endio;
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	}
 
 	return bio;
block/blk-cgroup.c
@@ -184,7 +184,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 		goto err_free_blkg;
 	}
 
-	wb_congested = wb_congested_get_create(&q->backing_dev_info,
+	wb_congested = wb_congested_get_create(q->backing_dev_info,
 					       blkcg->css.id,
 					       GFP_NOWAIT | __GFP_NOWARN);
 	if (!wb_congested) {
@@ -469,8 +469,8 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 const char *blkg_dev_name(struct blkcg_gq *blkg)
 {
 	/* some drivers (floppy) instantiate a queue w/o disk registered */
-	if (blkg->q->backing_dev_info.dev)
-		return dev_name(blkg->q->backing_dev_info.dev);
+	if (blkg->q->backing_dev_info->dev)
+		return dev_name(blkg->q->backing_dev_info->dev);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(blkg_dev_name);
@@ -1079,10 +1079,8 @@ int blkcg_init_queue(struct request_queue *q)
 	if (preloaded)
 		radix_tree_preload_end();
 
-	if (IS_ERR(blkg)) {
-		blkg_free(new_blkg);
+	if (IS_ERR(blkg))
 		return PTR_ERR(blkg);
-	}
 
 	q->root_blkg = blkg;
 	q->root_rl.blkg = blkg;
block/blk-core.c (327 changes)
@@ -33,6 +33,7 @@
 #include <linux/ratelimit.h>
 #include <linux/pm_runtime.h>
 #include <linux/blk-cgroup.h>
+#include <linux/debugfs.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -42,6 +43,10 @@
 #include "blk-mq-sched.h"
 #include "blk-wbt.h"
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *blk_debugfs_root;
+#endif
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -75,7 +80,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -86,7 +91,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
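Both congestion hunks change `q->backing_dev_info.wb.congested` into `q->backing_dev_info->wb.congested`: this merge stops embedding `struct backing_dev_info` in the request queue and makes it a separately allocated, refcounted object (see the `bdi_alloc_node()`/`bdi_put()` hunks further down). A compilable stand-in showing the shape of the conversion (illustrative types only, not the kernel definitions):

	struct backing_dev_info { unsigned long ra_pages; };

	/* old: bdi embedded, lifetime tied to the queue */
	struct queue_old { struct backing_dev_info backing_dev_info; };
	/* new: bdi allocated and refcounted on its own, so it can outlive the queue */
	struct queue_new { struct backing_dev_info *backing_dev_info; };

	static void set_readahead(struct queue_old *o, struct queue_new *n)
	{
	        o->backing_dev_info.ra_pages = 32;      /* member access */
	        n->backing_dev_info->ra_pages = 32;     /* pointer deref */
	}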
@@ -105,22 +110,6 @@ void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/**
- * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
- * @bdev:	device
- *
- * Locates the passed device's request queue and returns the address of its
- * backing_dev_info. This function can only be called if @bdev is opened
- * and the return value is never NULL.
- */
-struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	return &q->backing_dev_info;
-}
-EXPORT_SYMBOL(blk_get_backing_dev_info);
-
 void blk_rq_init(struct request_queue *q, struct request *rq)
 {
 	memset(rq, 0, sizeof(*rq));
@@ -132,8 +121,6 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->__sector = (sector_t) -1;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->cmd = rq->__cmd;
-	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
 	rq->internal_tag = -1;
 	rq->start_time = jiffies;
@@ -160,10 +147,8 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
-	int bit;
-
-	printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+	printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?",
 		(unsigned long long) rq->cmd_flags);
 
 	printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
@@ -171,13 +156,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 	       blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
 	printk(KERN_INFO "  bio %p, biotail %p, len %u\n",
 	       rq->bio, rq->biotail, blk_rq_bytes(rq));
-
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		printk(KERN_INFO "  cdb: ");
-		for (bit = 0; bit < BLK_MAX_CDB; bit++)
-			printk("%02x ", rq->cmd[bit]);
-		printk("\n");
-	}
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
@@ -588,7 +566,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_flush_integrity();
 
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)
@@ -600,7 +578,8 @@ void blk_cleanup_queue(struct request_queue *q)
 	q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
-	bdi_unregister(&q->backing_dev_info);
+	bdi_unregister(q->backing_dev_info);
+	put_disk_devt(q->disk_devt);
 
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
@@ -608,17 +587,41 @@
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 /* Allocate memory local to the request queue */
-static void *alloc_request_struct(gfp_t gfp_mask, void *data)
+static void *alloc_request_simple(gfp_t gfp_mask, void *data)
 {
-	int nid = (int)(long)data;
-	return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
+	struct request_queue *q = data;
+
+	return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
 }
 
-static void free_request_struct(void *element, void *unused)
+static void free_request_simple(void *element, void *data)
 {
 	kmem_cache_free(request_cachep, element);
 }
 
+static void *alloc_request_size(gfp_t gfp_mask, void *data)
+{
+	struct request_queue *q = data;
+	struct request *rq;
+
+	rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
+			q->node);
+	if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
+		kfree(rq);
+		rq = NULL;
+	}
+	return rq;
+}
+
+static void free_request_size(void *element, void *data)
+{
+	struct request_queue *q = data;
+
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, element);
+	kfree(element);
+}
+
 int blk_init_rl(struct request_list *rl, struct request_queue *q,
 		gfp_t gfp_mask)
 {
@@ -631,10 +634,15 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q,
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
-	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
-					  free_request_struct,
-					  (void *)(long)q->node, gfp_mask,
-					  q->node);
+	if (q->cmd_size) {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_size, free_request_size,
+				q, gfp_mask, q->node);
+	} else {
+		rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
+				alloc_request_simple, free_request_simple,
+				q, gfp_mask, q->node);
+	}
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
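The hunk above is the legacy-queue side of per-request driver payloads: when a driver sets `q->cmd_size`, each request is allocated as `sizeof(struct request) + cmd_size` and run through the driver's `init_rq_fn`/`exit_rq_fn` hooks; otherwise the old kmem_cache path is kept. A small userspace model of that allocation choice (hypothetical names, only a sketch of the logic):

	#include <stdlib.h>

	struct request { int tag; };

	struct queue {
	        size_t cmd_size;                 /* extra driver bytes per request */
	        int (*init_rq)(void *payload);   /* optional constructor hook      */
	};

	static void *alloc_request(struct queue *q)
	{
	        struct request *rq;

	        if (!q->cmd_size)                       /* "simple" variant */
	                return malloc(sizeof(*rq));

	        /* "size" variant: request and payload share one allocation */
	        rq = malloc(sizeof(*rq) + q->cmd_size);
	        if (rq && q->init_rq && q->init_rq(rq + 1) < 0) {
	                free(rq);
	                rq = NULL;
	        }
	        return rq;
	}

This is what lets SCSI attach its own per-command state to every request without the block core knowing its layout.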
@@ -697,7 +705,6 @@ static void blk_rq_timed_out_timer(unsigned long data)
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	struct request_queue *q;
-	int err;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
@@ -712,17 +719,17 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->bio_split)
 		goto fail_id;
 
-	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
-	q->node = node_id;
-
-	err = bdi_init(&q->backing_dev_info);
-	if (err)
+	q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
+	if (!q->backing_dev_info)
 		goto fail_split;
 
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	q->backing_dev_info->ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
+	q->node = node_id;
+
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);
@@ -772,7 +779,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_put(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:
@@ -825,15 +832,19 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	struct request_queue *uninit_q, *q;
+	struct request_queue *q;
 
-	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
-	if (!uninit_q)
+	q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!q)
 		return NULL;
 
-	q = blk_init_allocated_queue(uninit_q, rfn, lock);
-	if (!q)
-		blk_cleanup_queue(uninit_q);
+	q->request_fn = rfn;
+	if (lock)
+		q->queue_lock = lock;
+	if (blk_init_allocated_queue(q) < 0) {
+		blk_cleanup_queue(q);
+		return NULL;
+	}
 
 	return q;
 }
@@ -841,30 +852,22 @@ EXPORT_SYMBOL(blk_init_queue_node);
 
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
-struct request_queue *
-blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
-			 spinlock_t *lock)
+int blk_init_allocated_queue(struct request_queue *q)
 {
-	if (!q)
-		return NULL;
-
-	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
+	q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
 	if (!q->fq)
-		return NULL;
+		return -ENOMEM;
+
+	if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
+		goto out_free_flush_queue;
 
 	if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
-		goto fail;
+		goto out_exit_flush_rq;
 
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
-	q->request_fn		= rfn;
-	q->prep_rq_fn		= NULL;
-	q->unprep_rq_fn		= NULL;
 	q->queue_flags		|= QUEUE_FLAG_DEFAULT;
 
-	/* Override internal queue lock with supplied lock pointer */
-	if (lock)
-		q->queue_lock		= lock;
-
 	/*
 	 * This also sets hw/phys segments, boundary and size
 	 */
@@ -878,17 +881,19 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	/* init elevator */
 	if (elevator_init(q, NULL)) {
 		mutex_unlock(&q->sysfs_lock);
-		goto fail;
+		goto out_exit_flush_rq;
 	}
 
 	mutex_unlock(&q->sysfs_lock);
+	return 0;
 
-	return q;
-
-fail:
+out_exit_flush_rq:
+	if (q->exit_rq_fn)
+		q->exit_rq_fn(q, q->fq->flush_rq);
+out_free_flush_queue:
 	blk_free_flush_queue(q->fq);
 	wbt_exit(q);
-	return NULL;
+	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue);
 
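`blk_init_allocated_queue()` now takes just the queue and returns an errno; `request_fn` and an optional queue lock are assigned by the caller beforehand, as the rewritten `blk_init_queue_node()` above shows. A legacy driver doing the two-step setup by hand would look roughly like this (sketch; `mydrv_request_fn` and `struct mydrv_cmd` are hypothetical):

	struct request_queue *q;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
	if (!q)
	        return -ENOMEM;

	q->request_fn = mydrv_request_fn;        /* set before init */
	q->cmd_size = sizeof(struct mydrv_cmd);  /* optional per-rq payload */

	if (blk_init_allocated_queue(q) < 0) {
	        blk_cleanup_queue(q);
	        return -ENOMEM;
	}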
@@ -1024,25 +1029,6 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
 	return 0;
 }
 
-/*
- * Determine if elevator data should be initialized when allocating the
- * request associated with @bio.
- */
-static bool blk_rq_should_init_elevator(struct bio *bio)
-{
-	if (!bio)
-		return true;
-
-	/*
-	 * Flush requests do not use the elevator so skip initialization.
-	 * This allows a request to share the flush and elevator data.
-	 */
-	if (op_is_flush(bio->bi_opf))
-		return false;
-
-	return true;
-}
-
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
@@ -1121,10 +1107,13 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	 * request is freed.  This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
 	 *
+	 * Flush requests do not use the elevator so skip initialization.
+	 * This allows a request to share the flush and elevator data.
+	 *
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
+	if (!op_is_flush(op) && !blk_queue_bypass(q)) {
 		rq_flags |= RQF_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
@@ -1184,7 +1173,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 
 	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
@@ -1278,8 +1267,6 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 {
 	struct request *rq;
 
-	BUG_ON(rw != READ && rw != WRITE);
-
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
@@ -1308,18 +1295,6 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_get_request);
 
-/**
- * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
- * @rq:		request to be initialized
- *
- */
-void blk_rq_set_block_pc(struct request *rq)
-{
-	rq->cmd_type = REQ_TYPE_BLOCK_PC;
-	memset(rq->__cmd, 0, sizeof(rq->__cmd));
-}
-EXPORT_SYMBOL(blk_rq_set_block_pc);
-
 /**
  * blk_requeue_request - put a request back on queue
  * @q:		request queue where request should be inserted
@@ -1510,6 +1485,30 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 	return true;
 }
 
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct bio *bio)
+{
+	unsigned short segments = blk_rq_nr_discard_segments(req);
+
+	if (segments >= queue_max_discard_segments(q))
+		goto no_merge;
+	if (blk_rq_sectors(req) + bio_sectors(bio) >
+	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
+		goto no_merge;
+
+	req->biotail->bi_next = bio;
+	req->biotail = bio;
+	req->__data_len += bio->bi_iter.bi_size;
+	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
+	req->nr_phys_segments = segments + 1;
+
+	blk_account_io_start(req, false);
+	return true;
+no_merge:
+	req_set_nomerge(q, req);
+	return false;
+}
+
 /**
  * blk_attempt_plug_merge - try to merge with %current's plugged list
  * @q: request_queue new bio is being queued at
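`bio_attempt_discard_merge()` gives discards their own merge path: instead of requiring the bio to be contiguous with the request, each merged bio becomes one more discard range, capped by `queue_max_discard_segments()` and the request's size limit. The admission test reduces to:

	/* Sketch of the gate only; the full helper is in the hunk above. */
	if (blk_rq_nr_discard_segments(req) >= queue_max_discard_segments(q))
	        return false;   /* no room for another discard range */
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
	        return false;   /* merged request would be too large */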
@@ -1538,12 +1537,11 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 {
 	struct blk_plug *plug;
 	struct request *rq;
-	bool ret = false;
 	struct list_head *plug_list;
 
 	plug = current->plug;
 	if (!plug)
-		goto out;
+		return false;
 	*request_count = 0;
 
 	if (q->mq_ops)
@@ -1552,7 +1550,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		plug_list = &plug->list;
 
 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
-		int el_ret;
+		bool merged = false;
 
 		if (rq->q == q) {
 			(*request_count)++;
@@ -1568,19 +1566,25 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = blk_try_merge(rq, bio);
-		if (el_ret == ELEVATOR_BACK_MERGE) {
-			ret = bio_attempt_back_merge(q, rq, bio);
-			if (ret)
-				break;
-		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-			ret = bio_attempt_front_merge(q, rq, bio);
-			if (ret)
-				break;
+		switch (blk_try_merge(rq, bio)) {
+		case ELEVATOR_BACK_MERGE:
+			merged = bio_attempt_back_merge(q, rq, bio);
+			break;
+		case ELEVATOR_FRONT_MERGE:
+			merged = bio_attempt_front_merge(q, rq, bio);
+			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
+		default:
+			break;
 		}
+
+		if (merged)
+			return true;
 	}
-out:
-	return ret;
+
+	return false;
 }
 
 unsigned int blk_plug_queued_count(struct request_queue *q)
@@ -1609,7 +1613,6 @@ unsigned int blk_plug_queued_count(struct request_queue *q)
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->cmd_type = REQ_TYPE_FS;
 	if (bio->bi_opf & REQ_RAHEAD)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
@@ -1623,8 +1626,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	struct blk_plug *plug;
-	int el_ret, where = ELEVATOR_INSERT_SORT;
-	struct request *req;
+	int where = ELEVATOR_INSERT_SORT;
+	struct request *req, *free;
 	unsigned int request_count = 0;
 	unsigned int wb_acct;
 
@@ -1661,21 +1664,29 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 
 	spin_lock_irq(q->queue_lock);
 
-	el_ret = elv_merge(q, &req, bio);
-	if (el_ret == ELEVATOR_BACK_MERGE) {
-		if (bio_attempt_back_merge(q, req, bio)) {
-			elv_bio_merged(q, req, bio);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out_unlock;
-		}
-	} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-		if (bio_attempt_front_merge(q, req, bio)) {
-			elv_bio_merged(q, req, bio);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out_unlock;
-		}
+	switch (elv_merge(q, &req, bio)) {
+	case ELEVATOR_BACK_MERGE:
+		if (!bio_attempt_back_merge(q, req, bio))
+			break;
+		elv_bio_merged(q, req, bio);
+		free = attempt_back_merge(q, req);
+		if (free)
+			__blk_put_request(q, free);
+		else
+			elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
+		goto out_unlock;
+	case ELEVATOR_FRONT_MERGE:
+		if (!bio_attempt_front_merge(q, req, bio))
+			break;
+		elv_bio_merged(q, req, bio);
+		free = attempt_front_merge(q, req);
+		if (free)
+			__blk_put_request(q, free);
+		else
+			elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
+		goto out_unlock;
+	default:
+		break;
 	}
 
 get_rq:
@@ -2452,14 +2463,6 @@ void blk_start_request(struct request *req)
 		wbt_issue(req->q->rq_wb, &req->issue_stat);
 	}
 
-	/*
-	 * We are now handing the request to the hardware, initialize
-	 * resid_len to full count and add the timeout handler.
-	 */
-	req->resid_len = blk_rq_bytes(req);
-	if (unlikely(blk_bidi_rq(req)))
-		req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
-
 	BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
 	blk_add_timer(req);
 }
@@ -2530,10 +2533,10 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	 * TODO: tj: This is too subtle.  It would be better to let
 	 * low level drivers do what they see fit.
 	 */
-	if (req->cmd_type == REQ_TYPE_FS)
+	if (!blk_rq_is_passthrough(req))
 		req->errors = 0;
 
-	if (error && req->cmd_type == REQ_TYPE_FS &&
+	if (error && !blk_rq_is_passthrough(req) &&
 	    !(req->rq_flags & RQF_QUIET)) {
 		char *error_type;
 
@@ -2605,7 +2608,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	req->__data_len -= total_bytes;
 
 	/* update sector only for requests with clear definition of sector */
-	if (req->cmd_type == REQ_TYPE_FS)
+	if (!blk_rq_is_passthrough(req))
 		req->__sector += total_bytes >> 9;
 
 	/* mixed attributes always follow the first bio */
@@ -2683,8 +2686,8 @@ void blk_finish_request(struct request *req, int error)
 
 	BUG_ON(blk_queued_rq(req));
 
-	if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
-		laptop_io_completion(&req->q->backing_dev_info);
+	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
+		laptop_io_completion(req->q->backing_dev_info);
 
 	blk_delete_timer(req);
 
@@ -3007,8 +3010,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
 static void __blk_rq_prep_clone(struct request *dst, struct request *src)
 {
 	dst->cpu = src->cpu;
-	dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
-	dst->cmd_type = src->cmd_type;
 	dst->__sector = blk_rq_pos(src);
 	dst->__data_len = blk_rq_bytes(src);
 	dst->nr_phys_segments = src->nr_phys_segments;
@@ -3484,5 +3485,9 @@ int __init blk_dev_init(void)
 	blk_requestq_cachep = kmem_cache_create("request_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
+#ifdef CONFIG_DEBUG_FS
+	blk_debugfs_root = debugfs_create_dir("block", NULL);
+#endif
+
 	return 0;
 }
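A pattern worth calling out in the blk-core.c hunks: every `req->cmd_type == REQ_TYPE_FS` test becomes `!blk_rq_is_passthrough(req)`. With `cmd_type` gone from `struct request`, whether a request is a normal filesystem request is derived from its operation bits; in the 4.11-era headers the helper boils down to roughly this (shown for context, from include/linux/blkdev.h):

	static inline bool blk_rq_is_passthrough(struct request *rq)
	{
	        /* passthrough == SCSI or driver-private ops, not READ/WRITE etc. */
	        return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
	}

where the two sub-helpers just test for the REQ_OP_SCSI_* and REQ_OP_DRV_* opcodes.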
block/blk-exec.c
@@ -11,11 +11,6 @@
 #include "blk.h"
 #include "blk-mq-sched.h"
 
-/*
- * for max sense size
- */
-#include <scsi/scsi_cmnd.h>
-
 /**
  * blk_end_sync_rq - executes a completion event on a request
  * @rq: request to complete
@@ -56,7 +51,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	WARN_ON(irqs_disabled());
-	WARN_ON(rq->cmd_type == REQ_TYPE_FS);
+	WARN_ON(!blk_rq_is_passthrough(rq));
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
@@ -101,16 +96,9 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 		   struct request *rq, int at_head)
 {
 	DECLARE_COMPLETION_ONSTACK(wait);
-	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 	unsigned long hang_check;
 
-	if (!rq->sense) {
-		memset(sense, 0, sizeof(sense));
-		rq->sense = sense;
-		rq->sense_len = 0;
-	}
-
 	rq->end_io_data = &wait;
 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 
@@ -124,11 +112,6 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	if (rq->errors)
 		err = -EIO;
 
-	if (rq->sense == sense)	{
-		rq->sense = NULL;
-		rq->sense_len = 0;
-	}
-
 	return err;
 }
 EXPORT_SYMBOL(blk_execute_rq);
block/blk-flush.c
@@ -297,8 +297,14 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
 		return false;
 
-	/* C2 and C3 */
+	/* C2 and C3
+	 *
+	 * For blk-mq + scheduling, we can risk having all driver tags
+	 * assigned to empty flushes, and we deadlock if we are expecting
+	 * other requests to make progress. Don't defer for that case.
+	 */
 	if (!list_empty(&fq->flush_data_in_flight) &&
+	    !(q->mq_ops && q->elevator) &&
 	    time_before(jiffies,
 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return false;
@@ -327,7 +333,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
 		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
 	}
 
-	flush_rq->cmd_type = REQ_TYPE_FS;
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
 	flush_rq->rq_disk = first_rq->rq_disk;
@@ -547,11 +552,10 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	if (!fq)
 		goto fail;
 
-	if (q->mq_ops) {
+	if (q->mq_ops)
 		spin_lock_init(&fq->mq_flush_lock);
-		rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
-	}
 
+	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 	fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
 	if (!fq->flush_rq)
 		goto fail_rq;
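The expanded C2/C3 comment in blk_kick_flush() explains the new `!(q->mq_ops && q->elevator)` term: under an mq scheduler, every driver tag could already be held by an empty flush, so deferring the flush while waiting for data requests to finish could deadlock. Isolated from its surroundings, the defer test after this change reads:

	/* Defer only when it is safe to wait for in-flight data requests. */
	if (!list_empty(&fq->flush_data_in_flight) &&
	    !(q->mq_ops && q->elevator) &&      /* never defer under mq+sched */
	    time_before(jiffies,
	                fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
	        return false;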
block/blk-integrity.c
@@ -443,10 +443,10 @@ void blk_integrity_revalidate(struct gendisk *disk)
 		return;
 
 	if (bi->profile)
-		disk->queue->backing_dev_info.capabilities |=
+		disk->queue->backing_dev_info->capabilities |=
 			BDI_CAP_STABLE_WRITES;
 	else
-		disk->queue->backing_dev_info.capabilities &=
+		disk->queue->backing_dev_info->capabilities &=
 			~BDI_CAP_STABLE_WRITES;
 }
 
block/blk-ioc.c
@@ -35,7 +35,10 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/* Exit an icq. Called with both ioc and q locked. */
+/*
+ * Exit an icq. Called with both ioc and q locked for sq, only ioc locked for
+ * mq.
+ */
 static void ioc_exit_icq(struct io_cq *icq)
 {
 	struct elevator_type *et = icq->q->elevator->type;
@@ -166,6 +169,7 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
+	struct elevator_type *et;
 	unsigned long flags;
 	struct io_cq *icq;
 
@@ -184,13 +188,19 @@ void put_io_context_active(struct io_context *ioc)
 	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
-		if (spin_trylock(icq->q->queue_lock)) {
+
+		et = icq->q->elevator->type;
+		if (et->uses_mq) {
 			ioc_exit_icq(icq);
-			spin_unlock(icq->q->queue_lock);
 		} else {
-			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
-			goto retry;
+			if (spin_trylock(icq->q->queue_lock)) {
+				ioc_exit_icq(icq);
+				spin_unlock(icq->q->queue_lock);
+			} else {
+				spin_unlock_irqrestore(&ioc->lock, flags);
+				cpu_relax();
+				goto retry;
+			}
 		}
 	}
 	spin_unlock_irqrestore(&ioc->lock, flags);
block/blk-map.c
@@ -16,8 +16,6 @@
 int blk_rq_append_bio(struct request *rq, struct bio *bio)
 {
 	if (!rq->bio) {
-		rq->cmd_flags &= REQ_OP_MASK;
-		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
 		blk_rq_bio_prep(rq->q, rq, bio);
 	} else {
 		if (!ll_back_merge_fn(rq->q, rq, bio))
@@ -62,6 +60,9 @@ static int __blk_rq_map_user_iov(struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
+	bio->bi_opf &= ~REQ_OP_MASK;
+	bio->bi_opf |= req_op(rq);
+
 	if (map_data && map_data->null_mapped)
 		bio_set_flag(bio, BIO_NULL_MAPPED);
 
@@ -90,7 +91,7 @@ static int __blk_rq_map_user_iov(struct request *rq,
 }
 
 /**
- * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
@@ -199,7 +200,7 @@ int blk_rq_unmap_user(struct bio *bio)
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
- * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
+ * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
@@ -234,8 +235,8 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
-	if (!reading)
-		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+	bio->bi_opf &= ~REQ_OP_MASK;
+	bio->bi_opf |= req_op(rq);
 
 	if (do_copy)
 		rq->rq_flags |= RQF_COPY_USER;
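Both map paths now stamp the freshly built bio with the request's operation (`bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf |= req_op(rq);`) rather than inferring WRITE from the data direction. It is the standard masked read-modify-write on a flags word; a runnable miniature with made-up bit values:

	#include <stdio.h>

	#define OP_MASK   0xffu          /* low byte holds the operation (model) */
	#define OP_WRITE  0x01u
	#define FLAG_SYNC 0x800u         /* some unrelated flag bit */

	int main(void)
	{
	        unsigned int rq_flags  = OP_WRITE | FLAG_SYNC;
	        unsigned int bio_flags = FLAG_SYNC;     /* op not set yet */

	        bio_flags &= ~OP_MASK;                  /* clear old op bits */
	        bio_flags |= rq_flags & OP_MASK;        /* inherit the request's op */

	        printf("bio op=%#x, flags=%#x\n", bio_flags & OP_MASK, bio_flags);
	        return 0;
	}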
block/blk-merge.c
@@ -482,13 +482,6 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static void req_set_nomerge(struct request_queue *q, struct request *req)
-{
-	req->cmd_flags |= REQ_NOMERGE;
-	if (req == q->last_merge)
-		q->last_merge = NULL;
-}
-
 static inline int ll_new_hw_segment(struct request_queue *q,
 				    struct request *req,
 				    struct bio *bio)
@@ -659,31 +652,32 @@ static void blk_account_io_merge(struct request *req)
 }
 
 /*
- * Has to be called with the request spinlock acquired
+ * For non-mq, this has to be called with the request spinlock acquired.
+ * For mq with scheduling, the appropriate queue wide lock should be held.
 */
-static int attempt_merge(struct request_queue *q, struct request *req,
-			  struct request *next)
+static struct request *attempt_merge(struct request_queue *q,
+				     struct request *req, struct request *next)
 {
 	if (!rq_mergeable(req) || !rq_mergeable(next))
-		return 0;
+		return NULL;
 
 	if (req_op(req) != req_op(next))
-		return 0;
+		return NULL;
 
 	/*
 	 * not contiguous
 	 */
 	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-		return 0;
+		return NULL;
 
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
 	    || req_no_special_merge(next))
-		return 0;
+		return NULL;
 
 	if (req_op(req) == REQ_OP_WRITE_SAME &&
 	    !blk_write_same_mergeable(req->bio, next->bio))
-		return 0;
+		return NULL;
 
 	/*
 	 * If we are allowed to merge, then append bio list
@@ -692,7 +686,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	 * counts here.
 	 */
 	if (!ll_merge_requests_fn(q, req, next))
-		return 0;
+		return NULL;
 
 	/*
 	 * If failfast settings disagree or any of the two is already
@@ -732,42 +726,51 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 	if (blk_rq_cpu_valid(next))
 		req->cpu = next->cpu;
 
-	/* owner-ship of bio passed from next to req */
+	/*
+	 * ownership of bio passed from next to req, return 'next' for
+	 * the caller to free
+	 */
 	next->bio = NULL;
-	__blk_put_request(q, next);
-	return 1;
+	return next;
 }
 
-int attempt_back_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
 {
 	struct request *next = elv_latter_request(q, rq);
 
 	if (next)
 		return attempt_merge(q, rq, next);
 
-	return 0;
+	return NULL;
 }
 
-int attempt_front_merge(struct request_queue *q, struct request *rq)
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
 {
 	struct request *prev = elv_former_request(q, rq);
 
 	if (prev)
 		return attempt_merge(q, prev, rq);
 
-	return 0;
+	return NULL;
 }
 
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 			  struct request *next)
 {
 	struct elevator_queue *e = q->elevator;
+	struct request *free;
 
 	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
 		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
 			return 0;
 
-	return attempt_merge(q, rq, next);
+	free = attempt_merge(q, rq, next);
+	if (free) {
+		__blk_put_request(q, free);
+		return 1;
+	}
+
+	return 0;
 }
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -798,9 +801,12 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	return true;
 }
 
-int blk_try_merge(struct request *rq, struct bio *bio)
+enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
+	if (req_op(rq) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(rq->q) > 1)
+		return ELEVATOR_DISCARD_MERGE;
+	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
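The `attempt_merge()` family changing from `int` to `struct request *` shifts the `__blk_put_request()` of the absorbed request out to the callers; mq schedulers invoke these helpers under their own locks and want to drop the absorbed request only after unlocking. The caller-side contract now looks like this (sketch of the pattern the blk-core.c hunks above adopt):

	struct request *free;

	free = attempt_back_merge(q, rq);       /* or attempt_front_merge() */
	if (free)
	        __blk_put_request(q, free);     /* rq absorbed free's bios */
	else
	        elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);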
block/blk-mq-debugfs.c
@@ -19,6 +19,7 @@
 #include <linux/debugfs.h>
 
 #include <linux/blk-mq.h>
+#include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
@@ -28,8 +29,6 @@ struct blk_mq_debugfs_attr {
 	const struct file_operations *fops;
 };
 
-static struct dentry *block_debugfs_root;
-
 static int blk_mq_debugfs_seq_open(struct inode *inode, struct file *file,
 				   const struct seq_operations *ops)
 {
@@ -88,13 +87,14 @@ static int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
 {
 	struct request *rq = list_entry_rq(v);
 
-	seq_printf(m, "%p {.cmd_type=%u, .cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
-		   rq, rq->cmd_type, rq->cmd_flags, (unsigned int)rq->rq_flags,
+	seq_printf(m, "%p {.cmd_flags=0x%x, .rq_flags=0x%x, .tag=%d, .internal_tag=%d}\n",
+		   rq, rq->cmd_flags, (__force unsigned int)rq->rq_flags,
 		   rq->tag, rq->internal_tag);
 	return 0;
 }
 
 static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
+	__acquires(&hctx->lock)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 
@@ -110,6 +110,7 @@ static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
 }
 
 static void hctx_dispatch_stop(struct seq_file *m, void *v)
+	__releases(&hctx->lock)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 
@@ -176,13 +177,17 @@ static int hctx_tags_show(struct seq_file *m, void *v)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct request_queue *q = hctx->queue;
+	int res;
 
-	mutex_lock(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->sysfs_lock);
+	if (res)
+		goto out;
 	if (hctx->tags)
 		blk_mq_debugfs_tags_show(m, hctx->tags);
 	mutex_unlock(&q->sysfs_lock);
 
-	return 0;
+out:
+	return res;
 }
 
 static int hctx_tags_open(struct inode *inode, struct file *file)
@@ -201,12 +206,17 @@ static int hctx_tags_bitmap_show(struct seq_file *m, void *v)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct request_queue *q = hctx->queue;
+	int res;
 
-	mutex_lock(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->sysfs_lock);
+	if (res)
+		goto out;
 	if (hctx->tags)
 		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
 	mutex_unlock(&q->sysfs_lock);
-	return 0;
+
+out:
+	return res;
 }
 
 static int hctx_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -225,13 +235,17 @@ static int hctx_sched_tags_show(struct seq_file *m, void *v)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct request_queue *q = hctx->queue;
+	int res;
 
-	mutex_lock(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->sysfs_lock);
+	if (res)
+		goto out;
 	if (hctx->sched_tags)
 		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
 	mutex_unlock(&q->sysfs_lock);
 
-	return 0;
+out:
+	return res;
 }
 
 static int hctx_sched_tags_open(struct inode *inode, struct file *file)
@@ -250,12 +264,17 @@ static int hctx_sched_tags_bitmap_show(struct seq_file *m, void *v)
 {
 	struct blk_mq_hw_ctx *hctx = m->private;
 	struct request_queue *q = hctx->queue;
+	int res;
 
-	mutex_lock(&q->sysfs_lock);
+	res = mutex_lock_interruptible(&q->sysfs_lock);
+	if (res)
+		goto out;
 	if (hctx->sched_tags)
 		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
 	mutex_unlock(&q->sysfs_lock);
-	return 0;
+
+out:
+	return res;
 }
 
 static int hctx_sched_tags_bitmap_open(struct inode *inode, struct file *file)
@@ -482,6 +501,7 @@ static const struct file_operations hctx_active_fops = {
 };
 
 static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
+	__acquires(&ctx->lock)
 {
 	struct blk_mq_ctx *ctx = m->private;
 
@@ -497,6 +517,7 @@ static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
 }
 
 static void ctx_rq_list_stop(struct seq_file *m, void *v)
+	__releases(&ctx->lock)
 {
 	struct blk_mq_ctx *ctx = m->private;
 
@@ -630,6 +651,7 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
 	{"queued", 0600, &hctx_queued_fops},
 	{"run", 0600, &hctx_run_fops},
 	{"active", 0400, &hctx_active_fops},
+	{},
 };
 
 static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
@@ -637,14 +659,15 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
 	{"dispatched", 0600, &ctx_dispatched_fops},
 	{"merged", 0600, &ctx_merged_fops},
 	{"completed", 0600, &ctx_completed_fops},
+	{},
 };
 
 int blk_mq_debugfs_register(struct request_queue *q, const char *name)
 {
-	if (!block_debugfs_root)
+	if (!blk_debugfs_root)
 		return -ENOENT;
 
-	q->debugfs_dir = debugfs_create_dir(name, block_debugfs_root);
+	q->debugfs_dir = debugfs_create_dir(name, blk_debugfs_root);
 	if (!q->debugfs_dir)
 		goto err;
 
@@ -665,27 +688,31 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
 	q->debugfs_dir = NULL;
 }
 
+static bool debugfs_create_files(struct dentry *parent, void *data,
+				 const struct blk_mq_debugfs_attr *attr)
+{
+	for (; attr->name; attr++) {
+		if (!debugfs_create_file(attr->name, attr->mode, parent,
+					 data, attr->fops))
+			return false;
+	}
+	return true;
+}
+
 static int blk_mq_debugfs_register_ctx(struct request_queue *q,
 				       struct blk_mq_ctx *ctx,
 				       struct dentry *hctx_dir)
 {
 	struct dentry *ctx_dir;
 	char name[20];
-	int i;
 
 	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
 	ctx_dir = debugfs_create_dir(name, hctx_dir);
 	if (!ctx_dir)
 		return -ENOMEM;
 
-	for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_ctx_attrs); i++) {
-		const struct blk_mq_debugfs_attr *attr;
-
-		attr = &blk_mq_debugfs_ctx_attrs[i];
-		if (!debugfs_create_file(attr->name, attr->mode, ctx_dir, ctx,
-					 attr->fops))
-			return -ENOMEM;
-	}
+	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
+		return -ENOMEM;
 
 	return 0;
 }
@@ -703,14 +730,8 @@ static int blk_mq_debugfs_register_hctx(struct request_queue *q,
 	if (!hctx_dir)
 		return -ENOMEM;
 
-	for (i = 0; i < ARRAY_SIZE(blk_mq_debugfs_hctx_attrs); i++) {
-		const struct blk_mq_debugfs_attr *attr;
-
-		attr = &blk_mq_debugfs_hctx_attrs[i];
-		if (!debugfs_create_file(attr->name, attr->mode, hctx_dir, hctx,
-					 attr->fops))
-			return -ENOMEM;
-	}
+	if (!debugfs_create_files(hctx_dir, hctx, blk_mq_debugfs_hctx_attrs))
+		return -ENOMEM;
 
 	hctx_for_each_ctx(hctx, ctx, i) {
 		if (blk_mq_debugfs_register_ctx(q, ctx, hctx_dir))
@@ -749,8 +770,3 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
 	debugfs_remove_recursive(q->mq_debugfs_dir);
 	q->mq_debugfs_dir = NULL;
 }
-
-void blk_mq_debugfs_init(void)
-{
-	block_debugfs_root = debugfs_create_dir("block", NULL);
-}
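The `{}` entries added to the attribute arrays are sentinels: the new `debugfs_create_files()` helper walks a table until it hits a NULL `name`, which is what lets the two per-array `ARRAY_SIZE()` loops collapse into one function. The same sentinel-terminated table pattern, runnable in isolation:

	#include <stdio.h>

	struct attr { const char *name; int mode; };

	static const struct attr attrs[] = {
	        {"dispatched", 0600},
	        {"merged", 0600},
	        {"completed", 0600},
	        {},                     /* sentinel: name == NULL ends the walk */
	};

	int main(void)
	{
	        const struct attr *a;

	        for (a = attrs; a->name; a++)
	                printf("%-10s %04o\n", a->name, a->mode);
	        return 0;
	}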
block/blk-mq-sched.c
@@ -68,7 +68,9 @@ int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
 EXPORT_SYMBOL_GPL(blk_mq_sched_init_hctx_data);
 
 static void __blk_mq_sched_assign_ioc(struct request_queue *q,
-				      struct request *rq, struct io_context *ioc)
+				      struct request *rq,
+				      struct bio *bio,
+				      struct io_context *ioc)
 {
 	struct io_cq *icq;
 
@@ -83,7 +85,7 @@ static void __blk_mq_sched_assign_ioc(struct request_queue *q,
 	}
 
 	rq->elv.icq = icq;
-	if (!blk_mq_sched_get_rq_priv(q, rq)) {
+	if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
 		rq->rq_flags |= RQF_ELVPRIV;
 		get_io_context(icq->ioc);
 		return;
@@ -99,7 +101,7 @@ static void blk_mq_sched_assign_ioc(struct request_queue *q,
 
 	ioc = rq_ioc(bio);
 	if (ioc)
-		__blk_mq_sched_assign_ioc(q, rq, ioc);
+		__blk_mq_sched_assign_ioc(q, rq, bio, ioc);
 }
 
 struct request *blk_mq_sched_get_request(struct request_queue *q,
@@ -173,6 +175,8 @@ void blk_mq_sched_put_request(struct request *rq)
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 {
 	struct elevator_queue *e = hctx->queue->elevator;
+	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+	bool did_work = false;
 	LIST_HEAD(rq_list);
 
 	if (unlikely(blk_mq_hctx_stopped(hctx)))
@@ -202,11 +206,18 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
 	 */
 	if (!list_empty(&rq_list)) {
 		blk_mq_sched_mark_restart(hctx);
-		blk_mq_dispatch_rq_list(hctx, &rq_list);
-	} else if (!e || !e->type->ops.mq.dispatch_request) {
+		did_work = blk_mq_dispatch_rq_list(hctx, &rq_list);
+	} else if (!has_sched_dispatch) {
 		blk_mq_flush_busy_ctxs(hctx, &rq_list);
 		blk_mq_dispatch_rq_list(hctx, &rq_list);
-	} else {
+	}
+
+	/*
+	 * We want to dispatch from the scheduler if we had no work left
+	 * on the dispatch list, OR if we did have work but weren't able
+	 * to make progress.
+	 */
+	if (!did_work && has_sched_dispatch) {
 		do {
 			struct request *rq;
 
@@ -234,31 +245,33 @@ void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_move_to_dispatch);
 
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio)
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+			    struct request **merged_request)
 {
 	struct request *rq;
-	int ret;
 
-	ret = elv_merge(q, &rq, bio);
-	if (ret == ELEVATOR_BACK_MERGE) {
+	switch (elv_merge(q, &rq, bio)) {
+	case ELEVATOR_BACK_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (bio_attempt_back_merge(q, rq, bio)) {
-			if (!attempt_back_merge(q, rq))
-				elv_merged_request(q, rq, ret);
-			return true;
-		}
-	} else if (ret == ELEVATOR_FRONT_MERGE) {
+		if (!bio_attempt_back_merge(q, rq, bio))
+			return false;
+		*merged_request = attempt_back_merge(q, rq);
+		if (!*merged_request)
+			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
+		return true;
+	case ELEVATOR_FRONT_MERGE:
 		if (!blk_mq_sched_allow_merge(q, rq, bio))
 			return false;
-		if (bio_attempt_front_merge(q, rq, bio)) {
-			if (!attempt_front_merge(q, rq))
-				elv_merged_request(q, rq, ret);
-			return true;
-		}
+		if (!bio_attempt_front_merge(q, rq, bio))
+			return false;
+		*merged_request = attempt_front_merge(q, rq);
+		if (!*merged_request)
+			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
+		return true;
+	default:
+		return false;
 	}
-
-	return false;
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
 
@@ -289,7 +302,8 @@ void blk_mq_sched_request_inserted(struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
 
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
+				       struct request *rq)
 {
 	if (rq->tag == -1) {
 		rq->rq_flags |= RQF_SORTED;
@@ -305,7 +319,6 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	spin_unlock(&hctx->lock);
 	return true;
 }
-EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 
 static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
@@ -347,7 +360,7 @@ static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(hctx, true);
 	} else
-		blk_mq_add_to_requeue_list(rq, true, true);
+		blk_mq_add_to_requeue_list(rq, false, true);
 }
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
@@ -363,6 +376,9 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		return;
 	}
 
+	if (e && blk_mq_sched_bypass_insert(hctx, rq))
+		goto run;
+
 	if (e && e->type->ops.mq.insert_requests) {
 		LIST_HEAD(list);
 
@@ -374,6 +390,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		spin_unlock(&ctx->lock);
 	}
 
+run:
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
 }
@@ -385,6 +402,23 @@ void blk_mq_sched_insert_requests(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
 	struct elevator_queue *e = hctx->queue->elevator;
 
+	if (e) {
+		struct request *rq, *next;
+
+		/*
+		 * We bypass requests that already have a driver tag assigned,
+		 * which should only be flushes. Flushes are only ever inserted
+		 * as single requests, so we shouldn't ever hit the
+		 * WARN_ON_ONCE() below (but let's handle it just in case).
+		 */
+		list_for_each_entry_safe(rq, next, list, queuelist) {
+			if (WARN_ON_ONCE(rq->tag != -1)) {
+				list_del_init(&rq->queuelist);
+				blk_mq_sched_bypass_insert(hctx, rq);
+			}
+		}
+	}
+
 	if (e && e->type->ops.mq.insert_requests)
 		e->type->ops.mq.insert_requests(hctx, list, false);
 	else
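`blk_mq_sched_try_merge()`'s new `merged_request` out-parameter carries the request a successful back/front merge absorbed, so the elevator can free it after dropping its own lock. A caller would look roughly like this (sketch modeled on an mq elevator's bio-merge hook; `struct sched_data` and `example_bio_merge` are hypothetical):

	static bool example_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
	{
	        struct request_queue *q = hctx->queue;
	        struct sched_data *sd = q->elevator->elevator_data;
	        struct request *free = NULL;
	        bool ret;

	        spin_lock(&sd->lock);
	        ret = blk_mq_sched_try_merge(q, bio, &free);
	        spin_unlock(&sd->lock);

	        if (free)                       /* absorbed request, lock dropped */
	                blk_mq_free_request(free);

	        return ret;
	}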
block/blk-mq-sched.h
@@ -15,8 +15,8 @@ struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bi
 void blk_mq_sched_put_request(struct request *rq);
 
 void blk_mq_sched_request_inserted(struct request *rq);
-bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
-bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
+bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
+		struct request **merged_request);
 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
@@ -49,12 +49,13 @@ blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
 }
 
 static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
-					   struct request *rq)
+					   struct request *rq,
+					   struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
 
 	if (e && e->type->ops.mq.get_rq_priv)
-		return e->type->ops.mq.get_rq_priv(q, rq);
+		return e->type->ops.mq.get_rq_priv(q, rq, bio);
 
 	return 0;
 }
block/blk-mq-sysfs.c
@@ -254,7 +254,7 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}
 
-	blk_mq_debugfs_unregister(q);
+	blk_mq_debugfs_unregister_hctxs(q);
 
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
block/blk-mq.c
@@ -199,13 +199,7 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	rq->special = NULL;
 	/* tag was already set */
 	rq->errors = 0;
-
-	rq->cmd = rq->__cmd;
-
 	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->resid_len = 0;
-	rq->sense = NULL;
 
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
@@ -487,10 +481,6 @@ void blk_mq_start_request(struct request *rq)
 
 	trace_block_rq_issue(q, rq);
 
-	rq->resid_len = blk_rq_bytes(rq);
-	if (unlikely(blk_bidi_rq(rq)))
-		rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
-
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		blk_stat_set_issue_time(&rq->issue_stat);
 		rq->rq_flags |= RQF_STATS;
@@ -773,7 +763,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 	int checked = 8;
 
 	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
-		int el_ret;
+		bool merged = false;
 
 		if (!checked--)
 			break;
@@ -781,26 +771,25 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
 		if (!blk_rq_merge_ok(rq, bio))
 			continue;
 
-		el_ret = blk_try_merge(rq, bio);
-		if (el_ret == ELEVATOR_NO_MERGE)
+		switch (blk_try_merge(rq, bio)) {
+		case ELEVATOR_BACK_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_back_merge(q, rq, bio);
+			break;
+		case ELEVATOR_FRONT_MERGE:
+			if (blk_mq_sched_allow_merge(q, rq, bio))
+				merged = bio_attempt_front_merge(q, rq, bio);
+			break;
+		case ELEVATOR_DISCARD_MERGE:
+			merged = bio_attempt_discard_merge(q, rq, bio);
+			break;
+		default:
 			continue;
-
-		if (!blk_mq_sched_allow_merge(q, rq, bio))
-			break;
-
-		if (el_ret == ELEVATOR_BACK_MERGE) {
-			if (bio_attempt_back_merge(q, rq, bio)) {
-				ctx->rq_merged++;
-				return true;
-			}
-			break;
-		} else if (el_ret == ELEVATOR_FRONT_MERGE) {
-			if (bio_attempt_front_merge(q, rq, bio)) {
-				ctx->rq_merged++;
-				return true;
-			}
-			break;
 		}
+
+		if (merged)
+			ctx->rq_merged++;
+		return merged;
 	}
 
 	return false;
@@ -1013,7 +1002,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 		blk_mq_run_hw_queue(hctx, true);
 	}
 
-	return ret != BLK_MQ_RQ_QUEUE_BUSY;
+	return queued != 0;
 }
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
@@ -1442,12 +1431,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	plug = current->plug;
@@ -1497,6 +1485,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1510,6 +1499,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
@@ -1565,12 +1555,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
-		blk_mq_put_ctx(data.ctx);
+		if (q->elevator)
+			goto elv_insert;
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(data.hctx, true);
-		goto done;
+		goto run_queue;
 	}
 
 	/*
@@ -1608,6 +1597,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	if (q->elevator) {
+elv_insert:
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
@@ -1621,6 +1611,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
+run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 
@@ -2637,10 +2628,14 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_realloc_hw_ctxs(set, q);
 
+		/*
+		 * Manually set the make_request_fn as blk_queue_make_request
+		 * resets a lot of the queue settings.
+		 */
 		if (q->nr_hw_queues > 1)
-			blk_queue_make_request(q, blk_mq_make_request);
+			q->make_request_fn = blk_mq_make_request;
 		else
-			blk_queue_make_request(q, blk_sq_make_request);
+			q->make_request_fn = blk_sq_make_request;
 
 		blk_mq_queue_reinit(q, cpu_online_mask);
 	}
@@ -2824,8 +2819,6 @@ void blk_mq_enable_hotplug(void)
 
 static int __init blk_mq_init(void)
 {
-	blk_mq_debugfs_init();
-
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
 
@@ -85,16 +85,11 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
  * debugfs helpers
  */
 #ifdef CONFIG_BLK_DEBUG_FS
-void blk_mq_debugfs_init(void);
 int blk_mq_debugfs_register(struct request_queue *q, const char *name);
 void blk_mq_debugfs_unregister(struct request_queue *q);
 int blk_mq_debugfs_register_hctxs(struct request_queue *q);
 void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
 #else
-static inline void blk_mq_debugfs_init(void)
-{
-}
-
 static inline int blk_mq_debugfs_register(struct request_queue *q,
 					  const char *name)
 {
@@ -88,6 +88,7 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 void blk_set_default_limits(struct queue_limits *lim)
 {
 	lim->max_segments = BLK_MAX_SEGMENTS;
+	lim->max_discard_segments = 1;
 	lim->max_integrity_segments = 0;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;

@@ -128,6 +129,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	/* Inherit limits from component devices */
 	lim->discard_zeroes_data = 1;
 	lim->max_segments = USHRT_MAX;
+	lim->max_discard_segments = 1;
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;

@@ -253,7 +255,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
 	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
 	limits->max_sectors = max_sectors;
-	q->backing_dev_info.io_pages = max_sectors >> (PAGE_SHIFT - 9);
+	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);

@@ -336,6 +338,22 @@ void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 }
 EXPORT_SYMBOL(blk_queue_max_segments);
 
+/**
+ * blk_queue_max_discard_segments - set max segments for discard requests
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    segments in a discard request.
+ **/
+void blk_queue_max_discard_segments(struct request_queue *q,
+		unsigned short max_segments)
+{
+	q->limits.max_discard_segments = max_segments;
+}
+EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
+
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
  * @q:  the request queue for the device
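For context, the default of 1 means a discard request is never built from multiple ranges; a driver whose hardware accepts a vectored discard raises the limit from its queue setup path. A minimal sketch, where the mydrv_init_queue() name and the limit of 64 segments are hypothetical:

	/* hypothetical driver setup: allow discontiguous discards to merge */
	static void mydrv_init_queue(struct request_queue *q)
	{
		blk_queue_max_discard_segments(q, 64);
	}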
@@ -553,6 +571,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 					    b->virt_boundary_mask);
 
 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
+	t->max_discard_segments = min_not_zero(t->max_discard_segments,
+					       b->max_discard_segments);
 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
 						 b->max_integrity_segments);
@@ -89,7 +89,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_ra_show(struct request_queue *q, char *page)
 {
-	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
+	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
 					(PAGE_SHIFT - 10);
 
 	return queue_var_show(ra_kb, (page));

@@ -104,7 +104,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;
 
-	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
 
 	return ret;
 }

@@ -121,6 +121,12 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
 	return queue_var_show(queue_max_segments(q), (page));
 }
 
+static ssize_t queue_max_discard_segments_show(struct request_queue *q,
+		char *page)
+{
+	return queue_var_show(queue_max_discard_segments(q), (page));
+}
+
 static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(q->limits.max_integrity_segments, (page));

@@ -236,7 +242,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	spin_lock_irq(q->queue_lock);
 	q->limits.max_sectors = max_sectors_kb << 1;
-	q->backing_dev_info.io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
+	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;

@@ -545,6 +551,11 @@ static struct queue_sysfs_entry queue_max_segments_entry = {
 	.show = queue_max_segments_show,
 };
 
+static struct queue_sysfs_entry queue_max_discard_segments_entry = {
+	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
+	.show = queue_max_discard_segments_show,
+};
+
 static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
 	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
 	.show = queue_max_integrity_segments_show,

@@ -697,6 +708,7 @@ static struct attribute *default_attrs[] = {
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
 	&queue_max_segments_entry.attr,
+	&queue_max_discard_segments_entry.attr,
 	&queue_max_integrity_segments_entry.attr,
 	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,

@@ -799,7 +811,7 @@ static void blk_release_queue(struct kobject *kobj)
 		container_of(kobj, struct request_queue, kobj);
 
 	wbt_exit(q);
-	bdi_exit(&q->backing_dev_info);
+	bdi_put(q->backing_dev_info);
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {

@@ -814,13 +826,19 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	if (!q->mq_ops)
+	if (!q->mq_ops) {
+		if (q->exit_rq_fn)
+			q->exit_rq_fn(q, q->fq->flush_rq);
 		blk_free_flush_queue(q->fq);
-	else
+	} else {
 		blk_mq_release(q);
+	}
 
 	blk_trace_shutdown(q);
 
+	if (q->mq_ops)
+		blk_mq_debugfs_unregister(q);
+
 	if (q->bio_split)
 		bioset_free(q->bio_split);

@@ -884,32 +902,36 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret)
 		return ret;
 
+	if (q->mq_ops)
+		blk_mq_register_dev(dev, q);
+
+	/* Prevent changes through sysfs until registration is completed. */
+	mutex_lock(&q->sysfs_lock);
+
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
 	if (ret < 0) {
 		blk_trace_remove_sysfs(dev);
-		return ret;
+		goto unlock;
 	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
-	if (q->mq_ops)
-		blk_mq_register_dev(dev, q);
-
 	blk_wb_init(q);
 
-	if (!q->request_fn)
-		return 0;
-
-	ret = elv_register_queue(q);
-	if (ret) {
-		kobject_uevent(&q->kobj, KOBJ_REMOVE);
-		kobject_del(&q->kobj);
-		blk_trace_remove_sysfs(dev);
-		kobject_put(&dev->kobj);
-		return ret;
+	if (q->request_fn || (q->mq_ops && q->elevator)) {
+		ret = elv_register_queue(q);
+		if (ret) {
+			kobject_uevent(&q->kobj, KOBJ_REMOVE);
+			kobject_del(&q->kobj);
+			blk_trace_remove_sysfs(dev);
+			kobject_put(&dev->kobj);
+			goto unlock;
+		}
 	}
 
-	return 0;
+	ret = 0;
+unlock:
+	mutex_unlock(&q->sysfs_lock);
+	return ret;
 }
 
 void blk_unregister_queue(struct gendisk *disk)

@@ -922,7 +944,7 @@ void blk_unregister_queue(struct gendisk *disk)
 	if (q->mq_ops)
 		blk_mq_unregister_dev(disk_to_dev(disk), q);
 
-	if (q->request_fn)
+	if (q->request_fn || (q->mq_ops && q->elevator))
 		elv_unregister_queue(q);
 
 	kobject_uevent(&q->kobj, KOBJ_REMOVE);
@@ -96,7 +96,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
  */
 static bool wb_recent_wait(struct rq_wb *rwb)
 {
-	struct bdi_writeback *wb = &rwb->queue->backing_dev_info.wb;
+	struct bdi_writeback *wb = &rwb->queue->backing_dev_info->wb;
 
 	return time_before(jiffies, wb->dirty_sleep + HZ);
 }

@@ -279,7 +279,7 @@ enum {
 static int __latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 	u64 thislat;
 
 	/*

@@ -339,7 +339,7 @@ static int latency_exceeded(struct rq_wb *rwb)
 static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
 {
-	struct backing_dev_info *bdi = &rwb->queue->backing_dev_info;
+	struct backing_dev_info *bdi = rwb->queue->backing_dev_info;
 
 	trace_wbt_step(bdi, msg, rwb->scale_step, rwb->cur_win_nsec,
 			rwb->wb_background, rwb->wb_normal, rwb->wb_max);

@@ -423,7 +423,7 @@ static void wb_timer_fn(unsigned long data)
 	status = latency_exceeded(rwb);
 
-	trace_wbt_timer(&rwb->queue->backing_dev_info, status, rwb->scale_step,
+	trace_wbt_timer(rwb->queue->backing_dev_info, status, rwb->scale_step,
 			inflight);
 
 	/*

block/blk.h
@@ -14,6 +14,10 @@
 /* Max future timer expiry for timeouts */
 #define BLK_MAX_TIMEOUT		(5 * HZ)
 
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *blk_debugfs_root;
+#endif
+
 struct blk_flush_queue {
 	unsigned int		flush_queue_delayed:1;
 	unsigned int		flush_pending_idx:1;

@@ -96,6 +100,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 			     struct bio *bio);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 			    struct bio *bio);
+bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
+		struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 			    unsigned int *request_count,
 			    struct request **same_queue_rq);

@@ -204,14 +210,14 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		     struct bio *bio);
 int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio);
-int attempt_back_merge(struct request_queue *q, struct request *rq);
-int attempt_front_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
+struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
 int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 				struct request *next);
 void blk_recalc_rq_segments(struct request *rq);
 void blk_rq_set_mixed_merge(struct request *rq);
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
-int blk_try_merge(struct request *rq, struct bio *bio);
+enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
 
 void blk_queue_congestion_threshold(struct request_queue *q);
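For reference, the enum elv_merge these prototypes now return replaces the old plain-int merge codes; a sketch of the definition this series is expected to add to include/linux/elevator.h, matching the values used by the merge switch statements above:

	/* sketch of the merge-type enum from include/linux/elevator.h */
	enum elv_merge {
		ELEVATOR_NO_MERGE	= 0,
		ELEVATOR_FRONT_MERGE	= 1,
		ELEVATOR_BACK_MERGE	= 2,
		ELEVATOR_DISCARD_MERGE	= 3,
	};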
@@ -249,7 +255,14 @@ static inline int blk_do_io_stat(struct request *rq)
 {
 	return rq->rq_disk &&
 	       (rq->rq_flags & RQF_IO_STAT) &&
-		(rq->cmd_type == REQ_TYPE_FS);
+		!blk_rq_is_passthrough(rq);
+}
+
+static inline void req_set_nomerge(struct request_queue *q, struct request *req)
+{
+	req->cmd_flags |= REQ_NOMERGE;
+	if (req == q->last_merge)
+		q->last_merge = NULL;
 }
 
 /*
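blk_rq_is_passthrough() is the test this series substitutes for the old cmd_type comparisons throughout the tree; a sketch of the helper and the op-based checks it builds on, as expected in include/linux/blkdev.h at this point:

	/* sketch: passthrough means a SCSI or driver-private op code */
	static inline bool blk_rq_is_scsi(struct request *rq)
	{
		return req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT;
	}

	static inline bool blk_rq_is_private(struct request *rq)
	{
		return req_op(rq) == REQ_OP_DRV_IN || req_op(rq) == REQ_OP_DRV_OUT;
	}

	static inline bool blk_rq_is_passthrough(struct request *rq)
	{
		return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
	}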
@@ -71,22 +71,24 @@ void bsg_job_done(struct bsg_job *job, int result,
 {
 	struct request *req = job->req;
 	struct request *rsp = req->next_rq;
+	struct scsi_request *rq = scsi_req(req);
 	int err;
 
 	err = job->req->errors = result;
 	if (err < 0)
 		/* we're only returning the result field in the reply */
-		job->req->sense_len = sizeof(u32);
+		rq->sense_len = sizeof(u32);
 	else
-		job->req->sense_len = job->reply_len;
+		rq->sense_len = job->reply_len;
 	/* we assume all request payload was transferred, residual == 0 */
-	req->resid_len = 0;
+	rq->resid_len = 0;
 
 	if (rsp) {
-		WARN_ON(reply_payload_rcv_len > rsp->resid_len);
+		WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);
 
 		/* set reply (bidi) residual */
-		rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
+		scsi_req(rsp)->resid_len -=
+			min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
 	}
 	blk_complete_request(req);
 }

@@ -113,6 +115,7 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 	if (!buf->sg_list)
 		return -ENOMEM;
 	sg_init_table(buf->sg_list, req->nr_phys_segments);
+	scsi_req(req)->resid_len = blk_rq_bytes(req);
 	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
 	buf->payload_len = blk_rq_bytes(req);
 	return 0;

@@ -127,6 +130,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
 	struct request_queue *q = req->q;
+	struct scsi_request *rq = scsi_req(req);
 	struct bsg_job *job;
 	int ret;

@@ -140,9 +144,9 @@ static int bsg_create_job(struct device *dev, struct request *req)
 	job->req = req;
 	if (q->bsg_job_size)
 		job->dd_data = (void *)&job[1];
-	job->request = req->cmd;
-	job->request_len = req->cmd_len;
-	job->reply = req->sense;
+	job->request = rq->cmd;
+	job->request_len = rq->cmd_len;
+	job->reply = rq->sense;
 	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
 						 * allocated */
 	if (req->bio) {

@@ -177,7 +181,7 @@ static int bsg_create_job(struct device *dev, struct request *req)
  *
  * Drivers/subsys should pass this to the queue init function.
  */
-void bsg_request_fn(struct request_queue *q)
+static void bsg_request_fn(struct request_queue *q)
 	__releases(q->queue_lock)
 	__acquires(q->queue_lock)
 {

@@ -214,24 +218,30 @@ void bsg_request_fn(struct request_queue *q)
 		put_device(dev);
 		spin_lock_irq(q->queue_lock);
 }
-EXPORT_SYMBOL_GPL(bsg_request_fn);
 
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
- * @q: request queue setup by caller
 * @name: device to give bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
- *
- * The caller should have setup the reuqest queue with bsg_request_fn
- * as the request_fn.
 */
-int bsg_setup_queue(struct device *dev, struct request_queue *q,
-		    char *name, bsg_job_fn *job_fn, int dd_job_size)
+struct request_queue *bsg_setup_queue(struct device *dev, char *name,
+		bsg_job_fn *job_fn, int dd_job_size)
 {
+	struct request_queue *q;
 	int ret;
 
+	q = blk_alloc_queue(GFP_KERNEL);
+	if (!q)
+		return ERR_PTR(-ENOMEM);
+	q->cmd_size = sizeof(struct scsi_request);
+	q->request_fn = bsg_request_fn;
+
+	ret = blk_init_allocated_queue(q);
+	if (ret)
+		goto out_cleanup_queue;
+
 	q->queuedata = dev;
 	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;

@@ -243,9 +253,12 @@ int bsg_setup_queue(struct device *dev, struct request_queue *q,
 	if (ret) {
 		printk(KERN_ERR "%s: bsg interface failed to "
 		       "initialize - register queue\n", dev->kobj.name);
-		return ret;
+		goto out_cleanup_queue;
 	}
 
-	return 0;
+	return q;
+out_cleanup_queue:
+	blk_cleanup_queue(q);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(bsg_setup_queue);
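With this signature change an LLD no longer allocates and initializes a queue itself before handing it to the bsg library; it just consumes the queue the helper returns. A minimal caller sketch, where the device name and the my_bsg_job_fn() handler are hypothetical:

	/* hypothetical caller of the new-style bsg_setup_queue() */
	struct request_queue *q;

	q = bsg_setup_queue(dev, "my_bsg", my_bsg_job_fn, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);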
block/bsg.c
@@ -85,7 +85,6 @@ struct bsg_command {
 	struct bio *bidi_bio;
 	int err;
 	struct sg_io_v4 hdr;
-	char sense[SCSI_SENSE_BUFFERSIZE];
 };
 
 static void bsg_free_command(struct bsg_command *bc)

@@ -140,18 +139,20 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 				struct sg_io_v4 *hdr, struct bsg_device *bd,
 				fmode_t has_write_perm)
 {
+	struct scsi_request *req = scsi_req(rq);
+
 	if (hdr->request_len > BLK_MAX_CDB) {
-		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
-		if (!rq->cmd)
+		req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
+		if (!req->cmd)
 			return -ENOMEM;
 	}
 
-	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
+	if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
 			   hdr->request_len))
 		return -EFAULT;
 
 	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
-		if (blk_verify_command(rq->cmd, has_write_perm))
+		if (blk_verify_command(req->cmd, has_write_perm))
 			return -EPERM;
 	} else if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;

@@ -159,7 +160,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 	/*
 	 * fill in request structure
 	 */
-	rq->cmd_len = hdr->request_len;
+	req->cmd_len = hdr->request_len;
 
 	rq->timeout = msecs_to_jiffies(hdr->timeout);
 	if (!rq->timeout)

@@ -176,7 +177,7 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
  * Check if sg_io_v4 from user is allowed and valid
  */
 static int
-bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
+bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
 {
 	int ret = 0;

@@ -197,7 +198,7 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
 		ret = -EINVAL;
 	}
 
-	*rw = hdr->dout_xfer_len ? WRITE : READ;
+	*op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
 	return ret;
 }

@@ -205,13 +206,12 @@ bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *rw)
 * map sg_io_v4 to a request.
 */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
-	    u8 *sense)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 {
 	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
-	int ret, rw;
-	unsigned int dxfer_len;
+	int ret;
+	unsigned int op, dxfer_len;
 	void __user *dxferp = NULL;
 	struct bsg_class_device *bcd = &q->bsg_dev;

@@ -226,36 +226,35 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
 		hdr->din_xfer_len);
 
-	ret = bsg_validate_sgv4_hdr(hdr, &rw);
+	ret = bsg_validate_sgv4_hdr(hdr, &op);
 	if (ret)
 		return ERR_PTR(ret);
 
 	/*
 	 * map scatter-gather elements separately and string them to request
 	 */
-	rq = blk_get_request(q, rw, GFP_KERNEL);
+	rq = blk_get_request(q, op, GFP_KERNEL);
 	if (IS_ERR(rq))
 		return rq;
-	blk_rq_set_block_pc(rq);
+	scsi_req_init(rq);
 
 	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
 	if (ret)
 		goto out;
 
-	if (rw == WRITE && hdr->din_xfer_len) {
+	if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
 		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
 			ret = -EOPNOTSUPP;
 			goto out;
 		}
 
-		next_rq = blk_get_request(q, READ, GFP_KERNEL);
+		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
 		if (IS_ERR(next_rq)) {
 			ret = PTR_ERR(next_rq);
 			next_rq = NULL;
 			goto out;
 		}
 		rq->next_rq = next_rq;
-		next_rq->cmd_type = rq->cmd_type;
 
 		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
 		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,

@@ -280,13 +279,9 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
 		goto out;
 	}
 
-	rq->sense = sense;
-	rq->sense_len = 0;
-
 	return rq;
 out:
-	if (rq->cmd != rq->__cmd)
-		kfree(rq->cmd);
+	scsi_req_free_cmd(scsi_req(rq));
 	blk_put_request(rq);
 	if (next_rq) {
 		blk_rq_unmap_user(next_rq->bio);

@@ -393,6 +388,7 @@ static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
 static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 				    struct bio *bio, struct bio *bidi_bio)
 {
+	struct scsi_request *req = scsi_req(rq);
 	int ret = 0;
 
 	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);

@@ -407,12 +403,12 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		hdr->info |= SG_INFO_CHECK;
 	hdr->response_len = 0;
 
-	if (rq->sense_len && hdr->response) {
+	if (req->sense_len && hdr->response) {
 		int len = min_t(unsigned int, hdr->max_response_len,
-					rq->sense_len);
+					req->sense_len);
 
 		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
-				   rq->sense, len);
+				   req->sense, len);
 		if (!ret)
 			hdr->response_len = len;
 		else

@@ -420,14 +416,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 	}
 
 	if (rq->next_rq) {
-		hdr->dout_resid = rq->resid_len;
-		hdr->din_resid = rq->next_rq->resid_len;
+		hdr->dout_resid = req->resid_len;
+		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
 		blk_rq_unmap_user(bidi_bio);
 		blk_put_request(rq->next_rq);
 	} else if (rq_data_dir(rq) == READ)
-		hdr->din_resid = rq->resid_len;
+		hdr->din_resid = req->resid_len;
 	else
-		hdr->dout_resid = rq->resid_len;
+		hdr->dout_resid = req->resid_len;
 
 	/*
 	 * If the request generated a negative error number, return it

@@ -439,8 +435,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 		ret = rq->errors;
 
 	blk_rq_unmap_user(bio);
-	if (rq->cmd != rq->__cmd)
-		kfree(rq->cmd);
+	scsi_req_free_cmd(req);
 	blk_put_request(rq);
 
 	return ret;

@@ -625,7 +620,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 		/*
 		 * get a request, fill in the blanks, and add to request queue
 		 */
-		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
+		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
 		if (IS_ERR(rq)) {
 			ret = PTR_ERR(rq);
 			rq = NULL;

@@ -911,12 +906,11 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		struct bio *bio, *bidi_bio = NULL;
 		struct sg_io_v4 hdr;
 		int at_head;
-		u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 			return -EFAULT;
 
-		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
+		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
 		if (IS_ERR(rq))
 			return PTR_ERR(rq);
@@ -2528,7 +2528,7 @@ static void cfq_remove_request(struct request *rq)
 	}
 }
 
-static int cfq_merge(struct request_queue *q, struct request **req,
+static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
 		     struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;

@@ -2544,7 +2544,7 @@ static int cfq_merge(struct request_queue *q, struct request **req,
 }
 
 static void cfq_merged_request(struct request_queue *q, struct request *req,
-			       int type)
+			       enum elv_merge type)
 {
 	if (type == ELEVATOR_FRONT_MERGE) {
 		struct cfq_queue *cfqq = RQ_CFQQ(req);
@@ -661,7 +661,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	struct block_device *bdev = inode->i_bdev;
 	struct gendisk *disk = bdev->bd_disk;
 	fmode_t mode = file->f_mode;
-	struct backing_dev_info *bdi;
 	loff_t size;
 	unsigned int max_sectors;

@@ -708,9 +707,8 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKFRAGET:
 		if (!arg)
 			return -EINVAL;
-		bdi = blk_get_backing_dev_info(bdev);
 		return compat_put_long(arg,
-			       (bdi->ra_pages * PAGE_SIZE) / 512);
+			(bdev->bd_bdi->ra_pages * PAGE_SIZE) / 512);
 	case BLKROGET: /* compatible */
 		return compat_put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */

@@ -728,8 +726,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 	case BLKFRASET:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EACCES;
-		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKGETSIZE:
 		size = i_size_read(bdev->bd_inode);
@@ -120,12 +120,11 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
 	deadline_del_rq_rb(dd, rq);
 }
 
-static int
+static enum elv_merge
 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct request *__rq;
-	int ret;
 
 	/*
 	 * check for front merge

@@ -138,20 +137,17 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		BUG_ON(sector != blk_rq_pos(__rq));
 
 		if (elv_bio_merge_ok(__rq, bio)) {
-			ret = ELEVATOR_FRONT_MERGE;
-			goto out;
+			*req = __rq;
+			return ELEVATOR_FRONT_MERGE;
 		}
 	}
 
 	return ELEVATOR_NO_MERGE;
-out:
-	*req = __rq;
-	return ret;
 }
 
 static void deadline_merged_request(struct request_queue *q,
-				    struct request *req, int type)
+			struct request *req, enum elv_merge type)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -428,11 +428,11 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(elv_dispatch_add_tail);
 
-int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
+enum elv_merge elv_merge(struct request_queue *q, struct request **req,
+		struct bio *bio)
 {
 	struct elevator_queue *e = q->elevator;
 	struct request *__rq;
-	int ret;
 
 	/*
 	 * Levels of merges:

@@ -447,7 +447,8 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	 * First try one-hit cache.
 	 */
 	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
-		ret = blk_try_merge(q->last_merge, bio);
+		enum elv_merge ret = blk_try_merge(q->last_merge, bio);
+
 		if (ret != ELEVATOR_NO_MERGE) {
 			*req = q->last_merge;
 			return ret;

@@ -515,7 +516,8 @@ bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
 	return ret;
 }
 
-void elv_merged_request(struct request_queue *q, struct request *rq, int type)
+void elv_merged_request(struct request_queue *q, struct request *rq,
+		enum elv_merge type)
 {
 	struct elevator_queue *e = q->elevator;

@@ -539,7 +541,7 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
 	if (e->uses_mq && e->type->ops.mq.requests_merged)
 		e->type->ops.mq.requests_merged(q, rq, next);
 	else if (e->type->ops.sq.elevator_merge_req_fn) {
-		next_sorted = next->rq_flags & RQF_SORTED;
+		next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
 		if (next_sorted)
 			e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
 	}

@@ -635,7 +637,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 	if (rq->rq_flags & RQF_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
-		if (rq->cmd_type == REQ_TYPE_FS) {
+		if (!blk_rq_is_passthrough(rq)) {
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}

@@ -677,7 +679,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		if (elv_attempt_insert_merge(q, rq))
 			break;
 	case ELEVATOR_INSERT_SORT:
-		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+		BUG_ON(blk_rq_is_passthrough(rq));
 		rq->rq_flags |= RQF_SORTED;
 		q->nr_sorted++;
 		if (rq_mergeable(rq)) {
@@ -572,6 +572,20 @@ static void register_disk(struct device *parent, struct gendisk *disk)
 	disk_part_iter_exit(&piter);
 }
 
+void put_disk_devt(struct disk_devt *disk_devt)
+{
+	if (disk_devt && atomic_dec_and_test(&disk_devt->count))
+		disk_devt->release(disk_devt);
+}
+EXPORT_SYMBOL(put_disk_devt);
+
+void get_disk_devt(struct disk_devt *disk_devt)
+{
+	if (disk_devt)
+		atomic_inc(&disk_devt->count);
+}
+EXPORT_SYMBOL(get_disk_devt);
+
 /**
  * device_add_disk - add partitioning information to kernel list
  * @parent: parent device for the disk
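The get/put helpers above imply a small refcounted wrapper around the disk's dev_t owner; a sketch of the structure they are assumed to operate on, matching the atomic count and release callback used here:

	/* sketch of the wrapper the helpers above manipulate */
	struct disk_devt {
		atomic_t count;
		void (*release)(struct disk_devt *disk_devt);
	};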
@@ -612,8 +626,15 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 	disk_alloc_events(disk);
 
+	/*
+	 * Take a reference on the devt and assign it to queue since it
+	 * must not be reallocated while the bdi is registered
+	 */
+	disk->queue->disk_devt = disk->disk_devt;
+	get_disk_devt(disk->disk_devt);
+
 	/* Register BDI before referencing it from bdev */
-	bdi = &disk->queue->backing_dev_info;
+	bdi = disk->queue->backing_dev_info;
 	bdi_register_owner(bdi, disk_to_dev(disk));
 
 	blk_register_region(disk_devt(disk), disk->minors, NULL,

@@ -648,6 +669,8 @@ void del_gendisk(struct gendisk *disk)
 	disk_part_iter_init(&piter, disk,
 			     DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
 	while ((part = disk_part_iter_next(&piter))) {
+		bdev_unhash_inode(MKDEV(disk->major,
+					disk->first_minor + part->partno));
 		invalidate_partition(disk, part->partno);
 		delete_partition(disk, part->partno);
 	}
@@ -505,7 +505,6 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 			unsigned long arg)
 {
-	struct backing_dev_info *bdi;
 	void __user *argp = (void __user *)arg;
 	loff_t size;
 	unsigned int max_sectors;

@@ -532,8 +531,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKFRAGET:
 		if (!arg)
 			return -EINVAL;
-		bdi = blk_get_backing_dev_info(bdev);
-		return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
+		return put_long(arg, (bdev->bd_bdi->ra_pages*PAGE_SIZE) / 512);
 	case BLKROGET:
 		return put_int(arg, bdev_read_only(bdev) != 0);
 	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */

@@ -560,8 +558,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case BLKFRASET:
 		if(!capable(CAP_SYS_ADMIN))
 			return -EACCES;
-		bdi = blk_get_backing_dev_info(bdev);
-		bdi->ra_pages = (arg * 512) / PAGE_SIZE;
+		bdev->bd_bdi->ra_pages = (arg * 512) / PAGE_SIZE;
 		return 0;
 	case BLKBSZSET:
 		return blkdev_bszset(bdev, mode, argp);
@@ -121,7 +121,7 @@ static void deadline_remove_request(struct request_queue *q, struct request *rq)
 }
 
 static void dd_request_merged(struct request_queue *q, struct request *req,
-			      int type)
+			      enum elv_merge type)
 {
 	struct deadline_data *dd = q->elevator->elevator_data;

@@ -371,12 +371,16 @@ static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
-	int ret;
+	struct request *free = NULL;
+	bool ret;
 
 	spin_lock(&dd->lock);
-	ret = blk_mq_sched_try_merge(q, bio);
+	ret = blk_mq_sched_try_merge(q, bio, &free);
 	spin_unlock(&dd->lock);
 
+	if (free)
+		blk_mq_free_request(free);
+
 	return ret;
 }

@@ -395,10 +399,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_sched_request_inserted(rq);
 
-	if (blk_mq_sched_bypass_insert(hctx, rq))
-		return;
-
-	if (at_head || rq->cmd_type != REQ_TYPE_FS) {
+	if (at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &dd->dispatch);
 		else
|
|||
static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
|
||||
struct sg_io_hdr *hdr, fmode_t mode)
|
||||
{
|
||||
if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
|
||||
if (copy_from_user(req->cmd, hdr->cmdp, hdr->cmd_len))
|
||||
return -EFAULT;
|
||||
if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
|
||||
if (blk_verify_command(req->cmd, mode & FMODE_WRITE))
|
||||
return -EPERM;
|
||||
|
||||
/*
|
||||
* fill in request structure
|
||||
*/
|
||||
rq->cmd_len = hdr->cmd_len;
|
||||
req->cmd_len = hdr->cmd_len;
|
||||
|
||||
rq->timeout = msecs_to_jiffies(hdr->timeout);
|
||||
if (!rq->timeout)
|
||||
|
@ -254,6 +256,7 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
|
|||
static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
|
||||
struct bio *bio)
|
||||
{
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
int r, ret = 0;
|
||||
|
||||
/*
|
||||
|
@ -267,13 +270,13 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
|
|||
hdr->info = 0;
|
||||
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
|
||||
hdr->info |= SG_INFO_CHECK;
|
||||
hdr->resid = rq->resid_len;
|
||||
hdr->resid = req->resid_len;
|
||||
hdr->sb_len_wr = 0;
|
||||
|
||||
if (rq->sense_len && hdr->sbp) {
|
||||
int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
|
||||
if (req->sense_len && hdr->sbp) {
|
||||
int len = min((unsigned int) hdr->mx_sb_len, req->sense_len);
|
||||
|
||||
if (!copy_to_user(hdr->sbp, rq->sense, len))
|
||||
if (!copy_to_user(hdr->sbp, req->sense, len))
|
||||
hdr->sb_len_wr = len;
|
||||
else
|
||||
ret = -EFAULT;
|
||||
|
@ -294,7 +297,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
|
|||
int writing = 0;
|
||||
int at_head = 0;
|
||||
struct request *rq;
|
||||
char sense[SCSI_SENSE_BUFFERSIZE];
|
||||
struct scsi_request *req;
|
||||
struct bio *bio;
|
||||
|
||||
if (hdr->interface_id != 'S')
|
||||
|
@ -318,14 +321,16 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
|
|||
at_head = 1;
|
||||
|
||||
ret = -ENOMEM;
|
||||
rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
|
||||
rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
|
||||
GFP_KERNEL);
|
||||
if (IS_ERR(rq))
|
||||
return PTR_ERR(rq);
|
||||
blk_rq_set_block_pc(rq);
|
||||
req = scsi_req(rq);
|
||||
scsi_req_init(rq);
|
||||
|
||||
if (hdr->cmd_len > BLK_MAX_CDB) {
|
||||
rq->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
|
||||
if (!rq->cmd)
|
||||
req->cmd = kzalloc(hdr->cmd_len, GFP_KERNEL);
|
||||
if (!req->cmd)
|
||||
goto out_put_request;
|
||||
}
|
||||
|
||||
|
@ -357,9 +362,6 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
|
|||
goto out_free_cdb;
|
||||
|
||||
bio = rq->bio;
|
||||
memset(sense, 0, sizeof(sense));
|
||||
rq->sense = sense;
|
||||
rq->sense_len = 0;
|
||||
rq->retries = 0;
|
||||
|
||||
start_time = jiffies;
|
||||
|
@ -375,8 +377,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
|
|||
ret = blk_complete_sghdr_rq(rq, hdr, bio);
|
||||
|
||||
out_free_cdb:
|
||||
if (rq->cmd != rq->__cmd)
|
||||
kfree(rq->cmd);
|
||||
scsi_req_free_cmd(req);
|
||||
out_put_request:
|
||||
blk_put_request(rq);
|
||||
return ret;
|
||||
|
@ -420,9 +421,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|||
struct scsi_ioctl_command __user *sic)
|
||||
{
|
||||
struct request *rq;
|
||||
struct scsi_request *req;
|
||||
int err;
|
||||
unsigned int in_len, out_len, bytes, opcode, cmdlen;
|
||||
char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
|
||||
char *buffer = NULL;
|
||||
|
||||
if (!sic)
|
||||
return -EINVAL;
|
||||
|
@ -447,12 +449,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|||
|
||||
}
|
||||
|
||||
rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
|
||||
rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
|
||||
__GFP_RECLAIM);
|
||||
if (IS_ERR(rq)) {
|
||||
err = PTR_ERR(rq);
|
||||
goto error_free_buffer;
|
||||
}
|
||||
blk_rq_set_block_pc(rq);
|
||||
req = scsi_req(rq);
|
||||
scsi_req_init(rq);
|
||||
|
||||
cmdlen = COMMAND_SIZE(opcode);
|
||||
|
||||
|
@ -460,14 +464,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|||
* get command and data to send to device, if any
|
||||
*/
|
||||
err = -EFAULT;
|
||||
rq->cmd_len = cmdlen;
|
||||
if (copy_from_user(rq->cmd, sic->data, cmdlen))
|
||||
req->cmd_len = cmdlen;
|
||||
if (copy_from_user(req->cmd, sic->data, cmdlen))
|
||||
goto error;
|
||||
|
||||
if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
|
||||
goto error;
|
||||
|
||||
err = blk_verify_command(rq->cmd, mode & FMODE_WRITE);
|
||||
err = blk_verify_command(req->cmd, mode & FMODE_WRITE);
|
||||
if (err)
|
||||
goto error;
|
||||
|
||||
|
@ -503,18 +507,14 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
|
|||
goto error;
|
||||
}
|
||||
|
||||
memset(sense, 0, sizeof(sense));
|
||||
rq->sense = sense;
|
||||
rq->sense_len = 0;
|
||||
|
||||
blk_execute_rq(q, disk, rq, 0);
|
||||
|
||||
err = rq->errors & 0xff; /* only 8 bit SCSI status */
|
||||
if (err) {
|
||||
if (rq->sense_len && rq->sense) {
|
||||
bytes = (OMAX_SB_LEN > rq->sense_len) ?
|
||||
rq->sense_len : OMAX_SB_LEN;
|
||||
if (copy_to_user(sic->data, rq->sense, bytes))
|
||||
if (req->sense_len && req->sense) {
|
||||
bytes = (OMAX_SB_LEN > req->sense_len) ?
|
||||
req->sense_len : OMAX_SB_LEN;
|
||||
if (copy_to_user(sic->data, req->sense, bytes))
|
||||
err = -EFAULT;
|
||||
}
|
||||
} else {
|
||||
|
@ -539,14 +539,14 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
|
|||
struct request *rq;
|
||||
int err;
|
||||
|
||||
rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
|
||||
rq = blk_get_request(q, REQ_OP_SCSI_OUT, __GFP_RECLAIM);
|
||||
if (IS_ERR(rq))
|
||||
return PTR_ERR(rq);
|
||||
blk_rq_set_block_pc(rq);
|
||||
scsi_req_init(rq);
|
||||
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
|
||||
rq->cmd[0] = cmd;
|
||||
rq->cmd[4] = data;
|
||||
rq->cmd_len = 6;
|
||||
scsi_req(rq)->cmd[0] = cmd;
|
||||
scsi_req(rq)->cmd[4] = data;
|
||||
scsi_req(rq)->cmd_len = 6;
|
||||
err = blk_execute_rq(q, bd_disk, rq, 0);
|
||||
blk_put_request(rq);
|
||||
|
||||
|
@ -743,6 +743,17 @@ int scsi_cmd_blk_ioctl(struct block_device *bd, fmode_t mode,
|
|||
}
|
||||
EXPORT_SYMBOL(scsi_cmd_blk_ioctl);
|
||||
|
||||
void scsi_req_init(struct request *rq)
|
||||
{
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
|
||||
memset(req->__cmd, 0, sizeof(req->__cmd));
|
||||
req->cmd = req->__cmd;
|
||||
req->cmd_len = BLK_MAX_CDB;
|
||||
req->sense_len = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(scsi_req_init);
|
||||
|
||||
static int __init blk_scsi_ioctl_init(void)
|
||||
{
|
||||
blk_set_cmd_filter_defaults(&blk_default_cmd_filter);
|
||||
|
|
|
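scsi_req() and scsi_req_init() assume struct scsi_request sits at the start of the per-request driver payload sized by q->cmd_size; a sketch of the accessor and the fields the conversions above rely on, as expected in the include/scsi/scsi_request.h introduced by this series:

	/* sketch: the SCSI passthrough state split out of struct request */
	struct scsi_request {
		unsigned char	__cmd[BLK_MAX_CDB];
		unsigned char	*cmd;
		unsigned short	cmd_len;
		unsigned int	sense_len;
		unsigned int	resid_len;	/* residual count */
		void		*sense;
	};

	static inline struct scsi_request *scsi_req(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);
	}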
@@ -1265,13 +1265,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
  */
 static int atapi_drain_needed(struct request *rq)
 {
-	if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
+	if (likely(!blk_rq_is_passthrough(rq)))
 		return 0;
 
 	if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
 		return 0;
 
-	return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+	return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
 }
 
 static int ata_scsi_dev_config(struct scsi_device *sdev,
@@ -69,6 +69,7 @@ config AMIGA_Z2RAM
 config GDROM
 	tristate "SEGA Dreamcast GD-ROM drive"
 	depends on SH_DREAMCAST
+	select BLK_SCSI_REQUEST # only for the generic cdrom code
 	help
 	  A standard SEGA Dreamcast comes with a modified CD ROM drive called a
 	  "GD-ROM" by SEGA to signify it is capable of reading special disks

@@ -114,6 +115,7 @@ config BLK_CPQ_CISS_DA
 	tristate "Compaq Smart Array 5xxx support"
 	depends on PCI
 	select CHECK_SIGNATURE
+	select BLK_SCSI_REQUEST
 	help
 	  This is the driver for Compaq Smart Array 5xxx controllers.
 	  Everyone using these boards should say Y here.

@@ -386,6 +388,7 @@ config BLK_DEV_RAM_DAX
 config CDROM_PKTCDVD
 	tristate "Packet writing on CD/DVD media (DEPRECATED)"
 	depends on !UML
+	select BLK_SCSI_REQUEST
 	help
 	  Note: This driver is deprecated and will be removed from the
 	  kernel in the near future!

@@ -501,6 +504,16 @@ config VIRTIO_BLK
 	  This is the virtual block driver for virtio.  It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
+config VIRTIO_BLK_SCSI
+	bool "SCSI passthrough request for the Virtio block driver"
+	depends on VIRTIO_BLK
+	select BLK_SCSI_REQUEST
+	---help---
+	  Enable support for SCSI passthrough (e.g. the SG_IO ioctl) on
+	  virtio-blk devices.  This is only supported for the legacy
+	  virtio protocol and not enabled by default by any hypervisor.
+	  Your probably want to virtio-scsi instead.
+
 config BLK_DEV_HD
 	bool "Very old hard disk (MFM/RLL/IDE) driver"
 	depends on HAVE_IDE
@@ -396,8 +396,8 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-	q->backing_dev_info.name = "aoe";
-	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
+	q->backing_dev_info->name = "aoe";
+	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;
@@ -52,6 +52,7 @@
 #include <scsi/scsi.h>
 #include <scsi/sg.h>
 #include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_request.h>
 #include <linux/cdrom.h>
 #include <linux/scatterlist.h>
 #include <linux/kthread.h>

@@ -1853,8 +1854,8 @@ static void cciss_softirq_done(struct request *rq)
 	dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
 
 	/* set the residual count for pc requests */
-	if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
-		rq->resid_len = c->err_info->ResidualCnt;
+	if (blk_rq_is_passthrough(rq))
+		scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
 
 	blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO);

@@ -1941,9 +1942,16 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
 static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
 				int drv_index)
 {
-	disk->queue = blk_init_queue(do_cciss_request, &h->lock);
+	disk->queue = blk_alloc_queue(GFP_KERNEL);
 	if (!disk->queue)
 		goto init_queue_failure;
+
+	disk->queue->cmd_size = sizeof(struct scsi_request);
+	disk->queue->request_fn = do_cciss_request;
+	disk->queue->queue_lock = &h->lock;
+	if (blk_init_allocated_queue(disk->queue) < 0)
+		goto cleanup_queue;
+
 	sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
 	disk->major = h->major;
 	disk->first_minor = drv_index << NWD_SHIFT;
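The cciss conversion above is the general recipe for any legacy request_fn driver that now needs scsi_request space in front of its requests: allocate the queue, set cmd_size and the callbacks, then finish with blk_init_allocated_queue(). A condensed sketch, with the mydrv_* names hypothetical:

	/* hypothetical request_fn driver adopting q->cmd_size */
	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;
	q->cmd_size = sizeof(struct scsi_request);	/* room for scsi_req(rq) */
	q->request_fn = mydrv_request_fn;
	q->queue_lock = &mydrv_lock;
	if (blk_init_allocated_queue(q) < 0) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}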
@@ -3075,7 +3083,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 		driver_byte = DRIVER_OK;
 	msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
 
-	if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC)
+	if (blk_rq_is_passthrough(cmd->rq))
 		host_byte = DID_PASSTHROUGH;
 	else
 		host_byte = DID_OK;

@@ -3084,7 +3092,7 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 			host_byte, driver_byte);
 
 	if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
-		if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)
+		if (!blk_rq_is_passthrough(cmd->rq))
 			dev_warn(&h->pdev->dev, "cmd %p "
 				"has SCSI Status 0x%x\n",
 				cmd, cmd->err_info->ScsiStatus);

@@ -3095,31 +3103,23 @@ static inline int evaluate_target_status(ctlr_info_t *h,
 	sense_key = 0xf & cmd->err_info->SenseInfo[2];
 	/* no status or recovered error */
 	if (((sense_key == 0x0) || (sense_key == 0x1)) &&
-	    (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC))
+	    !blk_rq_is_passthrough(cmd->rq))
 		error_value = 0;
 
 	if (check_for_unit_attention(h, cmd)) {
-		*retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC);
+		*retry_cmd = !blk_rq_is_passthrough(cmd->rq);
 		return 0;
 	}
 
 	/* Not SG_IO or similar? */
-	if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) {
+	if (!blk_rq_is_passthrough(cmd->rq)) {
 		if (error_value != 0)
 			dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
 				" sense key = 0x%x\n", cmd, sense_key);
 		return error_value;
 	}
 
-	/* SG_IO or similar, copy sense data back */
-	if (cmd->rq->sense) {
-		if (cmd->rq->sense_len > cmd->err_info->SenseLen)
-			cmd->rq->sense_len = cmd->err_info->SenseLen;
-		memcpy(cmd->rq->sense, cmd->err_info->SenseInfo,
-			cmd->rq->sense_len);
-	} else
-		cmd->rq->sense_len = 0;
-
+	scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
 	return error_value;
 }

@@ -3146,15 +3146,14 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
 		break;
 	case CMD_DATA_UNDERRUN:
-		if (cmd->rq->cmd_type == REQ_TYPE_FS) {
+		if (!blk_rq_is_passthrough(cmd->rq)) {
 			dev_warn(&h->pdev->dev, "cmd %p has"
 				" completed with data underrun "
 				"reported\n", cmd);
-			cmd->rq->resid_len = cmd->err_info->ResidualCnt;
 		}
 		break;
 	case CMD_DATA_OVERRUN:
-		if (cmd->rq->cmd_type == REQ_TYPE_FS)
+		if (!blk_rq_is_passthrough(cmd->rq))
 			dev_warn(&h->pdev->dev, "cciss: cmd %p has"
 				" completed with data overrun "
 				"reported\n", cmd);

@@ -3164,7 +3163,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"reported invalid\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_PROTOCOL_ERR:

@@ -3172,7 +3171,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"protocol error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_HARDWARE_ERR:

@@ -3180,7 +3179,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			" hardware error\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_CONNECTION_LOST:

@@ -3188,7 +3187,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"connection lost\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_ABORTED:

@@ -3196,7 +3195,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"aborted\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_ABORT_FAILED:

@@ -3204,7 +3203,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"abort failed\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_UNSOLICITED_ABORT:

@@ -3219,21 +3218,21 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			"%p retried too many times\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ABORT);
 		break;
 	case CMD_TIMEOUT:
 		dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	case CMD_UNABORTABLE:
 		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
 	default:

@@ -3242,7 +3241,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			cmd->err_info->CommandStatus);
 		rq->errors = make_status_bytes(SAM_STAT_GOOD,
 			cmd->err_info->CommandStatus, DRIVER_OK,
-			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
+			blk_rq_is_passthrough(cmd->rq) ?
 				DID_PASSTHROUGH : DID_ERROR);
 	}

@@ -3395,7 +3394,9 @@ static void do_cciss_request(struct request_queue *q)
 	c->Header.SGList = h->max_cmd_sgentries;
 	set_performant_mode(h, c);
 
-	if (likely(creq->cmd_type == REQ_TYPE_FS)) {
+	switch (req_op(creq)) {
+	case REQ_OP_READ:
+	case REQ_OP_WRITE:
 		if(h->cciss_read == CCISS_READ_10) {
 			c->Request.CDB[1] = 0;
 			c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */

@@ -3425,12 +3426,16 @@ static void do_cciss_request(struct request_queue *q)
 			c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
 			c->Request.CDB[14] = c->Request.CDB[15] = 0;
 		}
-	} else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) {
-		c->Request.CDBLen = creq->cmd_len;
-		memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB);
-	} else {
+		break;
+	case REQ_OP_SCSI_IN:
+	case REQ_OP_SCSI_OUT:
+		c->Request.CDBLen = scsi_req(creq)->cmd_len;
+		memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
+		scsi_req(creq)->sense = c->err_info->SenseInfo;
+		break;
+	default:
 		dev_warn(&h->pdev->dev, "bad request type %d\n",
-			creq->cmd_type);
+			creq->cmd_flags);
 		BUG();
 	}
@@ -2462,7 +2462,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 	if (get_ldev(device)) {
 		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(&q->backing_dev_info, bdi_bits);
+		r = bdi_congested(q->backing_dev_info, bdi_bits);
 		put_ldev(device);
 		if (r)
 			reason = 'b';

@@ -2834,8 +2834,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
 
-	q->backing_dev_info.congested_fn = drbd_congested;
-	q->backing_dev_info.congested_data = device;
+	q->backing_dev_info->congested_fn = drbd_congested;
+	q->backing_dev_info->congested_data = device;
 
 	blk_queue_make_request(q, drbd_make_request);
 	blk_queue_write_cache(q, true, true);
@@ -1328,11 +1328,13 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
 	if (b) {
 		blk_queue_stack_limits(q, b);
 
-		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
+		if (q->backing_dev_info->ra_pages !=
+		    b->backing_dev_info->ra_pages) {
 			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-				 q->backing_dev_info.ra_pages,
-				 b->backing_dev_info.ra_pages);
-			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
+				 q->backing_dev_info->ra_pages,
+				 b->backing_dev_info->ra_pages);
+			q->backing_dev_info->ra_pages =
+						b->backing_dev_info->ra_pages;
 		}
 	}
 	fixup_discard_if_not_supported(q);

@@ -3345,7 +3347,7 @@ static void device_to_statistics(struct device_statistics *s,
 	s->dev_disk_flags = md->flags;
 	q = bdev_get_queue(device->ldev->backing_bdev);
 	s->dev_lower_blocked =
-		bdi_congested(&q->backing_dev_info,
+		bdi_congested(q->backing_dev_info,
 			      (1 << WB_async_congested) |
 			      (1 << WB_sync_congested));
 	put_ldev(device);
@@ -288,7 +288,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 		} else {
 			/* reset device->congestion_reason */
-			bdi_rw_congested(&device->rq_queue->backing_dev_info);
+			bdi_rw_congested(device->rq_queue->backing_dev_info);
 
 			nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
 			wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
@ -938,7 +938,7 @@ static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t se
|
|||
|
||||
switch (rbm) {
|
||||
case RB_CONGESTED_REMOTE:
|
||||
bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
|
||||
bdi = device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
|
||||
return bdi_read_congested(bdi);
|
||||
case RB_LEAST_PENDING:
|
||||
return atomic_read(&device->local_cnt) >
|
||||
|
|
|
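Every drbd hunk above is the same mechanical change: struct request_queue no longer embeds its backing_dev_info but points at one, so each `&q->backing_dev_info` becomes `q->backing_dev_info`. In caller terms, a sketch (not drbd code):

#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* Old: bdi embedded in the queue, callers took its address:
 *	r = bdi_congested(&q->backing_dev_info, bdi_bits);
 * New: the queue holds a pointer and callers pass it straight through. */
static int queue_congested(struct request_queue *q, int bdi_bits)
{
	return bdi_congested(q->backing_dev_info, bdi_bits);
}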
@@ -2900,8 +2900,8 @@ static void do_fd_request(struct request_queue *q)
return;

if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld type=%x flags=%llx\n",
current_req, (long)blk_rq_pos(current_req), current_req->cmd_type,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
return;
@@ -626,30 +626,29 @@ static void hd_request(void)
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, bio_data(req->bio));
#endif
if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) {
case READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
&read_intr);
if (reset)
goto repeat;
break;
case WRITE:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
&write_intr);
if (reset)
goto repeat;
if (wait_DRQ()) {
bad_rw_intr();
goto repeat;
}
outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
hd_end_request_cur(-EIO);
break;

switch (req_op(req)) {
case REQ_OP_READ:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
&read_intr);
if (reset)
goto repeat;
break;
case REQ_OP_WRITE:
hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_WRITE,
&write_intr);
if (reset)
goto repeat;
if (wait_DRQ()) {
bad_rw_intr();
goto repeat;
}
outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
hd_end_request_cur(-EIO);
break;
}
}
@@ -670,15 +670,17 @@ static void mg_request_poll(struct request_queue *q)
break;
}

if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) {
mg_end_request_cur(host, -EIO);
continue;
}

if (rq_data_dir(host->req) == READ)
switch (req_op(host->req)) {
case REQ_OP_READ:
mg_read(host->req);
else
break;
case REQ_OP_WRITE:
mg_write(host->req);
break;
default:
mg_end_request_cur(host, -EIO);
break;
}
}
}

@@ -687,13 +689,15 @@ static unsigned int mg_issue_req(struct request *req,
unsigned int sect_num,
unsigned int sect_cnt)
{
if (rq_data_dir(req) == READ) {
switch (req_op(host->req)) {
case REQ_OP_READ:
if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
} else {
break;
case REQ_OP_WRITE:
/* TODO : handler */
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)

@@ -712,6 +716,10 @@ static unsigned int mg_issue_req(struct request *req,
mod_timer(&host->timer, jiffies + 3 * HZ);
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
break;
default:
mg_end_request_cur(host, -EIO);
break;
}
return MG_ERR_NONE;
}

@@ -753,11 +761,6 @@ static void mg_request(struct request_queue *q)
continue;
}

if (unlikely(req->cmd_type != REQ_TYPE_FS)) {
mg_end_request_cur(host, -EIO);
continue;
}

if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
}
@@ -41,6 +41,9 @@

#include <linux/nbd.h>

static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);

struct nbd_sock {
struct socket *sock;
struct mutex tx_lock;

@@ -89,8 +92,9 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;
static int max_part;
static struct workqueue_struct *recv_workqueue;
static int part_shift;

static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{

@@ -193,13 +197,6 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
set_bit(NBD_TIMEDOUT, &nbd->runtime_flags);
req->errors++;

/*
* If our disconnect packet times out then we're already holding the
* config_lock and could deadlock here, so just set an error and return,
* we'll handle shutting everything down later.
*/
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
return BLK_EH_HANDLED;
mutex_lock(&nbd->config_lock);
sock_shutdown(nbd);
mutex_unlock(&nbd->config_lock);

@@ -278,14 +275,29 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 type;
u32 tag = blk_mq_unique_tag(req);

if (req_op(req) == REQ_OP_DISCARD)
switch (req_op(req)) {
case REQ_OP_DISCARD:
type = NBD_CMD_TRIM;
else if (req_op(req) == REQ_OP_FLUSH)
break;
case REQ_OP_FLUSH:
type = NBD_CMD_FLUSH;
else if (rq_data_dir(req) == WRITE)
break;
case REQ_OP_WRITE:
type = NBD_CMD_WRITE;
else
break;
case REQ_OP_READ:
type = NBD_CMD_READ;
break;
default:
return -EIO;
}

if (rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
return -EIO;
}

memset(&request, 0, sizeof(request));
request.magic = htonl(NBD_REQUEST_MAGIC);

@@ -510,18 +522,6 @@ static void nbd_handle_cmd(struct nbd_cmd *cmd, int index)
goto error_out;
}

if (req->cmd_type != REQ_TYPE_FS &&
req->cmd_type != REQ_TYPE_DRV_PRIV)
goto error_out;

if (req->cmd_type == REQ_TYPE_FS &&
rq_data_dir(req) == WRITE &&
(nbd->flags & NBD_FLAG_READ_ONLY)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Write on read-only\n");
goto error_out;
}

req->errors = 0;

nsock = nbd->socks[index];

@@ -785,7 +785,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
INIT_WORK(&args[i].work, recv_work);
args[i].nbd = nbd;
args[i].index = i;
queue_work(system_long_wq, &args[i].work);
queue_work(recv_workqueue, &args[i].work);
}
wait_event_interruptible(nbd->recv_wq,
atomic_read(&nbd->recv_threads) == 0);

@@ -996,6 +996,103 @@ static struct blk_mq_ops nbd_mq_ops = {
.timeout = nbd_xmit_timeout,
};

static void nbd_dev_remove(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
nbd->magic = 0;
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
blk_mq_free_tag_set(&nbd->tag_set);
put_disk(disk);
}
kfree(nbd);
}

static int nbd_dev_add(int index)
{
struct nbd_device *nbd;
struct gendisk *disk;
struct request_queue *q;
int err = -ENOMEM;

nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
if (!nbd)
goto out;

disk = alloc_disk(1 << part_shift);
if (!disk)
goto out_free_nbd;

if (index >= 0) {
err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
GFP_KERNEL);
if (err == -ENOSPC)
err = -EEXIST;
} else {
err = idr_alloc(&nbd_index_idr, nbd, 0, 0, GFP_KERNEL);
if (err >= 0)
index = err;
}
if (err < 0)
goto out_free_disk;

nbd->disk = disk;
nbd->tag_set.ops = &nbd_mq_ops;
nbd->tag_set.nr_hw_queues = 1;
nbd->tag_set.queue_depth = 128;
nbd->tag_set.numa_node = NUMA_NO_NODE;
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;

err = blk_mq_alloc_tag_set(&nbd->tag_set);
if (err)
goto out_free_idr;

q = blk_mq_init_queue(&nbd->tag_set);
if (IS_ERR(q)) {
err = PTR_ERR(q);
goto out_free_tags;
}
disk->queue = q;

/*
* Tell the block layer that we are not a rotational device
*/
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512;
blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;

nbd->magic = NBD_MAGIC;
mutex_init(&nbd->config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = index << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
init_waitqueue_head(&nbd->recv_wq);
nbd_reset(nbd);
add_disk(disk);
return index;

out_free_tags:
blk_mq_free_tag_set(&nbd->tag_set);
out_free_idr:
idr_remove(&nbd_index_idr, index);
out_free_disk:
put_disk(disk);
out_free_nbd:
kfree(nbd);
out:
return err;
}

/*
* And here should be modules and kernel interface
* (Just smiley confuses emacs :-)

@@ -1003,9 +1100,7 @@ static struct blk_mq_ops nbd_mq_ops = {

static int __init nbd_init(void)
{
int err = -ENOMEM;
int i;
int part_shift;

BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

@@ -1034,111 +1129,38 @@ static int __init nbd_init(void)

if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;

nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
if (!nbd_dev)
recv_workqueue = alloc_workqueue("knbd-recv",
WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!recv_workqueue)
return -ENOMEM;

for (i = 0; i < nbds_max; i++) {
struct request_queue *q;
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
goto out;
nbd_dev[i].disk = disk;

nbd_dev[i].tag_set.ops = &nbd_mq_ops;
nbd_dev[i].tag_set.nr_hw_queues = 1;
nbd_dev[i].tag_set.queue_depth = 128;
nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE;
nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd);
nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING;
nbd_dev[i].tag_set.driver_data = &nbd_dev[i];

err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set);
if (err) {
put_disk(disk);
goto out;
}

/*
* The new linux 2.5 block layer implementation requires
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
q = blk_mq_init_queue(&nbd_dev[i].tag_set);
if (IS_ERR(q)) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
disk->queue = q;

/*
* Tell the block layer that we are not a rotational device
*/
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
disk->queue->limits.discard_granularity = 512;
blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
disk->queue->limits.discard_zeroes_data = 0;
blk_queue_max_hw_sectors(disk->queue, 65536);
disk->queue->limits.max_sectors = 256;
}

if (register_blkdev(NBD_MAJOR, "nbd")) {
err = -EIO;
goto out;
}

printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
if (register_blkdev(NBD_MAJOR, "nbd"))
return -EIO;

nbd_dbg_init();

for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = NBD_MAGIC;
mutex_init(&nbd_dev[i].config_lock);
disk->major = NBD_MAJOR;
disk->first_minor = i << part_shift;
disk->fops = &nbd_fops;
disk->private_data = &nbd_dev[i];
sprintf(disk->disk_name, "nbd%d", i);
init_waitqueue_head(&nbd_dev[i].recv_wq);
nbd_reset(&nbd_dev[i]);
add_disk(disk);
}

mutex_lock(&nbd_index_mutex);
for (i = 0; i < nbds_max; i++)
nbd_dev_add(i);
mutex_unlock(&nbd_index_mutex);
return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
struct nbd_device *nbd = ptr;
nbd_dev_remove(nbd);
return 0;
out:
while (i--) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
blk_cleanup_queue(nbd_dev[i].disk->queue);
put_disk(nbd_dev[i].disk);
}
kfree(nbd_dev);
return err;
}

static void __exit nbd_cleanup(void)
{
int i;

nbd_dbg_close();

for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = nbd_dev[i].disk;
nbd_dev[i].magic = 0;
if (disk) {
del_gendisk(disk);
blk_cleanup_queue(disk->queue);
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
}
}
idr_for_each(&nbd_index_idr, &nbd_exit_cb, NULL);
idr_destroy(&nbd_index_idr);
destroy_workqueue(recv_workqueue);
unregister_blkdev(NBD_MAJOR, "nbd");
kfree(nbd_dev);
printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
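nbd drops the fixed, kcalloc'ed nbd_dev[] array in favor of per-device allocation tracked in an IDR, which is what makes the nbd_dev_add()/nbd_dev_remove() pair and the idr_for_each() teardown above possible. The IDR bookkeeping in isolation, as a sketch (struct mydev is a stand-in):

#include <linux/idr.h>
#include <linux/slab.h>

struct mydev { int index; };

static DEFINE_IDR(dev_idr);

static int dev_add(struct mydev *dev, int index)
{
	/* index >= 0 claims exactly that slot; index < 0 takes the first free one */
	if (index >= 0)
		return idr_alloc(&dev_idr, dev, index, index + 1, GFP_KERNEL);
	return idr_alloc(&dev_idr, dev, 0, 0, GFP_KERNEL);
}

static int dev_remove_cb(int id, void *ptr, void *data)
{
	kfree(ptr);	/* tear down one device */
	return 0;
}

static void dev_remove_all(void)
{
	idr_for_each(&dev_idr, dev_remove_cb, NULL);
	idr_destroy(&dev_idr);
}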
@@ -432,11 +432,11 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
struct request *rq;
struct bio *bio = rqd->bio;

rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
rq = blk_mq_alloc_request(q,
op_is_write(bio_op(bio)) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return -ENOMEM;

rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->__sector = bio->bi_iter.bi_sector;
rq->ioprio = bio_prio(bio);
@@ -308,12 +308,6 @@ static void osdblk_rq_fn(struct request_queue *q)
if (!rq)
break;

/* filter out block requests we don't understand */
if (rq->cmd_type != REQ_TYPE_FS) {
blk_end_request_all(rq, 0);
continue;
}

/* deduce our operation (read, write, flush) */
/* I wish the block layer simplified cmd_type/cmd_flags/cmd[]
* into a clearly defined set of RPC commands:
@@ -25,6 +25,7 @@ config PARIDE_PD
config PARIDE_PCD
tristate "Parallel port ATAPI CD-ROMs"
depends on PARIDE
select BLK_SCSI_REQUEST # only for the generic cdrom code
---help---
This option enables the high-level driver for ATAPI CD-ROM devices
connected through a parallel port. If you chose to build PARIDE
@@ -439,18 +439,16 @@ static int pd_retries = 0; /* i/o error retry count */
static int pd_block; /* address of next requested block */
static int pd_count; /* number of blocks still to do */
static int pd_run; /* sectors in current cluster */
static int pd_cmd; /* current command READ/WRITE */
static char *pd_buf; /* buffer for request in progress */

static enum action do_pd_io_start(void)
{
if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) {
switch (req_op(pd_req)) {
case REQ_OP_DRV_IN:
phase = pd_special;
return pd_special();
}

pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
case REQ_OP_READ:
case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))

@@ -458,7 +456,7 @@ static enum action do_pd_io_start(void)
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
if (pd_cmd == READ)
if (req_op(pd_req) == REQ_OP_READ)
return do_pd_read_start();
else
return do_pd_write_start();

@@ -723,11 +721,10 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
int err = 0;

rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);

rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq->special = func;

err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
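The pd hunks, like null_blk and carm elsewhere in this series and virtio_blk below, pick up the other half of the interface change: blk_get_request() now takes the request operation instead of a READ/WRITE direction, so driver-private commands are allocated as REQ_OP_DRV_IN/OUT and the manual cmd_type fixup disappears. Roughly, as a sketch:

#include <linux/blkdev.h>

/* Old:	rq = blk_get_request(q, READ, __GFP_RECLAIM);
 *	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 * New: the opcode names the request kind at allocation time. */
static struct request *get_drv_private_rq(struct request_queue *q, bool to_dev)
{
	return blk_get_request(q, to_dev ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			       __GFP_RECLAIM);
}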
@@ -704,10 +704,10 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
int ret = 0;

rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
WRITE : READ, __GFP_RECLAIM);
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(rq))
return PTR_ERR(rq);
blk_rq_set_block_pc(rq);
scsi_req_init(rq);

if (cgc->buflen) {
ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,

@@ -716,8 +716,8 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
goto out;
}

rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);

rq->timeout = 60*HZ;
if (cgc->quiet)

@@ -1243,7 +1243,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
&& pd->bio_queue_size <= pd->write_congestion_off);
spin_unlock(&pd->lock);
if (wakeup) {
clear_bdi_congested(&pd->disk->queue->backing_dev_info,
clear_bdi_congested(pd->disk->queue->backing_dev_info,
BLK_RW_ASYNC);
}

@@ -2370,7 +2370,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC);
do {
spin_unlock(&pd->lock);
congestion_wait(BLK_RW_ASYNC, HZ);
@@ -196,16 +196,19 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);

while ((req = blk_fetch_request(q))) {
if (req_op(req) == REQ_OP_FLUSH) {
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (ps3disk_submit_flush_request(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FS) {
return;
break;
case REQ_OP_READ:
case REQ_OP_WRITE:
if (ps3disk_submit_request_sg(dev, req))
break;
} else {
return;
break;
default:
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
__blk_end_request_all(req, -EIO);
continue;
}
}
}
@@ -4099,20 +4099,22 @@ static void rbd_queue_workfn(struct work_struct *work)
bool must_be_locked;
int result;

if (rq->cmd_type != REQ_TYPE_FS) {
dout("%s: non-fs request type %d\n", __func__,
(int) rq->cmd_type);
switch (req_op(rq)) {
case REQ_OP_DISCARD:
op_type = OBJ_OP_DISCARD;
break;
case REQ_OP_WRITE:
op_type = OBJ_OP_WRITE;
break;
case REQ_OP_READ:
op_type = OBJ_OP_READ;
break;
default:
dout("%s: non-fs request type %d\n", __func__, req_op(rq));
result = -EIO;
goto err;
}

if (req_op(rq) == REQ_OP_DISCARD)
op_type = OBJ_OP_DISCARD;
else if (req_op(rq) == REQ_OP_WRITE)
op_type = OBJ_OP_WRITE;
else
op_type = OBJ_OP_READ;

/* Ignore/skip any zero-length requests */

if (!length) {

@@ -4524,7 +4526,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.discard_zeroes_data = 1;

if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;

disk->queue = q;
@@ -1204,10 +1204,11 @@ static void skd_complete_special(struct skd_device *skdev,
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
uint cmd_in, ulong arg)
{
int rc = 0;
static const int sg_version_num = 30527;
int rc = 0, timeout;
struct gendisk *disk = bdev->bd_disk;
struct skd_device *skdev = disk->private_data;
void __user *p = (void *)arg;
int __user *p = (int __user *)arg;

pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
skdev->name, __func__, __LINE__,

@@ -1218,12 +1219,18 @@ static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,

switch (cmd_in) {
case SG_SET_TIMEOUT:
rc = get_user(timeout, p);
if (!rc)
disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
break;
case SG_GET_TIMEOUT:
rc = jiffies_to_clock_t(disk->queue->sg_timeout);
break;
case SG_GET_VERSION_NUM:
rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
rc = put_user(sg_version_num, p);
break;
case SG_IO:
rc = skd_ioctl_sg_io(skdev, mode, p);
rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
break;

default:
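With scsi_ioctl.c now compiled only under CONFIG_BLK_SCSI_REQUEST, skd stops routing the simple SG ioctls through scsi_cmd_ioctl() and answers them itself; the two timeout ioctls reduce to a clock_t/jiffies conversion on the queue's sg_timeout field. A sketch of that reduced handler, assuming the 4.11-era request_queue layout:

#include <linux/blkdev.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <scsi/sg.h>

static int sg_timeout_ioctl(struct request_queue *q, unsigned int cmd,
			    int __user *p)
{
	int timeout;

	switch (cmd) {
	case SG_SET_TIMEOUT:
		if (get_user(timeout, p))
			return -EFAULT;
		q->sg_timeout = clock_t_to_jiffies(timeout);
		return 0;
	case SG_GET_TIMEOUT:
		/* returned as the ioctl's value, matching the SG interface */
		return jiffies_to_clock_t(q->sg_timeout);
	default:
		return -ENOTTY;
	}
}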
@@ -567,7 +567,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
if (!crq)
return NULL;

rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, GFP_KERNEL);
if (IS_ERR(rq)) {
spin_lock_irqsave(&host->lock, flags);
carm_put_request(host, crq);

@@ -620,7 +620,6 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);

DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);

@@ -661,7 +660,6 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;

DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
crq->rq->cmd_type = REQ_TYPE_DRV_PRIV;
crq->rq->special = crq;
blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
@@ -52,11 +52,13 @@ struct virtio_blk {
};

struct virtblk_req {
struct request *req;
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
u8 status;
#ifdef CONFIG_VIRTIO_BLK_SCSI
struct scsi_request sreq; /* for SCSI passthrough, must be first */
u8 sense[SCSI_SENSE_BUFFERSIZE];
struct virtio_scsi_inhdr in_hdr;
#endif
struct virtio_blk_outhdr out_hdr;
u8 status;
struct scatterlist sg[];
};

@@ -72,28 +74,23 @@ static inline int virtblk_result(struct virtblk_req *vbr)
}
}

static int __virtblk_add_req(struct virtqueue *vq,
struct virtblk_req *vbr,
struct scatterlist *data_sg,
bool have_data)
/*
* If this is a packet command we need a couple of additional headers. Behind
* the normal outhdr we put a segment with the scsi command block, and before
* the normal inhdr we put the sense data and the inhdr with additional status
* information.
*/
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
unsigned int num_out = 0, num_in = 0;
__virtio32 type = vbr->out_hdr.type & ~cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);

sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;

/*
* If this is a packet command we need a couple of additional headers.
* Behind the normal outhdr we put a segment with the scsi command
* block, and before the normal inhdr we put the sense data and the
* inhdr with additional status information.
*/
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
sgs[num_out++] = &cmd;
}
sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
sgs[num_out++] = &cmd;

if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))

@@ -102,12 +99,69 @@ static int __virtblk_add_req(struct virtqueue *vq,
sgs[num_out + num_in++] = data_sg;
}

if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
sg_init_one(&status, &vbr->status, sizeof(vbr->status));
sgs[num_out + num_in++] = &status;

return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_reques_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
struct scsi_request *sreq = &vbr->sreq;

sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;

/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;

return scsi_cmd_blk_ioctl(bdev, mode, cmd,
(void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
struct virtblk_req *vbr, struct scatterlist *data_sg,
bool have_data)
{
return -EIO;
}
static inline void virtblk_scsi_reques_done(struct request *req)
{
}
#define virtblk_ioctl NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
struct scatterlist *data_sg, bool have_data)
{
struct scatterlist hdr, status, *sgs[3];
unsigned int num_out = 0, num_in = 0;

sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
sgs[num_out++] = &hdr;

if (have_data) {
if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
sgs[num_out++] = data_sg;
else
sgs[num_out + num_in++] = data_sg;
}

sg_init_one(&status, &vbr->status, sizeof(vbr->status));

@@ -119,15 +173,16 @@ static int __virtblk_add_req(struct virtqueue *vq,
static inline void virtblk_request_done(struct request *req)
{
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
struct virtio_blk *vblk = req->q->queuedata;
int error = virtblk_result(vbr);

if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
} else if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
switch (req_op(req)) {
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
virtblk_scsi_reques_done(req);
break;
case REQ_OP_DRV_IN:
req->errors = (error != 0);
break;
}

blk_mq_end_request(req, error);

@@ -146,7 +201,9 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
blk_mq_complete_request(vbr->req, vbr->req->errors);
struct request *req = blk_mq_rq_from_pdu(vbr);

blk_mq_complete_request(req, req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))

@@ -170,49 +227,50 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
int qid = hctx->queue_num;
int err;
bool notify = false;
u32 type;

BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

vbr->req = req;
if (req_op(req) == REQ_OP_FLUSH) {
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
} else {
switch (req->cmd_type) {
case REQ_TYPE_FS:
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, blk_rq_pos(vbr->req));
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_BLOCK_PC:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_SCSI_CMD);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
case REQ_TYPE_DRV_PRIV:
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
break;
default:
/* We don't put anything else in the queue. */
BUG();
}
switch (req_op(req)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
type = 0;
break;
case REQ_OP_FLUSH:
type = VIRTIO_BLK_T_FLUSH;
break;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
type = VIRTIO_BLK_T_SCSI_CMD;
break;
case REQ_OP_DRV_IN:
type = VIRTIO_BLK_T_GET_ID;
break;
default:
WARN_ON_ONCE(1);
return BLK_MQ_RQ_QUEUE_ERROR;
}

vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
vbr->out_hdr.sector = type ?
0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

blk_mq_start_request(req);

num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
if (num) {
if (rq_data_dir(vbr->req) == WRITE)
if (rq_data_dir(req) == WRITE)
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
else
vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
}

spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
blk_mq_stop_hw_queue(hctx);

@@ -242,10 +300,9 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;

req = blk_get_request(q, READ, GFP_KERNEL);
req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
if (IS_ERR(req))
return PTR_ERR(req);
req->cmd_type = REQ_TYPE_DRV_PRIV;

err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (err)

@@ -257,22 +314,6 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long data)
{
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;

/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;

return scsi_cmd_blk_ioctl(bdev, mode, cmd,
(void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{

@@ -538,6 +579,9 @@ static int virtblk_init_request(void *data, struct request *rq,
struct virtio_blk *vblk = data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
vbr->sreq.sense = vbr->sense;
#endif
sg_init_table(vbr->sg, vblk->sg_elems);
return 0;
}

@@ -821,7 +865,10 @@ static const struct virtio_device_id id_table[] = {

static unsigned int features_legacy[] = {
VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
VIRTIO_BLK_F_SCSI,
#endif
VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
VIRTIO_BLK_F_MQ,
}
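The virtio_queue_rq() rework above centralizes the request-kind mapping in one req_op() switch before filling out_hdr. Pulled out as a helper, the mapping reads as follows (a sketch of the same logic, not the driver's actual factoring):

#include <linux/blk-mq.h>
#include <linux/virtio_blk.h>

static int virtblk_op_to_type(struct request *req, u32 *type)
{
	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		*type = 0;			/* direction bit ORed in later */
		break;
	case REQ_OP_FLUSH:
		*type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		*type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		*type = VIRTIO_BLK_T_GET_ID;	/* internal id probe */
		break;
	default:
		return -EIO;
	}
	return 0;
}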
@@ -865,7 +865,7 @@ static inline void flush_requests(struct blkfront_ring_info *rinfo)
static inline bool blkif_request_flush_invalid(struct request *req,
struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
return (blk_rq_is_passthrough(req) ||
((req_op(req) == REQ_OP_FLUSH) &&
!info->feature_flush) ||
((req->cmd_flags & REQ_FUA) &&
@@ -468,7 +468,7 @@ static struct request *ace_get_next_request(struct request_queue *q)
struct request *req;

while ((req = blk_peek_request(q)) != NULL) {
if (req->cmd_type == REQ_TYPE_FS)
if (!blk_rq_is_passthrough(req))
break;
blk_start_request(req);
__blk_end_request_all(req, -EIO);
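xen-blkfront and xsysace above (and the ide hunks below) replace open-coded `cmd_type != REQ_TYPE_FS` tests with the new blk_rq_is_passthrough() helper, which is true exactly for the SCSI and driver-private ops. The xsysace loop shape, standalone, as a sketch:

#include <linux/blkdev.h>

/* Hand back the next filesystem request; fail anything passthrough,
 * which this driver cannot interpret. */
static struct request *next_fs_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_peek_request(q)) != NULL) {
		if (!blk_rq_is_passthrough(req))
			return req;
		blk_start_request(req);
		__blk_end_request_all(req, -EIO);
	}
	return NULL;
}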
@@ -117,7 +117,7 @@ static void zram_revalidate_disk(struct zram *zram)
{
revalidate_disk(zram->disk);
/* revalidate_disk reset the BDI_CAP_STABLE_WRITES so set again */
zram->disk->queue->backing_dev_info.capabilities |=
zram->disk->queue->backing_dev_info->capabilities |=
BDI_CAP_STABLE_WRITES;
}
@@ -281,8 +281,8 @@
#include <linux/fcntl.h>
#include <linux/blkdev.h>
#include <linux/times.h>

#include <linux/uaccess.h>
#include <scsi/scsi_request.h>

/* used to tell the module to turn on full debugging messages */
static bool debug;

@@ -2170,6 +2170,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
{
struct request_queue *q = cdi->disk->queue;
struct request *rq;
struct scsi_request *req;
struct bio *bio;
unsigned int len;
int nr, ret = 0;

@@ -2188,12 +2189,13 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,

len = nr * CD_FRAMESIZE_RAW;

rq = blk_get_request(q, READ, GFP_KERNEL);
rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
break;
}
blk_rq_set_block_pc(rq);
req = scsi_req(rq);
scsi_req_init(rq);

ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
if (ret) {

@@ -2201,23 +2203,23 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
break;
}

rq->cmd[0] = GPCMD_READ_CD;
rq->cmd[1] = 1 << 2;
rq->cmd[2] = (lba >> 24) & 0xff;
rq->cmd[3] = (lba >> 16) & 0xff;
rq->cmd[4] = (lba >> 8) & 0xff;
rq->cmd[5] = lba & 0xff;
rq->cmd[6] = (nr >> 16) & 0xff;
rq->cmd[7] = (nr >> 8) & 0xff;
rq->cmd[8] = nr & 0xff;
rq->cmd[9] = 0xf8;
req->cmd[0] = GPCMD_READ_CD;
req->cmd[1] = 1 << 2;
req->cmd[2] = (lba >> 24) & 0xff;
req->cmd[3] = (lba >> 16) & 0xff;
req->cmd[4] = (lba >> 8) & 0xff;
req->cmd[5] = lba & 0xff;
req->cmd[6] = (nr >> 16) & 0xff;
req->cmd[7] = (nr >> 8) & 0xff;
req->cmd[8] = nr & 0xff;
req->cmd[9] = 0xf8;

rq->cmd_len = 12;
req->cmd_len = 12;
rq->timeout = 60 * HZ;
bio = rq->bio;

if (blk_execute_rq(q, cdi->disk, rq, 0)) {
struct request_sense *s = rq->sense;
struct request_sense *s = req->sense;
ret = -EIO;
cdi->last_sense = s->sense_key;
}
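The cdrom conversion shows the new passthrough plumbing end to end: allocate the request as REQ_OP_SCSI_IN, scsi_req_init() it, and build the CDB through scsi_req(rq) instead of fields on the request itself. Condensed into one function under the 4.11-era API (a sketch; error handling trimmed):

#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <scsi/scsi_request.h>

static int issue_read_cd(struct request_queue *q, struct gendisk *disk,
			 u32 lba, u32 nframes)
{
	struct request *rq;
	struct scsi_request *req;
	int ret;

	rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	scsi_req_init(rq);
	req = scsi_req(rq);

	req->cmd[0] = GPCMD_READ_CD;	/* CDB bytes live in scsi_request now */
	req->cmd[2] = (lba >> 24) & 0xff;
	req->cmd[3] = (lba >> 16) & 0xff;
	req->cmd[4] = (lba >> 8) & 0xff;
	req->cmd[5] = lba & 0xff;
	req->cmd[8] = nframes & 0xff;
	req->cmd_len = 12;
	rq->timeout = 60 * HZ;

	ret = blk_execute_rq(q, disk, rq, 0) ? -EIO : 0;
	blk_put_request(rq);
	return ret;
}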
@@ -659,23 +659,24 @@ static void gdrom_request(struct request_queue *rq)
struct request *req;

while ((req = blk_fetch_request(rq)) != NULL) {
if (req->cmd_type != REQ_TYPE_FS) {
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
}
if (rq_data_dir(req) != READ) {
switch (req_op(req)) {
case REQ_OP_READ:
/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
break;
case REQ_OP_WRITE:
pr_notice("Read only device - write request ignored\n");
__blk_end_request_all(req, -EIO);
continue;
break;
default:
printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
break;
}

/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
}
}
@@ -10,6 +10,7 @@ menuconfig IDE
tristate "ATA/ATAPI/MFM/RLL support (DEPRECATED)"
depends on HAVE_IDE
depends on BLOCK
select BLK_SCSI_REQUEST
---help---
If you say Y here, your kernel will be able to manage ATA/(E)IDE and
ATAPI units. The most common cases are IDE hard drives and ATAPI
@@ -92,8 +92,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
struct request *rq;
int error;

rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
rq->cmd_type = REQ_TYPE_DRV_PRIV;
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
ide_req(rq)->type = ATA_PRIV_MISC;
rq->special = (char *)pc;

if (buf && bufflen) {

@@ -103,9 +104,9 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
goto put_req;
}

memcpy(rq->cmd, pc->c, 12);
memcpy(scsi_req(rq)->cmd, pc->c, 12);
if (drive->media == ide_tape)
rq->cmd[13] = REQ_IDETAPE_PC1;
scsi_req(rq)->cmd[13] = REQ_IDETAPE_PC1;
error = blk_execute_rq(drive->queue, disk, rq, 0);
put_req:
blk_put_request(rq);

@@ -171,7 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
struct request *sense_rq = &drive->sense_rq;
struct request *sense_rq = drive->sense_rq;
struct scsi_request *req = scsi_req(sense_rq);
unsigned int cmd_len, sense_len;
int err;

@@ -191,12 +193,13 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)

BUG_ON(sense_len > sizeof(*sense));

if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed)
if (ata_sense_request(rq) || drive->sense_rq_armed)
return;

memset(sense, 0, sizeof(*sense));

blk_rq_init(rq->q, sense_rq);
scsi_req_init(sense_rq);

err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
GFP_NOIO);

@@ -208,13 +211,14 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
}

sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
sense_rq->cmd[4] = cmd_len;
sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
sense_rq->rq_flags |= RQF_PREEMPT;

req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
if (drive->media == ide_tape)
sense_rq->cmd[13] = REQ_IDETAPE_PC1;
req->cmd[13] = REQ_IDETAPE_PC1;

drive->sense_rq_armed = true;
}

@@ -229,12 +233,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
return -ENOMEM;
}

drive->sense_rq.special = special;
drive->sense_rq->special = special;
drive->sense_rq_armed = false;

drive->hwif->rq = NULL;

elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);

@@ -247,14 +251,14 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq;
struct request *sense_rq = drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;

(void)ide_read_error(drive);

/* init pc from sense_rq */
ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12);
memcpy(pc->c, scsi_req(sense_rq)->cmd, 12);

if (drive->media == ide_tape)
drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;

@@ -286,7 +290,7 @@ int ide_cd_expiry(ide_drive_t *drive)
* commands/drives support that. Let ide_timer_expiry keep polling us
* for these.
*/
switch (rq->cmd[0]) {
switch (scsi_req(rq)->cmd[0]) {
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:

@@ -297,7 +301,7 @@ int ide_cd_expiry(ide_drive_t *drive)
default:
if (!(rq->rq_flags & RQF_QUIET))
printk(KERN_INFO PFX "cmd 0x%x timed out\n",
rq->cmd[0]);
scsi_req(rq)->cmd[0]);
wait = 0;
break;
}

@@ -307,15 +311,21 @@ EXPORT_SYMBOL_GPL(ide_cd_expiry);

int ide_cd_get_xferlen(struct request *rq)
{
switch (rq->cmd_type) {
case REQ_TYPE_FS:
return 32768;
case REQ_TYPE_ATA_SENSE:
case REQ_TYPE_BLOCK_PC:
case REQ_TYPE_ATA_PC:
return blk_rq_bytes(rq);
switch (req_op(rq)) {
default:
return 0;
return 32768;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
return blk_rq_bytes(rq);
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
switch (ide_req(rq)->type) {
case ATA_PRIV_PC:
case ATA_PRIV_SENSE:
return blk_rq_bytes(rq);
default:
return 0;
}
}
}
EXPORT_SYMBOL_GPL(ide_cd_get_xferlen);

@@ -374,7 +384,7 @@ int ide_check_ireason(ide_drive_t *drive, struct request *rq, int len,
drive->name, __func__, ireason);
}

if (dev_is_idecd(drive) && rq->cmd_type == REQ_TYPE_ATA_PC)
if (dev_is_idecd(drive) && ata_pc_request(rq))
rq->rq_flags |= RQF_FAILED;

return 1;

@@ -420,7 +430,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
? "write" : "read");
pc->flags |= PC_FLAG_DMA_ERROR;
} else
rq->resid_len = 0;
scsi_req(rq)->resid_len = 0;
debug_log("%s: DMA finished\n", drive->name);
}

@@ -436,7 +446,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
local_irq_enable_in_hardirq();

if (drive->media == ide_tape &&
(stat & ATA_ERR) && rq->cmd[0] == REQUEST_SENSE)
(stat & ATA_ERR) && scsi_req(rq)->cmd[0] == REQUEST_SENSE)
stat &= ~ATA_ERR;

if ((stat & ATA_ERR) || (pc->flags & PC_FLAG_DMA_ERROR)) {

@@ -446,7 +456,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (drive->media != ide_tape)
pc->rq->errors++;

if (rq->cmd[0] == REQUEST_SENSE) {
if (scsi_req(rq)->cmd[0] == REQUEST_SENSE) {
printk(KERN_ERR PFX "%s: I/O error in request "
"sense command\n", drive->name);
return ide_do_reset(drive);

@@ -477,12 +487,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
if (uptodate == 0)
drive->failed_pc = NULL;

if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
if (ata_misc_request(rq)) {
rq->errors = 0;
error = 0;
} else {

if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) {
if (blk_rq_is_passthrough(rq) && uptodate <= 0) {
if (rq->errors == 0)
rq->errors = -EIO;
}

@@ -512,7 +522,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pio_bytes(drive, cmd, write, done);

/* Update transferred byte count */
rq->resid_len -= done;
scsi_req(rq)->resid_len -= done;

bcount -= done;

@@ -520,7 +530,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);

debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
rq->cmd[0], done, bcount, rq->resid_len);
rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);

/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);

@@ -603,7 +613,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)

if (dev_is_idecd(drive)) {
/* ATAPI commands get padded out to 12 bytes minimum */
cmd_len = COMMAND_SIZE(rq->cmd[0]);
cmd_len = COMMAND_SIZE(scsi_req(rq)->cmd[0]);
if (cmd_len < ATAPI_MIN_CDB_BYTES)
cmd_len = ATAPI_MIN_CDB_BYTES;

@@ -650,7 +660,7 @@ static ide_startstop_t ide_transfer_pc(ide_drive_t *drive)

/* Send the actual packet */
if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
hwif->tp_ops->output_data(drive, NULL, scsi_req(rq)->cmd, cmd_len);

/* Begin DMA, if necessary */
if (dev_is_idecd(drive)) {

@@ -695,7 +705,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd)
bytes, 63 * 1024));

/* We haven't transferred any data yet */
rq->resid_len = bcount;
scsi_req(rq)->resid_len = bcount;

if (pc->flags & PC_FLAG_DMA_ERROR) {
pc->flags &= ~PC_FLAG_DMA_ERROR;
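ide keeps its private request subtypes, but they move out of cmd_type into a driver-owned field reached through ide_req(), while the CDB moves into scsi_req() like everywhere else. Allocating one ide-private command under the new scheme then looks roughly like this sketch (mirroring the ide_queue_pc_tail() hunk above; ATA_PRIV_MISC is the subtype this series introduces):

#include <linux/blkdev.h>
#include <linux/ide.h>
#include <scsi/scsi_request.h>

static struct request *ide_alloc_misc_rq(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
	scsi_req_init(rq);
	ide_req(rq)->type = ATA_PRIV_MISC;	/* subtype, formerly cmd_type */
	return rq;
}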
@@ -121,7 +121,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq)
* don't log START_STOP unit with LoEj set, since we cannot
* reliably check if drive can auto-close
*/
if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
break;
log = 1;
break;

@@ -163,7 +163,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
* toc has not been recorded yet, it will fail with 05/24/00 (which is a
* confusing error)
*/
if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (failed_command && scsi_req(failed_command)->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
if (sense->sense_key == 0x05 && sense->asc == 0x24)
return;

@@ -176,7 +176,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
if (!sense->valid)
break;
if (failed_command == NULL ||
failed_command->cmd_type != REQ_TYPE_FS)
blk_rq_is_passthrough(failed_command))
break;
sector = (sense->information[0] << 24) |
(sense->information[1] << 16) |

@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
* For REQ_TYPE_ATA_SENSE, "rq->special" points to the original
* For ATA_PRIV_SENSE, "rq->special" points to the original
* failed request. Also, the sense data should be read
* directly from rq which might be different from the original
* sense buffer if it got copied during mapping.

@@ -219,15 +219,12 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
void *sense = bio_data(rq->bio);

if (failed) {
if (failed->sense) {
/*
* Sense is always read into drive->sense_data.
* Copy back if the failed request has its
* sense pointer set.
*/
memcpy(failed->sense, sense, 18);
failed->sense_len = rq->sense_len;
}
/*
* Sense is always read into drive->sense_data, copy back to the
* original request.
*/
memcpy(scsi_req(failed)->sense, sense, 18);
scsi_req(failed)->sense_len = scsi_req(rq)->sense_len;
cdrom_analyze_sense_data(drive, failed);

if (ide_end_rq(drive, failed, -EIO, blk_rq_bytes(failed)))

@@ -285,7 +282,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
"stat 0x%x",
rq->cmd[0], rq->cmd_type, err, stat);

if (rq->cmd_type == REQ_TYPE_ATA_SENSE) {
if (ata_sense_request(rq)) {
/*
* We got an error trying to get sense info from the drive
* (probably while trying to recover from a former error).

@@ -296,7 +293,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
}

/* if we have an error, pass CHECK_CONDITION as the SCSI status byte */
if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors)
if (blk_rq_is_scsi(rq) && !rq->errors)
rq->errors = SAM_STAT_CHECK_CONDITION;

if (blk_noretry_request(rq))

@@ -304,13 +301,13 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)

switch (sense_key) {
case NOT_READY:
if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) {
if (req_op(rq) == REQ_OP_WRITE) {
if (ide_cd_breathe(drive, rq))
return 1;
} else {
cdrom_saw_media_change(drive);

if (rq->cmd_type == REQ_TYPE_FS &&
if (!blk_rq_is_passthrough(rq) &&
!(rq->rq_flags & RQF_QUIET))
printk(KERN_ERR PFX "%s: tray open\n",
drive->name);

@@ -320,7 +317,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
case UNIT_ATTENTION:
cdrom_saw_media_change(drive);

if (rq->cmd_type != REQ_TYPE_FS)
if (blk_rq_is_passthrough(rq))
return 0;

/*

@@ -338,7 +335,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
*
* cdrom_log_sense() knows this!
*/
if (rq->cmd[0] == GPCMD_START_STOP_UNIT)
if (scsi_req(rq)->cmd[0] == GPCMD_START_STOP_UNIT)
break;
/* fall-through */
case DATA_PROTECT:

@@ -368,7 +365,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
break;
default:
if (rq->cmd_type != REQ_TYPE_FS)
if (blk_rq_is_passthrough(rq))
break;
if (err & ~ATA_ABORTED) {
/* go to the default handler for other errors */

@@ -379,7 +376,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
do_end_request = 1;
}

if (rq->cmd_type != REQ_TYPE_FS) {
if (blk_rq_is_passthrough(rq)) {
rq->rq_flags |= RQF_FAILED;
do_end_request = 1;
}

@@ -414,7 +411,7 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd)
* Some of the trailing request sense fields are optional,
* and some drives don't send them. Sigh.
*/
if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
if (scsi_req(rq)->cmd[0] == GPCMD_REQUEST_SENSE &&
cmd->nleft > 0 && cmd->nleft <= 5)
cmd->nleft = 0;
}

@@ -425,12 +422,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
req_flags_t rq_flags)
{
struct cdrom_info *info = drive->driver_data;
struct request_sense local_sense;
int retries = 10;
req_flags_t flags = 0;

if (!sense)
sense = &local_sense;
bool failed;

ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x, timeout: %d, "
"rq_flags: 0x%x",

@@ -440,12 +433,13 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
do {
struct request *rq;
int error;
bool delay = false;

rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);

memcpy(rq->cmd, cmd, BLK_MAX_CDB);
rq->cmd_type = REQ_TYPE_ATA_PC;
rq->sense = sense;
rq = blk_get_request(drive->queue,
write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
scsi_req_init(rq);
memcpy(scsi_req(rq)->cmd, cmd, BLK_MAX_CDB);
ide_req(rq)->type = ATA_PRIV_PC;
rq->rq_flags |= rq_flags;
rq->timeout = timeout;
if (buffer) {

@@ -460,21 +454,21 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0);

if (buffer)
*bufflen = rq->resid_len;

flags = rq->rq_flags;
blk_put_request(rq);
*bufflen = scsi_req(rq)->resid_len;
if (sense)
memcpy(sense, scsi_req(rq)->sense, sizeof(*sense));

/*
* FIXME: we should probably abort/retry or something in case of
* failure.
*/
if (flags & RQF_FAILED) {
failed = (rq->rq_flags & RQF_FAILED) != 0;
if (failed) {
/*
* The request failed. Retry if it was due to a unit
* attention status (usually means media was changed).
*/
struct request_sense *reqbuf = sense;
struct request_sense *reqbuf = scsi_req(rq)->sense;

if (reqbuf->sense_key == UNIT_ATTENTION)
cdrom_saw_media_change(drive);

@@ -485,19 +479,20 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
* a disk. Retry, but wait a little to give
* the drive time to complete the load.
*/
ssleep(2);
delay = true;
} else {
/* otherwise, don't retry */
retries = 0;
}
--retries;
}

/* end of retry loop */
} while ((flags & RQF_FAILED) && retries >= 0);
blk_put_request(rq);
if (delay)
ssleep(2);
} while (failed && retries >= 0);

/* return an error if the command failed */
return (flags & RQF_FAILED) ? -EIO : 0;
return failed ? -EIO : 0;
}

/*

@@ -526,7 +521,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE);
int sense = ata_sense_request(rq);
unsigned int timeout;
u16 len;
u8 ireason, stat;

@@ -569,7 +564,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

ide_read_bcount_and_ireason(drive, &len, &ireason);

thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft;
thislen = !blk_rq_is_passthrough(rq) ? len : cmd->nleft;
if (thislen > len)
thislen = len;

@@ -578,7 +573,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

/* If DRQ is clear, the command has completed. */
if ((stat & ATA_DRQ) == 0) {
if (rq->cmd_type == REQ_TYPE_FS) {
switch (req_op(rq)) {
default:
/*
* If we're not done reading/writing, complain.
* Otherwise, complete the command normally.

@@ -592,7 +588,9 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->rq_flags |= RQF_FAILED;
uptodate = 0;
}
} else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) {
goto out_end;
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
ide_cd_request_sense_fixup(drive, cmd);

uptodate = cmd->nleft ? 0 : 1;

@@ -608,8 +606,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)

if (!uptodate)
rq->rq_flags |= RQF_FAILED;
goto out_end;
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
goto out_end;
}
goto out_end;
}

rc = ide_check_ireason(drive, rq, len, ireason, write);

@@ -636,12 +637,12 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
len -= blen;

if (sense && write == 0)
rq->sense_len += blen;
scsi_req(rq)->sense_len += blen;
}

/* pad, if necessary */
if (len > 0) {
if (rq->cmd_type != REQ_TYPE_FS || write == 0)
if (blk_rq_is_passthrough(rq) || write == 0)
ide_pad_transfer(drive, write, len);
else {
printk(KERN_ERR PFX "%s: confused, missing data\n",

@@ -650,12 +651,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
}
}

if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
switch (req_op(rq)) {
case REQ_OP_SCSI_IN:
case REQ_OP_SCSI_OUT:
timeout = rq->timeout;
} else {
break;
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
expiry = ide_cd_expiry;
/*FALLTHRU*/
default:
timeout = ATAPI_WAIT_PC;
if (rq->cmd_type != REQ_TYPE_FS)
expiry = ide_cd_expiry;
break;
}
|
||||
|
||||
hwif->expiry = expiry;
|
||||
|
@ -663,15 +670,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
|
|||
return ide_started;
|
||||
|
||||
out_end:
|
||||
if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) {
|
||||
rq->resid_len = 0;
|
||||
if (blk_rq_is_scsi(rq) && rc == 0) {
|
||||
scsi_req(rq)->resid_len = 0;
|
||||
blk_end_request_all(rq, 0);
|
||||
hwif->rq = NULL;
|
||||
} else {
|
||||
if (sense && uptodate)
|
||||
ide_cd_complete_failed_rq(drive, rq);
|
||||
|
||||
if (rq->cmd_type == REQ_TYPE_FS) {
|
||||
if (!blk_rq_is_passthrough(rq)) {
|
||||
if (cmd->nleft == 0)
|
||||
uptodate = 1;
|
||||
} else {
|
||||
|
@ -684,10 +691,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
|
|||
return ide_stopped;
|
||||
|
||||
/* make sure it's fully ended */
|
||||
if (rq->cmd_type != REQ_TYPE_FS) {
|
||||
rq->resid_len -= cmd->nbytes - cmd->nleft;
|
||||
if (blk_rq_is_passthrough(rq)) {
|
||||
scsi_req(rq)->resid_len -= cmd->nbytes - cmd->nleft;
|
||||
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
|
||||
rq->resid_len += cmd->last_xfer_len;
|
||||
scsi_req(rq)->resid_len += cmd->last_xfer_len;
|
||||
}
|
||||
|
||||
ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
|
||||
|
@ -744,7 +751,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
|
|||
ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x",
|
||||
rq->cmd[0], rq->cmd_type);
|
||||
|
||||
if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
|
||||
if (blk_rq_is_scsi(rq))
|
||||
rq->rq_flags |= RQF_QUIET;
|
||||
else
|
||||
rq->rq_flags &= ~RQF_FAILED;
|
||||
|
@ -786,25 +793,31 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
|
|||
if (drive->debug_mask & IDE_DBG_RQ)
|
||||
blk_dump_rq_flags(rq, "ide_cd_do_request");
|
||||
|
||||
switch (rq->cmd_type) {
|
||||
case REQ_TYPE_FS:
|
||||
switch (req_op(rq)) {
|
||||
default:
|
||||
if (cdrom_start_rw(drive, rq) == ide_stopped)
|
||||
goto out_end;
|
||||
break;
|
||||
case REQ_TYPE_ATA_SENSE:
|
||||
case REQ_TYPE_BLOCK_PC:
|
||||
case REQ_TYPE_ATA_PC:
|
||||
case REQ_OP_SCSI_IN:
|
||||
case REQ_OP_SCSI_OUT:
|
||||
handle_pc:
|
||||
if (!rq->timeout)
|
||||
rq->timeout = ATAPI_WAIT_PC;
|
||||
|
||||
cdrom_do_block_pc(drive, rq);
|
||||
break;
|
||||
case REQ_TYPE_DRV_PRIV:
|
||||
/* right now this can only be a reset... */
|
||||
uptodate = 1;
|
||||
goto out_end;
|
||||
default:
|
||||
BUG();
|
||||
case REQ_OP_DRV_IN:
|
||||
case REQ_OP_DRV_OUT:
|
||||
switch (ide_req(rq)->type) {
|
||||
case ATA_PRIV_MISC:
|
||||
/* right now this can only be a reset... */
|
||||
uptodate = 1;
|
||||
goto out_end;
|
||||
case ATA_PRIV_SENSE:
|
||||
case ATA_PRIV_PC:
|
||||
goto handle_pc;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/* prepare sense request for this command */
|
||||
|
@ -817,7 +830,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
|
|||
|
||||
cmd.rq = rq;
|
||||
|
||||
if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
|
||||
if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
|
||||
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
|
||||
ide_map_sg(drive, &cmd);
|
||||
}
|
||||
|
@ -1312,28 +1325,29 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
|
|||
int hard_sect = queue_logical_block_size(q);
|
||||
long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
|
||||
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
|
||||
struct scsi_request *req = scsi_req(rq);
|
||||
|
||||
memset(rq->cmd, 0, BLK_MAX_CDB);
|
||||
memset(req->cmd, 0, BLK_MAX_CDB);
|
||||
|
||||
if (rq_data_dir(rq) == READ)
|
||||
rq->cmd[0] = GPCMD_READ_10;
|
||||
req->cmd[0] = GPCMD_READ_10;
|
||||
else
|
||||
rq->cmd[0] = GPCMD_WRITE_10;
|
||||
req->cmd[0] = GPCMD_WRITE_10;
|
||||
|
||||
/*
|
||||
* fill in lba
|
||||
*/
|
||||
rq->cmd[2] = (block >> 24) & 0xff;
|
||||
rq->cmd[3] = (block >> 16) & 0xff;
|
||||
rq->cmd[4] = (block >> 8) & 0xff;
|
||||
rq->cmd[5] = block & 0xff;
|
||||
req->cmd[2] = (block >> 24) & 0xff;
|
||||
req->cmd[3] = (block >> 16) & 0xff;
|
||||
req->cmd[4] = (block >> 8) & 0xff;
|
||||
req->cmd[5] = block & 0xff;
|
||||
|
||||
/*
|
||||
* and transfer length
|
||||
*/
|
||||
rq->cmd[7] = (blocks >> 8) & 0xff;
|
||||
rq->cmd[8] = blocks & 0xff;
|
||||
rq->cmd_len = 10;
|
||||
req->cmd[7] = (blocks >> 8) & 0xff;
|
||||
req->cmd[8] = blocks & 0xff;
|
||||
req->cmd_len = 10;
|
||||
return BLKPREP_OK;
|
||||
}
|
||||
|
||||
|
@ -1343,7 +1357,7 @@ static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
|
|||
*/
|
||||
static int ide_cdrom_prep_pc(struct request *rq)
|
||||
{
|
||||
u8 *c = rq->cmd;
|
||||
u8 *c = scsi_req(rq)->cmd;
|
||||
|
||||
/* transform 6-byte read/write commands to the 10-byte version */
|
||||
if (c[0] == READ_6 || c[0] == WRITE_6) {
|
||||
|
@ -1354,7 +1368,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
|
|||
c[2] = 0;
|
||||
c[1] &= 0xe0;
|
||||
c[0] += (READ_10 - READ_6);
|
||||
rq->cmd_len = 10;
|
||||
scsi_req(rq)->cmd_len = 10;
|
||||
return BLKPREP_OK;
|
||||
}
|
||||
|
||||
|
@ -1373,9 +1387,9 @@ static int ide_cdrom_prep_pc(struct request *rq)
|
|||
|
||||
static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
if (rq->cmd_type == REQ_TYPE_FS)
|
||||
if (!blk_rq_is_passthrough(rq))
|
||||
return ide_cdrom_prep_fs(q, rq);
|
||||
else if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
|
||||
else if (blk_rq_is_scsi(rq))
|
||||
return ide_cdrom_prep_pc(rq);
|
||||
|
||||
return 0;
|
||||
|
|
|
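Note: the ide-cd hunks above all apply the same conversion. The CDB, sense buffer and residual byte count move out of struct request into the separately allocated struct scsi_request, and the old REQ_TYPE_* request types become req_op() values plus an ide-private subtype. A minimal sketch of the resulting submission pattern follows; the function name example_queue_pc and its cdb/timeout parameters are illustrative and not part of this commit:

/* Sketch only: issue one packet command on an IDE queue, 4.11-era API. */
static int example_queue_pc(ide_drive_t *drive, const u8 *cdb, int timeout)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
	scsi_req_init(rq);                           /* reset cmd/sense state */
	memcpy(scsi_req(rq)->cmd, cdb, BLK_MAX_CDB); /* CDB lives in scsi_request now */
	ide_req(rq)->type = ATA_PRIV_PC;             /* driver-private subtype */
	rq->timeout = timeout;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);
	return error;
}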
drivers/ide/ide-cd_ioctl.c

@@ -303,8 +303,9 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 	struct request *rq;
 	int ret;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_MISC;
 	rq->rq_flags = RQF_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
 	blk_put_request(rq);

drivers/ide/ide-cd_verbose.c

@@ -315,12 +315,12 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
 		while (hi > lo) {
 			mid = (lo + hi) / 2;
 			if (packet_command_texts[mid].packet_command ==
-			    failed_command->cmd[0]) {
+			    scsi_req(failed_command)->cmd[0]) {
 				s = packet_command_texts[mid].text;
 				break;
 			}
 			if (packet_command_texts[mid].packet_command >
-			    failed_command->cmd[0])
+			    scsi_req(failed_command)->cmd[0])
 				hi = mid;
 			else
 				lo = mid + 1;
@@ -329,7 +329,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
 		printk(KERN_ERR "  The failed \"%s\" packet command "
 				"was: \n \"", s);
 		for (i = 0; i < BLK_MAX_CDB; i++)
-			printk(KERN_CONT "%02x ", failed_command->cmd[i]);
+			printk(KERN_CONT "%02x ", scsi_req(failed_command)->cmd[i]);
 		printk(KERN_CONT "\"\n");
 	}
 
drivers/ide/ide-devsets.c

@@ -165,11 +165,12 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
 	if (!(setting->flags & DS_SYNC))
 		return setting->set(drive, arg);
 
-	rq = blk_get_request(q, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
-	rq->cmd_len = 5;
-	rq->cmd[0] = REQ_DEVSET_EXEC;
-	*(int *)&rq->cmd[1] = arg;
+	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_MISC;
+	scsi_req(rq)->cmd_len = 5;
+	scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
+	*(int *)&scsi_req(rq)->cmd[1] = arg;
 	rq->special = setting->set;
 
 	if (blk_execute_rq(q, NULL, rq, 0))
@@ -183,7 +184,7 @@ ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
 {
 	int err, (*setfunc)(ide_drive_t *, int) = rq->special;
 
-	err = setfunc(drive, *(int *)&rq->cmd[1]);
+	err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
 	if (err)
 		rq->errors = err;
 	ide_complete_rq(drive, err, blk_rq_bytes(rq));
drivers/ide/ide-disk.c

@@ -184,7 +184,7 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
 	ide_hwif_t *hwif = drive->hwif;
 
 	BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
-	BUG_ON(rq->cmd_type != REQ_TYPE_FS);
+	BUG_ON(blk_rq_is_passthrough(rq));
 
 	ledtrig_disk_activity();
 
@@ -452,8 +452,9 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
 	cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
 	cmd->tf_flags = IDE_TFLAG_DYN;
 	cmd->protocol = ATA_PROT_NODATA;
 
-	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+	rq->cmd_flags &= ~REQ_OP_MASK;
+	rq->cmd_flags |= REQ_OP_DRV_OUT;
+	ide_req(rq)->type = ATA_PRIV_TASKFILE;
 	rq->special = cmd;
 	cmd->rq = rq;
 
@@ -477,8 +478,9 @@ static int set_multcount(ide_drive_t *drive, int arg)
 	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
 		return -EBUSY;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
 	drive->mult_req = arg;
 	drive->special_flags |= IDE_SFLAG_SET_MULTMODE;

drivers/ide/ide-eh.c

@@ -123,8 +123,8 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
 		return ide_stopped;
 
 	/* retry only "normal" I/O: */
-	if (rq->cmd_type != REQ_TYPE_FS) {
-		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+	if (blk_rq_is_passthrough(rq)) {
+		if (ata_taskfile_request(rq)) {
 			struct ide_cmd *cmd = rq->special;
 
 			if (cmd)
@@ -147,8 +147,8 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
 {
 	struct request *rq = drive->hwif->rq;
 
-	if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV &&
-	    rq->cmd[0] == REQ_DRIVE_RESET) {
+	if (rq && ata_misc_request(rq) &&
+	    scsi_req(rq)->cmd[0] == REQ_DRIVE_RESET) {
 		if (err <= 0 && rq->errors == 0)
 			rq->errors = -EIO;
 		ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
drivers/ide/ide-floppy.c

@@ -72,7 +72,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 		drive->failed_pc = NULL;
 
 	if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
-	    rq->cmd_type == REQ_TYPE_BLOCK_PC)
+	    (req_op(rq) == REQ_OP_SCSI_IN || req_op(rq) == REQ_OP_SCSI_OUT))
 		uptodate = 1; /* FIXME */
 	else if (pc->c[0] == GPCMD_REQUEST_SENSE) {
 
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc)
 				"Aborting request!\n");
 	}
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
+	if (ata_misc_request(rq))
 		rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;
 
 	return uptodate;
@@ -203,7 +203,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
 	put_unaligned(cpu_to_be16(blocks), (unsigned short *)&pc->c[7]);
 	put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[2]);
 
-	memcpy(rq->cmd, pc->c, 12);
+	memcpy(scsi_req(rq)->cmd, pc->c, 12);
 
 	pc->rq = rq;
 	if (cmd == WRITE)
@@ -216,7 +216,7 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
 		struct ide_atapi_pc *pc, struct request *rq)
 {
 	ide_init_pc(pc);
-	memcpy(pc->c, rq->cmd, sizeof(pc->c));
+	memcpy(pc->c, scsi_req(rq)->cmd, sizeof(pc->c));
 	pc->rq = rq;
 	if (blk_rq_bytes(rq)) {
 		pc->flags |= PC_FLAG_DMA_OK;
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 	} else
 		printk(KERN_ERR PFX "%s: I/O error\n", drive->name);
 
-	if (rq->cmd_type == REQ_TYPE_DRV_PRIV) {
+	if (ata_misc_request(rq)) {
 		rq->errors = 0;
 		ide_complete_rq(drive, 0, blk_rq_bytes(rq));
 		return ide_stopped;
@@ -254,8 +254,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		goto out_end;
 	}
 
-	switch (rq->cmd_type) {
-	case REQ_TYPE_FS:
+	switch (req_op(rq)) {
+	default:
 		if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
 		    (blk_rq_sectors(rq) % floppy->bs_factor)) {
 			printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
@@ -265,16 +265,21 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 		pc = &floppy->queued_pc;
 		idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block);
 		break;
-	case REQ_TYPE_DRV_PRIV:
-	case REQ_TYPE_ATA_SENSE:
-		pc = (struct ide_atapi_pc *)rq->special;
-		break;
-	case REQ_TYPE_BLOCK_PC:
+	case REQ_OP_SCSI_IN:
+	case REQ_OP_SCSI_OUT:
 		pc = &floppy->queued_pc;
 		idefloppy_blockpc_cmd(floppy, pc, rq);
 		break;
-	default:
-		BUG();
+	case REQ_OP_DRV_IN:
+	case REQ_OP_DRV_OUT:
+		switch (ide_req(rq)->type) {
+		case ATA_PRIV_MISC:
+		case ATA_PRIV_SENSE:
+			pc = (struct ide_atapi_pc *)rq->special;
+			break;
+		default:
+			BUG();
+		}
 	}
 
 	ide_prep_sense(drive, rq);
@@ -286,7 +291,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 
 	cmd.rq = rq;
 
-	if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) {
+	if (!blk_rq_is_passthrough(rq) || blk_rq_bytes(rq)) {
 		ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
 		ide_map_sg(drive, &cmd);
 	}
@@ -296,7 +301,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
 	return ide_floppy_issue_pc(drive, &cmd, pc);
 out_end:
 	drive->failed_pc = NULL;
-	if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+	if (blk_rq_is_passthrough(rq) && rq->errors == 0)
 		rq->errors = -EIO;
 	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
 	return ide_stopped;
drivers/ide/ide-io.c

@@ -102,7 +102,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
 			drive->dev_flags |= IDE_DFLAG_PARKED;
 	}
 
-	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+	if (rq && ata_taskfile_request(rq)) {
 		struct ide_cmd *orig_cmd = rq->special;
 
 		if (cmd->tf_flags & IDE_TFLAG_DYN)
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq);
 
 void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 {
-	u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk;
+	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
 	u8 media = drive->media;
 
 	drive->failed_pc = NULL;
@@ -145,7 +145,7 @@ void ide_kill_rq(ide_drive_t *drive, struct request *rq)
 	} else {
 		if (media == ide_tape)
 			rq->errors = IDE_DRV_ERROR_GENERAL;
-		else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
+		else if (blk_rq_is_passthrough(rq) && rq->errors == 0)
 			rq->errors = -EIO;
 	}
 
@@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 
 static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 {
-	u8 cmd = rq->cmd[0];
+	u8 cmd = scsi_req(rq)->cmd[0];
 
 	switch (cmd) {
 	case REQ_PARK_HEADS:
@@ -340,7 +340,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	if (drive->current_speed == 0xff)
 		ide_config_drive_speed(drive, drive->desired_speed);
 
-	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+	if (ata_taskfile_request(rq))
 		return execute_drive_cmd(drive, rq);
 	else if (ata_pm_request(rq)) {
 		struct ide_pm_state *pm = rq->special;
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		    pm->pm_step == IDE_PM_COMPLETED)
 			ide_complete_pm_rq(drive, rq);
 		return startstop;
-	} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV)
+	} else if (!rq->rq_disk && ata_misc_request(rq))
 		/*
 		 * TODO: Once all ULDs have been modified to
 		 * check for specific op codes rather than
@@ -545,6 +545,7 @@ void do_ide_request(struct request_queue *q)
 			goto plug_device;
 	}
 
+	scsi_req(rq)->resid_len = blk_rq_bytes(rq);
 	hwif->rq = rq;
 
 	spin_unlock_irq(&hwif->lock);
drivers/ide/ide-ioctls.c

@@ -125,8 +125,9 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 	if (NULL == (void *) arg) {
 		struct request *rq;
 
-		rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-		rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+		rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+		scsi_req_init(rq);
+		ide_req(rq)->type = ATA_PRIV_TASKFILE;
 		err = blk_execute_rq(drive->queue, NULL, rq, 0);
 		blk_put_request(rq);
 
@@ -221,10 +222,11 @@ static int generic_drive_reset(ide_drive_t *drive)
 	struct request *rq;
 	int ret = 0;
 
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
-	rq->cmd_len = 1;
-	rq->cmd[0] = REQ_DRIVE_RESET;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_MISC;
+	scsi_req(rq)->cmd_len = 1;
+	scsi_req(rq)->cmd[0] = REQ_DRIVE_RESET;
 	if (blk_execute_rq(drive->queue, NULL, rq, 1))
 		ret = rq->errors;
 	blk_put_request(rq);
drivers/ide/ide-park.c

@@ -31,10 +31,11 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	}
 	spin_unlock_irq(&hwif->lock);
 
-	rq = blk_get_request(q, READ, __GFP_RECLAIM);
-	rq->cmd[0] = REQ_PARK_HEADS;
-	rq->cmd_len = 1;
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
+	rq = blk_get_request(q, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
+	scsi_req(rq)->cmd_len = 1;
+	ide_req(rq)->type = ATA_PRIV_MISC;
 	rq->special = &timeout;
 	rc = blk_execute_rq(q, NULL, rq, 1);
 	blk_put_request(rq);
@@ -45,13 +46,14 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	 * Make sure that *some* command is sent to the drive after the
 	 * timeout has expired, so power management will be reenabled.
 	 */
-	rq = blk_get_request(q, READ, GFP_NOWAIT);
+	rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
+	scsi_req_init(rq);
 	if (IS_ERR(rq))
 		goto out;
 
-	rq->cmd[0] = REQ_UNPARK_HEADS;
-	rq->cmd_len = 1;
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
+	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
+	scsi_req(rq)->cmd_len = 1;
+	ide_req(rq)->type = ATA_PRIV_MISC;
 	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
 
 out:
@@ -64,7 +66,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
 	struct ide_taskfile *tf = &cmd.tf;
 
 	memset(&cmd, 0, sizeof(cmd));
-	if (rq->cmd[0] == REQ_PARK_HEADS) {
+	if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
 		drive->sleep = *(unsigned long *)rq->special;
 		drive->dev_flags |= IDE_DFLAG_SLEEPING;
 		tf->command = ATA_CMD_IDLEIMMEDIATE;
drivers/ide/ide-pm.c

@@ -18,8 +18,9 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_SUSPEND;
 	if (mesg.event == PM_EVENT_PRETHAW)
@@ -88,8 +89,9 @@ int generic_ide_resume(struct device *dev)
 	}
 
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_PM_RESUME;
 	rq->rq_flags |= RQF_PREEMPT;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_RESUME;
@@ -221,10 +223,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
 
 #ifdef DEBUG_PM
 	printk("%s: completing PM request, %s\n", drive->name,
-	       (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume");
+	       (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
 #endif
 	spin_lock_irqsave(q->queue_lock, flags);
-	if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND)
+	if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
 		blk_stop_queue(q);
 	else
 		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
@@ -240,11 +242,13 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
 	struct ide_pm_state *pm = rq->special;
 
-	if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND &&
+	if (blk_rq_is_private(rq) &&
+	    ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
 	    pm->pm_step == IDE_PM_START_SUSPEND)
 		/* Mark drive blocked when starting the suspend sequence. */
 		drive->dev_flags |= IDE_DFLAG_BLOCKED;
-	else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME &&
+	else if (blk_rq_is_private(rq) &&
+		 ide_req(rq)->type == ATA_PRIV_PM_RESUME &&
 		 pm->pm_step == IDE_PM_START_RESUME) {
 		/*
 		 * The first thing we do on wakeup is to wait for BSY bit to
drivers/ide/ide-probe.c

@@ -741,6 +741,14 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
 	}
 }
 
+static int ide_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+	struct ide_request *req = blk_mq_rq_to_pdu(rq);
+
+	req->sreq.sense = req->sense;
+	return 0;
+}
+
 /*
  * init request queue
  */
@@ -758,11 +766,18 @@ static int ide_init_queue(ide_drive_t *drive)
 	 *	limits and LBA48 we could raise it but as yet
 	 *	do not.
 	 */
-
-	q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
+	q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
 	if (!q)
 		return 1;
 
+	q->request_fn = do_ide_request;
+	q->init_rq_fn = ide_init_rq;
+	q->cmd_size = sizeof(struct ide_request);
+	if (blk_init_allocated_queue(q) < 0) {
+		blk_cleanup_queue(q);
+		return 1;
+	}
+
 	q->queuedata = drive;
 	blk_queue_segment_boundary(q, 0xffff);
 
@@ -1131,10 +1146,12 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
 	ide_port_for_each_dev(i, drive, hwif) {
 		u8 j = (hwif->index * MAX_DRIVES) + i;
 		u16 *saved_id = drive->id;
+		struct request *saved_sense_rq = drive->sense_rq;
 
 		memset(drive, 0, sizeof(*drive));
 		memset(saved_id, 0, SECTOR_SIZE);
 		drive->id = saved_id;
+		drive->sense_rq = saved_sense_rq;
 
 		drive->media = ide_disk;
 		drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1241,6 +1258,7 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
 	int i;
 
 	ide_port_for_each_dev(i, drive, hwif) {
+		kfree(drive->sense_rq);
 		kfree(drive->id);
 		kfree(drive);
 	}
@@ -1248,11 +1266,10 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
 
 static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
 {
+	ide_drive_t *drive;
 	int i;
 
 	for (i = 0; i < MAX_DRIVES; i++) {
-		ide_drive_t *drive;
-
 		drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
 		if (drive == NULL)
 			goto out_nomem;
@@ -1267,12 +1284,21 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
 	 */
 		drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node);
 		if (drive->id == NULL)
-			goto out_nomem;
+			goto out_free_drive;
+
+		drive->sense_rq = kmalloc(sizeof(struct request) +
+				sizeof(struct ide_request), GFP_KERNEL);
+		if (!drive->sense_rq)
+			goto out_free_id;
 
 		hwif->devices[i] = drive;
 	}
 	return 0;
 
+out_free_id:
+	kfree(drive->id);
+out_free_drive:
+	kfree(drive);
 out_nomem:
 	ide_port_free_devices(hwif);
 	return -ENOMEM;
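Note: the ide-probe.c hunks above switch from blk_init_queue_node() to allocating the queue first and letting the block core embed a driver PDU in every request via cmd_size/init_rq_fn. A short sketch of that allocation scheme follows; my_request_fn, my_init_rq and struct my_pdu are hypothetical names, not from this commit:

/* Sketch only: legacy .request_fn queue with a per-request PDU. */
struct request_queue *q;

q = blk_alloc_queue_node(GFP_KERNEL, node);
if (!q)
	return -ENOMEM;

q->request_fn = my_request_fn;        /* legacy .request_fn entry point */
q->init_rq_fn = my_init_rq;           /* called once per preallocated request */
q->cmd_size = sizeof(struct my_pdu);  /* PDU, e.g. embedding struct scsi_request */
if (blk_init_allocated_queue(q) < 0) {
	blk_cleanup_queue(q);
	return -ENOMEM;
}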
drivers/ide/ide-tape.c

@@ -282,7 +282,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
 
 	/* correct remaining bytes to transfer */
 	if (pc->flags & PC_FLAG_DMA_ERROR)
-		rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
+		scsi_req(rq)->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]);
 
 	/*
 	 * If error was the result of a zero-length read or write command,
@@ -316,7 +316,7 @@ static void idetape_analyze_error(ide_drive_t *drive)
 			pc->flags |= PC_FLAG_ABORT;
 		}
 		if (!(pc->flags & PC_FLAG_ABORT) &&
-		    (blk_rq_bytes(rq) - rq->resid_len))
+		    (blk_rq_bytes(rq) - scsi_req(rq)->resid_len))
 			pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
 	}
 }
@@ -348,7 +348,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
 				"itself - Aborting request!\n");
 	} else if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
 		unsigned int blocks =
-			(blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size;
+			(blk_rq_bytes(rq) - scsi_req(rq)->resid_len) / tape->blk_size;
 
 		tape->avg_size += blocks * tape->blk_size;
 
@@ -560,7 +560,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
 		pc->flags |= PC_FLAG_WRITING;
 	}
 
-	memcpy(rq->cmd, pc->c, 12);
+	memcpy(scsi_req(rq)->cmd, pc->c, 12);
 }
 
 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
@@ -570,14 +570,16 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 	idetape_tape_t *tape = drive->driver_data;
 	struct ide_atapi_pc *pc = NULL;
 	struct ide_cmd cmd;
+	struct scsi_request *req = scsi_req(rq);
 	u8 stat;
 
 	ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, sector: %llu, nr_sectors: %u",
-		      rq->cmd[0], (unsigned long long)blk_rq_pos(rq),
+		      req->cmd[0], (unsigned long long)blk_rq_pos(rq),
 		      blk_rq_sectors(rq));
 
-	BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV ||
-		 rq->cmd_type == REQ_TYPE_ATA_SENSE));
+	BUG_ON(!blk_rq_is_private(rq));
+	BUG_ON(ide_req(rq)->type != ATA_PRIV_MISC &&
+	       ide_req(rq)->type != ATA_PRIV_SENSE);
 
 	/* Retry a failed packet command */
 	if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) {
@@ -592,7 +594,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 	stat = hwif->tp_ops->read_status(hwif);
 
 	if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 &&
-	    (rq->cmd[13] & REQ_IDETAPE_PC2) == 0)
+	    (req->cmd[13] & REQ_IDETAPE_PC2) == 0)
 		drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC;
 
 	if (drive->dev_flags & IDE_DFLAG_POST_RESET) {
@@ -609,7 +611,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 		} else if (time_after(jiffies, tape->dsc_timeout)) {
 			printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
 				tape->name);
-			if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+			if (req->cmd[13] & REQ_IDETAPE_PC2) {
 				idetape_media_access_finished(drive);
 				return ide_stopped;
 			} else {
@@ -626,23 +628,23 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
 		tape->postponed_rq = false;
 	}
 
-	if (rq->cmd[13] & REQ_IDETAPE_READ) {
+	if (req->cmd[13] & REQ_IDETAPE_READ) {
 		pc = &tape->queued_pc;
 		ide_tape_create_rw_cmd(tape, pc, rq, READ_6);
 		goto out;
 	}
-	if (rq->cmd[13] & REQ_IDETAPE_WRITE) {
+	if (req->cmd[13] & REQ_IDETAPE_WRITE) {
 		pc = &tape->queued_pc;
 		ide_tape_create_rw_cmd(tape, pc, rq, WRITE_6);
 		goto out;
 	}
-	if (rq->cmd[13] & REQ_IDETAPE_PC1) {
+	if (req->cmd[13] & REQ_IDETAPE_PC1) {
 		pc = (struct ide_atapi_pc *)rq->special;
-		rq->cmd[13] &= ~(REQ_IDETAPE_PC1);
-		rq->cmd[13] |= REQ_IDETAPE_PC2;
+		req->cmd[13] &= ~(REQ_IDETAPE_PC1);
+		req->cmd[13] |= REQ_IDETAPE_PC2;
 		goto out;
 	}
-	if (rq->cmd[13] & REQ_IDETAPE_PC2) {
+	if (req->cmd[13] & REQ_IDETAPE_PC2) {
 		idetape_media_access_finished(drive);
 		return ide_stopped;
 	}
@@ -852,9 +854,10 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
 	BUG_ON(size < 0 || size % tape->blk_size);
 
-	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_DRV_PRIV;
-	rq->cmd[13] = cmd;
+	rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_MISC;
+	scsi_req(rq)->cmd[13] = cmd;
 	rq->rq_disk = tape->disk;
 	rq->__sector = tape->first_frame;
 
@@ -868,7 +871,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	blk_execute_rq(drive->queue, tape->disk, rq, 0);
 
 	/* calculate the number of transferred bytes and update buffer state */
-	size -= rq->resid_len;
+	size -= scsi_req(rq)->resid_len;
 	tape->cur = tape->buf;
 	if (cmd == REQ_IDETAPE_READ)
 		tape->valid = size;
drivers/ide/ide-taskfile.c

@@ -428,10 +428,12 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 {
 	struct request *rq;
 	int error;
-	int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
 
-	rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
-	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+	rq = blk_get_request(drive->queue,
+		(cmd->tf_flags & IDE_TFLAG_WRITE) ?
+			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
+	scsi_req_init(rq);
+	ide_req(rq)->type = ATA_PRIV_TASKFILE;
 
 	/*
 	 * (ks) We transfer currently only whole sectors.
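Note: the taskfile hunk above shows how the old READ/WRITE hint to blk_get_request() becomes an explicit passthrough op that encodes the data direction. A condensed restatement, under the same 4.11-era API:

/* Direction now selects the op rather than a READ/WRITE flag. */
rq = blk_get_request(drive->queue,
		     (cmd->tf_flags & IDE_TFLAG_WRITE) ?
			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);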
drivers/ide/sis5513.c

@@ -54,7 +54,7 @@
 #define DRV_NAME "sis5513"
 
 /* registers layout and init values are chipset family dependent */
 
+#undef ATA_16
 #define ATA_16		0x01
 #define ATA_33		0x02
 #define ATA_66		0x03
drivers/md/bcache/request.c

@@ -1009,7 +1009,7 @@ static int cached_dev_congested(void *data, int bits)
 	struct request_queue *q = bdev_get_queue(dc->bdev);
 	int ret = 0;
 
-	if (bdi_congested(&q->backing_dev_info, bits))
+	if (bdi_congested(q->backing_dev_info, bits))
 		return 1;
 
 	if (cached_dev_get(dc)) {
@@ -1018,7 +1018,7 @@ static int cached_dev_congested(void *data, int bits)
 
 		for_each_cache(ca, d->c, i) {
 			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 
 		cached_dev_put(dc);
@@ -1032,7 +1032,7 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 	struct gendisk *g = dc->disk.disk;
 
 	g->queue->make_request_fn = cached_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
+	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }
@@ -1125,7 +1125,7 @@ static int flash_dev_congested(void *data, int bits)
 
 	for_each_cache(ca, d->c, i) {
 		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 
 	return ret;
@@ -1136,7 +1136,7 @@ void bch_flash_dev_request_init(struct bcache_device *d)
 	struct gendisk *g = d->disk;
 
 	g->queue->make_request_fn = flash_dev_make_request;
-	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
+	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }
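Note: from here on, the bcache and device-mapper hunks are all consequences of request_queue::backing_dev_info changing from an embedded struct to a pointer, so every caller drops the address-of operator:

ret = bdi_congested(&q->backing_dev_info, bdi_bits);	/* old: embedded struct */
ret = bdi_congested(q->backing_dev_info, bdi_bits);	/* new: queue owns a pointer */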
drivers/md/bcache/super.c

@@ -807,7 +807,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	blk_queue_make_request(q, NULL);
 	d->disk->queue			= q;
 	q->queuedata			= d;
-	q->backing_dev_info.congested_data = d;
+	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors	= UINT_MAX;
 	q->limits.max_sectors		= UINT_MAX;
 	q->limits.max_segment_size	= UINT_MAX;
@@ -1132,9 +1132,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
 	set_capacity(dc->disk.disk,
 		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
-	dc->disk.disk->queue->backing_dev_info.ra_pages =
-		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
-		    q->backing_dev_info.ra_pages);
+	dc->disk.disk->queue->backing_dev_info->ra_pages =
+		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
+		    q->backing_dev_info->ra_pages);
 
 	bch_cached_dev_request_init(dc);
 	bch_cached_dev_writeback_init(dc);
drivers/md/dm-cache-target.c

@@ -2284,7 +2284,7 @@ static void do_waker(struct work_struct *ws)
 static int is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
drivers/md/dm-core.h

@@ -92,7 +92,6 @@ struct mapped_device {
 	 * io objects are allocated from here.
 	 */
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 
 	struct bio_set *bs;
 
drivers/md/dm-era-target.c

@@ -1379,7 +1379,7 @@ static void stop_worker(struct era *era)
 static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
drivers/md/dm-mpath.c

@@ -92,12 +92,6 @@ struct multipath {
 
 	unsigned queue_mode;
 
-	/*
-	 * We must use a mempool of dm_mpath_io structs so that we
-	 * can resubmit bios on error.
-	 */
-	mempool_t *mpio_pool;
-
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
 
@@ -115,8 +109,6 @@ struct dm_mpath_io {
 
 typedef int (*action_fn) (struct pgpath *pgpath);
 
-static struct kmem_cache *_mpio_cache;
-
 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
@@ -209,7 +201,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
 		init_waitqueue_head(&m->pg_init_wait);
 		mutex_init(&m->work_mutex);
 
-		m->mpio_pool = NULL;
 		m->queue_mode = DM_TYPE_NONE;
 
 		m->ti = ti;
@@ -229,16 +220,7 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
 			m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
 		else
 			m->queue_mode = DM_TYPE_REQUEST_BASED;
-	}
-
-	if (m->queue_mode == DM_TYPE_REQUEST_BASED) {
-		unsigned min_ios = dm_get_reserved_rq_based_ios();
-
-		m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-		if (!m->mpio_pool)
-			return -ENOMEM;
-	}
-	else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+	} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
 		INIT_WORK(&m->process_queued_bios, process_queued_bios);
 		/*
 		 * bio-based doesn't support any direct scsi_dh management;
@@ -263,7 +245,6 @@ static void free_multipath(struct multipath *m)
 
 	kfree(m->hw_handler_name);
 	kfree(m->hw_handler_params);
-	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
 
@@ -272,38 +253,6 @@ static struct dm_mpath_io *get_mpio(union map_info *info)
 	return info->ptr;
 }
 
-static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
-{
-	struct dm_mpath_io *mpio;
-
-	if (!m->mpio_pool) {
-		/* Use blk-mq pdu memory requested via per_io_data_size */
-		mpio = get_mpio(info);
-		memset(mpio, 0, sizeof(*mpio));
-		return mpio;
-	}
-
-	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
-	if (!mpio)
-		return NULL;
-
-	memset(mpio, 0, sizeof(*mpio));
-	info->ptr = mpio;
-
-	return mpio;
-}
-
-static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
-{
-	/* Only needed for non blk-mq (.request_fn) multipath */
-	if (m->mpio_pool) {
-		struct dm_mpath_io *mpio = info->ptr;
-
-		info->ptr = NULL;
-		mempool_free(mpio, m->mpio_pool);
-	}
-}
-
 static size_t multipath_per_bio_data_size(void)
 {
 	return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
@@ -530,16 +479,17 @@ static bool must_push_back_bio(struct multipath *m)
 /*
  * Map cloned requests (request-based multipath)
  */
-static int __multipath_map(struct dm_target *ti, struct request *clone,
-			   union map_info *map_context,
-			   struct request *rq, struct request **__clone)
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **__clone)
 {
 	struct multipath *m = ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
+	size_t nr_bytes = blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
-	struct dm_mpath_io *mpio;
+	struct dm_mpath_io *mpio = get_mpio(map_context);
+	struct request *clone;
 
 	/* Do we need to select a new pgpath? */
 	pgpath = lockless_dereference(m->current_pgpath);
@@ -556,42 +506,23 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		return r;
 	}
 
-	mpio = set_mpio(m, map_context);
-	if (!mpio)
-		/* ENOMEM, requeue */
-		return r;
-
+	memset(mpio, 0, sizeof(*mpio));
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;
 
 	bdev = pgpath->path.dev->bdev;
 
-	if (clone) {
-		/*
-		 * Old request-based interface: allocated clone is passed in.
-		 * Used by: .request_fn stacked on .request_fn path(s).
-		 */
-		clone->q = bdev_get_queue(bdev);
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-	} else {
-		/*
-		 * blk-mq request-based interface; used by both:
-		 * .request_fn stacked on blk-mq path(s) and
-		 * blk-mq stacked on blk-mq path(s).
-		 */
-		clone = blk_mq_alloc_request(bdev_get_queue(bdev),
-				rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
-		if (IS_ERR(clone)) {
-			/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
-			clear_request_fn_mpio(m, map_context);
-			return r;
-		}
-		clone->bio = clone->biotail = NULL;
-		clone->rq_disk = bdev->bd_disk;
-		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
-		*__clone = clone;
-	}
+	clone = blk_get_request(bdev_get_queue(bdev),
+			rq->cmd_flags | REQ_NOMERGE,
+			GFP_ATOMIC);
+	if (IS_ERR(clone)) {
+		/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
+		return r;
+	}
+	clone->bio = clone->biotail = NULL;
+	clone->rq_disk = bdev->bd_disk;
+	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	*__clone = clone;
 
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
@@ -600,22 +531,9 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	return DM_MAPIO_REMAPPED;
 }
 
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return __multipath_map(ti, clone, map_context, NULL, NULL);
-}
-
-static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
-				   union map_info *map_context,
-				   struct request **clone)
-{
-	return __multipath_map(ti, NULL, map_context, rq, clone);
-}
-
 static void multipath_release_clone(struct request *clone)
 {
-	blk_mq_free_request(clone);
+	blk_put_request(clone);
 }
 
 /*
@@ -1187,7 +1105,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	ti->num_write_same_bios = 1;
 	if (m->queue_mode == DM_TYPE_BIO_BASED)
 		ti->per_io_data_size = multipath_per_bio_data_size();
-	else if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+	else
 		ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
 	return 0;
@@ -1610,7 +1528,6 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 		if (ps->type->end_io)
 			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
 	}
-	clear_request_fn_mpio(m, map_context);
 
 	return r;
 }
@@ -2060,7 +1977,6 @@ static struct target_type multipath_target = {
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
-	.map_rq = multipath_map,
 	.clone_and_map_rq = multipath_clone_and_map,
 	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
@@ -2080,11 +1996,6 @@ static int __init dm_multipath_init(void)
 {
 	int r;
 
-	/* allocate a slab for the dm_mpath_ios */
-	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
-	if (!_mpio_cache)
-		return -ENOMEM;
-
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
 		DMERR("request-based register failed %d", r);
@@ -2120,8 +2031,6 @@ static int __init dm_multipath_init(void)
 bad_alloc_kmultipathd:
 	dm_unregister_target(&multipath_target);
 bad_register_target:
-	kmem_cache_destroy(_mpio_cache);
-
 	return r;
 }
 
@@ -2131,7 +2040,6 @@ static void __exit dm_multipath_exit(void)
 	destroy_workqueue(kmultipathd);
 
 	dm_unregister_target(&multipath_target);
-	kmem_cache_destroy(_mpio_cache);
 }
 
 module_init(dm_multipath_init);
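Note: after the dm-mpath changes above, both the .request_fn and blk-mq paths obtain the clone from the underlying path's queue and release it symmetrically with blk_put_request(). A sketch of the unified clone path (the helper name clone_for_path is hypothetical, not from this commit):

/* Sketch only: allocate a clone request on the chosen path's queue. */
static struct request *clone_for_path(struct block_device *bdev,
				      struct request *rq)
{
	struct request *clone;

	clone = blk_get_request(bdev_get_queue(bdev),
				rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC);
	if (IS_ERR(clone))
		return clone;			/* caller requeues on EBUSY etc. */
	clone->bio = clone->biotail = NULL;	/* payload is cloned separately */
	clone->rq_disk = bdev->bd_disk;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	return clone;
}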
@ -109,28 +109,6 @@ void dm_stop_queue(struct request_queue *q)
|
|||
dm_mq_stop_queue(q);
|
||||
}
|
||||
|
||||
static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
return mempool_alloc(md->io_pool, gfp_mask);
|
||||
}
|
||||
|
||||
static void free_old_rq_tio(struct dm_rq_target_io *tio)
|
||||
{
|
||||
mempool_free(tio, tio->md->io_pool);
|
||||
}
|
||||
|
||||
static struct request *alloc_old_clone_request(struct mapped_device *md,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
return mempool_alloc(md->rq_pool, gfp_mask);
|
||||
}
|
||||
|
||||
static void free_old_clone_request(struct mapped_device *md, struct request *rq)
|
||||
{
|
||||
mempool_free(rq, md->rq_pool);
|
||||
}
|
||||
|
||||
/*
|
||||
* Partial completion handling for request-based dm
|
||||
*/
|
||||
|
@ -185,7 +163,7 @@ static void end_clone_bio(struct bio *clone)
|
|||
|
||||
static struct dm_rq_target_io *tio_from_request(struct request *rq)
|
||||
{
|
||||
return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
|
||||
return blk_mq_rq_to_pdu(rq);
|
||||
}
|
||||
|
||||
static void rq_end_stats(struct mapped_device *md, struct request *orig)
|
||||
|
@ -233,31 +211,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
|
|||
dm_put(md);
|
||||
}
|
||||
|
||||
static void free_rq_clone(struct request *clone)
|
||||
{
|
||||
struct dm_rq_target_io *tio = clone->end_io_data;
|
||||
struct mapped_device *md = tio->md;
|
||||
|
||||
blk_rq_unprep_clone(clone);
|
||||
|
||||
/*
|
||||
* It is possible for a clone_old_rq() allocated clone to
|
||||
* get passed in -- it may not yet have a request_queue.
|
||||
* This is known to occur if the error target replaces
|
||||
* a multipath target that has a request_fn queue stacked
|
||||
* on blk-mq queue(s).
|
||||
*/
|
||||
if (clone->q && clone->q->mq_ops)
|
||||
/* stacked on blk-mq queue(s) */
|
||||
tio->ti->type->release_clone_rq(clone);
|
||||
else if (!md->queue->mq_ops)
|
||||
/* request_fn queue stacked on request_fn queue(s) */
|
||||
free_old_clone_request(md, clone);
|
||||
|
||||
if (!md->queue->mq_ops)
|
||||
free_old_rq_tio(tio);
|
||||
}
|
||||
|
||||
/*
|
||||
* Complete the clone and the original request.
|
||||
* Must be called without clone's queue lock held,
|
||||
|
@ -270,20 +223,9 @@ static void dm_end_request(struct request *clone, int error)
|
|||
struct mapped_device *md = tio->md;
|
||||
struct request *rq = tio->orig;
|
||||
|
||||
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
|
||||
rq->errors = clone->errors;
|
||||
rq->resid_len = clone->resid_len;
|
||||
blk_rq_unprep_clone(clone);
|
||||
tio->ti->type->release_clone_rq(clone);
|
||||
|
||||
if (rq->sense)
|
||||
/*
|
||||
* We are using the sense buffer of the original
|
||||
* request.
|
||||
* So setting the length of the sense data is enough.
|
||||
*/
|
||||
rq->sense_len = clone->sense_len;
|
||||
}
|
||||
|
||||
free_rq_clone(clone);
|
||||
rq_end_stats(md, rq);
|
||||
if (!rq->q->mq_ops)
|
||||
blk_end_request_all(rq, error);
|
||||
|
@ -292,22 +234,6 @@ static void dm_end_request(struct request *clone, int error)
|
|||
rq_completed(md, rw, true);
|
||||
}
|
||||
|
||||
static void dm_unprep_request(struct request *rq)
|
||||
{
|
||||
struct dm_rq_target_io *tio = tio_from_request(rq);
|
||||
struct request *clone = tio->clone;
|
||||
|
||||
if (!rq->q->mq_ops) {
|
||||
rq->special = NULL;
|
||||
rq->rq_flags &= ~RQF_DONTPREP;
|
||||
}
|
||||
|
||||
if (clone)
|
||||
free_rq_clone(clone);
|
||||
else if (!tio->md->queue->mq_ops)
|
||||
free_old_rq_tio(tio);
|
||||
}
|
||||
|
||||
/*
|
||||
* Requeue the original request of a clone.
|
||||
*/
|
||||
|
@ -346,7 +272,10 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
|
|||
int rw = rq_data_dir(rq);
|
||||
|
||||
rq_end_stats(md, rq);
|
||||
dm_unprep_request(rq);
|
||||
if (tio->clone) {
|
||||
blk_rq_unprep_clone(tio->clone);
|
||||
tio->ti->type->release_clone_rq(tio->clone);
|
||||
}
|
||||
|
||||
if (!rq->q->mq_ops)
|
||||
dm_old_requeue_request(rq);
|
||||
|
@ -401,14 +330,11 @@ static void dm_softirq_done(struct request *rq)
|
|||
if (!clone) {
|
||||
rq_end_stats(tio->md, rq);
|
||||
rw = rq_data_dir(rq);
|
||||
if (!rq->q->mq_ops) {
|
||||
if (!rq->q->mq_ops)
|
||||
blk_end_request_all(rq, tio->error);
|
||||
rq_completed(tio->md, rw, false);
|
||||
free_old_rq_tio(tio);
|
||||
} else {
|
||||
else
|
||||
blk_mq_end_request(rq, tio->error);
|
||||
rq_completed(tio->md, rw, false);
|
||||
}
|
||||
rq_completed(tio->md, rw, false);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -452,16 +378,6 @@ static void end_clone_request(struct request *clone, int error)
|
|||
{
|
||||
struct dm_rq_target_io *tio = clone->end_io_data;
|
||||
|
||||
if (!clone->q->mq_ops) {
|
||||
/*
|
||||
* For just cleaning up the information of the queue in which
|
||||
* the clone was dispatched.
|
||||
* The clone is *NOT* freed actually here because it is alloced
|
||||
* from dm own mempool (RQF_ALLOCED isn't set).
|
||||
*/
|
||||
__blk_put_request(clone->q, clone);
|
||||
}
|
||||
|
||||
/*
|
||||
* Actual request completion is done in a softirq context which doesn't
|
||||
* hold the clone's queue lock. Otherwise, deadlock could occur because:
|
||||
|
@ -511,9 +427,6 @@ static int setup_clone(struct request *clone, struct request *rq,
|
|||
if (r)
|
||||
return r;
|
||||
|
||||
clone->cmd = rq->cmd;
|
||||
clone->cmd_len = rq->cmd_len;
|
||||
clone->sense = rq->sense;
|
||||
clone->end_io = end_clone_request;
|
||||
clone->end_io_data = tio;
|
||||
|
||||
|
@ -522,28 +435,6 @@ static int setup_clone(struct request *clone, struct request *rq,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
|
||||
struct dm_rq_target_io *tio, gfp_t gfp_mask)
|
||||
{
|
||||
/*
|
||||
* Create clone for use with .request_fn request_queue
|
||||
*/
|
||||
struct request *clone;
|
||||
|
||||
clone = alloc_old_clone_request(md, gfp_mask);
|
||||
if (!clone)
|
||||
return NULL;
|
||||
|
||||
blk_rq_init(NULL, clone);
|
||||
if (setup_clone(clone, rq, tio, gfp_mask)) {
|
||||
/* -ENOMEM */
|
||||
free_old_clone_request(md, clone);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return clone;
|
||||
}
|
||||
|
||||
static void map_tio_request(struct kthread_work *work);
|
||||
|
||||
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
|
||||
|
@ -565,60 +456,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
|
|||
kthread_init_work(&tio->work, map_tio_request);
|
||||
}
|
||||
|
||||
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
|
||||
struct mapped_device *md,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct dm_rq_target_io *tio;
|
||||
int srcu_idx;
|
||||
struct dm_table *table;
|
||||
|
||||
tio = alloc_old_rq_tio(md, gfp_mask);
|
||||
if (!tio)
|
||||
return NULL;
|
||||
|
||||
init_tio(tio, rq, md);
|
||||
|
||||
table = dm_get_live_table(md, &srcu_idx);
|
||||
/*
|
||||
* Must clone a request if this .request_fn DM device
|
||||
* is stacked on .request_fn device(s).
|
||||
*/
|
||||
if (!dm_table_all_blk_mq_devices(table)) {
|
||||
if (!clone_old_rq(rq, md, tio, gfp_mask)) {
|
||||
dm_put_live_table(md, srcu_idx);
|
||||
free_old_rq_tio(tio);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
dm_put_live_table(md, srcu_idx);
|
||||
|
||||
return tio;
|
||||
}
|
||||
|
||||
/*
|
||||
* Called with the queue lock held.
|
||||
*/
|
||||
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
|
||||
{
|
||||
struct mapped_device *md = q->queuedata;
|
||||
struct dm_rq_target_io *tio;
|
||||
|
||||
if (unlikely(rq->special)) {
|
||||
DMWARN("Already has something in rq->special.");
|
||||
return BLKPREP_KILL;
|
||||
}
|
||||
|
||||
tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
|
||||
if (!tio)
|
||||
return BLKPREP_DEFER;
|
||||
|
||||
rq->special = tio;
|
||||
rq->rq_flags |= RQF_DONTPREP;
|
||||
|
||||
return BLKPREP_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns:
|
||||
* DM_MAPIO_* : the request has been processed as indicated
|
||||
|
@ -633,31 +470,18 @@ static int map_request(struct dm_rq_target_io *tio)
|
|||
struct request *rq = tio->orig;
|
||||
struct request *clone = NULL;
|
||||
|
||||
if (tio->clone) {
|
||||
clone = tio->clone;
|
||||
r = ti->type->map_rq(ti, clone, &tio->info);
|
||||
if (r == DM_MAPIO_DELAY_REQUEUE)
|
||||
return DM_MAPIO_REQUEUE; /* .request_fn requeue is always immediate */
|
||||
} else {
|
||||
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
|
||||
if (r < 0) {
|
||||
/* The target wants to complete the I/O */
|
||||
dm_kill_unmapped_request(rq, r);
|
||||
return r;
|
||||
}
|
||||
if (r == DM_MAPIO_REMAPPED &&
|
||||
setup_clone(clone, rq, tio, GFP_ATOMIC)) {
|
||||
/* -ENOMEM */
|
||||
ti->type->release_clone_rq(clone);
|
||||
return DM_MAPIO_REQUEUE;
|
||||
}
|
||||
}
|
||||
|
||||
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
|
||||
switch (r) {
|
||||
case DM_MAPIO_SUBMITTED:
|
||||
/* The target has taken the I/O to submit by itself later */
|
||||
break;
|
||||
case DM_MAPIO_REMAPPED:
|
||||
if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
|
||||
/* -ENOMEM */
|
||||
ti->type->release_clone_rq(clone);
|
||||
return DM_MAPIO_REQUEUE;
|
||||
}
|
||||
|
||||
/* The target has remapped the I/O so dispatch it */
|
||||
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
|
||||
blk_rq_pos(rq));
|
||||
|
@@ -716,6 +540,29 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
 	dm_get(md);
 }
 
+static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+{
+	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
+
+	/*
+	 * Must initialize md member of tio, otherwise it won't
+	 * be available in dm_mq_queue_rq.
+	 */
+	tio->md = md;
+
+	if (md->init_tio_pdu) {
+		/* target-specific per-io data is immediately after the tio */
+		tio->info.ptr = tio + 1;
+	}
+
+	return 0;
+}
+
+static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
+{
+	return __dm_rq_init_rq(q->rq_alloc_data, rq);
+}
+
 static void map_tio_request(struct kthread_work *work)
 {
 	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
@@ -814,6 +661,7 @@ static void dm_old_request_fn(struct request_queue *q)
 		dm_start_request(md, rq);
 
 		tio = tio_from_request(rq);
+		init_tio(tio, rq, md);
 		/* Establish tio->ti before queuing work (map_tio_request) */
 		tio->ti = ti;
 		kthread_queue_work(&md->kworker, &tio->work);
@@ -824,10 +672,23 @@ static void dm_old_request_fn(struct request_queue *q)
 /*
  * Fully initialize a .request_fn request-based queue.
  */
-int dm_old_init_request_queue(struct mapped_device *md)
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
+	struct dm_target *immutable_tgt;
+
 	/* Fully initialize the queue */
-	if (!blk_init_allocated_queue(md->queue, dm_old_request_fn, NULL))
+	md->queue->cmd_size = sizeof(struct dm_rq_target_io);
+	md->queue->rq_alloc_data = md;
+	md->queue->request_fn = dm_old_request_fn;
+	md->queue->init_rq_fn = dm_rq_init_rq;
+
+	immutable_tgt = dm_table_get_immutable_target(t);
+	if (immutable_tgt && immutable_tgt->per_io_data_size) {
+		/* any target-specific per-io data is immediately after the tio */
+		md->queue->cmd_size += immutable_tgt->per_io_data_size;
+		md->init_tio_pdu = true;
+	}
+	if (blk_init_allocated_queue(md->queue) < 0)
 		return -EINVAL;
 
 	/* disable dm_old_request_fn's merge heuristic by default */

@@ -835,7 +696,6 @@ int dm_old_init_request_queue(struct mapped_device *md)
 
 	dm_init_normal_md_queue(md);
 	blk_queue_softirq_done(md->queue, dm_softirq_done);
-	blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
 	/* Initialize the request-based DM worker thread */
 	kthread_init_worker(&md->kworker);
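The rewritten initializer stops passing a request_fn to blk_init_allocated_queue() and instead sizes queue->cmd_size so the block core allocates the dm_rq_target_io, plus any per-io target data, together with each request; init_rq_fn then wires the payload up. The layout trick, per-io data sitting immediately after the tio and reached via tio + 1, can be shown in plain C. Struct and field names below are stand-ins for the demo.

#include <stdio.h>
#include <stdlib.h>

struct fake_tio {
	int md_id;
	void *info_ptr;
};

int main(void)
{
	size_t per_io_data_size = 64;   /* what an immutable target would ask for */
	size_t cmd_size = sizeof(struct fake_tio) + per_io_data_size;

	struct fake_tio *tio = calloc(1, cmd_size);
	if (!tio)
		return 1;

	tio->md_id = 42;
	tio->info_ptr = tio + 1;        /* per-io data sits right after the tio */

	printf("tio at %p, per-io data at %p (offset %zu)\n",
	       (void *)tio, tio->info_ptr, sizeof(struct fake_tio));
	free(tio);
	return 0;
}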
@@ -856,21 +716,7 @@ static int dm_mq_init_request(void *data, struct request *rq,
 		       unsigned int hctx_idx, unsigned int request_idx,
 		       unsigned int numa_node)
 {
-	struct mapped_device *md = data;
-	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
-
-	/*
-	 * Must initialize md member of tio, otherwise it won't
-	 * be available in dm_mq_queue_rq.
-	 */
-	tio->md = md;
-
-	if (md->init_tio_pdu) {
-		/* target-specific per-io data is immediately after the tio */
-		tio->info.ptr = tio + 1;
-	}
-
-	return 0;
+	return __dm_rq_init_rq(data, rq);
 }
 
 static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -48,7 +48,7 @@ struct dm_rq_clone_bio_info {
 bool dm_use_blk_mq_default(void);
 bool dm_use_blk_mq(struct mapped_device *md);
 
-int dm_old_init_request_queue(struct mapped_device *md);
+int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
 void dm_mq_cleanup_mapped_device(struct mapped_device *md);
@@ -1750,7 +1750,7 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits)
 		char b[BDEVNAME_SIZE];
 
 		if (likely(q))
-			r |= bdi_congested(&q->backing_dev_info, bdi_bits);
+			r |= bdi_congested(q->backing_dev_info, bdi_bits);
 		else
 			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
 				     dm_device_name(t->md),
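The hunk above is the first of many below with the same shape: backing_dev_info becomes a pointer held by the request_queue rather than an embedded struct, so callers drop the address-of operator. A mock-typed sketch of the access pattern, for orientation only; these are not the kernel definitions.

#include <stdio.h>

struct mock_bdi { int congested; };
struct mock_queue { struct mock_bdi *backing_dev_info; };

static int bdi_congested(struct mock_bdi *bdi, int bits)
{
	return bdi->congested & bits;
}

int main(void)
{
	struct mock_bdi bdi = { .congested = 0x3 };
	struct mock_queue q = { .backing_dev_info = &bdi };

	/* old style: bdi_congested(&q->backing_dev_info, bits) on an embedded member */
	printf("congested: %d\n", bdi_congested(q.backing_dev_info, 0x1));
	return 0;
}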
@@ -131,12 +131,6 @@ static int io_err_map(struct dm_target *tt, struct bio *bio)
 	return -EIO;
 }
 
-static int io_err_map_rq(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
-{
-	return -EIO;
-}
-
 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
 				   union map_info *map_context,
 				   struct request **clone)

@@ -161,7 +155,6 @@ static struct target_type error_target = {
 	.ctr = io_err_ctr,
 	.dtr = io_err_dtr,
 	.map = io_err_map,
-	.map_rq = io_err_map_rq,
 	.clone_and_map_rq = io_err_clone_and_map_rq,
 	.release_clone_rq = io_err_release_clone_rq,
 	.direct_access = io_err_direct_access,
@@ -2711,7 +2711,7 @@ static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
 		return 1;
 
 	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(&q->backing_dev_info, bdi_bits);
+	return bdi_congested(q->backing_dev_info, bdi_bits);
 }
 
 static void requeue_bios(struct pool *pool)
@@ -91,7 +91,6 @@ static int dm_numa_node = DM_NUMA_NODE;
  */
 struct dm_md_mempools {
 	mempool_t *io_pool;
-	mempool_t *rq_pool;
 	struct bio_set *bs;
 };
 
@@ -466,13 +465,16 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	if (r > 0) {
 		/*
-		 * Target determined this ioctl is being issued against
-		 * a logical partition of the parent bdev; so extra
-		 * validation is needed.
+		 * Target determined this ioctl is being issued against a
+		 * subset of the parent bdev; require extra privileges.
 		 */
-		r = scsi_verify_blk_ioctl(NULL, cmd);
-		if (r)
+		if (!capable(CAP_SYS_RAWIO)) {
+			DMWARN_LIMIT(
+	"%s: sending ioctl %x to DM device without required privilege.",
+				current->comm, cmd);
+			r = -ENOIOCTLCMD;
 			goto out;
+		}
 	}
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
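The ioctl hunk swaps the SCSI-specific scsi_verify_blk_ioctl() check for a plain capability test: an ioctl aimed at a subset of the parent bdev now requires CAP_SYS_RAWIO. A userspace illustration of the decision follows, with capable() reduced to a flag and ENOTTY standing in for the kernel-internal ENOIOCTLCMD; nothing here is the real dm code path.

#include <stdio.h>
#include <errno.h>

static int caller_has_rawio; /* stand-in for capable(CAP_SYS_RAWIO) */

static int dm_ioctl_check(int targets_subset)
{
	if (targets_subset && !caller_has_rawio)
		return -ENOTTY;  /* the kernel returns -ENOIOCTLCMD here */
	return 0;                /* forward to __blkdev_driver_ioctl() */
}

int main(void)
{
	caller_has_rawio = 0;
	printf("unprivileged, partition-like target: %d\n", dm_ioctl_check(1));
	caller_has_rawio = 1;
	printf("privileged: %d\n", dm_ioctl_check(1));
	return 0;
}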
@@ -1314,7 +1316,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
 		 * With request-based DM we only need to check the
 		 * top-level queue for congestion.
 		 */
-		r = md->queue->backing_dev_info.wb.state & bdi_bits;
+		r = md->queue->backing_dev_info->wb.state & bdi_bits;
 	} else {
 		map = dm_get_live_table_fast(md);
 		if (map)
@@ -1397,7 +1399,7 @@ void dm_init_md_queue(struct mapped_device *md)
 	 * - must do so here (in alloc_dev callchain) before queue is used
 	 */
 	md->queue->queuedata = md;
-	md->queue->backing_dev_info.congested_data = md;
+	md->queue->backing_dev_info->congested_data = md;
 }
 
 void dm_init_normal_md_queue(struct mapped_device *md)
@@ -1408,7 +1410,7 @@ void dm_init_normal_md_queue(struct mapped_device *md)
 	/*
 	 * Initialize aspects of queue that aren't relevant for blk-mq
 	 */
-	md->queue->backing_dev_info.congested_fn = dm_any_congested;
+	md->queue->backing_dev_info->congested_fn = dm_any_congested;
 	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
 }
 
@@ -1419,7 +1421,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
 	if (md->kworker_task)
 		kthread_stop(md->kworker_task);
 	mempool_destroy(md->io_pool);
-	mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);
 
@@ -1595,12 +1596,10 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
 		goto out;
 	}
 
-	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);
+	BUG_ON(!p || md->io_pool || md->bs);
 
 	md->io_pool = p->io_pool;
 	p->io_pool = NULL;
-	md->rq_pool = p->rq_pool;
-	p->rq_pool = NULL;
 	md->bs = p->bs;
 	p->bs = NULL;
 
@@ -1777,7 +1776,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
-		r = dm_old_init_request_queue(md);
+		r = dm_old_init_request_queue(md, t);
 		if (r) {
 			DMERR("Cannot initialize queue for request-based mapped device");
 			return r;
@@ -2493,7 +2492,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 			    unsigned integrity, unsigned per_io_data_size)
 {
 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
-	struct kmem_cache *cachep = NULL;
 	unsigned int pool_size = 0;
 	unsigned int front_pad;
 
@@ -2503,20 +2501,16 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-		cachep = _io_cache;
 		pool_size = dm_get_reserved_bio_based_ios();
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+
+		pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache);
+		if (!pools->io_pool)
+			goto out;
 		break;
 	case DM_TYPE_REQUEST_BASED:
-		cachep = _rq_tio_cache;
-		pool_size = dm_get_reserved_rq_based_ios();
-		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
-		if (!pools->rq_pool)
-			goto out;
-		/* fall through to setup remaining rq-based pools */
 	case DM_TYPE_MQ_REQUEST_BASED:
-		if (!pool_size)
-			pool_size = dm_get_reserved_rq_based_ios();
+		pool_size = dm_get_reserved_rq_based_ios();
 		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
 		/* per_io_data_size is used for blk-mq pdu at queue allocation */
 		break;
@@ -2524,12 +2518,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
 		BUG();
 	}
 
-	if (cachep) {
-		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
-		if (!pools->io_pool)
-			goto out;
-	}
-
 	pools->bs = bioset_create_nobvec(pool_size, front_pad);
 	if (!pools->bs)
 		goto out;
@@ -2551,7 +2539,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
 		return;
 
 	mempool_destroy(pools->io_pool);
-	mempool_destroy(pools->rq_pool);
 
 	if (pools->bs)
 		bioset_free(pools->bs);
@@ -95,8 +95,7 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 /*
  * To check whether the target type is request-based or not (bio-based).
  */
-#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
-				    ((t)->type->clone_and_map_rq != NULL))
+#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)
 
 /*
  * To check whether the target type is a hybrid (capable of being
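With .map_rq dropped from target_type, the request-based predicate reduces to one pointer test. A toy check of the simplified macro against pared-down stand-in types (the structs here are inventions, not the dm headers):

#include <stdio.h>

struct fake_target_type { void *map; void *clone_and_map_rq; };
struct fake_target { struct fake_target_type *type; };

#define request_based(t) ((t)->type->clone_and_map_rq != NULL)

int main(void)
{
	struct fake_target_type bio_based = { .map = (void *)1 };
	struct fake_target_type rq_based = { .clone_and_map_rq = (void *)1 };
	struct fake_target a = { &bio_based }, b = { &rq_based };

	printf("bio-based: %d, request-based: %d\n",
	       request_based(&a), request_based(&b));
	return 0;
}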
@@ -62,7 +62,7 @@ static int linear_congested(struct mddev *mddev, int bits)
 
 	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 
 	return ret;
@@ -5346,8 +5346,8 @@ int md_run(struct mddev *mddev)
 			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
 		else
 			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
-		mddev->queue->backing_dev_info.congested_data = mddev;
-		mddev->queue->backing_dev_info.congested_fn = md_congested;
+		mddev->queue->backing_dev_info->congested_data = mddev;
+		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&

@@ -5704,7 +5704,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
-	mddev->queue->backing_dev_info.congested_fn = NULL;
+	mddev->queue->backing_dev_info->congested_fn = NULL;
 
 	/* tell userspace to handle 'inactive' */
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -169,7 +169,7 @@ static int multipath_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 			/* Just like multipath_map, we just check the
 			 * first available device
 			 */
@@ -41,7 +41,7 @@ static int raid0_congested(struct mddev *mddev, int bits)
 	for (i = 0; i < raid_disks && !ret ; i++) {
 		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
 
-		ret |= bdi_congested(&q->backing_dev_info, bits);
+		ret |= bdi_congested(q->backing_dev_info, bits);
 	}
 	return ret;
 }

@@ -420,8 +420,8 @@ static int raid0_run(struct mddev *mddev)
 		 */
 		int stripe = mddev->raid_disks *
			(mddev->chunk_sectors << 9) / PAGE_SIZE;
-		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
 	}
 
 	dump_zones(mddev);
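The ra_pages hunks in raid0_run above, and their raid10/raid5 counterparts below, keep the same arithmetic: readahead should cover two full stripes, where a stripe is raid_disks * chunk_sectors * 512 bytes expressed in pages. A worked example with illustrative numbers (4 disks, 512 KiB chunks, 4 KiB pages):

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	unsigned long raid_disks = 4;
	unsigned long chunk_sectors = 1024;   /* 512 KiB chunks */

	unsigned long stripe = raid_disks * (chunk_sectors << 9) / page_size;
	unsigned long ra_pages = 32;          /* some pre-existing default */

	if (ra_pages < 2 * stripe)
		ra_pages = 2 * stripe;

	/* prints: stripe = 512 pages, ra_pages = 1024 */
	printf("stripe = %lu pages, ra_pages = %lu\n", stripe, ra_pages);
	return 0;
}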
@@ -744,9 +744,9 @@ static int raid1_congested(struct mddev *mddev, int bits)
 			 * non-congested targets, it can be removed
 			 */
 			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(&q->backing_dev_info, bits);
+				ret |= bdi_congested(q->backing_dev_info, bits);
 			else
-				ret &= bdi_congested(&q->backing_dev_info, bits);
+				ret &= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();

@@ -1170,10 +1170,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 	int i, disks;
 	struct bitmap *bitmap = mddev->bitmap;
 	unsigned long flags;
-	const int op = bio_op(bio);
-	const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
-	const unsigned long do_flush_fua = (bio->bi_opf &
-						(REQ_PREFLUSH | REQ_FUA));
 	struct md_rdev *blocked_rdev;
 	struct blk_plug_cb *cb;
 	struct raid1_plug_cb *plug = NULL;

@@ -1389,7 +1385,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
+		mbio->bi_opf = bio_op(bio) |
+			(bio->bi_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));
 		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
 		    !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
 		    conf->raid_disks - mddev->degraded > 1)
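The last raid1 hunk folds the cached op/do_sync/do_flush_fua values into one expression: take the operation bits from the parent bio and copy only the REQ_SYNC, REQ_PREFLUSH and REQ_FUA modifiers, so other flags are not propagated to the mirror writes. A small bitmask demo follows; the flag values are invented for the demo, the real ones live in include/linux/blk_types.h.

#include <stdio.h>

#define OP_WRITE       1u          /* low bits: the operation */
#define REQ_SYNC       (1u << 8)   /* high bits: modifier flags */
#define REQ_PREFLUSH   (1u << 9)
#define REQ_FUA        (1u << 10)
#define REQ_RAHEAD     (1u << 11)  /* example of a flag that must NOT leak */

int main(void)
{
	unsigned int parent_opf = OP_WRITE | REQ_SYNC | REQ_RAHEAD;
	unsigned int op = parent_opf & 0xff;  /* bio_op() analogue */

	unsigned int child_opf =
		op | (parent_opf & (REQ_SYNC | REQ_PREFLUSH | REQ_FUA));

	printf("parent 0x%x -> child 0x%x (RAHEAD filtered: %s)\n",
	       parent_opf, child_opf,
	       (child_opf & REQ_RAHEAD) ? "no" : "yes");
	return 0;
}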
@@ -860,7 +860,7 @@ static int raid10_congested(struct mddev *mddev, int bits)
 		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct request_queue *q = bdev_get_queue(rdev->bdev);
 
-			ret |= bdi_congested(&q->backing_dev_info, bits);
+			ret |= bdi_congested(q->backing_dev_info, bits);
 		}
 	}
 	rcu_read_unlock();

@@ -3841,8 +3841,8 @@ static int raid10_run(struct mddev *mddev)
 		 * maybe...
 		 */
 		stripe /= conf->geo.near_copies;
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 
 	if (md_integrity_register(mddev))

@@ -4643,8 +4643,8 @@ static void end_reshape(struct r10conf *conf)
 		int stripe = conf->geo.raid_disks *
			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
 		stripe /= conf->geo.near_copies;
-		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 	}
 	conf->fullsync = 0;
 }
@@ -6331,10 +6331,10 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 		mddev_suspend(mddev);
 		conf->skip_copy = new;
 		if (new)
-			mddev->queue->backing_dev_info.capabilities |=
+			mddev->queue->backing_dev_info->capabilities |=
 				BDI_CAP_STABLE_WRITES;
 		else
-			mddev->queue->backing_dev_info.capabilities &=
+			mddev->queue->backing_dev_info->capabilities &=
 				~BDI_CAP_STABLE_WRITES;
 		mddev_resume(mddev);
 	}

@@ -7153,8 +7153,8 @@ static int raid5_run(struct mddev *mddev)
 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
 		int stripe = data_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);

@@ -7763,8 +7763,8 @@ static void end_reshape(struct r5conf *conf)
 			int data_disks = conf->raid_disks - conf->max_degraded;
 			int stripe = data_disks * ((conf->chunk_sectors << 9)
						   / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
+			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
+				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 		}
 	}
 }
@@ -2000,16 +2000,6 @@ static int msb_bd_getgeo(struct block_device *bdev,
 	return 0;
 }
 
-static int msb_prepare_req(struct request_queue *q, struct request *req)
-{
-	if (req->cmd_type != REQ_TYPE_FS) {
-		blk_dump_rq_flags(req, "MS unsupported request");
-		return BLKPREP_KILL;
-	}
-	req->rq_flags |= RQF_DONTPREP;
-	return BLKPREP_OK;
-}
-
 static void msb_submit_req(struct request_queue *q)
 {
 	struct memstick_dev *card = q->queuedata;

@@ -2132,7 +2122,6 @@ static int msb_init_disk(struct memstick_dev *card)
 	}
 
 	msb->queue->queuedata = card;
-	blk_queue_prep_rq(msb->queue, msb_prepare_req);
 
 	blk_queue_bounce_limit(msb->queue, limit);
 	blk_queue_max_hw_sectors(msb->queue, MS_BLOCK_MAX_PAGES);
@@ -827,18 +827,6 @@ static void mspro_block_start(struct memstick_dev *card)
 	spin_unlock_irqrestore(&msb->q_lock, flags);
 }
 
-static int mspro_block_prepare_req(struct request_queue *q, struct request *req)
-{
-	if (req->cmd_type != REQ_TYPE_FS) {
-		blk_dump_rq_flags(req, "MSPro unsupported request");
-		return BLKPREP_KILL;
-	}
-
-	req->rq_flags |= RQF_DONTPREP;
-
-	return BLKPREP_OK;
-}
-
 static void mspro_block_submit_req(struct request_queue *q)
 {
 	struct memstick_dev *card = q->queuedata;

@@ -1228,7 +1216,6 @@ static int mspro_block_init_disk(struct memstick_dev *card)
 	}
 
 	msb->queue->queuedata = card;
-	blk_queue_prep_rq(msb->queue, mspro_block_prepare_req);
 
 	blk_queue_bounce_limit(msb->queue, limit);
 	blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES);
@@ -2320,10 +2320,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 		SmpPassthroughReply_t *smprep;
 
 		smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
-		memcpy(req->sense, smprep, sizeof(*smprep));
-		req->sense_len = sizeof(*smprep);
-		req->resid_len = 0;
-		rsp->resid_len -= smprep->ResponseDataLength;
+		memcpy(scsi_req(req)->sense, smprep, sizeof(*smprep));
+		scsi_req(req)->sense_len = sizeof(*smprep);
+		scsi_req(req)->resid_len = 0;
+		scsi_req(rsp)->resid_len -= smprep->ResponseDataLength;
 	} else {
 		printk(MYIOC_s_ERR_FMT
 		       "%s: smp passthru reply failed to be returned\n",
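scsi_req() here is the new accessor for SCSI-private request fields, which move out of struct request into a payload allocated alongside it (this is the machinery behind the CONFIG_BLK_SCSI_REQUEST option). A mock-typed sketch of the accessor pattern; the rq + 1 arithmetic mirrors the usual PDU layout but should be read as shape, not as the kernel's exact contract.

#include <stdio.h>
#include <stdlib.h>

struct mock_scsi_request { int sense_len; int resid_len; };
struct mock_request { int tag; /* mock SCSI payload follows */ };

static struct mock_scsi_request *scsi_req(struct mock_request *rq)
{
	return (struct mock_scsi_request *)(rq + 1);
}

int main(void)
{
	struct mock_request *rq =
		calloc(1, sizeof(*rq) + sizeof(struct mock_scsi_request));
	if (!rq)
		return 1;

	scsi_req(rq)->sense_len = 96;  /* was rq->sense_len before the change */
	printf("sense_len via accessor: %d\n", scsi_req(rq)->sense_len);
	free(rq);
	return 0;
}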
@@ -30,15 +30,6 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue *mq = q->queuedata;
 
-	/*
-	 * We only like normal block requests and discards.
-	 */
-	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
-	    req_op(req) != REQ_OP_SECURE_ERASE) {
-		blk_dump_rq_flags(req, "MMC bad request");
-		return BLKPREP_KILL;
-	}
-
 	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
 		return BLKPREP_KILL;
@@ -84,9 +84,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
 	buf = bio_data(req->bio);
 
-	if (req->cmd_type != REQ_TYPE_FS)
-		return -EIO;
-
 	if (req_op(req) == REQ_OP_FLUSH)
 		return tr->flush(dev);
 

@@ -94,16 +91,16 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 	    get_capacity(req->rq_disk))
 		return -EIO;
 
-	if (req_op(req) == REQ_OP_DISCARD)
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
 		return tr->discard(dev, block, nsect);
-
-	if (rq_data_dir(req) == READ) {
+	case REQ_OP_READ:
 		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
 			if (tr->readsect(dev, block, buf))
 				return -EIO;
 		rq_flush_dcache_pages(req);
 		return 0;
-	} else {
+	case REQ_OP_WRITE:
 		if (!tr->writesect)
 			return -EIO;
 

@@ -112,6 +109,8 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
 		if (tr->writesect(dev, block, buf))
 			return -EIO;
 		return 0;
+	default:
+		return -EIO;
 	}
 }
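The converted do_blktrans_request() dispatches on req_op() alone instead of mixing cmd_type and data-direction tests. A standalone rendering of that switch, with local stand-ins for the REQ_OP_* values and integer return codes in place of the driver calls:

#include <stdio.h>

enum op { OP_READ, OP_WRITE, OP_DISCARD, OP_FLUSH, OP_OTHER };

static int handle(enum op op)
{
	switch (op) {
	case OP_DISCARD:
		return 1;   /* tr->discard(...) */
	case OP_READ:
		return 2;   /* readsect loop */
	case OP_WRITE:
		return 3;   /* writesect loop */
	case OP_FLUSH:
		return 4;   /* tr->flush(...) */
	default:
		return -5;  /* -EIO for anything unrecognized */
	}
}

int main(void)
{
	printf("%d %d %d\n", handle(OP_DISCARD), handle(OP_WRITE), handle(OP_OTHER));
	return 0;
}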