Merge branch 'blk-end-request' of git://git.kernel.dk/linux-2.6-block
* 'blk-end-request' of git://git.kernel.dk/linux-2.6-block: (30 commits)
  blk_end_request: changing xsysace (take 4)
  blk_end_request: changing ub (take 4)
  blk_end_request: cleanup of request completion (take 4)
  blk_end_request: cleanup 'uptodate' related code (take 4)
  blk_end_request: remove/unexport end_that_request_* (take 4)
  blk_end_request: changing scsi (take 4)
  blk_end_request: add bidi completion interface (take 4)
  blk_end_request: changing ide-cd (take 4)
  blk_end_request: add callback feature (take 4)
  blk_end_request: changing ide normal caller (take 4)
  blk_end_request: changing cpqarray (take 4)
  blk_end_request: changing cciss (take 4)
  blk_end_request: changing ide-scsi (take 4)
  blk_end_request: changing s390 (take 4)
  blk_end_request: changing mmc (take 4)
  blk_end_request: changing i2o_block (take 4)
  blk_end_request: changing viocd (take 4)
  blk_end_request: changing xen-blkfront (take 4)
  blk_end_request: changing viodasd (take 4)
  blk_end_request: changing sx8 (take 4)
  ...
commit f0f0052069

26 changed files with 419 additions and 377 deletions
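The conversion is the same across all 26 files: drivers stop calling end_that_request_first()/end_that_request_chunk() and end_that_request_last() separately and instead call blk_end_request() (or __blk_end_request() when the queue lock is already held), passing 0 or a negative errno instead of an 'uptodate' flag, and a size in bytes instead of sectors. A minimal before/after sketch of a hypothetical driver completion path follows; the driver name is made up, but the block-layer calls are the ones this merge adds and removes.

/*
 * Illustrative sketch only (not part of this merge): a hypothetical
 * driver's request completion, before and after the conversion.
 */

/* Before: two-step completion, queue lock taken for the final step. */
static void foo_end_request_old(struct request *req, int uptodate, int nr_bytes)
{
	if (!end_that_request_chunk(req, uptodate, nr_bytes)) {
		unsigned long flags;

		add_disk_randomness(req->rq_disk);
		spin_lock_irqsave(req->q->queue_lock, flags);
		blkdev_dequeue_request(req);
		end_that_request_last(req, uptodate);
		spin_unlock_irqrestore(req->q->queue_lock, flags);
	}
}

/* After: a single call; error is 0 or a negative errno, size is in bytes. */
static void foo_end_request_new(struct request *req, int error, int nr_bytes)
{
	if (blk_end_request(req, error, nr_bytes))
		return;		/* request still has pending buffers */
	/* request fully completed; the block layer dequeued and freed it */
}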
@@ -116,8 +116,8 @@ static void mbox_tx_work(struct work_struct *work)
}
spin_lock(q->queue_lock);
blkdev_dequeue_request(rq);
end_that_request_last(rq, 0);
if (__blk_end_request(rq, 0, 0))
BUG();
spin_unlock(q->queue_lock);
}
}

@@ -149,10 +149,8 @@ static void mbox_rx_work(struct work_struct *work)
msg = (mbox_msg_t) rq->data;
spin_lock_irqsave(q->queue_lock, flags);
blkdev_dequeue_request(rq);
end_that_request_last(rq, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
if (blk_end_request(rq, 0, 0))
BUG();
mbox->rxq->callback((void *)msg);
}

@@ -263,10 +261,8 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
*p = (mbox_msg_t) rq->data;
spin_lock_irqsave(q->queue_lock, flags);
blkdev_dequeue_request(rq);
end_that_request_last(rq, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
if (blk_end_request(rq, 0, 0))
BUG();
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
@@ -475,17 +475,9 @@ static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
int thread_fd = -1;
static void ubd_end_request(struct request *req, int bytes, int uptodate)
static void ubd_end_request(struct request *req, int bytes, int error)
{
if (!end_that_request_first(req, uptodate, bytes >> 9)) {
struct ubd *dev = req->rq_disk->private_data;
unsigned long flags;
add_disk_randomness(req->rq_disk);
spin_lock_irqsave(&dev->lock, flags);
end_that_request_last(req, uptodate);
spin_unlock_irqrestore(&dev->lock, flags);
}
blk_end_request(req, error, bytes);
}
/* Callable only from interrupt context - otherwise you need to do

@@ -493,10 +485,10 @@ static void ubd_end_request(struct request *req, int bytes, int uptodate)
static inline void ubd_finish(struct request *req, int bytes)
{
if(bytes < 0){
ubd_end_request(req, 0, 0);
ubd_end_request(req, 0, -EIO);
return;
}
ubd_end_request(req, bytes, 1);
ubd_end_request(req, bytes, 0);
}
static LIST_HEAD(restart);
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
{
struct request *rq;
int uptodate;
if (error && !q->orderr)
q->orderr = error;

@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
/*
* Okay, sequence complete.
*/
uptodate = 1;
if (q->orderr)
uptodate = q->orderr;
q->ordseq = 0;
rq = q->orig_bar_rq;
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
BUG();
}
static void pre_flush_end_io(struct request *rq, int error)

@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
* ORDERED_NONE while this request is on it.
*/
blkdev_dequeue_request(rq);
end_that_request_first(rq, -EOPNOTSUPP,
rq->hard_nr_sectors);
end_that_request_last(rq, -EOPNOTSUPP);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
*rqp = NULL;
return 0;
}
@@ -3437,21 +3432,28 @@ static void blk_recalc_rq_sectors(struct request *rq, int nsect)
}
}
static int __end_that_request_first(struct request *req, int uptodate,
/**
* __end_that_request_first - end I/O on a request
* @req: the request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
* Ends I/O on a number of bytes attached to @req, and sets it up
* for the next range of segments (if any) in the cluster.
*
* Return:
* 0 - we are done with this request, call end_that_request_last()
* 1 - still buffers pending for this request
**/
static int __end_that_request_first(struct request *req, int error,
int nr_bytes)
{
int total_bytes, bio_nbytes, error, next_idx = 0;
int total_bytes, bio_nbytes, next_idx = 0;
struct bio *bio;
blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
/*
* extend uptodate bool to allow < 0 value to be direct io error
*/
error = 0;
if (end_io_error(uptodate))
error = !uptodate ? -EIO : uptodate;
/*
* for a REQ_BLOCK_PC request, we want to carry any eventual
* sense key with us all the way through

@@ -3459,7 +3461,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (!blk_pc_request(req))
req->errors = 0;
if (!uptodate) {
if (error) {
if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
printk("end_request: I/O error, dev %s, sector %llu\n",
req->rq_disk ? req->rq_disk->disk_name : "?",

@@ -3553,49 +3555,6 @@ static int __end_that_request_first(struct request *req, int uptodate,
return 1;
}
/**
* end_that_request_first - end I/O on a request
* @req: the request being processed
* @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
* @nr_sectors: number of sectors to end I/O on
*
* Description:
* Ends I/O on a number of sectors attached to @req, and sets it up
* for the next range of segments (if any) in the cluster.
*
* Return:
* 0 - we are done with this request, call end_that_request_last()
* 1 - still buffers pending for this request
**/
int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
{
return __end_that_request_first(req, uptodate, nr_sectors << 9);
}
EXPORT_SYMBOL(end_that_request_first);
/**
* end_that_request_chunk - end I/O on a request
* @req: the request being processed
* @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
* @nr_bytes: number of bytes to complete
*
* Description:
* Ends I/O on a number of bytes attached to @req, and sets it up
* for the next range of segments (if any). Like end_that_request_first(),
* but deals with bytes instead of sectors.
*
* Return:
* 0 - we are done with this request, call end_that_request_last()
* 1 - still buffers pending for this request
**/
int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
{
return __end_that_request_first(req, uptodate, nr_bytes);
}
EXPORT_SYMBOL(end_that_request_chunk);
/*
* splice the completion data to a local structure and hand off to
* process_completion_queue() to complete the requests
@@ -3675,17 +3634,15 @@ EXPORT_SYMBOL(blk_complete_request);
/*
* queue lock must be held
*/
void end_that_request_last(struct request *req, int uptodate)
static void end_that_request_last(struct request *req, int error)
{
struct gendisk *disk = req->rq_disk;
int error;
/*
* extend uptodate bool to allow < 0 value to be direct io error
*/
error = 0;
if (end_io_error(uptodate))
error = !uptodate ? -EIO : uptodate;
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
if (blk_queued_rq(req))
blkdev_dequeue_request(req);
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();

@@ -3704,32 +3661,54 @@ void end_that_request_last(struct request *req, int uptodate)
disk_round_stats(disk);
disk->in_flight--;
}
if (req->end_io)
req->end_io(req, error);
else
else {
if (blk_bidi_rq(req))
__blk_put_request(req->next_rq->q, req->next_rq);
__blk_put_request(req->q, req);
}
EXPORT_SYMBOL(end_that_request_last);
static inline void __end_request(struct request *rq, int uptodate,
unsigned int nr_bytes, int dequeue)
{
if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
if (dequeue)
blkdev_dequeue_request(rq);
add_disk_randomness(rq->rq_disk);
end_that_request_last(rq, uptodate);
}
}
static unsigned int rq_byte_size(struct request *rq)
static inline void __end_request(struct request *rq, int uptodate,
unsigned int nr_bytes)
{
int error = 0;
if (uptodate <= 0)
error = uptodate ? uptodate : -EIO;
__blk_end_request(rq, error, nr_bytes);
}
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
**/
unsigned int blk_rq_bytes(struct request *rq)
{
if (blk_fs_request(rq))
return rq->hard_nr_sectors << 9;
return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
if (blk_fs_request(rq))
return rq->current_nr_sectors << 9;
if (rq->bio)
return rq->bio->bi_size;
return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
/**
* end_queued_request - end all I/O on a queued request

@@ -3744,7 +3723,7 @@ static unsigned int rq_byte_size(struct request *rq)
**/
void end_queued_request(struct request *rq, int uptodate)
{
__end_request(rq, uptodate, rq_byte_size(rq), 1);
__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_queued_request);

@@ -3761,7 +3740,7 @@ EXPORT_SYMBOL(end_queued_request);
**/
void end_dequeued_request(struct request *rq, int uptodate)
{
__end_request(rq, uptodate, rq_byte_size(rq), 0);
__end_request(rq, uptodate, blk_rq_bytes(rq));
}
EXPORT_SYMBOL(end_dequeued_request);
@@ -3787,10 +3766,159 @@ EXPORT_SYMBOL(end_dequeued_request);
**/
void end_request(struct request *req, int uptodate)
{
__end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
__end_request(req, uptodate, req->hard_cur_sectors << 9);
}
EXPORT_SYMBOL(end_request);
/**
* blk_end_io - Generic end_io function to complete a request.
* @rq: the request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
* @drv_callback: function called between completion of bios in the request
* and completion of the request.
* If the callback returns non 0, this helper returns without
* completion of the request.
*
* Description:
* Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
* 0 - we are done with this request
* 1 - this request is not freed yet, it still has pending buffers.
**/
static int blk_end_io(struct request *rq, int error, int nr_bytes,
int bidi_bytes, int (drv_callback)(struct request *))
{
struct request_queue *q = rq->q;
unsigned long flags = 0UL;
if (blk_fs_request(rq) || blk_pc_request(rq)) {
if (__end_that_request_first(rq, error, nr_bytes))
return 1;
/* Bidi request must be completed as a whole */
if (blk_bidi_rq(rq) &&
__end_that_request_first(rq->next_rq, error, bidi_bytes))
return 1;
}
/* Special feature for tricky drivers */
if (drv_callback && drv_callback(rq))
return 1;
add_disk_randomness(rq->rq_disk);
spin_lock_irqsave(q->queue_lock, flags);
end_that_request_last(rq, error);
spin_unlock_irqrestore(q->queue_lock, flags);
return 0;
}
/**
* blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
* Ends I/O on a number of bytes attached to @rq.
* If @rq has leftover, sets it up for the next range of segments.
*
* Return:
* 0 - we are done with this request
* 1 - still buffers pending for this request
**/
int blk_end_request(struct request *rq, int error, int nr_bytes)
{
return blk_end_io(rq, error, nr_bytes, 0, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_request);
/**
* __blk_end_request - Helper function for drivers to complete the request.
* @rq: the request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete
*
* Description:
* Must be called with queue lock held unlike blk_end_request().
*
* Return:
* 0 - we are done with this request
* 1 - still buffers pending for this request
**/
int __blk_end_request(struct request *rq, int error, int nr_bytes)
{
if (blk_fs_request(rq) || blk_pc_request(rq)) {
if (__end_that_request_first(rq, error, nr_bytes))
return 1;
}
add_disk_randomness(rq->rq_disk);
end_that_request_last(rq, error);
return 0;
}
EXPORT_SYMBOL_GPL(__blk_end_request);
/**
* blk_end_bidi_request - Helper function for drivers to complete bidi request.
* @rq: the bidi request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete @rq
* @bidi_bytes: number of bytes to complete @rq->next_rq
*
* Description:
* Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
*
* Return:
* 0 - we are done with this request
* 1 - still buffers pending for this request
**/
int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
int bidi_bytes)
{
return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL);
}
EXPORT_SYMBOL_GPL(blk_end_bidi_request);
/**
* blk_end_request_callback - Special helper function for tricky drivers
* @rq: the request being processed
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete
* @drv_callback: function called between completion of bios in the request
* and completion of the request.
* If the callback returns non 0, this helper returns without
* completion of the request.
*
* Description:
* Ends I/O on a number of bytes attached to @rq.
* If @rq has leftover, sets it up for the next range of segments.
*
* This special helper function is used only for existing tricky drivers.
* (e.g. cdrom_newpc_intr() of ide-cd)
* This interface will be removed when such drivers are rewritten.
* Don't use this interface in other places anymore.
*
* Return:
* 0 - we are done with this request
* 1 - this request is not freed yet.
* this request still has pending buffers or
* the driver doesn't want to finish this request yet.
**/
int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
int (drv_callback)(struct request *))
{
return blk_end_io(rq, error, nr_bytes, 0, drv_callback);
}
EXPORT_SYMBOL_GPL(blk_end_request_callback);
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
@@ -3455,19 +3455,12 @@ static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
bool SuccessfulIO)
{
struct request *Request = Command->Request;
int UpToDate;
UpToDate = 0;
if (SuccessfulIO)
UpToDate = 1;
int Error = SuccessfulIO ? 0 : -EIO;
pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
Command->SegmentCount, Command->DmaDirection);
if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
add_disk_randomness(Request->rq_disk);
end_that_request_last(Request, UpToDate);
if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
if (Command->Completion) {
complete(Command->Completion);
Command->Completion = NULL;
@@ -1187,17 +1187,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
}
}
static inline void complete_buffers(struct bio *bio, int status)
{
while (bio) {
struct bio *xbh = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, status ? 0 : -EIO);
bio = xbh;
}
}
static void cciss_check_queues(ctlr_info_t *h)
{
int start_queue = h->next_to_run;

@@ -1263,21 +1252,14 @@ static void cciss_softirq_done(struct request *rq)
pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
}
complete_buffers(rq->bio, (rq->errors == 0));
if (blk_fs_request(rq)) {
const int rw = rq_data_dir(rq);
disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
}
#ifdef CCISS_DEBUG
printk("Done with %p\n", rq);
#endif /* CCISS_DEBUG */
add_disk_randomness(rq->rq_disk);
if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
BUG();
spin_lock_irqsave(&h->lock, flags);
end_that_request_last(rq, (rq->errors == 0));
cmd_free(h, cmd, 1);
cciss_check_queues(h);
spin_unlock_irqrestore(&h->lock, flags);

@@ -2544,7 +2526,6 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
}
cmd->rq->data_len = 0;
cmd->rq->completion_data = cmd;
blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
blk_complete_request(cmd->rq);
}
@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
static inline void complete_buffers(struct bio *bio, int ok);
static inline void complete_command(cmdlist_t *cmd, int timeout);
static irqreturn_t do_ida_intr(int irq, void *dev_id);

@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
}
}
static inline void complete_buffers(struct bio *bio, int ok)
{
struct bio *xbh;
while (bio) {
xbh = bio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, ok ? 0 : -EIO);
bio = xbh;
}
}
/*
* Mark all buffers that cmd was responsible for
*/
static inline void complete_command(cmdlist_t *cmd, int timeout)
{
struct request *rq = cmd->rq;
int ok=1;
int error = 0;
int i, ddir;
if (cmd->req.hdr.rcode & RCODE_NONFATAL &&

@@ -1011,16 +997,17 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
if (cmd->req.hdr.rcode & RCODE_FATAL) {
printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
cmd->ctlr, cmd->hdr.unit);
ok = 0;
error = -EIO;
}
if (cmd->req.hdr.rcode & RCODE_INVREQ) {
printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
ok = 0;
error = -EIO;
}
if (timeout) ok = 0;
if (timeout)
error = -EIO;
/* unmap the DMA mapping for all the scatter gather elements */
if (cmd->req.hdr.cmd == IDA_READ)
ddir = PCI_DMA_FROMDEVICE;

@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
cmd->req.sg[i].size, ddir);
complete_buffers(rq->bio, ok);
if (blk_fs_request(rq)) {
const int rw = rq_data_dir(rq);
disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
}
add_disk_randomness(rq->rq_disk);
DBGPX(printk("Done with %p\n", rq););
end_that_request_last(rq, ok ? 1 : -EIO);
if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
BUG();
}
/*
@@ -2287,21 +2287,19 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
* =============================
*/
static void floppy_end_request(struct request *req, int uptodate)
static void floppy_end_request(struct request *req, int error)
{
unsigned int nr_sectors = current_count_sectors;
unsigned int drive = (unsigned long)req->rq_disk->private_data;
/* current_count_sectors can be zero if transfer failed */
if (!uptodate)
if (error)
nr_sectors = req->current_nr_sectors;
if (end_that_request_first(req, uptodate, nr_sectors))
if (__blk_end_request(req, error, nr_sectors << 9))
return;
add_disk_randomness(req->rq_disk);
floppy_off((long)req->rq_disk->private_data);
blkdev_dequeue_request(req);
end_that_request_last(req, uptodate);
/* We're done with the request */
floppy_off(drive);
current_req = NULL;
}

@@ -2332,7 +2330,7 @@ static void request_done(int uptodate)
/* unlock chained buffers */
spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, 1);
floppy_end_request(req, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
} else {
if (rq_data_dir(req) == WRITE) {

@@ -2346,7 +2344,7 @@ static void request_done(int uptodate)
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, 0);
floppy_end_request(req, -EIO);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
@@ -100,17 +100,15 @@ static const char *nbdcmd_to_ascii(int cmd)
static void nbd_end_request(struct request *req)
{
int uptodate = (req->errors == 0) ? 1 : 0;
int error = req->errors ? -EIO : 0;
struct request_queue *q = req->q;
unsigned long flags;
dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
req, uptodate? "done": "failed");
req, error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
end_that_request_last(req, uptodate);
}
__blk_end_request(req, error, req->nr_sectors << 9);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@@ -229,7 +229,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct ps3_storage_device *dev = data;
struct ps3disk_private *priv;
struct request *req;
int res, read, uptodate;
int res, read, error;
u64 tag, status;
unsigned long num_sectors;
const char *op;

@@ -270,21 +270,17 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (status) {
dev_dbg(&dev->sbd.core, "%s:%u: %s failed 0x%lx\n", __func__,
__LINE__, op, status);
uptodate = 0;
error = -EIO;
} else {
dev_dbg(&dev->sbd.core, "%s:%u: %s completed\n", __func__,
__LINE__, op);
uptodate = 1;
error = 0;
if (read)
ps3disk_scatter_gather(dev, req, 0);
}
spin_lock(&priv->lock);
if (!end_that_request_first(req, uptodate, num_sectors)) {
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, uptodate);
}
__blk_end_request(req, error, num_sectors << 9);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
@@ -212,12 +212,9 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
static void vdc_end_request(struct request *req, int uptodate, int num_sectors)
static void vdc_end_request(struct request *req, int error, int num_sectors)
{
if (end_that_request_first(req, uptodate, num_sectors))
return;
add_disk_randomness(req->rq_disk);
end_that_request_last(req, uptodate);
__blk_end_request(req, error, num_sectors << 9);
}
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,

@@ -242,7 +239,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
vdc_end_request(req, !desc->status, desc->size >> 9);
vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
if (blk_queue_stopped(port->disk->queue))
blk_start_queue(port->disk->queue);

@@ -456,7 +453,7 @@ static void do_vdc_request(struct request_queue *q)
blkdev_dequeue_request(req);
if (__send_request(req) < 0)
vdc_end_request(req, 0, req->hard_nr_sectors);
vdc_end_request(req, -EIO, req->hard_nr_sectors);
}
}
@@ -744,16 +744,14 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
static inline void carm_end_request_queued(struct carm_host *host,
struct carm_request *crq,
int uptodate)
int error)
{
struct request *req = crq->rq;
int rc;
rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
rc = __blk_end_request(req, error, blk_rq_bytes(req));
assert(rc == 0);
end_that_request_last(req, uptodate);
rc = carm_put_request(host, crq);
assert(rc == 0);
}

@@ -793,9 +791,9 @@ static inline void carm_round_robin(struct carm_host *host)
}
static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
int is_ok)
int error)
{
carm_end_request_queued(host, crq, is_ok);
carm_end_request_queued(host, crq, error);
if (max_queue == 1)
carm_round_robin(host);
else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&

@@ -873,14 +871,14 @@ static void carm_rq_fn(struct request_queue *q)
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
carm_end_rq(host, crq, 0);
carm_end_rq(host, crq, -EIO);
return; /* request with no s/g entries? */
}
/* map scatterlist to PCI bus addresses */
n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
if (n_elem <= 0) {
carm_end_rq(host, crq, 0);
carm_end_rq(host, crq, -EIO);
return; /* request with no s/g entries? */
}
crq->n_elem = n_elem;

@@ -941,7 +939,7 @@ static void carm_rq_fn(struct request_queue *q)
static void carm_handle_array_info(struct carm_host *host,
struct carm_request *crq, u8 *mem,
int is_ok)
int error)
{
struct carm_port *port;
u8 *msg_data = mem + sizeof(struct carm_array_info);

@@ -952,9 +950,9 @@ static void carm_handle_array_info(struct carm_host *host,
DPRINTK("ENTER\n");
carm_end_rq(host, crq, is_ok);
carm_end_rq(host, crq, error);
if (!is_ok)
if (error)
goto out;
if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
goto out;

@@ -1001,7 +999,7 @@ static void carm_handle_array_info(struct carm_host *host,
static void carm_handle_scan_chan(struct carm_host *host,
struct carm_request *crq, u8 *mem,
int is_ok)
int error)
{
u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
unsigned int i, dev_count = 0;

@@ -1009,9 +1007,9 @@ static void carm_handle_scan_chan(struct carm_host *host,
DPRINTK("ENTER\n");
carm_end_rq(host, crq, is_ok);
carm_end_rq(host, crq, error);
if (!is_ok) {
if (error) {
new_state = HST_ERROR;
goto out;
}
@@ -1033,23 +1031,23 @@ static void carm_handle_scan_chan(struct carm_host *host,
}
static void carm_handle_generic(struct carm_host *host,
struct carm_request *crq, int is_ok,
struct carm_request *crq, int error,
int cur_state, int next_state)
{
DPRINTK("ENTER\n");
carm_end_rq(host, crq, is_ok);
carm_end_rq(host, crq, error);
assert(host->state == cur_state);
if (is_ok)
host->state = next_state;
else
if (error)
host->state = HST_ERROR;
else
host->state = next_state;
schedule_work(&host->fsm_task);
}
static inline void carm_handle_rw(struct carm_host *host,
struct carm_request *crq, int is_ok)
struct carm_request *crq, int error)
{
int pci_dir;

@@ -1062,7 +1060,7 @@ static inline void carm_handle_rw(struct carm_host *host,
pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
carm_end_rq(host, crq, is_ok);
carm_end_rq(host, crq, error);
}
static inline void carm_handle_resp(struct carm_host *host,

@@ -1071,7 +1069,7 @@ static inline void carm_handle_resp(struct carm_host *host,
u32 handle = le32_to_cpu(ret_handle_le);
unsigned int msg_idx;
struct carm_request *crq;
int is_ok = (status == RMSG_OK);
int error = (status == RMSG_OK) ? 0 : -EIO;
u8 *mem;
VPRINTK("ENTER, handle == 0x%x\n", handle);

@@ -1090,7 +1088,7 @@ static inline void carm_handle_resp(struct carm_host *host,
/* fast path */
if (likely(crq->msg_type == CARM_MSG_READ ||
crq->msg_type == CARM_MSG_WRITE)) {
carm_handle_rw(host, crq, is_ok);
carm_handle_rw(host, crq, error);
return;
}

@@ -1100,7 +1098,7 @@ static inline void carm_handle_resp(struct carm_host *host,
case CARM_MSG_IOCTL: {
switch (crq->msg_subtype) {
case CARM_IOC_SCAN_CHAN:
carm_handle_scan_chan(host, crq, mem, is_ok);
carm_handle_scan_chan(host, crq, mem, error);
break;
default:
/* unknown / invalid response */

@@ -1112,21 +1110,21 @@ static inline void carm_handle_resp(struct carm_host *host,
case CARM_MSG_MISC: {
switch (crq->msg_subtype) {
case MISC_ALLOC_MEM:
carm_handle_generic(host, crq, is_ok,
carm_handle_generic(host, crq, error,
HST_ALLOC_BUF, HST_SYNC_TIME);
break;
case MISC_SET_TIME:
carm_handle_generic(host, crq, is_ok,
carm_handle_generic(host, crq, error,
HST_SYNC_TIME, HST_GET_FW_VER);
break;
case MISC_GET_FW_VER: {
struct carm_fw_ver *ver = (struct carm_fw_ver *)
mem + sizeof(struct carm_msg_get_fw_ver);
if (is_ok) {
if (!error) {
host->fw_ver = le32_to_cpu(ver->version);
host->flags |= (ver->features & FL_FW_VER_MASK);
}
carm_handle_generic(host, crq, is_ok,
carm_handle_generic(host, crq, error,
HST_GET_FW_VER, HST_PORT_SCAN);
break;
}

@@ -1140,7 +1138,7 @@ static inline void carm_handle_resp(struct carm_host *host,
case CARM_MSG_ARRAY: {
switch (crq->msg_subtype) {
case CARM_ARRAY_INFO:
carm_handle_array_info(host, crq, mem, is_ok);
carm_handle_array_info(host, crq, mem, error);
break;
default:
/* unknown / invalid response */

@@ -1159,7 +1157,7 @@ static inline void carm_handle_resp(struct carm_host *host,
err_out:
printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
carm_end_rq(host, crq, 0);
carm_end_rq(host, crq, -EIO);
}
static inline void carm_handle_responses(struct carm_host *host)
@@ -808,16 +808,16 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int uptodate;
int error;
if (scsi_status == 0) {
uptodate = 1;
error = 0;
} else {
uptodate = 0;
error = -EIO;
rq->errors = scsi_status;
}
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
BUG();
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@@ -229,13 +229,10 @@ static struct block_device_operations viodasd_fops = {
/*
* End a request
*/
static void viodasd_end_request(struct request *req, int uptodate,
static void viodasd_end_request(struct request *req, int error,
int num_sectors)
{
if (end_that_request_first(req, uptodate, num_sectors))
return;
add_disk_randomness(req->rq_disk);
end_that_request_last(req, uptodate);
__blk_end_request(req, error, num_sectors << 9);
}
/*

@@ -374,12 +371,12 @@ static void do_viodasd_request(struct request_queue *q)
blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
viodasd_end_request(req, 0, req->hard_nr_sectors);
viodasd_end_request(req, -EIO, req->hard_nr_sectors);
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
viodasd_end_request(req, 0, req->hard_nr_sectors);
viodasd_end_request(req, -EIO, req->hard_nr_sectors);
}
}

@@ -591,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
num_req_outstanding--;
spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
error = event->xRc != HvLpEvent_Rc_Good;
error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
if (error) {
const struct vio_error_entry *err;
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);

@@ -601,7 +598,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);
viodasd_end_request(req, !error, num_sect);
viodasd_end_request(req, error, num_sect);
spin_unlock_irqrestore(qlock, irq_flags);
/* Finally, try to get more requests off of this device's queue */
@@ -452,7 +452,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int uptodate;
int error;
spin_lock_irqsave(&blkif_io_lock, flags);

@@ -477,13 +477,13 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
add_id_to_freelist(info, id);
uptodate = (bret->status == BLKIF_RSP_OKAY);
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
info->gd->disk_name);
uptodate = -EOPNOTSUPP;
error = -EOPNOTSUPP;
info->feature_barrier = 0;
xlvbd_barrier(info);
}

@@ -494,10 +494,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
ret = end_that_request_first(req, uptodate,
req->hard_nr_sectors);
ret = __blk_end_request(req, error, blk_rq_bytes(req));
BUG_ON(ret);
end_that_request_last(req, uptodate);
break;
default:
BUG();
@@ -703,7 +703,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
/* bio finished; is there another one? */
i = ace->req->current_nr_sectors;
if (end_that_request_first(ace->req, 1, i)) {
if (__blk_end_request(ace->req, 0, i)) {
/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
* ace->req->hard_nr_sectors,
* ace->req->current_nr_sectors);

@@ -718,9 +718,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
break;
case ACE_FSM_STATE_REQ_COMPLETE:
/* Complete the block request */
blkdev_dequeue_request(ace->req);
end_that_request_last(ace->req, 1);
ace->req = NULL;
/* Finished request; go to idle state */
@@ -289,7 +289,7 @@ static int send_request(struct request *req)
return 0;
}
static void viocd_end_request(struct request *req, int uptodate)
static void viocd_end_request(struct request *req, int error)
{
int nsectors = req->hard_nr_sectors;

@@ -302,11 +302,8 @@ static void viocd_end_request(struct request *req, int uptodate)
if (!nsectors)
nsectors = 1;
if (end_that_request_first(req, uptodate, nsectors))
if (__blk_end_request(req, error, nsectors << 9))
BUG();
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, uptodate);
}
static int rwreq;

@@ -317,11 +314,11 @@ static void do_viocd_request(struct request_queue *q)
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
if (!blk_fs_request(req))
viocd_end_request(req, 0);
viocd_end_request(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
viocd_end_request(req, 0);
viocd_end_request(req, -EIO);
} else
rwreq++;
}

@@ -532,9 +529,9 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
viocd_end_request(req, 0);
viocd_end_request(req, -EIO);
} else
viocd_end_request(req, 1);
viocd_end_request(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
BUG();
} else {
spin_lock_irqsave(&ide_lock, flags);
end_that_request_chunk(failed, 0,
failed->data_len);
end_that_request_last(failed, 0);
if (__blk_end_request(failed, -EIO,
failed->data_len))
BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}
} else

@@ -1647,6 +1647,17 @@ static int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
return 1;
}
/*
* Called from blk_end_request_callback() after the data of the request
* is completed and before the request is completed.
* By returning value '1', blk_end_request_callback() returns immediately
* without completing the request.
*/
static int cdrom_newpc_intr_dummy_cb(struct request *rq)
{
return 1;
}
typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
/*

@@ -1685,9 +1696,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
return ide_error(drive, "dma error", stat);
}
end_that_request_chunk(rq, 1, rq->data_len);
rq->data_len = 0;
goto end_request;
spin_lock_irqsave(&ide_lock, flags);
if (__blk_end_request(rq, 0, rq->data_len))
BUG();
HWGROUP(drive)->rq = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
}
/*

@@ -1705,8 +1720,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
/*
* If DRQ is clear, the command has completed.
*/
if ((stat & DRQ_STAT) == 0)
goto end_request;
if ((stat & DRQ_STAT) == 0) {
spin_lock_irqsave(&ide_lock, flags);
if (__blk_end_request(rq, 0, 0))
BUG();
HWGROUP(drive)->rq = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
}
/*
* check which way to transfer data

@@ -1759,7 +1781,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
rq->data_len -= blen;
if (rq->bio)
end_that_request_chunk(rq, 1, blen);
/*
* The request can't be completed until DRQ is cleared.
* So complete the data, but don't complete the request
* using the dummy function for the callback feature
* of blk_end_request_callback().
*/
blk_end_request_callback(rq, 0, blen,
cdrom_newpc_intr_dummy_cb);
else
rq->data += blen;
}

@@ -1780,14 +1809,6 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
return ide_started;
end_request:
spin_lock_irqsave(&ide_lock, flags);
blkdev_dequeue_request(rq);
end_that_request_last(rq, 1);
HWGROUP(drive)->rq = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
return ide_stopped;
}
static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
int uptodate, unsigned int nr_bytes, int dequeue)
{
int ret = 1;
int error = 0;
if (uptodate <= 0)
error = uptodate ? uptodate : -EIO;
/*
* if failfast is set on a request, override number of sectors and
* complete the whole request right now
*/
if (blk_noretry_request(rq) && end_io_error(uptodate))
if (blk_noretry_request(rq) && error)
nr_bytes = rq->hard_nr_sectors << 9;
if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
if (!blk_fs_request(rq) && error && !rq->errors)
rq->errors = -EIO;
/*

@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
ide_dma_on(drive);
}
if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
add_disk_randomness(rq->rq_disk);
if (dequeue) {
if (!list_empty(&rq->queuelist))
blkdev_dequeue_request(rq);
if (!__blk_end_request(rq, error, nr_bytes)) {
if (dequeue)
HWGROUP(drive)->rq = NULL;
}
end_that_request_last(rq, uptodate);
ret = 0;
}

@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
drive->blocked = 0;
blk_start_queue(drive->queue);
}
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
end_that_request_last(rq, 1);
if (__blk_end_request(rq, 0, 0))
BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}

@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
}
spin_lock_irqsave(&ide_lock, flags);
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
rq->errors = err;
end_that_request_last(rq, !rq->errors);
if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
BUG();
spin_unlock_irqrestore(&ide_lock, flags);
}
@@ -412,13 +412,13 @@ static void i2o_block_delayed_request_fn(struct work_struct *work)
/**
* i2o_block_end_request - Post-processing of completed commands
* @req: request which should be completed
* @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
* @error: 0 for success, < 0 for error
* @nr_bytes: number of bytes to complete
*
* Mark the request as complete. The lock must not be held when entering.
*
*/
static void i2o_block_end_request(struct request *req, int uptodate,
static void i2o_block_end_request(struct request *req, int error,
int nr_bytes)
{
struct i2o_block_request *ireq = req->special;

@@ -426,22 +426,18 @@ static void i2o_block_end_request(struct request *req, int uptodate,
struct request_queue *q = req->q;
unsigned long flags;
if (end_that_request_chunk(req, uptodate, nr_bytes)) {
if (blk_end_request(req, error, nr_bytes)) {
int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
if (blk_pc_request(req))
leftover = req->data_len;
if (end_io_error(uptodate))
end_that_request_chunk(req, 0, leftover);
if (error)
blk_end_request(req, -EIO, leftover);
}
add_disk_randomness(req->rq_disk);
spin_lock_irqsave(q->queue_lock, flags);
end_that_request_last(req, uptodate);
if (likely(dev)) {
dev->open_queue_depth--;
list_del(&ireq->queue);

@@ -468,7 +464,7 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
struct i2o_message *msg)
{
struct request *req;
int uptodate = 1;
int error = 0;
req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
if (unlikely(!req)) {

@@ -501,10 +497,10 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
req->errors++;
uptodate = 0;
error = -EIO;
}
i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1]));
i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));
return 1;
};

@@ -348,15 +348,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
* A block was successfully transferred.
*/
spin_lock_irq(&md->lock);
ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
if (!ret) {
/*
* The whole request completed successfully.
*/
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, 1);
}
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
} while (ret);

@@ -386,27 +378,21 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
else
bytes = blocks << 9;
spin_lock_irq(&md->lock);
ret = end_that_request_chunk(req, 1, bytes);
ret = __blk_end_request(req, 0, bytes);
spin_unlock_irq(&md->lock);
}
} else if (rq_data_dir(req) != READ &&
(card->host->caps & MMC_CAP_MULTIWRITE)) {
spin_lock_irq(&md->lock);
ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
mmc_release_host(card->host);
spin_lock_irq(&md->lock);
while (ret) {
ret = end_that_request_chunk(req, 0,
req->current_nr_sectors << 9);
}
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req, 0);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
return 0;
@@ -94,8 +94,8 @@ static void mmc_request(struct request_queue *q)
printk(KERN_ERR "MMC: killing requests for dead queue\n");
while ((req = elv_next_request(q)) != NULL) {
do {
ret = end_that_request_chunk(req, 0,
req->current_nr_sectors << 9);
ret = __blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));
} while (ret);
}
return;
@@ -1595,12 +1595,10 @@ void dasd_block_clear_timer(struct dasd_block *block)
/*
* posts the buffer_cache about a finalized request
*/
static inline void dasd_end_request(struct request *req, int uptodate)
static inline void dasd_end_request(struct request *req, int error)
{
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
add_disk_randomness(req->rq_disk);
end_that_request_last(req, uptodate);
}
/*

@@ -1657,7 +1655,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"Rejecting write request %p",
req);
blkdev_dequeue_request(req);
dasd_end_request(req, 0);
dasd_end_request(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);

@@ -1686,7 +1684,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"on request %p",
PTR_ERR(cqr), req);
blkdev_dequeue_request(req);
dasd_end_request(req, 0);
dasd_end_request(req, -EIO);
continue;
}
/*

@@ -1705,11 +1703,14 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
int status;
int error = 0;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
status = cqr->memdev->discipline->free_cp(cqr, req);
dasd_end_request(req, status);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
}
/*

@@ -2009,7 +2010,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
spin_lock_irq(&block->request_queue_lock);
while ((req = elv_next_request(block->request_queue))) {
blkdev_dequeue_request(req);
dasd_end_request(req, 0);
dasd_end_request(req, -EIO);
}
spin_unlock_irq(&block->request_queue_lock);
}
@@ -74,11 +74,10 @@ tapeblock_trigger_requeue(struct tape_device *device)
* Post finished request.
*/
static void
tapeblock_end_request(struct request *req, int uptodate)
tapeblock_end_request(struct request *req, int error)
{
if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
end_that_request_last(req, uptodate);
}
static void

@@ -91,7 +90,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
device = ccw_req->device;
req = (struct request *) data;
tapeblock_end_request(req, ccw_req->rc == 0);
tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
if (ccw_req->rc == 0)
/* Update position. */
device->blk_data.block_position =

@@ -119,7 +118,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
ccw_req = device->discipline->bread(device, req);
if (IS_ERR(ccw_req)) {
DBF_EVENT(1, "TBLOCK: bread failed\n");
tapeblock_end_request(req, 0);
tapeblock_end_request(req, -EIO);
return PTR_ERR(ccw_req);
}
ccw_req->callback = __tapeblock_end_request;

@@ -132,7 +131,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
* Start/enqueueing failed. No retries in
* this case.
*/
tapeblock_end_request(req, 0);
tapeblock_end_request(req, -EIO);
device->discipline->free_bread(ccw_req);
}

@@ -177,7 +176,7 @@ tapeblock_requeue(struct work_struct *work) {
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
blkdev_dequeue_request(req);
tapeblock_end_request(req, 0);
tapeblock_end_request(req, -EIO);
continue;
}
spin_unlock_irq(&device->blk_data.request_queue_lock);
@@ -919,8 +919,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
}
/* kill current request */
blkdev_dequeue_request(req);
end_that_request_last(req, 0);
if (__blk_end_request(req, -EIO, 0))
BUG();
if (blk_sense_request(req))
kfree(scsi->pc->buffer);
kfree(scsi->pc);

@@ -929,8 +929,8 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
/* now nuke the drive queue */
while ((req = elv_next_request(drive->queue))) {
blkdev_dequeue_request(req);
end_that_request_last(req, 0);
if (__blk_end_request(req, -EIO, 0))
BUG();
}
HWGROUP(drive)->rq = NULL;
@@ -634,7 +634,7 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
* of upper level post-processing and scsi_io_completion).
*
* Arguments: cmd - command that is complete.
* uptodate - 1 if I/O indicates success, <= 0 for I/O error.
* error - 0 if I/O indicates success, < 0 for I/O error.
* bytes - number of bytes of completed I/O
* requeue - indicates whether we should requeue leftovers.
*

@@ -649,26 +649,25 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
* at some point during this call.
* Notes: If cmd was requeued, upon return it will be a stale pointer.
*/
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
int bytes, int requeue)
{
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
unsigned long flags;
/*
* If there are blocks left over at the end, set up the command
* to queue the remainder of them.
*/
if (end_that_request_chunk(req, uptodate, bytes)) {
if (blk_end_request(req, error, bytes)) {
int leftover = (req->hard_nr_sectors << 9);
if (blk_pc_request(req))
leftover = req->data_len;
/* kill remainder if no retrys */
if (!uptodate && blk_noretry_request(req))
end_that_request_chunk(req, 0, leftover);
if (error && blk_noretry_request(req))
blk_end_request(req, error, leftover);
else {
if (requeue) {
/*

@@ -683,14 +682,6 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
}
}
add_disk_randomness(req->rq_disk);
spin_lock_irqsave(q->queue_lock, flags);
if (blk_rq_tagged(req))
blk_queue_end_tag(q, req);
end_that_request_last(req, uptodate);
spin_unlock_irqrestore(q->queue_lock, flags);
/*
* This will goose the queue request function at the end, so we don't
* need to worry about launching another command.

@@ -892,7 +883,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* are leftovers and there is some kind of error
* (result != 0), retry the rest.
*/
if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
return;
/* good_bytes = 0, or (inclusive) there were leftovers and

@@ -906,7 +897,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
* and quietly refuse further access.
*/
cmd->device->changed = 1;
scsi_end_request(cmd, 0, this_count, 1);
scsi_end_request(cmd, -EIO, this_count, 1);
return;
} else {
/* Must have been a power glitch, or a

@@ -938,7 +929,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_requeue_command(q, cmd);
return;
} else {
scsi_end_request(cmd, 0, this_count, 1);
scsi_end_request(cmd, -EIO, this_count, 1);
return;
}
break;

@@ -966,7 +957,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
"Device not ready",
&sshdr);
scsi_end_request(cmd, 0, this_count, 1);
scsi_end_request(cmd, -EIO, this_count, 1);
return;
case VOLUME_OVERFLOW:
if (!(req->cmd_flags & REQ_QUIET)) {

@@ -976,7 +967,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense("", cmd);
}
/* See SSC3rXX or current. */
scsi_end_request(cmd, 0, this_count, 1);
scsi_end_request(cmd, -EIO, this_count, 1);
return;
default:
break;

@@ -997,7 +988,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_print_sense("", cmd);
}
}
scsi_end_request(cmd, 0, this_count, !result);
scsi_end_request(cmd, -EIO, this_count, !result);
}
/*
@@ -464,6 +464,8 @@ enum {
#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)

@@ -643,29 +645,32 @@ static inline void blk_run_address_space(struct address_space *mapping)
}
/*
* end_request() and friends. Must be called with the request queue spinlock
* acquired. All functions called within end_request() _must_be_ atomic.
* blk_end_request() and friends.
* __blk_end_request() and end_request() must be called with
* the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* end_that_request_first() and end_that_request_last()
* for parts of the original function. This prevents
* code duplication in drivers.
* blk_end_request() for parts of the original function.
* This prevents code duplication in drivers.
*/
extern int end_that_request_first(struct request *, int, int);
extern int end_that_request_chunk(struct request *, int, int);
extern void end_that_request_last(struct request *, int);
extern int blk_end_request(struct request *rq, int error, int nr_bytes);
extern int __blk_end_request(struct request *rq, int error, int nr_bytes);
extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes,
int bidi_bytes);
extern void end_request(struct request *, int);
extern void end_queued_request(struct request *, int);
extern void end_dequeued_request(struct request *, int);
extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes,
int (drv_callback)(struct request *));
extern void blk_complete_request(struct request *);
/*
* end_that_request_first/chunk() takes an uptodate argument. we account
* any value <= as an io error. 0 means -EIO for compatability reasons,
* any other < 0 value is the direct error type. An uptodate value of
* 1 indicates successful io completion
* blk_end_request() takes bytes instead of sectors as a complete size.
* blk_rq_bytes() returns bytes left to complete in the entire request.
* blk_rq_cur_bytes() returns bytes left to complete in the current segment.
*/
#define end_io_error(uptodate) (unlikely((uptodate) <= 0))
extern unsigned int blk_rq_bytes(struct request *rq);
extern unsigned int blk_rq_cur_bytes(struct request *rq);
static inline void blkdev_dequeue_request(struct request *req)
{