mmc_block: ensure all sectors that do not have errors are read
If a card encounters an ECC error while reading a sector it will time out. Instead of reporting the entire I/O request as having an error, redo the I/O one sector at a time so that all readable sectors are provided to the upper layers.

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
commit 6a79e391df
parent a0d045cac9
1 changed file with 59 additions and 17 deletions
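For orientation before the diff: a minimal, self-contained userspace sketch of the fallback this commit implements, not the kernel code itself. device_read(), BAD_SECTOR and read_with_fallback() are hypothetical stand-ins; only the control flow (try a multi-sector read, then drop to single-sector reads so every readable sector still reaches the caller) mirrors the patch.

/*
 * Sketch only: simulates a card where one sector has an ECC error and a
 * multi-sector read covering it fails as a whole.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512
#define BAD_SECTOR  5		/* pretend this sector has an ECC error */

/* Simulated device read: fails if the range covers the bad sector. */
static bool device_read(unsigned int start, unsigned int count, unsigned char *buf)
{
	if (start <= BAD_SECTOR && BAD_SECTOR < start + count)
		return false;
	memset(buf, 0xA5, (size_t)count * SECTOR_SIZE);
	return true;
}

/* Returns the number of sectors successfully read into buf. */
static unsigned int read_with_fallback(unsigned int start, unsigned int count,
				       unsigned char *buf)
{
	unsigned int done = 0, ok = 0;
	bool single = false;	/* plays the role of disable_multi in the patch */

	while (done < count) {
		unsigned int n = single ? 1 : count - done;

		if (device_read(start + done, n, buf + (size_t)done * SECTOR_SIZE)) {
			done += n;
			ok += n;
		} else if (!single) {
			single = true;	/* redo the request one sector at a time */
		} else {
			done += 1;	/* skip the one unreadable sector */
		}
	}
	return ok;
}

int main(void)
{
	unsigned char buf[16 * SECTOR_SIZE];
	unsigned int ok = read_with_fallback(0, 16, buf);

	printf("read %u of 16 sectors\n", ok);	/* expect 15 */
	return 0;
}

The commit's diff follows.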
@@ -229,7 +229,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1;
+	int ret = 1, disable_multi = 0;
 
 	mmc_claim_host(card->host);
 
@@ -251,6 +251,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors;
 
+		/*
+		 * After a read error, we redo the request one sector at a time
+		 * in order to accurately determine which sectors can be read
+		 * successfully.
+		 */
+		if (disable_multi && brq.data.blocks > 1)
+			brq.data.blocks = 1;
+
 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
@@ -279,6 +287,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.data.sg = mq->sg;
 		brq.data.sg_len = mmc_queue_map_sg(mq);
 
+		/*
+		 * Adjust the sg list so it is the same size as the
+		 * request.
+		 */
+		if (brq.data.blocks != req->nr_sectors) {
+			int i, data_size = brq.data.blocks << 9;
+			struct scatterlist *sg;
+
+			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+				data_size -= sg->length;
+				if (data_size <= 0) {
+					sg->length += data_size;
+					i++;
+					break;
+				}
+			}
+			brq.data.sg_len = i;
+		}
+
 		mmc_queue_bounce_pre(mq);
 
 		mmc_wait_for_req(card->host, &brq.mrq);
@@ -290,8 +317,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		 * until later as we need to wait for the card to leave
 		 * programming mode even when things go wrong.
 		 */
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		if (brq.cmd.error || brq.data.error || brq.stop.error) {
+			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
+				/* Redo read one sector at a time */
+				printk(KERN_WARNING "%s: retrying using single "
+				       "block read\n", req->rq_disk->disk_name);
+				disable_multi = 1;
+				continue;
+			}
 			status = get_card_status(card, req);
+		}
 
 		if (brq.cmd.error) {
 			printk(KERN_ERR "%s: error %d sending read/write "
@@ -348,8 +383,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 #endif
 		}
 
-		if (brq.cmd.error || brq.data.error || brq.stop.error)
+		if (brq.cmd.error || brq.stop.error || brq.data.error) {
+			if (rq_data_dir(req) == READ) {
+				/*
+				 * After an error, we redo I/O one sector at a
+				 * time, so we only reach here after trying to
+				 * read a single sector.
+				 */
+				spin_lock_irq(&md->lock);
+				ret = __blk_end_request(req, -EIO, brq.data.blksz);
+				spin_unlock_irq(&md->lock);
+				continue;
+			}
 			goto cmd_err;
+		}
 
 		/*
 		 * A block was successfully transferred.
@@ -371,25 +418,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	 * If the card is not SD, we can still ok written sectors
 	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
-	 *
-	 * For reads we just fail the entire chunk as that should
-	 * be safe in all cases.
 	 */
-	if (rq_data_dir(req) != READ) {
-		if (mmc_card_sd(card)) {
-			u32 blocks;
+	if (mmc_card_sd(card)) {
+		u32 blocks;
 
-			blocks = mmc_sd_num_wr_blocks(card);
-			if (blocks != (u32)-1) {
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, 0, blocks << 9);
-				spin_unlock_irq(&md->lock);
-			}
-		} else {
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
 			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+			ret = __blk_end_request(req, 0, blocks << 9);
 			spin_unlock_irq(&md->lock);
 		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
 	}
 
 	mmc_release_host(card->host);
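As an aside on the third hunk: a standalone sketch, under simplified assumptions, of how the scatterlist is cut down when the transfer shrinks to a single sector. struct sg_entry and trim_sg_to_size() are hypothetical stand-ins for the kernel's struct scatterlist and the in-place loop in the patch; only the walk-and-trim arithmetic has the same shape.

#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel's struct scatterlist. */
struct sg_entry {
	unsigned int length;	/* bytes covered by this entry */
};

/* Trim the list so it describes exactly data_size bytes; return new length. */
static int trim_sg_to_size(struct sg_entry *sg, int sg_len, int data_size)
{
	int i;

	for (i = 0; i < sg_len; i++) {
		data_size -= sg[i].length;
		if (data_size <= 0) {
			/* Shorten the final entry so the total matches exactly. */
			sg[i].length += data_size;
			i++;
			break;
		}
	}
	return i;
}

int main(void)
{
	struct sg_entry sg[2] = { { 4096 }, { 4096 } };
	int len = trim_sg_to_size(sg, 2, 512);	/* one 512-byte sector */

	printf("%d entry, %u bytes\n", len, sg[0].length);	/* prints: 1 entry, 512 bytes */
	return 0;
}

Run against two 4096-byte entries and a 512-byte target, it returns one entry of 512 bytes, which is what the patch needs when a multi-sector request is redone as a single-sector read.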