Diffstat (limited to 'recipes/linux/linux-2.6.28/collie/0023-mmc_block-ensure-all-sectors-that-do-not-have-error.patch')
-rw-r--r--  recipes/linux/linux-2.6.28/collie/0023-mmc_block-ensure-all-sectors-that-do-not-have-error.patch  148
1 file changed, 148 insertions, 0 deletions
diff --git a/recipes/linux/linux-2.6.28/collie/0023-mmc_block-ensure-all-sectors-that-do-not-have-error.patch b/recipes/linux/linux-2.6.28/collie/0023-mmc_block-ensure-all-sectors-that-do-not-have-error.patch
new file mode 100644
index 0000000000..3ff32b0a94
--- /dev/null
+++ b/recipes/linux/linux-2.6.28/collie/0023-mmc_block-ensure-all-sectors-that-do-not-have-error.patch
@@ -0,0 +1,148 @@
+From fba35a4bb8f9cabcd374e19a2a34ee5496d971d2 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <ext-adrian.hunter@nokia.com>
+Date: Wed, 31 Dec 2008 18:21:17 +0100
+Subject: [PATCH 23/23] mmc_block: ensure all sectors that do not have errors are read
+
+If a card encounters an ECC error while reading a sector it will
+timeout. Instead of reporting the entire I/O request as having
+an error, redo the I/O one sector at a time so that all readable
+sectors are provided to the upper layers.
+
+Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
+Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
+---
+ drivers/mmc/card/block.c | 76 +++++++++++++++++++++++++++++++++++----------
+ 1 files changed, 59 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index cc9b3ab..45b1f43 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -229,7 +229,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request brq;
+- int ret = 1;
++ int ret = 1, disable_multi = 0;
+
+ mmc_claim_host(card->host);
+
+@@ -251,6 +251,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq.data.blocks = req->nr_sectors;
+
++ /*
++ * After a read error, we redo the request one sector at a time
++ * in order to accurately determine which sectors can be read
++ * successfully.
++ */
++ if (disable_multi && brq.data.blocks > 1)
++ brq.data.blocks = 1;
++
+ if (brq.data.blocks > 1) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
+@@ -279,6 +287,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ brq.data.sg = mq->sg;
+ brq.data.sg_len = mmc_queue_map_sg(mq);
+
++ /*
++ * Adjust the sg list so it is the same size as the
++ * request.
++ */
++ if (brq.data.blocks != req->nr_sectors) {
++ int i, data_size = brq.data.blocks << 9;
++ struct scatterlist *sg;
++
++ for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
++ data_size -= sg->length;
++ if (data_size <= 0) {
++ sg->length += data_size;
++ i++;
++ break;
++ }
++ }
++ brq.data.sg_len = i;
++ }
++
+ mmc_queue_bounce_pre(mq);
+
+ mmc_wait_for_req(card->host, &brq.mrq);
+@@ -290,8 +317,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ * until later as we need to wait for the card to leave
+ * programming mode even when things go wrong.
+ */
+- if (brq.cmd.error || brq.data.error || brq.stop.error)
++ if (brq.cmd.error || brq.data.error || brq.stop.error) {
++ if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
++ /* Redo read one sector at a time */
++ printk(KERN_WARNING "%s: retrying using single "
++ "block read\n", req->rq_disk->disk_name);
++ disable_multi = 1;
++ continue;
++ }
+ status = get_card_status(card, req);
++ }
+
+ if (brq.cmd.error) {
+ printk(KERN_ERR "%s: error %d sending read/write "
+@@ -348,8 +383,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ #endif
+ }
+
+- if (brq.cmd.error || brq.data.error || brq.stop.error)
++ if (brq.cmd.error || brq.stop.error || brq.data.error) {
++ if (rq_data_dir(req) == READ) {
++ /*
++ * After an error, we redo I/O one sector at a
++ * time, so we only reach here after trying to
++ * read a single sector.
++ */
++ spin_lock_irq(&md->lock);
++ ret = __blk_end_request(req, -EIO, brq.data.blksz);
++ spin_unlock_irq(&md->lock);
++ continue;
++ }
+ goto cmd_err;
++ }
+
+ /*
+ * A block was successfully transferred.
+@@ -371,25 +418,20 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ * If the card is not SD, we can still ok written sectors
+ * as reported by the controller (which might be less than
+ * the real number of written sectors, but never more).
+- *
+- * For reads we just fail the entire chunk as that should
+- * be safe in all cases.
+ */
+- if (rq_data_dir(req) != READ) {
+- if (mmc_card_sd(card)) {
+- u32 blocks;
++ if (mmc_card_sd(card)) {
++ u32 blocks;
+
+- blocks = mmc_sd_num_wr_blocks(card);
+- if (blocks != (u32)-1) {
+- spin_lock_irq(&md->lock);
+- ret = __blk_end_request(req, 0, blocks << 9);
+- spin_unlock_irq(&md->lock);
+- }
+- } else {
++ blocks = mmc_sd_num_wr_blocks(card);
++ if (blocks != (u32)-1) {
+ spin_lock_irq(&md->lock);
+- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
++ ret = __blk_end_request(req, 0, blocks << 9);
+ spin_unlock_irq(&md->lock);
+ }
++ } else {
++ spin_lock_irq(&md->lock);
++ ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
++ spin_unlock_irq(&md->lock);
+ }
+
+ mmc_release_host(card->host);
+--
+1.5.6.5
+
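The change above reduces to a simple pattern: attempt the full multi-block transfer first, and only after a read error redo the request one block at a time, so every readable block still reaches the caller and only the truly bad sector is failed. Below is a minimal standalone C sketch of that pattern; it is an illustration under assumed names, not kernel code: read_blocks(), BAD_BLOCK, and the fixed 512-byte block size are hypothetical stand-ins for the MMC transfer path and an ECC-damaged sector.

#include <stdio.h>
#include <string.h>

#define BLK_SZ    512   /* bytes per block, matching the << 9 in the patch */
#define BAD_BLOCK 3     /* simulated unreadable (ECC-error) block */

/* Stand-in for the MMC transfer: fails if the range covers BAD_BLOCK. */
static int read_blocks(unsigned start, unsigned count, char *buf)
{
	if (start <= BAD_BLOCK && BAD_BLOCK < start + count)
		return -1;
	memset(buf, 'D', (size_t)count * BLK_SZ);  /* pretend payload */
	return 0;
}

static int read_with_fallback(unsigned start, unsigned count, char *buf)
{
	int ret = 0;
	unsigned i;

	if (read_blocks(start, count, buf) == 0)
		return 0;               /* fast path: one multi-block read */

	/* Read error: redo one block at a time (the disable_multi case). */
	for (i = 0; i < count; i++) {
		if (read_blocks(start + i, 1, buf + (size_t)i * BLK_SZ) != 0) {
			fprintf(stderr, "block %u unreadable\n", start + i);
			ret = -1;       /* fail only this block */
		}
	}
	return ret;
}

int main(void)
{
	static char buf[8 * BLK_SZ];

	/* 7 of 8 blocks are delivered; only BAD_BLOCK is reported bad. */
	return read_with_fallback(0, 8, buf) ? 1 : 0;
}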