author     Martin Jansa <Martin.Jansa@gmail.com>    2011-05-26 10:27:07 +0200
committer  Martin Jansa <Martin.Jansa@gmail.com>    2011-05-26 20:39:25 +0200
commit     275c62bfeb9b6416e8e6e449c23d19ea8550db9c (patch)
tree       9341be2dcbf1a0ea0020182c22ad572ba3b3b78d
parent     24513855be5e8d8f2b23236fee5587bd73333072 (diff)
download   openembedded-275c62bfeb9b6416e8e6e449c23d19ea8550db9c.tar.gz
linux_2.6.39: revert MMC changes from openmoko.patch in shr.patch
Signed-off-by: Martin Jansa <Martin.Jansa@gmail.com>
-rw-r--r--  recipes/linux/linux-2.6.39/om-gta02/defconfig  |    1 -
-rw-r--r--  recipes/linux/linux-2.6.39/shr.patch           | 2008 +
-rw-r--r--  recipes/linux/linux-openmoko_2.6.39.bb         |    2 +-
-rw-r--r--  recipes/linux/linux_2.6.39.bb                  |    1 +
4 files changed, 2010 insertions(+), 2 deletions(-)
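
In short, the new shr.patch backs out the experimental non-blocking (double-buffered)
MMC request infrastructure carried in the shr tree and restores the mainline
synchronous request path. As orientation, here is a minimal sketch of the flow this
revert restores, assembled from the drivers/mmc/core/core.c hunk further down
(a sketch for illustration, not a drop-in implementation):

    /* Sketch of the restored synchronous request path (kernel context,
     * drivers/mmc/core/core.c); trimmed from the hunk below. */
    #include <linux/completion.h>
    #include <linux/mmc/core.h>
    #include <linux/mmc/host.h>

    static void mmc_wait_done(struct mmc_request *mrq)
    {
    	complete(mrq->done_data);	/* wake the sleeping caller */
    }

    void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
    {
    	DECLARE_COMPLETION_ONSTACK(complete);

    	mrq->done_data = &complete;	/* stash the completion for the done hook */
    	mrq->done = mmc_wait_done;	/* invoked via mmc_request_done() */

    	mmc_start_request(host, mrq);	/* hand the request to the host driver */
    	wait_for_completion(&complete);	/* block until the transfer finishes */
    }
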
diff --git a/recipes/linux/linux-2.6.39/om-gta02/defconfig b/recipes/linux/linux-2.6.39/om-gta02/defconfig
index 58d1ff3f02..94e3b4065c 100644
--- a/recipes/linux/linux-2.6.39/om-gta02/defconfig
+++ b/recipes/linux/linux-2.6.39/om-gta02/defconfig
@@ -392,7 +392,6 @@ CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_CDC_COMPOSITE=m
CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
CONFIG_MMC_UNSAFE_RESUME=y
CONFIG_MMC_S3C=y
CONFIG_MMC_GLAMO=y
diff --git a/recipes/linux/linux-2.6.39/shr.patch b/recipes/linux/linux-2.6.39/shr.patch
new file mode 100644
index 0000000000..0c0a9f7550
--- /dev/null
+++ b/recipes/linux/linux-2.6.39/shr.patch
@@ -0,0 +1,2008 @@
+All patches from the shr kernel repository,
+rebased on top of the openmoko kernel repository;
+the reverted commits are listed below, followed by a sketch of the
+request pattern they had introduced:
+
+https://gitorious.org/shr/linux/commits/shr-2.6.39-nodrm
+
+56885e2 glamo-mci: revert changes for Per's patchset
+6ab40bf Revert "mmc: add none blocking mmc request function"
+7937634 Revert "mmc: mmc_test: add debugfs file to list all tests"
+cf10e4d Revert "mmc: mmc_test: add test for none blocking transfers"
+29cd7b4 Revert "mmc: add member in mmc queue struct to hold request data"
+8f94eec Revert "mmc: add a block request prepare function"
+eae71b0 Revert "mmc: move error code in mmc_block_issue_rw_rq to a separate function."
+1062d7c Revert "mmc: add a second mmc queue request member"
+e0b2a74 Revert "mmc: add handling for two parallel block requests in issue_rw_rq"
+b59a013 Revert "mmc: test: add random fault injection in core.c"
+
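+A rough sketch of the double-buffered submission loop that the reverted
+commits introduced (illustrative only, condensed from the removed
+mmc_test.c hunk below; assumes host, count, mrq1 and mrq2 are already
+set up by the caller):
+
+	struct mmc_request *cur = &mrq1, *prev = NULL;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		mmc_pre_req(host, cur, !prev);		/* prepare cur; prev may still run */
+		if (prev)
+			mmc_wait_for_req_done(prev);	/* wait for the in-flight request */
+		mmc_start_req(host, cur);		/* fire cur without blocking */
+		if (prev)
+			mmc_post_req(host, prev, 0);	/* release the finished request */
+		prev = cur;
+		cur = (cur == &mrq1) ? &mrq2 : &mrq1;	/* flip buffers */
+	}
+	mmc_wait_for_req_done(prev);			/* drain the last request */
+	mmc_post_req(host, prev, 0);
+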
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 4b530ae..61d233a 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -79,13 +79,6 @@ struct mmc_blk_data {
+
+ static DEFINE_MUTEX(open_lock);
+
+-enum mmc_blk_status {
+- MMC_BLK_SUCCESS = 0,
+- MMC_BLK_RETRY,
+- MMC_BLK_DATA_ERR,
+- MMC_BLK_CMD_ERR,
+-};
+-
+ module_param(perdev_minors, int, 0444);
+ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+
+@@ -172,6 +165,13 @@ static const struct block_device_operations mmc_bdops = {
+ .owner = THIS_MODULE,
+ };
+
++struct mmc_blk_request {
++ struct mmc_request mrq;
++ struct mmc_command cmd;
++ struct mmc_command stop;
++ struct mmc_data data;
++};
++
+ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+ {
+ int err;
+@@ -331,341 +331,200 @@ out:
+ return err ? 0 : 1;
+ }
+
+-static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+- struct mmc_card *card,
+- int disable_multi,
+- struct mmc_queue *mq)
++static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+ {
+- u32 readcmd, writecmd;
+- struct mmc_blk_request *brq = &mqrq->brq;
+- struct request *req = mqrq->req;
+-
+- memset(brq, 0, sizeof(struct mmc_blk_request));
+-
+- brq->mrq.cmd = &brq->cmd;
+- brq->mrq.data = &brq->data;
+-
+- brq->cmd.arg = blk_rq_pos(req);
+- if (!mmc_card_blockaddr(card))
+- brq->cmd.arg <<= 9;
+- brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+- brq->data.blksz = 512;
+- brq->stop.opcode = MMC_STOP_TRANSMISSION;
+- brq->stop.arg = 0;
+- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+- brq->data.blocks = blk_rq_sectors(req);
++ struct mmc_blk_data *md = mq->data;
++ struct mmc_card *card = md->queue.card;
++ struct mmc_blk_request brq;
++ int ret = 1, disable_multi = 0;
+
+- /*
+- * The block layer doesn't support all sector count
+- * restrictions, so we need to be prepared for too big
+- * requests.
+- */
+- if (brq->data.blocks > card->host->max_blk_count)
+- brq->data.blocks = card->host->max_blk_count;
++ mmc_claim_host(card->host);
+
+- /*
+- * After a read error, we redo the request one sector at a time
+- * in order to accurately determine which sectors can be read
+- * successfully.
+- */
+- if (disable_multi && brq->data.blocks > 1)
+- brq->data.blocks = 1;
++ do {
++ struct mmc_command cmd;
++ u32 readcmd, writecmd, status = 0;
++
++ memset(&brq, 0, sizeof(struct mmc_blk_request));
++ brq.mrq.cmd = &brq.cmd;
++ brq.mrq.data = &brq.data;
++
++ brq.cmd.arg = blk_rq_pos(req);
++ if (!mmc_card_blockaddr(card))
++ brq.cmd.arg <<= 9;
++ brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
++ brq.data.blksz = 512;
++ brq.stop.opcode = MMC_STOP_TRANSMISSION;
++ brq.stop.arg = 0;
++ brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
++ brq.data.blocks = blk_rq_sectors(req);
+
+- if (brq->data.blocks > 1) {
+- /* SPI multiblock writes terminate using a special
+- * token, not a STOP_TRANSMISSION request.
++ /*
++ * The block layer doesn't support all sector count
++ * restrictions, so we need to be prepared for too big
++ * requests.
+ */
+- if (!mmc_host_is_spi(card->host)
+- || rq_data_dir(req) == READ)
+- brq->mrq.stop = &brq->stop;
+- readcmd = MMC_READ_MULTIPLE_BLOCK;
+- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+- } else {
+- brq->mrq.stop = NULL;
+- readcmd = MMC_READ_SINGLE_BLOCK;
+- writecmd = MMC_WRITE_BLOCK;
+- }
+- if (rq_data_dir(req) == READ) {
+- brq->cmd.opcode = readcmd;
+- brq->data.flags |= MMC_DATA_READ;
+- } else {
+- brq->cmd.opcode = writecmd;
+- brq->data.flags |= MMC_DATA_WRITE;
+- }
+-
+- mmc_set_data_timeout(&brq->data, card);
++ if (brq.data.blocks > card->host->max_blk_count)
++ brq.data.blocks = card->host->max_blk_count;
+
+- brq->data.sg = mqrq->sg;
+- brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
++ /*
++ * After a read error, we redo the request one sector at a time
++ * in order to accurately determine which sectors can be read
++ * successfully.
++ */
++ if (disable_multi && brq.data.blocks > 1)
++ brq.data.blocks = 1;
+
+- /*
+- * Adjust the sg list so it is the same size as the
+- * request.
+- */
+- if (brq->data.blocks != blk_rq_sectors(req)) {
+- int i, data_size = brq->data.blocks << 9;
+- struct scatterlist *sg;
+-
+- for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+- data_size -= sg->length;
+- if (data_size <= 0) {
+- sg->length += data_size;
+- i++;
+- break;
+- }
++ if (brq.data.blocks > 1) {
++ /* SPI multiblock writes terminate using a special
++ * token, not a STOP_TRANSMISSION request.
++ */
++ if (!mmc_host_is_spi(card->host)
++ || rq_data_dir(req) == READ)
++ brq.mrq.stop = &brq.stop;
++ readcmd = MMC_READ_MULTIPLE_BLOCK;
++ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
++ } else {
++ brq.mrq.stop = NULL;
++ readcmd = MMC_READ_SINGLE_BLOCK;
++ writecmd = MMC_WRITE_BLOCK;
+ }
+- brq->data.sg_len = i;
+- }
+-
+- mmc_queue_bounce_pre(mqrq);
+-}
+-
+-static enum mmc_blk_status mmc_blk_get_status(struct mmc_blk_request *brq,
+- struct request *req,
+- struct mmc_card *card,
+- struct mmc_blk_data *md)
+-{
+- struct mmc_command cmd;
+- u32 status;
+- enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+-
+- /*
+- * Check for errors here, but don't jump to cmd_err
+- * until later as we need to wait for the card to leave
+- * programming mode even when things go wrong.
+- */
+- if (brq->cmd.error || brq->data.error || brq->stop.error) {
+- if (brq->data.blocks > 1 && rq_data_dir(req) == READ) {
+- /* Redo read one sector at a time */
+- printk(KERN_WARNING "%s: retrying using single "
+- "block read, brq %p\n",
+- req->rq_disk->disk_name, brq);
+- ret = MMC_BLK_RETRY;
+- goto out;
++ if (rq_data_dir(req) == READ) {
++ brq.cmd.opcode = readcmd;
++ brq.data.flags |= MMC_DATA_READ;
++ } else {
++ brq.cmd.opcode = writecmd;
++ brq.data.flags |= MMC_DATA_WRITE;
+ }
+- status = get_card_status(card, req);
+- }
+
+- if (brq->cmd.error) {
+- printk(KERN_ERR "%s: error %d sending read/write "
+- "command, response %#x, card status %#x\n",
+- req->rq_disk->disk_name, brq->cmd.error,
+- brq->cmd.resp[0], status);
+- }
+-
+- if (brq->data.error) {
+- if (brq->data.error == -ETIMEDOUT && brq->mrq.stop)
+- /* 'Stop' response contains card status */
+- status = brq->mrq.stop->resp[0];
+- printk(KERN_ERR "%s: error %d transferring data,"
+- " sector %u, nr %u, card status %#x\n",
+- req->rq_disk->disk_name, brq->data.error,
+- (unsigned)blk_rq_pos(req),
+- (unsigned)blk_rq_sectors(req), status);
+- }
++ mmc_set_data_timeout(&brq.data, card);
+
+- if (brq->stop.error) {
+- printk(KERN_ERR "%s: error %d sending stop command, "
+- "response %#x, card status %#x\n",
+- req->rq_disk->disk_name, brq->stop.error,
+- brq->stop.resp[0], status);
+- }
++ brq.data.sg = mq->sg;
++ brq.data.sg_len = mmc_queue_map_sg(mq);
+
+- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+- do {
+- int err;
+-
+- cmd.opcode = MMC_SEND_STATUS;
+- cmd.arg = card->rca << 16;
+- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+- err = mmc_wait_for_cmd(card->host, &cmd, 5);
+- if (err) {
+- printk(KERN_ERR "%s: error %d requesting status\n",
+- req->rq_disk->disk_name, err);
+- ret = MMC_BLK_CMD_ERR;
+- goto out;
++ /*
++ * Adjust the sg list so it is the same size as the
++ * request.
++ */
++ if (brq.data.blocks != blk_rq_sectors(req)) {
++ int i, data_size = brq.data.blocks << 9;
++ struct scatterlist *sg;
++
++ for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
++ data_size -= sg->length;
++ if (data_size <= 0) {
++ sg->length += data_size;
++ i++;
++ break;
++ }
+ }
+- /*
+- * Some cards mishandle the status bits,
+- * so make sure to check both the busy
+- * indication and the card state.
+- */
+- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
+- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
+-
+-#if 0
+- if (cmd.resp[0] & ~0x00000900)
+- printk(KERN_ERR "%s: status = %08x\n",
+- req->rq_disk->disk_name, cmd.resp[0]);
+- if (mmc_decode_status(cmd.resp)) {
+- ret = MMC_BLK_CMD_ERR;
+- goto out;
++ brq.data.sg_len = i;
+ }
+
+-#endif
+- }
++ mmc_queue_bounce_pre(mq);
+
+- if (brq->cmd.error || brq->stop.error || brq->data.error) {
+- if (rq_data_dir(req) == READ)
+- ret = MMC_BLK_DATA_ERR;
+- else
+- ret = MMC_BLK_CMD_ERR;
+- }
+- out:
+- return ret;
++ mmc_wait_for_req(card->host, &brq.mrq);
+
+-}
++ mmc_queue_bounce_post(mq);
+
+-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+-{
+- struct mmc_blk_data *md = mq->data;
+- struct mmc_card *card = md->queue.card;
+- struct mmc_blk_request *brqc = &mq->mqrq_cur->brq;
+- struct mmc_blk_request *brqp = &mq->mqrq_prev->brq;
+- struct mmc_queue_req *mqrqp = mq->mqrq_prev;
+- struct request *rqp = mqrqp->req;
+- int ret = 0;
+- int disable_multi = 0;
+- enum mmc_blk_status status;
+-
+- if (!rqc && !rqp)
+- return 0;
+-
+- if (rqc) {
+- /* Claim host for the first request in a serie of requests */
+- if (!rqp)
+- mmc_claim_host(card->host);
+-
+- /* Prepare a new request */
+- mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+- mmc_pre_req(card->host, &brqc->mrq, !rqp);
+- }
+- do {
+ /*
+- * If there is an ongoing request, indicated by rqp, wait for
+- * it to finish before starting a new one.
++ * Check for errors here, but don't jump to cmd_err
++ * until later as we need to wait for the card to leave
++ * programming mode even when things go wrong.
+ */
+- if (rqp)
+- mmc_wait_for_req_done(&brqp->mrq);
+- else {
+- /* start a new asynchronous request */
+- mmc_start_req(card->host, &brqc->mrq);
+- goto out;
+- }
+- status = mmc_blk_get_status(brqp, rqp, card, md);
+- if (status != MMC_BLK_SUCCESS) {
+- mmc_post_req(card->host, &brqp->mrq, -EINVAL);
+- mmc_queue_bounce_post(mqrqp);
+- if (rqc)
+- mmc_post_req(card->host, &brqc->mrq, -EINVAL);
++ if (brq.cmd.error || brq.data.error || brq.stop.error) {
++ if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
++ /* Redo read one sector at a time */
++ printk(KERN_WARNING "%s: retrying using single "
++ "block read\n", req->rq_disk->disk_name);
++ disable_multi = 1;
++ continue;
++ }
++ status = get_card_status(card, req);
+ }
+
+- switch (status) {
+- case MMC_BLK_SUCCESS:
+- /*
+- * A block was successfully transferred.
+- */
+-
+- /*
+- * All data is transferred without errors.
+- * Defer mmc post processing and _blk_end_request
+- * until after the new request is started.
+- */
+- if (blk_rq_bytes(rqp) == brqp->data.bytes_xfered)
+- break;
+-
+- mmc_post_req(card->host, &brqp->mrq, 0);
+- mmc_queue_bounce_post(mqrqp);
+-
+- spin_lock_irq(&md->lock);
+- ret = __blk_end_request(rqp, 0,
+- brqp->data.bytes_xfered);
+- spin_unlock_irq(&md->lock);
+-
+- if (rqc)
+- mmc_post_req(card->host, &brqc->mrq, -EINVAL);
+- break;
+- case MMC_BLK_CMD_ERR:
+- goto cmd_err;
+- break;
+- case MMC_BLK_RETRY:
+- disable_multi = 1;
+- ret = 1;
+- break;
+- case MMC_BLK_DATA_ERR:
+- /*
+- * After an error, we redo I/O one sector at a
+- * time, so we only reach here after trying to
+- * read a single sector.
+- */
+- spin_lock_irq(&md->lock);
+- ret = __blk_end_request(rqp, -EIO, brqp->data.blksz);
+- spin_unlock_irq(&md->lock);
+- if (rqc && !ret)
+- mmc_pre_req(card->host, &brqc->mrq, false);
+- break;
++ if (brq.cmd.error) {
++ printk(KERN_ERR "%s: error %d sending read/write "
++ "command, response %#x, card status %#x\n",
++ req->rq_disk->disk_name, brq.cmd.error,
++ brq.cmd.resp[0], status);
+ }
+
+- if (ret) {
+- /*
+- * In case of a none complete request
+- * prepare it again and resend.
+- */
+- mmc_blk_rw_rq_prep(mqrqp, card, disable_multi, mq);
+- mmc_pre_req(card->host, &brqp->mrq, true);
+- mmc_start_req(card->host, &brqp->mrq);
+- if (rqc)
+- mmc_pre_req(card->host, &brqc->mrq, false);
++ if (brq.data.error) {
++ if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
++ /* 'Stop' response contains card status */
++ status = brq.mrq.stop->resp[0];
++ printk(KERN_ERR "%s: error %d transferring data,"
++ " sector %u, nr %u, card status %#x\n",
++ req->rq_disk->disk_name, brq.data.error,
++ (unsigned)blk_rq_pos(req),
++ (unsigned)blk_rq_sectors(req), status);
+ }
+- } while (ret);
+
+- /* Previous request is completed, start the new request if any */
+- if (rqc)
+- mmc_start_req(card->host, &brqc->mrq);
++ if (brq.stop.error) {
++ printk(KERN_ERR "%s: error %d sending stop command, "
++ "response %#x, card status %#x\n",
++ req->rq_disk->disk_name, brq.stop.error,
++ brq.stop.resp[0], status);
++ }
+
+- /*
+- * Post process the previous request while the new request is active.
+- * In case of error the reuqest is already ended.
+- */
+- if (status == MMC_BLK_SUCCESS) {
+- mmc_post_req(card->host, &brqp->mrq, 0);
+- mmc_queue_bounce_post(mqrqp);
++ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
++ do {
++ int err;
++
++ cmd.opcode = MMC_SEND_STATUS;
++ cmd.arg = card->rca << 16;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
++ err = mmc_wait_for_cmd(card->host, &cmd, 5);
++ if (err) {
++ printk(KERN_ERR "%s: error %d requesting status\n",
++ req->rq_disk->disk_name, err);
++ goto cmd_err;
++ }
++ /*
++ * Some cards mishandle the status bits,
++ * so make sure to check both the busy
++ * indication and the card state.
++ */
++ } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
++ (R1_CURRENT_STATE(cmd.resp[0]) == 7));
+
+- spin_lock_irq(&md->lock);
+- ret = __blk_end_request(rqp, 0, brqp->data.bytes_xfered);
+- spin_unlock_irq(&md->lock);
++#if 0
++ if (cmd.resp[0] & ~0x00000900)
++ printk(KERN_ERR "%s: status = %08x\n",
++ req->rq_disk->disk_name, cmd.resp[0]);
++ if (mmc_decode_status(cmd.resp))
++ goto cmd_err;
++#endif
++ }
+
+- if (ret) {
+- /* If this happen it is a bug */
+- printk(KERN_ERR "[%s] BUG: rq_bytes %d xfered %d\n",
+- __func__, blk_rq_bytes(rqp),
+- brqp->data.bytes_xfered);
++ if (brq.cmd.error || brq.stop.error || brq.data.error) {
++ if (rq_data_dir(req) == READ) {
++ /*
++ * After an error, we redo I/O one sector at a
++ * time, so we only reach here after trying to
++ * read a single sector.
++ */
++ spin_lock_irq(&md->lock);
++ ret = __blk_end_request(req, -EIO, brq.data.blksz);
++ spin_unlock_irq(&md->lock);
++ continue;
++ }
+ goto cmd_err;
+ }
+- }
+
+- /* 1 indicates one request has been completed */
+- ret = 1;
+- out:
+- /*
+- * TODO: Find out if it is OK to only release host after the
+- * last request. For the last request the current request
+- * is NULL, which means no requests are pending.
+- */
+- /* Release host for the last request in a serie of requests */
+- if (!rqc)
+- mmc_release_host(card->host);
++ /*
++ * A block was successfully transferred.
++ */
++ spin_lock_irq(&md->lock);
++ ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
++ spin_unlock_irq(&md->lock);
++ } while (ret);
+
+- /* Current request becomes previous request and vice versa. */
+- mqrqp->brq.mrq.data = NULL;
+- mqrqp->req = NULL;
+- mq->mqrq_prev = mq->mqrq_cur;
+- mq->mqrq_cur = mqrqp;
++ mmc_release_host(card->host);
+
+- return ret;
++ return 1;
+
+ cmd_err:
+-
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+@@ -680,12 +539,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ spin_lock_irq(&md->lock);
+- ret = __blk_end_request(rqp, 0, blocks << 9);
++ ret = __blk_end_request(req, 0, blocks << 9);
+ spin_unlock_irq(&md->lock);
+ }
+ } else {
+ spin_lock_irq(&md->lock);
+- ret = __blk_end_request(rqp, 0, brqp->data.bytes_xfered);
++ ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+
+@@ -693,27 +552,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+
+ spin_lock_irq(&md->lock);
+ while (ret)
+- ret = __blk_end_request(rqp, -EIO, blk_rq_cur_bytes(rqp));
++ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ spin_unlock_irq(&md->lock);
+
+- if (rqc) {
+- mmc_claim_host(card->host);
+- mmc_pre_req(card->host, &brqc->mrq, false);
+- mmc_start_req(card->host, &brqc->mrq);
+- }
+-
+- /* Current request becomes previous request and vice versa. */
+- mqrqp->brq.mrq.data = NULL;
+- mqrqp->req = NULL;
+- mq->mqrq_prev = mq->mqrq_cur;
+- mq->mqrq_cur = mqrqp;
+-
+ return 0;
+ }
+
+ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
+ {
+- if (req && req->cmd_flags & REQ_DISCARD) {
++ if (req->cmd_flags & REQ_DISCARD) {
+ if (req->cmd_flags & REQ_SECURE)
+ return mmc_blk_issue_secdiscard_rq(mq, req);
+ else
+diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
+index 8f7ffad..abc1a63 100644
+--- a/drivers/mmc/card/mmc_test.c
++++ b/drivers/mmc/card/mmc_test.c
+@@ -22,7 +22,6 @@
+ #include <linux/debugfs.h>
+ #include <linux/uaccess.h>
+ #include <linux/seq_file.h>
+-#include <linux/random.h>
+
+ #define RESULT_OK 0
+ #define RESULT_FAIL 1
+@@ -52,12 +51,10 @@ struct mmc_test_pages {
+ * struct mmc_test_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+- * @size_min_cmn: lowest common size in array of allocations
+ */
+ struct mmc_test_mem {
+ struct mmc_test_pages *arr;
+ unsigned int cnt;
+- unsigned int size_min_cmn;
+ };
+
+ /**
+@@ -151,21 +148,6 @@ struct mmc_test_card {
+ struct mmc_test_general_result *gr;
+ };
+
+-enum mmc_test_prep_media {
+- MMC_TEST_PREP_NONE = 0,
+- MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+- MMC_TEST_PREP_ERASE = 1 << 1,
+-};
+-
+-struct mmc_test_multiple_rw {
+- unsigned int *bs;
+- unsigned int len;
+- unsigned int size;
+- bool do_write;
+- bool do_nonblock_req;
+- enum mmc_test_prep_media prepare;
+-};
+-
+ /*******************************************************************/
+ /* General helper functions */
+ /*******************************************************************/
+@@ -325,7 +307,6 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+ unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
+ unsigned long page_cnt = 0;
+ unsigned long limit = nr_free_buffer_pages() >> 4;
+- unsigned int min_cmn = 0;
+ struct mmc_test_mem *mem;
+
+ if (max_page_cnt > limit)
+@@ -369,12 +350,6 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+ mem->arr[mem->cnt].page = page;
+ mem->arr[mem->cnt].order = order;
+ mem->cnt += 1;
+- if (!min_cmn)
+- min_cmn = PAGE_SIZE << order;
+- else
+- min_cmn = min(min_cmn,
+- (unsigned int) (PAGE_SIZE << order));
+-
+ if (max_page_cnt <= (1UL << order))
+ break;
+ max_page_cnt -= 1UL << order;
+@@ -385,7 +360,6 @@ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
+ break;
+ }
+ }
+- mem->size_min_cmn = min_cmn;
+
+ return mem;
+
+@@ -412,6 +386,7 @@ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+ do {
+ for (i = 0; i < mem->cnt; i++) {
+ unsigned long len = PAGE_SIZE << mem->arr[i].order;
++
+ if (len > sz)
+ len = sz;
+ if (len > max_seg_sz)
+@@ -750,94 +725,6 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
+ }
+
+ /*
+- * Tests nonblock transfer with certain parameters
+- */
+-static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+- struct mmc_command *cmd,
+- struct mmc_command *stop,
+- struct mmc_data *data)
+-{
+- memset(mrq, 0, sizeof(struct mmc_request));
+- memset(cmd, 0, sizeof(struct mmc_command));
+- memset(data, 0, sizeof(struct mmc_data));
+- memset(stop, 0, sizeof(struct mmc_command));
+-
+- mrq->cmd = cmd;
+- mrq->data = data;
+- mrq->stop = stop;
+-}
+-static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+- struct scatterlist *sg, unsigned sg_len,
+- unsigned dev_addr, unsigned blocks,
+- unsigned blksz, int write, int count)
+-{
+- struct mmc_request mrq1;
+- struct mmc_command cmd1;
+- struct mmc_command stop1;
+- struct mmc_data data1;
+-
+- struct mmc_request mrq2;
+- struct mmc_command cmd2;
+- struct mmc_command stop2;
+- struct mmc_data data2;
+-
+- struct mmc_request *cur_mrq;
+- struct mmc_request *prev_mrq;
+- int i;
+- int ret = 0;
+-
+- if (!test->card->host->ops->pre_req ||
+- !test->card->host->ops->post_req)
+- return -RESULT_UNSUP_HOST;
+-
+- mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+- mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+-
+- cur_mrq = &mrq1;
+- prev_mrq = NULL;
+-
+- for (i = 0; i < count; i++) {
+- mmc_test_prepare_mrq(test, cur_mrq, sg, sg_len, dev_addr,
+- blocks, blksz, write);
+- mmc_pre_req(test->card->host, cur_mrq, !prev_mrq);
+-
+- if (prev_mrq) {
+- mmc_wait_for_req_done(prev_mrq);
+- mmc_test_wait_busy(test);
+- ret = mmc_test_check_result(test, prev_mrq);
+- if (ret)
+- goto err;
+- }
+-
+- mmc_start_req(test->card->host, cur_mrq);
+-
+- if (prev_mrq)
+- mmc_post_req(test->card->host, prev_mrq, 0);
+-
+- prev_mrq = cur_mrq;
+- if (cur_mrq == &mrq1) {
+- mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+- cur_mrq = &mrq2;
+- } else {
+- mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+- cur_mrq = &mrq1;
+- }
+- dev_addr += blocks;
+- }
+-
+- mmc_wait_for_req_done(prev_mrq);
+- mmc_test_wait_busy(test);
+- ret = mmc_test_check_result(test, prev_mrq);
+- if (ret)
+- goto err;
+- mmc_post_req(test->card->host, prev_mrq, 0);
+-
+- return ret;
+-err:
+- return ret;
+-}
+-
+-/*
+ * Tests a basic transfer with certain parameters
+ */
+ static int mmc_test_simple_transfer(struct mmc_test_card *test,
+@@ -1464,17 +1351,14 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
+ }
+
+ /*
+- * Map and transfer bytes for multiple transfers.
++ * Map and transfer bytes.
+ */
+-static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+- unsigned int dev_addr, int write,
+- int max_scatter, int timed, int count,
+- bool nonblock)
++static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
++ unsigned int dev_addr, int write, int max_scatter,
++ int timed)
+ {
+ struct timespec ts1, ts2;
+- int ret = 0;
+- int i;
+- struct mmc_test_area *t = &test->area;
++ int ret;
+
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+@@ -1498,15 +1382,8 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+
+ if (timed)
+ getnstimeofday(&ts1);
+- if (nonblock)
+- ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+- dev_addr, t->blocks, 512, write, count);
+- else
+- for (i = 0; i < count && ret == 0; i++) {
+- ret = mmc_test_area_transfer(test, dev_addr, write);
+- dev_addr += sz >> 9;
+- }
+
++ ret = mmc_test_area_transfer(test, dev_addr, write);
+ if (ret)
+ return ret;
+
+@@ -1514,19 +1391,11 @@ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ getnstimeofday(&ts2);
+
+ if (timed)
+- mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
++ mmc_test_print_rate(test, sz, &ts1, &ts2);
+
+ return 0;
+ }
+
+-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+- unsigned int dev_addr, int write, int max_scatter,
+- int timed)
+-{
+- return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+- timed, 1, false);
+-}
+-
+ /*
+ * Write the test area entirely.
+ */
+@@ -2087,144 +1956,6 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+ return mmc_test_large_seq_perf(test, 1);
+ }
+
+-static int mmc_test_rw_multiple(struct mmc_test_card *test,
+- struct mmc_test_multiple_rw *tdata,
+- unsigned int reqsize, unsigned int size)
+-{
+- unsigned int dev_addr;
+- struct mmc_test_area *t = &test->area;
+- int ret = 0;
+- int max_reqsize = max(t->mem->size_min_cmn *
+- min(t->max_segs, t->mem->cnt), t->max_tfr);
+-
+- /* Set up test area */
+- if (size > mmc_test_capacity(test->card) / 2 * 512)
+- size = mmc_test_capacity(test->card) / 2 * 512;
+- if (reqsize > max_reqsize)
+- reqsize = max_reqsize;
+- dev_addr = mmc_test_capacity(test->card) / 4;
+- if ((dev_addr & 0xffff0000))
+- dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+- else
+- dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+- if (!dev_addr)
+- goto err;
+-
+- /* prepare test area */
+- if (mmc_can_erase(test->card) &&
+- tdata->prepare & MMC_TEST_PREP_ERASE) {
+- ret = mmc_erase(test->card, dev_addr,
+- size / 512, MMC_SECURE_ERASE_ARG);
+- if (ret)
+- ret = mmc_erase(test->card, dev_addr,
+- size / 512, MMC_ERASE_ARG);
+- if (ret)
+- goto err;
+- }
+-
+- /* Run test */
+- ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+- tdata->do_write, 0, 1, size / reqsize,
+- tdata->do_nonblock_req);
+- if (ret)
+- goto err;
+-
+- return ret;
+- err:
+- printk(KERN_INFO "[%s] error\n", __func__);
+- return ret;
+-}
+-
+-static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+- struct mmc_test_multiple_rw *rw)
+-{
+- int ret = 0;
+- int i;
+-
+- for (i = 0 ; i < rw->len && ret == 0; i++) {
+- ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size);
+- if (ret)
+- break;
+- }
+- return ret;
+-}
+-
+-/*
+- * Multiple blocking write 4k to 4 MB chunks
+- */
+-static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+-{
+- unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+- 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+- struct mmc_test_multiple_rw test_data = {
+- .bs = bs,
+- .size = 128*1024*1024,
+- .len = ARRAY_SIZE(bs),
+- .do_write = true,
+- .do_nonblock_req = false,
+- .prepare = MMC_TEST_PREP_ERASE,
+- };
+-
+- return mmc_test_rw_multiple_size(test, &test_data);
+-};
+-
+-/*
+- * Multiple none blocking write 4k to 4 MB chunks
+- */
+-static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+-{
+- unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+- 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+- struct mmc_test_multiple_rw test_data = {
+- .bs = bs,
+- .size = 128*1024*1024,
+- .len = ARRAY_SIZE(bs),
+- .do_write = true,
+- .do_nonblock_req = true,
+- .prepare = MMC_TEST_PREP_ERASE,
+- };
+-
+- return mmc_test_rw_multiple_size(test, &test_data);
+-}
+-
+-/*
+- * Multiple blocking read 4k to 4 MB chunks
+- */
+-static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+-{
+- unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+- 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+- struct mmc_test_multiple_rw test_data = {
+- .bs = bs,
+- .size = 128*1024*1024,
+- .len = ARRAY_SIZE(bs),
+- .do_write = false,
+- .do_nonblock_req = false,
+- .prepare = MMC_TEST_PREP_NONE,
+- };
+-
+- return mmc_test_rw_multiple_size(test, &test_data);
+-}
+-
+-/*
+- * Multiple none blocking read 4k to 4 MB chunks
+- */
+-static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+-{
+- unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+- 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+- struct mmc_test_multiple_rw test_data = {
+- .bs = bs,
+- .size = 128*1024*1024,
+- .len = ARRAY_SIZE(bs),
+- .do_write = false,
+- .do_nonblock_req = true,
+- .prepare = MMC_TEST_PREP_NONE,
+- };
+-
+- return mmc_test_rw_multiple_size(test, &test_data);
+-}
+-
+ static const struct mmc_test_case mmc_test_cases[] = {
+ {
+ .name = "Basic write (no data verification)",
+@@ -2492,33 +2223,6 @@ static const struct mmc_test_case mmc_test_cases[] = {
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+- {
+- .name = "Write performance with blocking req 4k to 4MB",
+- .prepare = mmc_test_area_prepare,
+- .run = mmc_test_profile_mult_write_blocking_perf,
+- .cleanup = mmc_test_area_cleanup,
+- },
+-
+- {
+- .name = "Write performance with none blocking req 4k to 4MB",
+- .prepare = mmc_test_area_prepare,
+- .run = mmc_test_profile_mult_write_nonblock_perf,
+- .cleanup = mmc_test_area_cleanup,
+- },
+-
+- {
+- .name = "Read performance with blocking req 4k to 4MB",
+- .prepare = mmc_test_area_prepare,
+- .run = mmc_test_profile_mult_read_blocking_perf,
+- .cleanup = mmc_test_area_cleanup,
+- },
+-
+- {
+- .name = "Read performance with none blocking req 4k to 4MB",
+- .prepare = mmc_test_area_prepare,
+- .run = mmc_test_profile_mult_read_nonblock_perf,
+- .cleanup = mmc_test_area_cleanup,
+- },
+ };
+
+ static DEFINE_MUTEX(mmc_test_lock);
+@@ -2743,32 +2447,6 @@ static const struct file_operations mmc_test_fops_test = {
+ .release = single_release,
+ };
+
+-static int mtf_testlist_show(struct seq_file *sf, void *data)
+-{
+- int i;
+-
+- mutex_lock(&mmc_test_lock);
+-
+- for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+- seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+-
+- mutex_unlock(&mmc_test_lock);
+-
+- return 0;
+-}
+-
+-static int mtf_testlist_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, mtf_testlist_show, inode->i_private);
+-}
+-
+-static const struct file_operations mmc_test_fops_testlist = {
+- .open = mtf_testlist_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = single_release,
+-};
+-
+ static void mmc_test_free_file_test(struct mmc_card *card)
+ {
+ struct mmc_test_dbgfs_file *df, *dfs;
+@@ -2798,10 +2476,6 @@ static int mmc_test_register_file_test(struct mmc_card *card)
+ file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
+ card->debugfs_root, card, &mmc_test_fops_test);
+
+- if (card->debugfs_root)
+- file = debugfs_create_file("testlist", S_IRUGO,
+- card->debugfs_root, card, &mmc_test_fops_testlist);
+-
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(&card->dev,
+ "Can't create file. Perhaps debugfs is disabled.\n");
+diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
+index 2b14d1c..2ae7275 100644
+--- a/drivers/mmc/card/queue.c
++++ b/drivers/mmc/card/queue.c
+@@ -56,10 +56,9 @@ static int mmc_queue_thread(void *d)
+ spin_lock_irq(q->queue_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ req = blk_fetch_request(q);
+- mq->mqrq_cur->req = req;
++ mq->req = req;
+ spin_unlock_irq(q->queue_lock);
+
+- mq->issue_fn(mq, req);
+ if (!req) {
+ if (kthread_should_stop()) {
+ set_current_state(TASK_RUNNING);
+@@ -72,6 +71,7 @@ static int mmc_queue_thread(void *d)
+ }
+ set_current_state(TASK_RUNNING);
+
++ mq->issue_fn(mq, req);
+ } while (1);
+ up(&mq->thread_sem);
+
+@@ -97,25 +97,10 @@ static void mmc_request(struct request_queue *q)
+ return;
+ }
+
+- if (!mq->mqrq_cur->req)
++ if (!mq->req)
+ wake_up_process(mq->thread);
+ }
+
+-struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+-{
+- struct scatterlist *sg;
+-
+- sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+- if (!sg)
+- *err = -ENOMEM;
+- else {
+- *err = 0;
+- sg_init_table(sg, sg_len);
+- }
+-
+- return sg;
+-}
+-
+ /**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+@@ -129,8 +114,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
+ struct mmc_host *host = card->host;
+ u64 limit = BLK_BOUNCE_HIGH;
+ int ret;
+- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = *mmc_dev(host)->dma_mask;
+@@ -140,11 +123,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
+ if (!mq->queue)
+ return -ENOMEM;
+
+- memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+- memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+- mq->mqrq_cur = mqrq_cur;
+- mq->mqrq_prev = mqrq_prev;
+ mq->queue->queuedata = mq;
++ mq->req = NULL;
+
+ blk_queue_prep_rq(mq->queue, mmc_prep_request);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+@@ -178,64 +158,53 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
+ bouncesz = host->max_blk_count * 512;
+
+ if (bouncesz > 512) {
+- mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+- if (!mqrq_cur->bounce_buf) {
+- printk(KERN_WARNING "%s: unable to "
+- "allocate bounce cur buffer\n",
+- mmc_card_name(card));
+- }
+- mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+- if (!mqrq_prev->bounce_buf) {
++ mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
++ if (!mq->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+- "allocate bounce prev buffer\n",
++ "allocate bounce buffer\n",
+ mmc_card_name(card));
+- kfree(mqrq_cur->bounce_buf);
+- mqrq_cur->bounce_buf = NULL;
+ }
+ }
+
+- if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
++ if (mq->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+ blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+ blk_queue_max_segments(mq->queue, bouncesz / 512);
+ blk_queue_max_segment_size(mq->queue, bouncesz);
+
+- mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+- if (ret)
+- goto cleanup_queue;
+-
+- mqrq_cur->bounce_sg =
+- mmc_alloc_sg(bouncesz / 512, &ret);
+- if (ret)
+- goto cleanup_queue;
+-
+- mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+- if (ret)
++ mq->sg = kmalloc(sizeof(struct scatterlist),
++ GFP_KERNEL);
++ if (!mq->sg) {
++ ret = -ENOMEM;
+ goto cleanup_queue;
++ }
++ sg_init_table(mq->sg, 1);
+
+- mqrq_prev->bounce_sg =
+- mmc_alloc_sg(bouncesz / 512, &ret);
+- if (ret)
++ mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
++ bouncesz / 512, GFP_KERNEL);
++ if (!mq->bounce_sg) {
++ ret = -ENOMEM;
+ goto cleanup_queue;
++ }
++ sg_init_table(mq->bounce_sg, bouncesz / 512);
+ }
+ }
+ #endif
+
+- if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
++ if (!mq->bounce_buf) {
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+
+- mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+- if (ret)
+- goto cleanup_queue;
+-
+-
+- mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+- if (ret)
++ mq->sg = kmalloc(sizeof(struct scatterlist) *
++ host->max_segs, GFP_KERNEL);
++ if (!mq->sg) {
++ ret = -ENOMEM;
+ goto cleanup_queue;
++ }
++ sg_init_table(mq->sg, host->max_segs);
+ }
+
+ sema_init(&mq->thread_sem, 1);
+@@ -250,22 +219,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
+
+ return 0;
+ free_bounce_sg:
+- kfree(mqrq_cur->bounce_sg);
+- mqrq_cur->bounce_sg = NULL;
+- kfree(mqrq_prev->bounce_sg);
+- mqrq_prev->bounce_sg = NULL;
+-
++ if (mq->bounce_sg)
++ kfree(mq->bounce_sg);
++ mq->bounce_sg = NULL;
+ cleanup_queue:
+- kfree(mqrq_cur->sg);
+- mqrq_cur->sg = NULL;
+- kfree(mqrq_cur->bounce_buf);
+- mqrq_cur->bounce_buf = NULL;
+-
+- kfree(mqrq_prev->sg);
+- mqrq_prev->sg = NULL;
+- kfree(mqrq_prev->bounce_buf);
+- mqrq_prev->bounce_buf = NULL;
+-
++ if (mq->sg)
++ kfree(mq->sg);
++ mq->sg = NULL;
++ if (mq->bounce_buf)
++ kfree(mq->bounce_buf);
++ mq->bounce_buf = NULL;
+ blk_cleanup_queue(mq->queue);
+ return ret;
+ }
+@@ -274,8 +237,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
+ {
+ struct request_queue *q = mq->queue;
+ unsigned long flags;
+- struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+- struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+ /* Make sure the queue isn't suspended, as that will deadlock */
+ mmc_queue_resume(mq);
+@@ -289,23 +250,16 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+- kfree(mqrq_cur->bounce_sg);
+- mqrq_cur->bounce_sg = NULL;
+-
+- kfree(mqrq_cur->sg);
+- mqrq_cur->sg = NULL;
++ if (mq->bounce_sg)
++ kfree(mq->bounce_sg);
++ mq->bounce_sg = NULL;
+
+- kfree(mqrq_cur->bounce_buf);
+- mqrq_cur->bounce_buf = NULL;
++ kfree(mq->sg);
++ mq->sg = NULL;
+
+- kfree(mqrq_prev->bounce_sg);
+- mqrq_prev->bounce_sg = NULL;
+-
+- kfree(mqrq_prev->sg);
+- mqrq_prev->sg = NULL;
+-
+- kfree(mqrq_prev->bounce_buf);
+- mqrq_prev->bounce_buf = NULL;
++ if (mq->bounce_buf)
++ kfree(mq->bounce_buf);
++ mq->bounce_buf = NULL;
+
+ mq->card = NULL;
+ }
+@@ -358,27 +312,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
+ /*
+ * Prepare the sg list(s) to be handed of to the host driver
+ */
+-unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
++unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+ {
+ unsigned int sg_len;
+ size_t buflen;
+ struct scatterlist *sg;
+ int i;
+
+- if (!mqrq->bounce_buf)
+- return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
++ if (!mq->bounce_buf)
++ return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+
+- BUG_ON(!mqrq->bounce_sg);
++ BUG_ON(!mq->bounce_sg);
+
+- sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
++ sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+
+- mqrq->bounce_sg_len = sg_len;
++ mq->bounce_sg_len = sg_len;
+
+ buflen = 0;
+- for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
++ for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ buflen += sg->length;
+
+- sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
++ sg_init_one(mq->sg, mq->bounce_buf, buflen);
+
+ return 1;
+ }
+@@ -387,19 +341,19 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
++void mmc_queue_bounce_pre(struct mmc_queue *mq)
+ {
+ unsigned long flags;
+
+- if (!mqrq->bounce_buf)
++ if (!mq->bounce_buf)
+ return;
+
+- if (rq_data_dir(mqrq->req) != WRITE)
++ if (rq_data_dir(mq->req) != WRITE)
+ return;
+
+ local_irq_save(flags);
+- sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+- mqrq->bounce_buf, mqrq->sg[0].length);
++ sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
++ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
+ }
+
+@@ -407,18 +361,19 @@ void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
++void mmc_queue_bounce_post(struct mmc_queue *mq)
+ {
+ unsigned long flags;
+
+- if (!mqrq->bounce_buf)
++ if (!mq->bounce_buf)
+ return;
+
+- if (rq_data_dir(mqrq->req) != READ)
++ if (rq_data_dir(mq->req) != READ)
+ return;
+
+ local_irq_save(flags);
+- sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+- mqrq->bounce_buf, mqrq->sg[0].length);
++ sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
++ mq->bounce_buf, mq->sg[0].length);
+ local_irq_restore(flags);
+ }
++
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 0e65807..64e66e0 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -4,33 +4,19 @@
+ struct request;
+ struct task_struct;
+
+-struct mmc_blk_request {
+- struct mmc_request mrq;
+- struct mmc_command cmd;
+- struct mmc_command stop;
+- struct mmc_data data;
+-};
+-
+-struct mmc_queue_req {
+- struct request *req;
+- struct mmc_blk_request brq;
+- struct scatterlist *sg;
+- char *bounce_buf;
+- struct scatterlist *bounce_sg;
+- unsigned int bounce_sg_len;
+-};
+-
+ struct mmc_queue {
+ struct mmc_card *card;
+ struct task_struct *thread;
+ struct semaphore thread_sem;
+ unsigned int flags;
++ struct request *req;
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ void *data;
+ struct request_queue *queue;
+- struct mmc_queue_req mqrq[2];
+- struct mmc_queue_req *mqrq_cur;
+- struct mmc_queue_req *mqrq_prev;
++ struct scatterlist *sg;
++ char *bounce_buf;
++ struct scatterlist *bounce_sg;
++ unsigned int bounce_sg_len;
+ };
+
+ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+@@ -38,9 +24,8 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
+ extern void mmc_queue_suspend(struct mmc_queue *);
+ extern void mmc_queue_resume(struct mmc_queue *);
+
+-extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+- struct mmc_queue_req *);
+-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
++extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
++extern void mmc_queue_bounce_pre(struct mmc_queue *);
++extern void mmc_queue_bounce_post(struct mmc_queue *);
+
+ #endif
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 85296df..1f453ac 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -23,8 +23,6 @@
+ #include <linux/log2.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/pm_runtime.h>
+-#include <linux/fault-inject.h>
+-#include <linux/random.h>
+
+ #include <linux/mmc/card.h>
+ #include <linux/mmc/host.h>
+@@ -84,56 +82,6 @@ static void mmc_flush_scheduled_work(void)
+ flush_workqueue(workqueue);
+ }
+
+-#ifdef CONFIG_FAIL_MMC_REQUEST
+-
+-static DECLARE_FAULT_ATTR(fail_mmc_request);
+-
+-static int __init setup_fail_mmc_request(char *str)
+-{
+- return setup_fault_attr(&fail_mmc_request, str);
+-}
+-__setup("fail_mmc_request=", setup_fail_mmc_request);
+-
+-static void mmc_should_fail_request(struct mmc_host *host,
+- struct mmc_request *mrq)
+-{
+- struct mmc_command *cmd = mrq->cmd;
+- struct mmc_data *data = mrq->data;
+- static const int data_errors[] = {
+- -ETIMEDOUT,
+- -EILSEQ,
+- -EIO,
+- };
+-
+- if (!data)
+- return;
+-
+- if (cmd->error || data->error || !host->make_it_fail ||
+- !should_fail(&fail_mmc_request, data->blksz * data->blocks))
+- return;
+-
+- data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
+- data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
+-}
+-
+-static int __init fail_mmc_request_debugfs(void)
+-{
+- return init_fault_attr_dentries(&fail_mmc_request,
+- "fail_mmc_request");
+-}
+-
+-late_initcall(fail_mmc_request_debugfs);
+-
+-#else /* CONFIG_FAIL_MMC_REQUEST */
+-
+-static inline void mmc_should_fail_request(struct mmc_host *host,
+- struct mmc_data *data)
+-{
+-}
+-
+-#endif /* CONFIG_FAIL_MMC_REQUEST */
+-
+-
+ /**
+ * mmc_request_done - finish processing an MMC request
+ * @host: MMC host which completed request
+@@ -160,8 +108,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
+ cmd->error = 0;
+ host->ops->request(host, mrq);
+ } else {
+- mmc_should_fail_request(host, mrq);
+-
+ led_trigger_event(host->led, LED_OFF);
+
+ pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
+@@ -252,73 +198,8 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
+
+ static void mmc_wait_done(struct mmc_request *mrq)
+ {
+- complete(&mrq->completion);
+-}
+-
+-/**
+- * mmc_pre_req - Prepare for a new request
+- * @host: MMC host to prepare command
+- * @mrq: MMC request to prepare for
+- * @is_first_req: true if there is no previous started request
+- * that may run in parellel to this call, otherwise false
+- *
+- * mmc_pre_req() is called in prior to mmc_start_req() to let
+- * host prepare for the new request. Preparation of a request may be
+- * performed while another request is running on the host.
+- */
+-void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+- bool is_first_req)
+-{
+- if (host->ops->pre_req)
+- host->ops->pre_req(host, mrq, is_first_req);
++ complete(mrq->done_data);
+ }
+-EXPORT_SYMBOL(mmc_pre_req);
+-
+-/**
+- * mmc_post_req - Post process a completed request
+- * @host: MMC host to post process command
+- * @mrq: MMC request to post process for
+- * @err: Error, if none zero, clean up any resources made in pre_req
+- *
+- * Let the host post process a completed request. Post processing of
+- * a request may be performed while another reuqest is running.
+- */
+-void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, int err)
+-{
+- if (host->ops->post_req)
+- host->ops->post_req(host, mrq, err);
+-}
+-EXPORT_SYMBOL(mmc_post_req);
+-
+-/**
+- * mmc_start_req - start a request
+- * @host: MMC host to start command
+- * @mrq: MMC request to start
+- *
+- * Start a new MMC custom command request for a host.
+- * Does not wait for the command to complete.
+- */
+-void mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+-{
+- init_completion(&mrq->completion);
+- mrq->done = mmc_wait_done;
+-
+- mmc_start_request(host, mrq);
+-}
+-EXPORT_SYMBOL(mmc_start_req);
+-
+-/**
+- * mmc_wait_for_req_done - wait for completion of request
+- * @mrq: MMC request to wait for
+- *
+- * Wait for the command to complete. Does not attempt to parse the
+- * response.
+- */
+-void mmc_wait_for_req_done(struct mmc_request *mrq)
+-{
+- wait_for_completion(&mrq->completion);
+-}
+-EXPORT_SYMBOL(mmc_wait_for_req_done);
+
+ /**
+ * mmc_wait_for_req - start a request and wait for completion
+@@ -331,9 +212,16 @@ EXPORT_SYMBOL(mmc_wait_for_req_done);
+ */
+ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
+ {
+- mmc_start_req(host, mrq);
+- mmc_wait_for_req_done(mrq);
++ DECLARE_COMPLETION_ONSTACK(complete);
++
++ mrq->done_data = &complete;
++ mrq->done = mmc_wait_done;
++
++ mmc_start_request(host, mrq);
++
++ wait_for_completion(&complete);
+ }
++
+ EXPORT_SYMBOL(mmc_wait_for_req);
+
+ /**
+diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
+index 588e76f..998797e 100644
+--- a/drivers/mmc/core/debugfs.c
++++ b/drivers/mmc/core/debugfs.c
+@@ -188,11 +188,6 @@ void mmc_add_host_debugfs(struct mmc_host *host)
+ root, &host->clk_delay))
+ goto err_node;
+ #endif
+-#ifdef CONFIG_FAIL_MMC_REQUEST
+- if (!debugfs_create_u8("make-it-fail", S_IRUSR | S_IWUSR,
+- root, &host->make_it_fail))
+- goto err_node;
+-#endif
+ return;
+
+ err_node:
+diff --git a/drivers/mmc/host/glamo-mci.c b/drivers/mmc/host/glamo-mci.c
+index 02c4b69..f298658 100644
+--- a/drivers/mmc/host/glamo-mci.c
++++ b/drivers/mmc/host/glamo-mci.c
+@@ -49,10 +49,10 @@ struct glamo_mci_host {
+ unsigned short vdd;
+ char power_mode;
+
+- unsigned long transfer_start;
+- unsigned long request_start;
+-
+ unsigned char request_counter;
++
++ struct workqueue_struct *workqueue;
++ struct work_struct read_work;
+ };
+
+ static void glamo_mci_send_request(struct mmc_host *mmc,
+@@ -165,21 +165,12 @@ static int glamo_mci_clock_enable(struct mmc_host *mmc)
+ return 0;
+ }
+
+-static void __iomem *glamo_mci_get_data_addr(struct glamo_mci_host *host,
+- struct mmc_data *data)
+-{
+- void __iomem *addr = host->data_base;
+-
+- if (data->host_cookie & 1)
+- addr += resource_size(host->data_mem) / 2;
+-
+- return addr;
+-}
+
++#ifndef GLAMO_MCI_WORKER
+ static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data)
+ {
+- void __iomem *from_ptr = glamo_mci_get_data_addr(host, data);
+ struct sg_mapping_iter miter;
++ uint16_t __iomem *from_ptr = host->data_base;
+
+ dev_dbg(&host->pdev->dev, "pio_read():\n");
+
+@@ -187,7 +178,9 @@ static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data)
+
+ while (sg_miter_next(&miter)) {
+ memcpy(miter.addr, from_ptr, miter.length);
+- from_ptr += miter.length;
++ from_ptr += miter.length >> 1;
++
++ data->bytes_xfered += miter.length;
+ }
+
+ sg_miter_stop(&miter);
+@@ -195,18 +188,19 @@ static void do_pio_read(struct glamo_mci_host *host, struct mmc_data *data)
+ dev_dbg(&host->pdev->dev, "pio_read(): "
+ "complete (no more data)\n");
+ }
++#endif
+
+ static void do_pio_write(struct glamo_mci_host *host, struct mmc_data *data)
+ {
+- void __iomem *to_ptr = glamo_mci_get_data_addr(host, data);
+ struct sg_mapping_iter miter;
++ uint16_t __iomem *to_ptr = host->data_base;
+
+ dev_dbg(&host->pdev->dev, "pio_write():\n");
+ sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG);
+
+ while (sg_miter_next(&miter)) {
+ memcpy(to_ptr, miter.addr, miter.length);
+- to_ptr += miter.length;
++ to_ptr += miter.length >> 1;
+
+ data->bytes_xfered += miter.length;
+ }
+@@ -284,11 +278,12 @@ static irqreturn_t glamo_mci_irq(int irq, void *data)
+ if (mrq->stop)
+ glamo_mci_send_command(host, mrq->stop);
+
+- if (mrq->data && (mrq->data->flags & MMC_DATA_READ)) {
+- mrq->data->bytes_xfered = mrq->data->blocks * mrq->data->blksz;
+- if (!mrq->data->host_cookie)
+- do_pio_read(host, mrq->data);
+- }
++ if (cmd->data->flags & MMC_DATA_READ)
++#ifndef GLAMO_MCI_WORKER
++ do_pio_read(host, cmd->data);
++#else
++ flush_workqueue(host->workqueue);
++#endif
+
+ if (mrq->stop)
+ mrq->stop->error = glamo_mci_wait_idle(host, jiffies + HZ);
+@@ -300,6 +295,64 @@ done:
+ return IRQ_HANDLED;
+ }
+
++#ifdef GLAMO_MCI_WORKER
++static void glamo_mci_read_worker(struct work_struct *work)
++{
++ struct glamo_mci_host *host = container_of(work, struct glamo_mci_host,
++ read_work);
++ struct mmc_command *cmd;
++ uint16_t status;
++ uint16_t blocks_ready;
++ size_t data_read = 0;
++ size_t data_ready;
++ struct scatterlist *sg;
++ uint16_t __iomem *from_ptr = host->data_base;
++ void *sg_pointer;
++
++
++ cmd = host->mrq->cmd;
++ sg = cmd->data->sg;
++ do {
++ /*
++ * TODO: How to get rid of that?
++ * Maybe just drop it... In fact, it is already handled in
++ * the IRQ handler, maybe we should only check cmd->error.
++ * But the question is: what happens between the moment
++ * the error occurs, and the moment the IRQ handler handles it?
++ */
++ status = glamomci_reg_read(host, GLAMO_REG_MMC_RB_STAT1);
++
++ if (status & (GLAMO_STAT1_MMC_RTOUT | GLAMO_STAT1_MMC_DTOUT))
++ cmd->error = -ETIMEDOUT;
++ if (status & (GLAMO_STAT1_MMC_BWERR | GLAMO_STAT1_MMC_BRERR))
++ cmd->error = -EILSEQ;
++ if (cmd->error) {
++ dev_info(&host->pdev->dev, "Error after cmd: 0x%x\n",
++ status);
++ return;
++ }
++
++ blocks_ready = glamomci_reg_read(host, GLAMO_REG_MMC_RB_BLKCNT);
++ data_ready = blocks_ready * cmd->data->blksz;
++
++ if (data_ready == data_read)
++ yield();
++
++ while (sg && data_read + sg->length <= data_ready) {
++ sg_pointer = page_address(sg_page(sg)) + sg->offset;
++ memcpy(sg_pointer, from_ptr, sg->length);
++ from_ptr += sg->length >> 1;
++
++ data_read += sg->length;
++
++ sg = sg_next(sg);
++ }
++
++ } while (sg);
++ cmd->data->bytes_xfered = data_read;
++}
++#endif
++
+ static void glamo_mci_send_command(struct glamo_mci_host *host,
+ struct mmc_command *cmd)
+ {
+@@ -480,29 +533,28 @@ static void glamo_mci_send_command(struct glamo_mci_host *host,
+ (readw(&reg_resp[2]) << 24);
+ }
+ }
++
++#ifdef GLAMO_MCI_WORKER
++ /* We'll only get an interrupt when all data has been transfered.
++ By starting to copy data when it's avaiable we can increase
++ throughput by up to 30%. */
++ if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
++ queue_work(host->workqueue, &host->read_work);
++#endif
++
+ }
+
+ static int glamo_mci_prepare_pio(struct glamo_mci_host *host,
+ struct mmc_data *data)
+ {
+- unsigned long addr = host->data_mem->start;
+-
+- if (data->host_cookie & 1)
+- addr += resource_size(host->data_mem) / 2;
+-
+ /* set up the block info */
+ glamomci_reg_write(host, GLAMO_REG_MMC_DATBLKLEN, data->blksz);
+ glamomci_reg_write(host, GLAMO_REG_MMC_DATBLKCNT, data->blocks);
+
+- if (data->flags & MMC_DATA_WRITE) {
+- glamomci_reg_write(host, GLAMO_REG_MMC_WDATADS1, addr);
+- glamomci_reg_write(host, GLAMO_REG_MMC_WDATADS2, addr >> 16);
+- } else {
+- glamomci_reg_write(host, GLAMO_REG_MMC_RDATADS1, addr);
+- glamomci_reg_write(host, GLAMO_REG_MMC_RDATADS2, addr >> 16);
+- }
++ data->bytes_xfered = 0;
+
+- if ((data->flags & MMC_DATA_WRITE) && !data->host_cookie)
++ /* if write, prep the write into the shared RAM before the command */
++ if (data->flags & MMC_DATA_WRITE)
+ do_pio_write(host, data);
+
+ dev_dbg(&host->pdev->dev, "(blksz=%d, count=%d)\n",
+@@ -517,8 +569,6 @@ static void glamo_mci_send_request(struct mmc_host *mmc,
+ struct mmc_command *cmd = mrq->cmd;
+
+ host->request_counter++;
+- host->request_start = jiffies;
+-
+ if (cmd->data) {
+ if (glamo_mci_prepare_pio(host, cmd->data)) {
+ cmd->error = -EIO;
+@@ -639,42 +689,21 @@ static void glamo_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ mmc_host_lazy_disable(host->mmc);
+ }
+
+-static void glamo_mci_pre_request(struct mmc_host *mmc,
+- struct mmc_request *mrq, bool is_first_req)
+-{
+- struct glamo_mci_host *host = mmc_priv(mmc);
+-
+- mrq->data->host_cookie = (host->request_counter & 1) | 2;
+-
+- /* if write, prep the write into the shared RAM before the command */
+- if (mrq->data->flags & MMC_DATA_WRITE)
+- do_pio_write(host, mrq->data);
+-}
+
+-static void glamo_mci_post_request(struct mmc_host *mmc,
+- struct mmc_request *mrq, int err)
++/*
++ * no physical write protect supported by us
++ */
++static int glamo_mci_get_ro(struct mmc_host *mmc)
+ {
+- struct glamo_mci_host *host = mmc_priv(mmc);
+-
+- if (!mrq->data->host_cookie)
+- return;
+-
+- if (err)
+- return;
+-
+- if (mrq->data->flags & MMC_DATA_READ)
+- do_pio_read(host, mrq->data);
+-
+- mrq->data->host_cookie = 0;
++ return 0;
+ }
+
+ static struct mmc_host_ops glamo_mci_ops = {
+ .enable = glamo_mci_clock_enable,
+ .disable = glamo_mci_clock_disable,
+ .request = glamo_mci_send_request,
+- .post_req = glamo_mci_post_request,
+- .pre_req = glamo_mci_pre_request,
+ .set_ios = glamo_mci_set_ios,
++ .get_ro = glamo_mci_get_ro,
+ };
+
+ static int __devinit glamo_mci_probe(struct platform_device *pdev)
+@@ -702,6 +731,11 @@ static int __devinit glamo_mci_probe(struct platform_device *pdev)
+
+ host->irq = platform_get_irq(pdev, 0);
+
++#ifdef GLAMO_MCI_WORKER
++ INIT_WORK(&host->read_work, glamo_mci_read_worker);
++ host->workqueue = create_singlethread_workqueue("glamo-mci-read");
++#endif
++
+ host->regulator = regulator_get(pdev->dev.parent, "SD_3V3");
+ if (IS_ERR(host->regulator)) {
+ dev_err(&pdev->dev, "Cannot proceed without regulator.\n");
+@@ -789,7 +823,7 @@ static int __devinit glamo_mci_probe(struct platform_device *pdev)
+
+ mmc->max_blk_count = (1 << 16) - 1; /* GLAMO_REG_MMC_RB_BLKCNT */
+ mmc->max_blk_size = (1 << 12) - 1; /* GLAMO_REG_MMC_RB_BLKLEN */
+- mmc->max_req_size = resource_size(host->data_mem) / 2;
++ mmc->max_req_size = resource_size(host->data_mem);
+ mmc->max_seg_size = mmc->max_req_size;
+ mmc->max_segs = 128;
+
+@@ -834,6 +868,9 @@ probe_free_mem_region_mmio:
+ probe_regulator_put:
+ regulator_put(host->regulator);
+ probe_free_host:
++#ifdef GLAMO_MCI_WORKER
++ destroy_workqueue(host->workqueue);
++#endif
+ mmc_free_host(mmc);
+ probe_out:
+ return ret;
+@@ -844,6 +881,11 @@ static int __devexit glamo_mci_remove(struct platform_device *pdev)
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct glamo_mci_host *host = mmc_priv(mmc);
+
++#ifdef GLAMO_MCI_WORKER
++ flush_workqueue(host->workqueue);
++ destroy_workqueue(host->workqueue);
++#endif
++
+ mmc_host_enable(mmc);
+ mmc_remove_host(mmc);
+ mmc_host_disable(mmc);
+diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
+index 5bbfb71..07f27af 100644
+--- a/include/linux/mmc/core.h
++++ b/include/linux/mmc/core.h
+@@ -117,7 +117,6 @@ struct mmc_data {
+
+ unsigned int sg_len; /* size of scatter list */
+ struct scatterlist *sg; /* I/O scatter list */
+- s32 host_cookie; /* host private data */
+ };
+
+ struct mmc_request {
+@@ -125,19 +124,13 @@ struct mmc_request {
+ struct mmc_data *data;
+ struct mmc_command *stop;
+
+- struct completion completion;
++ void *done_data; /* completion data */
+ void (*done)(struct mmc_request *);/* completion function */
+ };
+
+ struct mmc_host;
+ struct mmc_card;
+
+-extern void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+- bool is_first_req);
+-extern void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+- int err);
+-extern void mmc_start_req(struct mmc_host *host, struct mmc_request *mrq);
+-extern void mmc_wait_for_req_done(struct mmc_request *mrq);
+ extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
+ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
+ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 8b2b44b..bcb793e 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -88,15 +88,6 @@ struct mmc_host_ops {
+ */
+ int (*enable)(struct mmc_host *host);
+ int (*disable)(struct mmc_host *host, int lazy);
+- /*
+- * It is optional for the host to implement pre_req and post_req in
+- * order to support double buffering of requests (prepare one
+- * request while another request is active).
+- */
+- void (*post_req)(struct mmc_host *host, struct mmc_request *req,
+- int err);
+- void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
+- bool is_first_req);
+ void (*request)(struct mmc_host *host, struct mmc_request *req);
+ /*
+ * Avoid calling these three functions too often or in a "fast path",
+@@ -251,9 +242,7 @@ struct mmc_host {
+ #endif
+
+ struct dentry *debugfs_root;
+-#ifdef CONFIG_FAIL_MMC_REQUEST
+- u8 make_it_fail;
+-#endif
++
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 330fc70..c768bcd 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -1057,17 +1057,6 @@ config FAIL_IO_TIMEOUT
+ Only works with drivers that use the generic timeout handling,
+ for others it wont do anything.
+
+-config FAIL_MMC_REQUEST
+- bool "Fault-injection capability for MMC IO"
+- select DEBUG_FS
+- depends on FAULT_INJECTION
+- help
+- Provide fault-injection capability for MMC IO.
+- This will make the mmc core return data errors. This is
+- useful for testing the error handling in the mmc block device
+- and how the mmc host driver handle retries from
+- the block device.
+-
+ config FAULT_INJECTION_DEBUG_FS
+ bool "Debugfs entries for fault-injection capabilities"
+ depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/recipes/linux/linux-openmoko_2.6.39.bb b/recipes/linux/linux-openmoko_2.6.39.bb
index 633349ecc1..2824f6c6fd 100644
--- a/recipes/linux/linux-openmoko_2.6.39.bb
+++ b/recipes/linux/linux-openmoko_2.6.39.bb
@@ -1,3 +1,3 @@
require linux_${PV}.bb
require linux-openmoko.inc
-OM-PR = "1"
+OM-PR = "2"
diff --git a/recipes/linux/linux_2.6.39.bb b/recipes/linux/linux_2.6.39.bb
index ef6f7e1ac0..2342f88541 100644
--- a/recipes/linux/linux_2.6.39.bb
+++ b/recipes/linux/linux_2.6.39.bb
@@ -10,6 +10,7 @@ SRC_URI = "${KERNELORG_MIRROR}/pub/linux/kernel/v2.6/linux-${PV}.tar.bz2;name=ke
SRC_URI_append_om-gta02 = " \
file://openmoko.patch \
+ file://shr.patch \
"
SRC_URI_append_spitz = " file://${LOGO_SIZE}/logo_linux_clut224.ppm.bz2 "