diff options
author | Naveen Kumar Arepalli <naveenk@nvidia.com> | 2013-03-04 16:18:00 +0530 |
---|---|---|
committer | Mandar Padmawar <mpadmawar@nvidia.com> | 2013-03-11 02:37:12 -0700 |
commit | 2ee31b7823bdb51a64c18d4e311e786aa3b8acbf (patch) | |
tree | 3a40f09475a942f7615dc49d544962bbb7edf59a /drivers | |
parent | f8304fa2c3a1e073a90b9abd8bbd6aaf629737cf (diff) |
Revert "mmc: core: Support packed command for eMMC4.5 device"
Packed command implementation is available in Upstream.
Use Upstream Packed command code.
Bug 1242730
This reverts commit fa3a748000b65ad4854a95761b6067f89f1c4c61.
Change-Id: I807d6911aa20e9fe1664686bf43811bec7d274f5
Signed-off-by: Naveen Kumar Arepalli <naveenk@nvidia.com>
Reviewed-on: http://git-master/r/206215
Reviewed-by: Mandar Padmawar <mpadmawar@nvidia.com>
Tested-by: Mandar Padmawar <mpadmawar@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/mmc/card/block.c | 447 | ||||
-rw-r--r-- | drivers/mmc/card/queue.c | 50 | ||||
-rw-r--r-- | drivers/mmc/card/queue.h | 14 | ||||
-rw-r--r-- | drivers/mmc/core/mmc.c | 24 | ||||
-rw-r--r-- | drivers/mmc/core/mmc_ops.c | 1 |
5 files changed, 20 insertions, 516 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index cc34388941aa..09d7fa3ad2db 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -79,15 +79,6 @@ static DECLARE_BITMAP(name_use, 256); /* * There is one mmc_blk_data per slot. */ - -#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ - (req->cmd_flags & REQ_META)) && \ - (rq_data_dir(req) == WRITE)) -#define PACKED_CMD_VER 0x01 -#define PACKED_CMD_RD 0x01 -#define PACKED_CMD_WR 0x02 - - struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; @@ -107,7 +98,7 @@ struct mmc_blk_data { #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) -#define MMC_BLK_WR_HDR BIT(4) + /* * Only set in main mmc_blk_data associated * with mmc_card with mmc_set_drvdata, and keeps @@ -1062,9 +1053,7 @@ static int mmc_blk_err_check(struct mmc_card *card, * kind. If it was a write, we may have transitioned to * program mode, which we have to wait for it to complete. */ - if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) || - (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) { - + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { u32 status; do { int err = get_card_status(card, &status, 5); @@ -1101,61 +1090,12 @@ static int mmc_blk_err_check(struct mmc_card *card, if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; - if (mq_mrq->packed_cmd != MMC_PACKED_NONE) { - if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) - return MMC_BLK_PARTIAL; - else - return MMC_BLK_SUCCESS; - } - if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } -static int mmc_blk_packed_err_check(struct mmc_card *card, - struct mmc_async_req *areq) -{ - struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, - mmc_active); - struct request *req = mq_rq->req; - int err, check, status; - u8 ext_csd[512]; - - check = mmc_blk_err_check(card, areq); - err = get_card_status(card, &status, 0); - 
if (err) { - pr_err("%s: error %d sending status command\n", - req->rq_disk->disk_name, err); - return MMC_BLK_ABORT; - } - - if (status & R1_EXP_EVENT) { - err = mmc_send_ext_csd(card, ext_csd); - if (err) { - pr_err("%s: error %d sending ext_csd\n", - req->rq_disk->disk_name, err); - return MMC_BLK_ABORT; - } - - if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & - EXT_CSD_PACKED_FAILURE) && - (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & - EXT_CSD_PACKED_GENERIC_ERROR)) { - if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & - EXT_CSD_PACKED_INDEXED_ERROR) { - mq_rq->packed_fail_idx = - ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; - return MMC_BLK_PARTIAL; - } - } - } - - return check; -} - - static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, @@ -1335,283 +1275,6 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, return ret; } - -static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) -{ - struct request_queue *q = mq->queue; - struct mmc_card *card = mq->card; - struct request *cur = req, *next = NULL; - struct mmc_blk_data *md = mq->data; - bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; - unsigned int req_sectors = 0, phys_segments = 0; - unsigned int max_blk_count, max_phys_segs; - u8 put_back = 0; - u8 max_packed_rw = 0; - u8 reqs = 0; - - mq->mqrq_cur->packed_num = 0; - - if (!(md->flags & MMC_BLK_CMD23) || - !card->ext_csd.packed_event_en) - goto no_packed; - - if (rq_data_dir(cur) == READ) - max_packed_rw = card->ext_csd.max_packed_reads; - else - max_packed_rw = card->ext_csd.max_packed_writes; - - if (max_packed_rw == 0) - goto no_packed; - - if (mmc_req_rel_wr(cur) && - (md->flags & MMC_BLK_REL_WR) && - !en_rel_wr) { - goto no_packed; - } - - max_blk_count = min(card->host->max_blk_count, - card->host->max_req_size >> 9); - if (unlikely(max_blk_count > 0xffff)) - max_blk_count = 0xffff; - - max_phys_segs = queue_max_segments(q); - req_sectors += 
blk_rq_sectors(cur); - phys_segments += req->nr_phys_segments; - - if (rq_data_dir(cur) == WRITE) { - req_sectors++; - phys_segments++; - } - - while (reqs < max_packed_rw - 1) { - spin_lock_irq(q->queue_lock); - next = blk_fetch_request(q); - spin_unlock_irq(q->queue_lock); - if (!next) - break; - - if (next->cmd_flags & REQ_DISCARD || - next->cmd_flags & REQ_FLUSH) { - put_back = 1; - break; - } - - if (rq_data_dir(cur) != rq_data_dir(next)) { - put_back = 1; - break; - } - - if (mmc_req_rel_wr(next) && - (md->flags & MMC_BLK_REL_WR) && - !en_rel_wr) { - put_back = 1; - break; - } - - req_sectors += blk_rq_sectors(next); - if (req_sectors > max_blk_count) { - put_back = 1; - break; - } - - phys_segments += next->nr_phys_segments; - if (phys_segments > max_phys_segs) { - put_back = 1; - break; - } - - list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list); - cur = next; - reqs++; - } - - if (put_back) { - spin_lock_irq(q->queue_lock); - blk_requeue_request(q, next); - spin_unlock_irq(q->queue_lock); - } - - if (reqs > 0) { - list_add(&req->queuelist, &mq->mqrq_cur->packed_list); - mq->mqrq_cur->packed_num = ++reqs; - return reqs; - } - -no_packed: - mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE; - mq->mqrq_cur->packed_num = 0; - return 0; -} - -static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, - struct mmc_card *card, - struct mmc_queue *mq, - u8 reqs) -{ - struct mmc_blk_request *brq = &mqrq->brq; - struct request *req = mqrq->req; - struct request *prq; - struct mmc_blk_data *md = mq->data; - bool do_rel_wr; - u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr; - u8 i = 1; - - mqrq->packed_cmd = (rq_data_dir(req) == READ) ? - MMC_PACKED_WR_HDR : MMC_PACKED_WRITE; - mqrq->packed_blocks = 0; - mqrq->packed_fail_idx = -1; - - memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr)); - packed_cmd_hdr[0] = (reqs << 16) | - (((rq_data_dir(req) == READ) ? 
- PACKED_CMD_RD : PACKED_CMD_WR) << 8) | - PACKED_CMD_VER; - - /* - * Argument for each entry of packed group - */ - list_for_each_entry(prq, &mqrq->packed_list, queuelist) { - do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); - /* Argument of CMD23*/ - packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | - blk_rq_sectors(prq); - /* Argument of CMD18 or CMD25 */ - packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ? - blk_rq_pos(prq) : blk_rq_pos(prq) << 9; - mqrq->packed_blocks += blk_rq_sectors(prq); - i++; - } - - memset(brq, 0, sizeof(struct mmc_blk_request)); - brq->mrq.cmd = &brq->cmd; - brq->mrq.data = &brq->data; - brq->mrq.sbc = &brq->sbc; - brq->mrq.stop = &brq->stop; - - brq->sbc.opcode = MMC_SET_BLOCK_COUNT; - brq->sbc.arg = MMC_CMD23_ARG_PACKED | - ((rq_data_dir(req) == READ) ? 1 : mqrq->packed_blocks + 1); - brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; - - brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; - brq->cmd.arg = blk_rq_pos(req); - if (!mmc_card_blockaddr(card)) - brq->cmd.arg <<= 9; - brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; - - brq->data.blksz = 512; - /* - * Write separately the packd command header only for packed read. - * In case of packed write, header is sent with blocks of data. - */ - brq->data.blocks = (rq_data_dir(req) == READ) ? 
- 1 : mqrq->packed_blocks + 1; - brq->data.flags |= MMC_DATA_WRITE; - - brq->stop.opcode = MMC_STOP_TRANSMISSION; - brq->stop.arg = 0; - brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - - mmc_set_data_timeout(&brq->data, card); - - brq->data.sg = mqrq->sg; - brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); - - mqrq->mmc_active.mrq = &brq->mrq; - mqrq->mmc_active.err_check = mmc_blk_packed_err_check; - - mmc_queue_bounce_pre(mqrq); -} - -static void mmc_blk_packed_rrq_prep(struct mmc_queue_req *mqrq, - struct mmc_card *card, - struct mmc_queue *mq) -{ - struct mmc_blk_request *brq = &mqrq->brq; - struct request *req = mqrq->req; - - mqrq->packed_cmd = MMC_PACKED_READ; - - memset(brq, 0, sizeof(struct mmc_blk_request)); - brq->mrq.cmd = &brq->cmd; - brq->mrq.data = &brq->data; - brq->mrq.stop = &brq->stop; - - brq->cmd.opcode = MMC_READ_MULTIPLE_BLOCK; - brq->cmd.arg = blk_rq_pos(req); - if (!mmc_card_blockaddr(card)) - brq->cmd.arg <<= 9; - brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; - brq->data.blksz = 512; - brq->data.blocks = mqrq->packed_blocks; - brq->data.flags |= MMC_DATA_READ; - - brq->stop.opcode = MMC_STOP_TRANSMISSION; - brq->stop.arg = 0; - brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - - mmc_set_data_timeout(&brq->data, card); - - brq->data.sg = mqrq->sg; - brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); - - mqrq->mmc_active.mrq = &brq->mrq; - mqrq->mmc_active.err_check = mmc_blk_packed_err_check; - - mmc_queue_bounce_pre(mqrq); -} - - -static int mmc_blk_chk_hdr_err(struct mmc_queue *mq, int status) -{ - struct mmc_blk_data *md = mq->data; - struct mmc_card *card = md->queue.card; - int type = MMC_BLK_WR_HDR; - int err; - - switch (status) { - case MMC_BLK_PARTIAL: - case MMC_BLK_RETRY: - err = 0; - break; - case MMC_BLK_CMD_ERR: - case MMC_BLK_ABORT: - case MMC_BLK_DATA_ERR: - case MMC_BLK_ECC_ERR: - err = mmc_blk_reset(md, card->host, type); - break; - } - - return err; -} - -static int 
mmc_blk_issue_packed_rd(struct mmc_queue *mq, - struct mmc_queue_req *mq_rq) -{ - struct mmc_blk_data *md = mq->data; - struct mmc_card *card = md->queue.card; - int status, ret = -EIO, retry = 2; - - do { - mmc_start_req(card->host, NULL, (int *) &status); - if (status) { - ret = mmc_blk_chk_hdr_err(mq, status); - if (ret) - break; - mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq, mq_rq->packed_num); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); - } else { - mmc_blk_packed_rrq_prep(mq_rq, card, mq); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); - ret = 0; - break; - } - } while (retry-- > 0); - - return ret; -} - - static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; @@ -1620,32 +1283,21 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) int ret = 1, disable_multi = 0, retry = 0, type; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; - struct request *req, *prq; + struct request *req; struct mmc_async_req *areq; - u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; - if (rqc) - reqs = mmc_blk_prep_packed_list(mq, rqc); - do { if (rqc) { - if (reqs >= 2) - mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq, reqs); - else - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); - if (!areq) { - if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR) - goto snd_packed_rd; - else - return 0; - } + if (!areq) + return 0; mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; @@ -1660,31 +1312,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * A block was successfully transferred. 
*/ mmc_blk_reset_success(md, type); - if (mq_rq->packed_cmd != MMC_PACKED_NONE) { - int idx = mq_rq->packed_fail_idx, i = 0; - while (!list_empty(&mq_rq->packed_list)) { - prq = list_entry_rq(mq_rq->packed_list.next); - if (idx == i) { - /* retry from error index */ - mq_rq->packed_num -= idx; - if (mq_rq->packed_num == 1) - mq_rq->packed_cmd = MMC_PACKED_NONE; - mq_rq->req = prq; - ret = 1; - break; - } - list_del_init(&prq->queuelist); - ret = blk_end_request(prq, - 0, blk_rq_bytes(prq)); - i++; - } - if (idx == -1) - mq_rq->packed_num = 0; - break; - } else { - ret = blk_end_request(req, 0, + ret = blk_end_request(req, 0, brq->data.bytes_xfered); - } /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors @@ -1738,75 +1367,33 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) brq->data.blksz); if (!ret) goto start_new_req; - if (mq_rq->packed_cmd != MMC_PACKED_NONE) - break; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; } if (ret) { - if (mq_rq->packed_cmd == MMC_PACKED_NONE) { - /* - * In case of a incomplete request - * prepare it again and resend. - */ - mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); - } else { - mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq, mq_rq->packed_num); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); - if (mq_rq->packed_cmd == MMC_PACKED_WR_HDR) { - if (mmc_blk_issue_packed_rd(mq, mq_rq)) - goto cmd_abort; - } - } - + /* + * In case of a incomplete request + * prepare it again and resend. 
+ */ + mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); + mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } } while (ret); if (brq->cmd.resp[0] & R1_URGENT_BKOPS) mmc_card_set_need_bkops(card); -snd_packed_rd: - if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR) { - if (mmc_blk_issue_packed_rd(mq, mq->mqrq_cur)) - goto start_new_req; - } - return 1; cmd_abort: - if (mq_rq->packed_cmd == MMC_PACKED_NONE) { - if (mmc_card_removed(card)) - req->cmd_flags |= REQ_QUIET; - while (ret) - ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); - } else { - while (!list_empty(&mq_rq->packed_list)) { - prq = list_entry_rq(mq_rq->packed_list.next); - list_del_init(&prq->queuelist); - blk_end_request(prq, -EIO, blk_rq_bytes(prq)); - } - } - + if (mmc_card_removed(card)) + req->cmd_flags |= REQ_QUIET; + while (ret) + ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); start_new_req: if (rqc) { - /* - * If current request is packed, it need to put back. - */ - if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) { - while (!list_empty(&mq->mqrq_cur->packed_list)) { - prq = list_entry_rq(mq->mqrq_cur->packed_list.prev); - if (prq->queuelist.prev != &mq->mqrq_cur->packed_list) { - list_del_init(&prq->queuelist); - blk_requeue_request(mq->queue, prq); - } else { - list_del_init(&prq->queuelist); - } - } - } - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index d584d1f3ef8c..87186868ae6d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -183,8 +183,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur)); memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev)); - INIT_LIST_HEAD(&mqrq_cur->packed_list); - INIT_LIST_HEAD(&mqrq_prev->packed_list); mq->mqrq_cur = mqrq_cur; mq->mqrq_prev = mqrq_prev; mq->queue->queuedata = mq; @@ -385,40 +383,6 @@ void mmc_queue_resume(struct 
mmc_queue *mq) } } -static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq, - struct mmc_queue_req *mqrq, - struct scatterlist *sg) -{ - struct scatterlist *__sg; - unsigned int sg_len = 0; - struct request *req; - enum mmc_packed_cmd cmd; - - cmd = mqrq->packed_cmd; - - if (cmd == MMC_PACKED_WR_HDR || cmd == MMC_PACKED_WRITE) { - __sg = sg; - sg_set_buf(__sg, mqrq->packed_cmd_hdr, - sizeof(mqrq->packed_cmd_hdr)); - sg_len++; - if (cmd == MMC_PACKED_WR_HDR) { - sg_mark_end(__sg); - return sg_len; - } - __sg->page_link &= ~0x02; - } - - __sg = sg + sg_len; - list_for_each_entry(req, &mqrq->packed_list, queuelist) { - sg_len += blk_rq_map_sg(mq->queue, req, __sg); - __sg = sg + (sg_len - 1); - (__sg++)->page_link &= ~0x02; - } - sg_mark_end(sg + (sg_len - 1)); - return sg_len; -} - - /* * Prepare the sg list(s) to be handed of to the host driver */ @@ -429,20 +393,12 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) struct scatterlist *sg; int i; - if (!mqrq->bounce_buf) { - if (!list_empty(&mqrq->packed_list)) - return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg); - else - return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); - } - + if (!mqrq->bounce_buf) + return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); BUG_ON(!mqrq->bounce_sg); - if (!list_empty(&mqrq->packed_list)) - sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg); - else - sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); + sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); mqrq->bounce_sg_len = sg_len; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 4424280c0394..d2a1eb4b9f9f 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -12,14 +12,6 @@ struct mmc_blk_request { struct mmc_data data; }; -enum mmc_packed_cmd { - MMC_PACKED_NONE = 0, - MMC_PACKED_WR_HDR, - MMC_PACKED_WRITE, - MMC_PACKED_READ, -}; - - struct mmc_queue_req { struct request *req; struct mmc_blk_request brq; @@ -28,12 
+20,6 @@ struct mmc_queue_req { struct scatterlist *bounce_sg; unsigned int bounce_sg_len; struct mmc_async_req mmc_active; - struct list_head packed_list; - u32 packed_cmd_hdr[128]; - unsigned int packed_blocks; - enum mmc_packed_cmd packed_cmd; - int packed_fail_idx; - u8 packed_num; }; struct mmc_queue { diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index fb2fbb66a761..a12d8dfc9d24 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -475,11 +475,6 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.boot_ro_lockable = true; } - card->ext_csd.max_packed_writes = - ext_csd[EXT_CSD_MAX_PACKED_WRITES]; - card->ext_csd.max_packed_reads = - ext_csd[EXT_CSD_MAX_PACKED_READS]; - if (card->ext_csd.rev >= 5) { /* check whether the eMMC card supports HPI */ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { @@ -1328,25 +1323,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } } - if ((host->caps2 & MMC_CAP2_PACKED_CMD) && - (card->ext_csd.max_packed_writes > 0) && - (card->ext_csd.max_packed_reads > 0)) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_EXP_EVENTS_CTRL, - EXT_CSD_PACKED_EVENT_EN, - card->ext_csd.generic_cmd6_time); - if (err && err != -EBADMSG) - goto free_card; - if (err) { - pr_warning("%s: Enabling packed event failed\n", - mmc_hostname(card->host)); - card->ext_csd.packed_event_en = 0; - err = 0; - } else { - card->ext_csd.packed_event_en = 1; - } - } - if (!oldcard) host->card = card; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 7a899ccb04f1..ea9076608b8c 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -335,7 +335,6 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd, 512); } -EXPORT_SYMBOL_GPL(mmc_send_ext_csd); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) { |