From 968a64ea638bbd48839b41981ff50197f3412676 Mon Sep 17 00:00:00 2001
From: Kyoungil Kim
Date: Mon, 22 Oct 2012 20:01:00 +0900
Subject: mmc: sdio: Use multiple scatter/gather lists

Before this patch, we always used only a single sg entry for SDIO
transfers. This patch switches to using multiple sg entries.

In the case of dwmci, the controller supports only up to 4KB per sg
entry, so transferring more than 4KB previously required more than
one command.

When tested before applying this patch, a 5K (5120-byte) transfer took
around 335 us with the dwmci controller. After applying this patch, it
takes 242 us. So this patch gives around a 38% performance improvement
for a 5K transfer, and the improvement grows as the transfer size
increases.

Signed-off-by: Kyoungil Kim
Signed-off-by: Chris Ball
---
 drivers/mmc/core/sdio_io.c  | 10 +++-------
 drivers/mmc/core/sdio_ops.c | 32 ++++++++++++++++++++++++++++----
 2 files changed, 31 insertions(+), 11 deletions(-)

diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 8f6f5ac131fc..78cb4d5d9d58 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -188,8 +188,7 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size);
  */
 static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
 {
-	unsigned mval =	min(func->card->host->max_seg_size,
-			func->card->host->max_blk_size);
+	unsigned mval =	func->card->host->max_blk_size;
 
 	if (mmc_blksz_for_byte_mode(func->card))
 		mval = min(mval, func->cur_blksize);
@@ -311,11 +310,8 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
 	/* Do the bulk of the transfer using block mode (if supported). */
 	if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {
 		/* Blocks per command is limited by host count, host transfer
-		 * size (we only use a single sg entry) and the maximum for
-		 * IO_RW_EXTENDED of 511 blocks. */
-		max_blocks = min(func->card->host->max_blk_count,
-			func->card->host->max_seg_size / func->cur_blksize);
-		max_blocks = min(max_blocks, 511u);
+		 * size and the maximum for IO_RW_EXTENDED of 511 blocks. */
+		max_blocks = min(func->card->host->max_blk_count, 511u);
 
 		while (remainder >= func->cur_blksize) {
 			unsigned blocks;
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index d29e20630eed..62508b457c4f 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -124,7 +124,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	struct mmc_request mrq = {NULL};
 	struct mmc_command cmd = {0};
 	struct mmc_data data = {0};
-	struct scatterlist sg;
+	struct scatterlist sg, *sg_ptr;
+	struct sg_table sgtable;
+	unsigned int nents, left_size, i;
+	unsigned int seg_size = card->host->max_seg_size;
 
 	BUG_ON(!card);
 	BUG_ON(fn > 7);
@@ -152,15 +155,36 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
 	/* Code in host drivers/fwk assumes that "blocks" always is >=1 */
 	data.blocks = blocks ? blocks : 1;
 	data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
-	data.sg = &sg;
-	data.sg_len = 1;
 
-	sg_init_one(&sg, buf, data.blksz * data.blocks);
+	left_size = data.blksz * data.blocks;
+	nents = (left_size - 1) / seg_size + 1;
+	if (nents > 1) {
+		if (sg_alloc_table(&sgtable, nents, GFP_KERNEL))
+			return -ENOMEM;
+
+		data.sg = sgtable.sgl;
+		data.sg_len = nents;
+
+		for_each_sg(data.sg, sg_ptr, data.sg_len, i) {
+			sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)),
+					min(seg_size, left_size),
+					offset_in_page(buf + (i * seg_size)));
+			left_size = left_size - seg_size;
+		}
+	} else {
+		data.sg = &sg;
+		data.sg_len = 1;
+
+		sg_init_one(&sg, buf, left_size);
+	}
 
 	mmc_set_data_timeout(&data, card);
 
 	mmc_wait_for_req(card->host, &mrq);
 
+	if (nents > 1)
+		sg_free_table(&sgtable);
+
 	if (cmd.error)
 		return cmd.error;
 	if (data.error)
--
cgit v1.2.3
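
For illustration, here is a minimal stand-alone C sketch (not part of the
patch) of the splitting arithmetic introduced above. It assumes the
4KB-per-sg-entry limit mentioned for dwmci and the 5K (5120-byte) transfer
from the commit message, and mirrors the nents rounding used in
mmc_io_rw_extended(); it is not kernel code.

/*
 * Sketch of how a transfer is divided into scatterlist entries of at
 * most seg_size bytes. The values are assumptions taken from the commit
 * message (dwmci: 4KB per sg entry, 5K transfer).
 */
#include <stdio.h>

int main(void)
{
	unsigned int seg_size = 4096;	/* assumed host->max_seg_size */
	unsigned int left_size = 5120;	/* the 5K transfer measured above */
	unsigned int nents = (left_size - 1) / seg_size + 1;	/* same rounding as the patch */
	unsigned int i;

	printf("%u bytes -> %u sg entries in a single command\n",
	       left_size, nents);
	for (i = 0; i < nents; i++) {
		/* each entry carries at most seg_size bytes */
		unsigned int len = left_size < seg_size ? left_size : seg_size;
		printf("  entry %u: %u bytes\n", i, len);
		left_size -= len;
	}
	return 0;
}

A 5120-byte request therefore maps onto two sg entries (4096 + 1024 bytes)
handled by a single IO_RW_EXTENDED command, which matches the commit
message's note that such transfers previously needed more than one command.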