| author | Jens Axboe <jens.axboe@oracle.com> | 2007-11-15 09:13:11 +0100 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-01-28 10:05:27 +0100 |
| commit | 5ed7959ede0936c55e50421a53f153b17080e876 (patch) | |
| tree | 52fae9cd4959b7855e37c5dc4d0ce7eb22656cdb | |
| parent | 0db9299f48ebd4a860d6ad4e1d36ac50671d48e7 (diff) | |
SG: Convert SCSI to use scatterlist helpers for sg chaining
Also change scsi_alloc_sgtable() to just return 0 on success or non-zero on
failure, since the table it builds now lives in the command passed in.
->request_buffer is no longer needed; once drivers are adapted to use
scsi_sglist(), it can be killed.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/scsi/scsi_lib.c | 138 |
| -rw-r--r-- | drivers/scsi/scsi_tgt_lib.c | 3 |
| -rw-r--r-- | include/scsi/scsi_cmnd.h | 7 |
3 files changed, 24 insertions, 124 deletions
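Editorial note: the driver-facing consequence of this change is that ->request_buffer should no longer be dereferenced directly; the scsi_sglist()/scsi_sg_count() accessors (see the scsi_cmnd.h hunk below) hide where the list actually lives. The following is a minimal, hedged sketch of that kind of driver conversion; it is not part of this patch, and my_driver_build_sgl()/my_hw_add_segment() are hypothetical names.

```c
/*
 * Hypothetical low-level driver snippet (not from this patch): walk the
 * possibly chained scatterlist through the accessors instead of casting
 * cmd->request_buffer.  my_hw_add_segment() stands in for whatever the
 * driver uses to program its DMA engine.
 */
#include <scsi/scsi_cmnd.h>

static void my_hw_add_segment(dma_addr_t addr, unsigned int len)
{
        /* driver-specific: program one DMA descriptor for [addr, addr + len) */
}

static int my_driver_build_sgl(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i, nseg;

        nseg = scsi_dma_map(cmd);       /* maps scsi_sglist(cmd) for DMA */
        if (nseg <= 0)
                return nseg;            /* 0: no data; negative: mapping error */

        /* scsi_for_each_sg() follows chained entries transparently */
        scsi_for_each_sg(cmd, sg, nseg, i)
                my_hw_add_segment(sg_dma_address(sg), sg_dma_len(sg));

        return 0;
}
```

Drivers written this way keep working unchanged once ->request_buffer is eventually removed, which is the adaptation the commit message refers to.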
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4cf902efbdbf..3b5121c4c081 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -739,138 +739,41 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
         return index;
 }

-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 {
         struct scsi_host_sg_pool *sgp;
-        struct scatterlist *sgl, *prev, *ret;
-        unsigned int index;
-        int this, left;
-
-        BUG_ON(!cmd->use_sg);
-
-        left = cmd->use_sg;
-        ret = prev = NULL;
-        do {
-                this = left;
-                if (this > SCSI_MAX_SG_SEGMENTS) {
-                        this = SCSI_MAX_SG_SEGMENTS - 1;
-                        index = SG_MEMPOOL_NR - 1;
-                } else
-                        index = scsi_sgtable_index(this);
-                left -= this;
-
-                sgp = scsi_sg_pools + index;
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        mempool_free(sgl, sgp->pool);
+}

-                sgl = mempool_alloc(sgp->pool, gfp_mask);
-                if (unlikely(!sgl))
-                        goto enomem;
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+        struct scsi_host_sg_pool *sgp;

-                sg_init_table(sgl, sgp->size);
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        return mempool_alloc(sgp->pool, gfp_mask);
+}

-                /*
-                 * first loop through, set initial index and return value
-                 */
-                if (!ret)
-                        ret = sgl;
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+        int ret;

-                /*
-                 * chain previous sglist, if any. we know the previous
-                 * sglist must be the biggest one, or we would not have
-                 * ended up doing another loop.
-                 */
-                if (prev)
-                        sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+        BUG_ON(!cmd->use_sg);

-                /*
-                 * if we have nothing left, mark the last segment as
-                 * end-of-list
-                 */
-                if (!left)
-                        sg_mark_end(&sgl[this - 1]);
+        ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg, gfp_mask, scsi_sg_alloc);
+        if (unlikely(ret))
+                __sg_free_table(&cmd->sg_table, scsi_sg_free);

-                /*
-                 * don't allow subsequent mempool allocs to sleep, it would
-                 * violate the mempool principle.
-                 */
-                gfp_mask &= ~__GFP_WAIT;
-                gfp_mask |= __GFP_HIGH;
-                prev = sgl;
-        } while (left);
-
-        /*
-         * ->use_sg may get modified after dma mapping has potentially
-         * shrunk the number of segments, so keep a copy of it for free.
-         */
-        cmd->__use_sg = cmd->use_sg;
+        cmd->request_buffer = cmd->sg_table.sgl;

         return ret;
-enomem:
-        if (ret) {
-                /*
-                 * Free entries chained off ret. Since we were trying to
-                 * allocate another sglist, we know that all entries are of
-                 * the max size.
-                 */
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-                prev = ret;
-                ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-                while ((sgl = sg_chain_ptr(ret)) != NULL) {
-                        ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                mempool_free(prev, sgp->pool);
-        }
-        return NULL;
 }
 EXPORT_SYMBOL(scsi_alloc_sgtable);

 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-        struct scatterlist *sgl = cmd->request_buffer;
-        struct scsi_host_sg_pool *sgp;
-
-        /*
-         * if this is the biggest size sglist, check if we have
-         * chained parts we need to free
-         */
-        if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-                unsigned short this, left;
-                struct scatterlist *next;
-                unsigned int index;
-
-                left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-                next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-                while (left && next) {
-                        sgl = next;
-                        this = left;
-                        if (this > SCSI_MAX_SG_SEGMENTS) {
-                                this = SCSI_MAX_SG_SEGMENTS - 1;
-                                index = SG_MEMPOOL_NR - 1;
-                        } else
-                                index = scsi_sgtable_index(this);
-
-                        left -= this;
-
-                        sgp = scsi_sg_pools + index;
-
-                        if (left)
-                                next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                /*
-                 * Restore original, will be freed below
-                 */
-                sgl = cmd->request_buffer;
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-        } else
-                sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-        mempool_free(sgl, sgp->pool);
+        __sg_free_table(&cmd->sg_table, scsi_sg_free);
 }
 EXPORT_SYMBOL(scsi_free_sgtable);

@@ -1120,8 +1023,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
         /*
          * If sg table allocation fails, requeue request later.
          */
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-        if (unlikely(!cmd->request_buffer)) {
+        if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
                 scsi_unprep_request(req);
                 return BLKPREP_DEFER;
         }
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index 93ece8f4e5de..01e03f3f6ffa 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -359,8 +359,7 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)
         int count;

         cmd->use_sg = rq->nr_phys_segments;
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, gfp_mask);
-        if (!cmd->request_buffer)
+        if (scsi_alloc_sgtable(cmd, gfp_mask))
                 return -ENOMEM;

         cmd->request_bufflen = rq->data_len;
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index abd7479ff452..a457fca66f61 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -8,7 +8,6 @@
 #include <linux/scatterlist.h>

 struct request;
-struct scatterlist;
 struct Scsi_Host;
 struct scsi_device;

@@ -68,8 +67,8 @@ struct scsi_cmnd {
         void *request_buffer;           /* Actual requested buffer */

         /* These elements define the operation we ultimately want to perform */
+        struct sg_table sg_table;
         unsigned short use_sg;  /* Number of pieces of scatter-gather */
-        unsigned short __use_sg;

         unsigned underflow;     /* Return error if less than
                                    this amount is transferred */
@@ -128,14 +127,14 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
                                  size_t *offset, size_t *len);
 extern void scsi_kunmap_atomic_sg(void *virt);

-extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
+extern int scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
 extern void scsi_free_sgtable(struct scsi_cmnd *);

 extern int scsi_dma_map(struct scsi_cmnd *cmd);
 extern void scsi_dma_unmap(struct scsi_cmnd *cmd);

 #define scsi_sg_count(cmd) ((cmd)->use_sg)
-#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
+#define scsi_sglist(cmd) ((cmd)->sg_table.sgl)
 #define scsi_bufflen(cmd) ((cmd)->request_bufflen)

 static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
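Editorial note: __sg_alloc_table() and __sg_free_table() are the generic scatterlist helpers the patch converts to; they build and tear down a (possibly chained) table through caller-supplied per-chunk allocation callbacks, which is exactly the role scsi_sg_alloc()/scsi_sg_free() play above. The sketch below is not from the patch; the kmalloc-backed callbacks and example_* names are illustrative only, and the four-argument call form simply mirrors the one used in this diff.

```c
/*
 * Illustrative only: plug plain kmalloc/kfree into the sg_table helpers the
 * same way this patch plugs in the SCSI mempools.  The call form mirrors the
 * __sg_alloc_table()/__sg_free_table() usage shown in the diff above.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *example_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        /* hand back one chunk large enough for 'nents' entries */
        return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void example_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        kfree(sgl);
}

static int example_setup_table(struct sg_table *table, unsigned int nents)
{
        int ret;

        ret = __sg_alloc_table(table, nents, GFP_KERNEL, example_sg_alloc);
        if (ret)
                /* frees whatever chunks were chained before the failure */
                __sg_free_table(table, example_sg_free);
        return ret;
}
```

The helpers take care of sg_init_table(), chain pointers, and end-of-list marking per chunk, which is why scsi_alloc_sgtable() shrinks to a thin wrapper in this patch.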