author:    Jens Axboe <jens.axboe@oracle.com>  2007-11-15 09:13:11 +0100
committer: Jens Axboe <jens.axboe@oracle.com>  2008-01-28 10:05:27 +0100
commit:    5ed7959ede0936c55e50421a53f153b17080e876 (patch)
tree:      52fae9cd4959b7855e37c5dc4d0ce7eb22656cdb /drivers/scsi/scsi_lib.c
parent:    0db9299f48ebd4a860d6ad4e1d36ac50671d48e7 (diff)
SG: Convert SCSI to use scatterlist helpers for sg chaining
Also change scsi_alloc_sgtable() to just return 0/failure, since it
maps to the command passed in. ->request_buffer is no longer needed;
once drivers are adapted to use scsi_sglist(), it can be killed.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
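
Since drivers are expected to move from ->request_buffer to the scsi_sglist() accessors, here is a minimal, hypothetical driver-side sketch (example_map_command() and the HBA-specific body are placeholders, not part of this patch): the command's scatterlist is walked with scsi_for_each_sg(), and each segment's DMA address and length are read with the sg_dma_* helpers.

```c
/*
 * Hypothetical sketch, not part of this commit: walk the command's
 * scatterlist through the accessors instead of ->request_buffer.
 */
#include <scsi/scsi_cmnd.h>
#include <linux/scatterlist.h>

static void example_map_command(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i;

        /* scsi_sg_count() is the number of mapped segments */
        scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
                dma_addr_t addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                /* program addr/len into the HBA's own sg descriptor here */
                (void)addr;
                (void)len;
        }
}
```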
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--  drivers/scsi/scsi_lib.c  138
1 file changed, 20 insertions, 118 deletions
```diff
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4cf902efbdbf..3b5121c4c081 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -739,138 +739,41 @@ static inline unsigned int scsi_sgtable_index(unsigned short nents)
         return index;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
 {
         struct scsi_host_sg_pool *sgp;
-        struct scatterlist *sgl, *prev, *ret;
-        unsigned int index;
-        int this, left;
-
-        BUG_ON(!cmd->use_sg);
-
-        left = cmd->use_sg;
-        ret = prev = NULL;
-        do {
-                this = left;
-                if (this > SCSI_MAX_SG_SEGMENTS) {
-                        this = SCSI_MAX_SG_SEGMENTS - 1;
-                        index = SG_MEMPOOL_NR - 1;
-                } else
-                        index = scsi_sgtable_index(this);
-                left -= this;
-
-                sgp = scsi_sg_pools + index;
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        mempool_free(sgl, sgp->pool);
+}
 
-                sgl = mempool_alloc(sgp->pool, gfp_mask);
-                if (unlikely(!sgl))
-                        goto enomem;
+static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+{
+        struct scsi_host_sg_pool *sgp;
 
-                sg_init_table(sgl, sgp->size);
+        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
+        return mempool_alloc(sgp->pool, gfp_mask);
+}
 
-                /*
-                 * first loop through, set initial index and return value
-                 */
-                if (!ret)
-                        ret = sgl;
+int scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+        int ret;
 
-                /*
-                 * chain previous sglist, if any. we know the previous
-                 * sglist must be the biggest one, or we would not have
-                 * ended up doing another loop.
-                 */
-                if (prev)
-                        sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+        BUG_ON(!cmd->use_sg);
 
-                /*
-                 * if we have nothing left, mark the last segment as
-                 * end-of-list
-                 */
-                if (!left)
-                        sg_mark_end(&sgl[this - 1]);
+        ret = __sg_alloc_table(&cmd->sg_table, cmd->use_sg, gfp_mask, scsi_sg_alloc);
+        if (unlikely(ret))
+                __sg_free_table(&cmd->sg_table, scsi_sg_free);
 
-                /*
-                 * don't allow subsequent mempool allocs to sleep, it would
-                 * violate the mempool principle.
-                 */
-                gfp_mask &= ~__GFP_WAIT;
-                gfp_mask |= __GFP_HIGH;
-                prev = sgl;
-        } while (left);
-
-        /*
-         * ->use_sg may get modified after dma mapping has potentially
-         * shrunk the number of segments, so keep a copy of it for free.
-         */
-        cmd->__use_sg = cmd->use_sg;
+        cmd->request_buffer = cmd->sg_table.sgl;
 
         return ret;
-enomem:
-        if (ret) {
-                /*
-                 * Free entries chained off ret. Since we were trying to
-                 * allocate another sglist, we know that all entries are of
-                 * the max size.
-                 */
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-                prev = ret;
-                ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
-
-                while ((sgl = sg_chain_ptr(ret)) != NULL) {
-                        ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                mempool_free(prev, sgp->pool);
-        }
-        return NULL;
 }
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
 void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
-        struct scatterlist *sgl = cmd->request_buffer;
-        struct scsi_host_sg_pool *sgp;
-
-        /*
-         * if this is the biggest size sglist, check if we have
-         * chained parts we need to free
-         */
-        if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
-                unsigned short this, left;
-                struct scatterlist *next;
-                unsigned int index;
-
-                left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
-                next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
-                while (left && next) {
-                        sgl = next;
-                        this = left;
-                        if (this > SCSI_MAX_SG_SEGMENTS) {
-                                this = SCSI_MAX_SG_SEGMENTS - 1;
-                                index = SG_MEMPOOL_NR - 1;
-                        } else
-                                index = scsi_sgtable_index(this);
-
-                        left -= this;
-
-                        sgp = scsi_sg_pools + index;
-
-                        if (left)
-                                next = sg_chain_ptr(&sgl[sgp->size - 1]);
-
-                        mempool_free(sgl, sgp->pool);
-                }
-
-                /*
-                 * Restore original, will be freed below
-                 */
-                sgl = cmd->request_buffer;
-                sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
-        } else
-                sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
-
-        mempool_free(sgl, sgp->pool);
+        __sg_free_table(&cmd->sg_table, scsi_sg_free);
 }
 EXPORT_SYMBOL(scsi_free_sgtable);
 
@@ -1120,8 +1023,7 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
         /*
          * If sg table allocation fails, requeue request later.
          */
-        cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-        if (unlikely(!cmd->request_buffer)) {
+        if (unlikely(scsi_alloc_sgtable(cmd, GFP_ATOMIC))) {
                 scsi_unprep_request(req);
                 return BLKPREP_DEFER;
         }
```
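
For context, the scatterlist-table helpers this patch switches to follow the same 0/-ENOMEM convention that scsi_alloc_sgtable() now exposes; SCSI merely plugs its per-size mempools into the double-underscore variants through the scsi_sg_alloc()/scsi_sg_free() callbacks shown in the diff. Below is a rough, hypothetical sketch of the plain (kmalloc-backed) helpers, with example_build_table() and the pages array as stand-ins, not code from this commit.

```c
/*
 * Hypothetical sketch of the generic helpers (sg_alloc_table/sg_free_table);
 * the SCSI code above uses the __-prefixed variants with mempool-backed
 * alloc/free callbacks instead of kmalloc.
 */
#include <linux/scatterlist.h>
#include <linux/mm.h>

static int example_build_table(struct page **pages, unsigned int npages)
{
        struct sg_table table;
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        ret = sg_alloc_table(&table, npages, GFP_KERNEL);
        if (ret)
                return ret;     /* 0 on success, -ENOMEM on failure */

        /* for_each_sg() follows chained tables transparently */
        for_each_sg(table.sgl, sg, table.nents, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);

        /* ... dma_map_sg() and hand the list to the hardware here ... */

        sg_free_table(&table);
        return 0;
}
```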