Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r-- | drivers/scsi/scsi_lib.c | 238
1 file changed, 185 insertions(+), 53 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 207f1aa08869..aac8a02cbe80 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
+#include <linux/scatterlist.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -33,35 +34,34 @@
 #define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
 #define SG_MEMPOOL_SIZE		2
 
+/*
+ * The maximum number of SG segments that we will put inside a scatterlist
+ * (unless chaining is used). Should ideally fit inside a single page, to
+ * avoid a higher order allocation.
+ */
+#define SCSI_MAX_SG_SEGMENTS	128
+
 struct scsi_host_sg_pool {
 	size_t		size;
-	char		*name; 
+	char		*name;
 	struct kmem_cache	*slab;
 	mempool_t	*pool;
 };
 
-#if (SCSI_MAX_PHYS_SEGMENTS < 32)
-#error SCSI_MAX_PHYS_SEGMENTS is too small
-#endif
-
-#define SP(x) { x, "sgpool-" #x } 
+#define SP(x) { x, "sgpool-" #x }
 static struct scsi_host_sg_pool scsi_sg_pools[] = {
 	SP(8),
 	SP(16),
+#if (SCSI_MAX_SG_SEGMENTS > 16)
 	SP(32),
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
 	SP(64),
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
 	SP(128),
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-	SP(256),
-#if (SCSI_MAX_PHYS_SEGMENTS > 256)
-#error SCSI_MAX_PHYS_SEGMENTS is too large
-#endif
 #endif
 #endif
 #endif
-}; 
+};
 #undef SP
 
 static void scsi_run_queue(struct request_queue *q);
@@ -289,14 +289,16 @@ static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
 	struct request_queue *q = rq->q;
 	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned int data_len = bufflen, len, bytes, off;
+	struct scatterlist *sg;
 	struct page *page;
 	struct bio *bio = NULL;
 	int i, err, nr_vecs = 0;
 
-	for (i = 0; i < nsegs; i++) {
-		page = sgl[i].page;
-		off = sgl[i].offset;
-		len = sgl[i].length;
+	for_each_sg(sgl, sg, nsegs, i) {
+		page = sg->page;
+		off = sg->offset;
+		len = sg->length;
+		data_len += len;
 
 		while (len > 0 && data_len > 0) {
 			/*
@@ -695,56 +697,170 @@ static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
 	return NULL;
 }
 
-struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
-{
-	struct scsi_host_sg_pool *sgp;
-	struct scatterlist *sgl;
+/*
+ * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
+ * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
+ */
+#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048
 
-	BUG_ON(!cmd->use_sg);
+static inline unsigned int scsi_sgtable_index(unsigned short nents)
+{
+	unsigned int index;
 
-	switch (cmd->use_sg) {
+	switch (nents) {
 	case 1 ... 8:
-		cmd->sglist_len = 0;
+		index = 0;
 		break;
 	case 9 ... 16:
-		cmd->sglist_len = 1;
+		index = 1;
 		break;
+#if (SCSI_MAX_SG_SEGMENTS > 16)
 	case 17 ... 32:
-		cmd->sglist_len = 2;
+		index = 2;
 		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 32)
+#if (SCSI_MAX_SG_SEGMENTS > 32)
 	case 33 ... 64:
-		cmd->sglist_len = 3;
+		index = 3;
 		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 64)
+#if (SCSI_MAX_SG_SEGMENTS > 64)
 	case 65 ... 128:
-		cmd->sglist_len = 4;
-		break;
-#if (SCSI_MAX_PHYS_SEGMENTS > 128)
-	case 129 ... 256:
-		cmd->sglist_len = 5;
+		index = 4;
 		break;
 #endif
 #endif
 #endif
 	default:
-		return NULL;
+		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
+		BUG();
 	}
 
-	sgp = scsi_sg_pools + cmd->sglist_len;
-	sgl = mempool_alloc(sgp->pool, gfp_mask);
-	return sgl;
+	return index;
+}
+
+struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
+{
+	struct scsi_host_sg_pool *sgp;
+	struct scatterlist *sgl, *prev, *ret;
+	unsigned int index;
+	int this, left;
+
+	BUG_ON(!cmd->use_sg);
+
+	left = cmd->use_sg;
+	ret = prev = NULL;
+	do {
+		this = left;
+		if (this > SCSI_MAX_SG_SEGMENTS) {
+			this = SCSI_MAX_SG_SEGMENTS - 1;
+			index = SG_MEMPOOL_NR - 1;
+		} else
+			index = scsi_sgtable_index(this);
+
+		left -= this;
+
+		sgp = scsi_sg_pools + index;
+
+		sgl = mempool_alloc(sgp->pool, gfp_mask);
+		if (unlikely(!sgl))
+			goto enomem;
+
+		memset(sgl, 0, sizeof(*sgl) * sgp->size);
+
+		/*
+		 * first loop through, set initial index and return value
+		 */
+		if (!ret)
+			ret = sgl;
+
+		/*
+		 * chain previous sglist, if any. we know the previous
+		 * sglist must be the biggest one, or we would not have
+		 * ended up doing another loop.
+		 */
+		if (prev)
+			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);
+
+		/*
+		 * don't allow subsequent mempool allocs to sleep, it would
+		 * violate the mempool principle.
+		 */
+		gfp_mask &= ~__GFP_WAIT;
+		gfp_mask |= __GFP_HIGH;
+		prev = sgl;
+	} while (left);
+
+	/*
+	 * ->use_sg may get modified after dma mapping has potentially
+	 * shrunk the number of segments, so keep a copy of it for free.
+	 */
+	cmd->__use_sg = cmd->use_sg;
+	return ret;
+enomem:
+	if (ret) {
+		/*
+		 * Free entries chained off ret. Since we were trying to
+		 * allocate another sglist, we know that all entries are of
+		 * the max size.
+		 */
+		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+		prev = ret;
+		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];
+
+		while ((sgl = sg_chain_ptr(ret)) != NULL) {
+			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
+			mempool_free(sgl, sgp->pool);
+		}
+
+		mempool_free(prev, sgp->pool);
+	}
+	return NULL;
 }
 
 EXPORT_SYMBOL(scsi_alloc_sgtable);
 
-void scsi_free_sgtable(struct scatterlist *sgl, int index)
+void scsi_free_sgtable(struct scsi_cmnd *cmd)
 {
+	struct scatterlist *sgl = cmd->request_buffer;
 	struct scsi_host_sg_pool *sgp;
 
-	BUG_ON(index >= SG_MEMPOOL_NR);
+	/*
+	 * if this is the biggest size sglist, check if we have
+	 * chained parts we need to free
+	 */
+	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
+		unsigned short this, left;
+		struct scatterlist *next;
+		unsigned int index;
+
+		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
+		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
+		while (left && next) {
+			sgl = next;
+			this = left;
+			if (this > SCSI_MAX_SG_SEGMENTS) {
+				this = SCSI_MAX_SG_SEGMENTS - 1;
+				index = SG_MEMPOOL_NR - 1;
+			} else
+				index = scsi_sgtable_index(this);
+
+			left -= this;
+
+			sgp = scsi_sg_pools + index;
+
+			if (left)
+				next = sg_chain_ptr(&sgl[sgp->size - 1]);
+
+			mempool_free(sgl, sgp->pool);
+		}
+
+		/*
+		 * Restore original, will be freed below
+		 */
+		sgl = cmd->request_buffer;
+		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
+	} else
+		sgp = scsi_sg_pools + scsi_sgtable_index(cmd->__use_sg);
 
-	sgp = scsi_sg_pools + index;
 	mempool_free(sgl, sgp->pool);
 }
 
@@ -770,7 +886,7 @@ EXPORT_SYMBOL(scsi_free_sgtable);
 static void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
 	if (cmd->use_sg)
-		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
+		scsi_free_sgtable(cmd);
 
 	/*
 	 * Zero these out.  They now point to freed memory, and it is
@@ -984,7 +1100,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 static int scsi_init_io(struct scsi_cmnd *cmd)
 {
 	struct request     *req = cmd->request;
-	struct scatterlist *sgpnt;
 	int		   count;
 
 	/*
@@ -997,14 +1112,13 @@ static int scsi_init_io(struct scsi_cmnd *cmd)
 	/*
 	 * If sg table allocation fails, requeue request later.
 	 */
-	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
-	if (unlikely(!sgpnt)) {
+	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
+	if (unlikely(!cmd->request_buffer)) {
 		scsi_unprep_request(req);
 		return BLKPREP_DEFER;
 	}
 
 	req->buffer = NULL;
-	cmd->request_buffer = (char *) sgpnt;
 	if (blk_pc_request(req))
 		cmd->request_bufflen = req->data_len;
 	else
@@ -1529,8 +1643,25 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	if (!q)
 		return NULL;
 
+	/*
+	 * this limit is imposed by hardware restrictions
+	 */
 	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
+
+	/*
+	 * In the future, sg chaining support will be mandatory and this
+	 * ifdef can then go away. Right now we don't have all archs
+	 * converted, so better keep it safe.
+	 */
+#ifdef ARCH_HAS_SG_CHAIN
+	if (shost->use_sg_chaining)
+		blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	else
+		blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#else
+	blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS);
+#endif
+
 	blk_queue_max_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
@@ -2193,18 +2324,19 @@ EXPORT_SYMBOL_GPL(scsi_target_unblock);
  *
  * Returns virtual address of the start of the mapped page
  */
-void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
+void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 			  size_t *offset, size_t *len)
 {
 	int i;
 	size_t sg_len = 0, len_complete = 0;
+	struct scatterlist *sg;
 	struct page *page;
 
 	WARN_ON(!irqs_disabled());
 
-	for (i = 0; i < sg_count; i++) {
+	for_each_sg(sgl, sg, sg_count, i) {
 		len_complete = sg_len;	/* Complete sg-entries */
-		sg_len += sg[i].length;
+		sg_len += sg->length;
 		if (sg_len > *offset)
 			break;
 	}
@@ -2218,10 +2350,10 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
 	}
 
 	/* Offset starting from the beginning of first page in this sg-entry */
-	*offset = *offset - len_complete + sg[i].offset;
+	*offset = *offset - len_complete + sg->offset;
 
 	/* Assumption: contiguous pages can be accessed as "page + i" */
-	page = nth_page(sg[i].page, (*offset >> PAGE_SHIFT));
+	page = nth_page(sg->page, (*offset >> PAGE_SHIFT));
 	*offset &= ~PAGE_MASK;
 
 	/* Bytes in this sg-entry from *offset to the end of the page */
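Some notes on the mechanics above, with small userspace sketches. These are editorial illustrations, not part of the patch, and every name in them is hypothetical unless it matches the diff.

First, the mempool sizing: scsi_sgtable_index() maps a segment count to the smallest pool in scsi_sg_pools[] whose table can hold it (8, 16, 32, 64 or 128 entries). A minimal userspace model of that mapping, using the same GCC case-range extension the kernel relies on:

#include <assert.h>
#include <stdio.h>

/* Hypothetical model of scsi_sgtable_index(): map a segment count to
 * the index of the smallest pool (8/16/32/64/128 entries) that fits. */
static unsigned int model_sgtable_index(unsigned short nents)
{
	switch (nents) {		/* case ranges: GCC extension */
	case 1 ... 8:	 return 0;	/* sgpool-8 */
	case 9 ... 16:	 return 1;	/* sgpool-16 */
	case 17 ... 32:	 return 2;	/* sgpool-32 */
	case 33 ... 64:	 return 3;	/* sgpool-64 */
	case 65 ... 128: return 4;	/* sgpool-128 */
	default:
		assert(0 && "bad segment count");  /* the kernel does BUG() */
		return 0;
	}
}

int main(void)
{
	printf("%u %u %u\n", model_sgtable_index(5),
	       model_sgtable_index(64), model_sgtable_index(100));
	/* prints: 0 3 4 */
	return 0;
}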
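Second, the allocation loop in scsi_alloc_sgtable() carves cmd->use_sg into pool-sized pieces: every table except the last contributes SCSI_MAX_SG_SEGMENTS - 1 = 127 payload entries, because its final slot is consumed by the chain link that sg_chain() plants. A hypothetical helper showing that arithmetic:

#include <stdio.h>

#define SCSI_MAX_SG_SEGMENTS	128

/* Hypothetical helper mirroring the do/while split in
 * scsi_alloc_sgtable(): a full table holds only 127 data entries,
 * since slot 128 becomes the chain link to the next table. */
static unsigned int tables_needed(unsigned int nents)
{
	unsigned int tables = 1;

	while (nents > SCSI_MAX_SG_SEGMENTS) {
		nents -= SCSI_MAX_SG_SEGMENTS - 1;	/* 127 payload slots */
		tables++;
	}
	return tables;
}

int main(void)
{
	/* 300 segments -> 127 + 127 + 46: two full sgpool-128 tables
	 * chained to a final table drawn from sgpool-64. */
	printf("%u %u %u\n", tables_needed(128), tables_needed(129),
	       tables_needed(300));
	/* prints: 1 2 3 */
	return 0;
}

This also explains the free side: scsi_free_sgtable() replays the same split against cmd->__use_sg (the saved copy of use_sg) to work out which pool each chained piece came from.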
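Third, the chain link itself. sg_chain() and sg_chain_ptr() work by stealing the last entry of a table: its page pointer is overloaded to point at the next table, with the low bit set as a marker. That is why plain sg[i] indexing had to become for_each_sg() throughout this patch: the entries are no longer one contiguous array. A minimal userspace model of the trick, with simplified types; none of these names are the real kernel API:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct scatterlist (userspace model only). */
struct sg_model {
	uintptr_t page;		/* low bit set => chain link, not data */
	unsigned int length;
};

/* Model of sg_chain(): turn the last slot of prv into a link to nxt. */
static void model_chain(struct sg_model *prv, unsigned int prv_nents,
			struct sg_model *nxt)
{
	prv[prv_nents - 1].page = (uintptr_t)nxt | 1;
	prv[prv_nents - 1].length = 0;
}

/* Model of sg_next(), the heart of for_each_sg(): advance one entry,
 * transparently hopping across chain links. */
static struct sg_model *model_next(struct sg_model *sg)
{
	sg++;
	if (sg->page & 1)
		sg = (struct sg_model *)(sg->page & ~(uintptr_t)1);
	return sg;
}

int main(void)
{
	/* Fake "page" values are even, so the low bit is free for the
	 * marker, just as real pointers are at least 2-byte aligned. */
	struct sg_model a[4] = { {2, 10}, {4, 20}, {6, 30} }; /* slot 4 = link */
	struct sg_model b[3] = { {8, 40}, {10, 50} };	/* 3rd entry: terminator */
	struct sg_model *sg = a;
	int i;

	model_chain(a, 4, b);	/* a's 4th slot now points at b */

	/* Walk all 5 data entries as if they were one list. */
	for (i = 0; i < 5; i++, sg = model_next(sg))
		printf("%u ", sg->length);
	printf("\n");	/* prints: 10 20 30 40 50 */
	return 0;
}

The enomem unwind in scsi_alloc_sgtable() is the same walk in reverse: every table allocated before a failure is known to be max-size, so the code reads the chain pointer out of slot SCSI_MAX_SG_SEGMENTS - 1 of each and frees them back to the largest pool.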
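Finally, the scsi_kmap_atomic_sg() conversion at the bottom keeps the same offset arithmetic, just expressed through for_each_sg(): accumulate entry lengths until the running total passes *offset, then rebase *offset to the start of the matching entry. The same walk in plain C over an array of lengths (find_sg_entry is a hypothetical name):

#include <stdio.h>

/* Hypothetical model of the scsi_kmap_atomic_sg() search: return the
 * index of the entry containing byte *offset, rebasing *offset so it
 * is relative to that entry; -1 if offset is past the end. */
static int find_sg_entry(const unsigned int *len, int n, unsigned long *offset)
{
	unsigned long sg_len = 0, len_complete = 0;
	int i;

	for (i = 0; i < n; i++) {
		len_complete = sg_len;	/* bytes in entries before i */
		sg_len += len[i];
		if (sg_len > *offset)
			break;
	}
	if (i == n)
		return -1;
	*offset -= len_complete;	/* now an offset inside entry i */
	return i;
}

int main(void)
{
	unsigned int len[3] = { 512, 1024, 512 };
	unsigned long off = 600;

	/* 600 falls 88 bytes into the second entry (512 + 88). */
	printf("entry %d, offset %lu\n", find_sg_entry(len, 3, &off), off);
	/* prints: entry 1, offset 88 */
	return 0;
}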