Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r-- | block/ll_rw_blk.c | 215
1 file changed, 112 insertions, 103 deletions
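
The hunks below are a mechanical conversion: every place block/ll_rw_blk.c spelled the queue type through the request_queue_t typedef now names struct request_queue directly, with a few declarations re-wrapped to stay within the usual line length. For context, the typedef being retired lives in include/linux/blkdev.h rather than in this file; assuming the 2.6.22-era header, it reads roughly:

    /* include/linux/blkdev.h (not part of this diff) */
    typedef struct request_queue request_queue_t;
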
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 66056ca5e631..8c2caff87cc3 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -40,7 +40,7 @@ static void blk_unplug_work(struct work_struct *work); static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); -static int __make_request(request_queue_t *q, struct bio *bio); +static int __make_request(struct request_queue *q, struct bio *bio); static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* @@ -121,7 +121,7 @@ static void blk_queue_congestion_threshold(struct request_queue *q) struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) { struct backing_dev_info *ret = NULL; - request_queue_t *q = bdev_get_queue(bdev); + struct request_queue *q = bdev_get_queue(bdev); if (q) ret = &q->backing_dev_info; @@ -140,7 +140,7 @@ EXPORT_SYMBOL(blk_get_backing_dev_info); * cdb from the request data for instance. * */ -void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn) +void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) { q->prep_rq_fn = pfn; } @@ -163,14 +163,14 @@ EXPORT_SYMBOL(blk_queue_prep_rq); * no merge_bvec_fn is defined for a queue, and only the fixed limits are * honored. */ -void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn) +void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) { q->merge_bvec_fn = mbfn; } EXPORT_SYMBOL(blk_queue_merge_bvec); -void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn) +void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) { q->softirq_done_fn = fn; } @@ -199,7 +199,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done); * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling * blk_queue_bounce() to create a buffer in normal memory. **/ -void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) +void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) { /* * set defaults @@ -235,7 +235,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) EXPORT_SYMBOL(blk_queue_make_request); -static void rq_init(request_queue_t *q, struct request *rq) +static void rq_init(struct request_queue *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); @@ -272,7 +272,7 @@ static void rq_init(request_queue_t *q, struct request *rq) * feature should call this function and indicate so. * **/ -int blk_queue_ordered(request_queue_t *q, unsigned ordered, +int blk_queue_ordered(struct request_queue *q, unsigned ordered, prepare_flush_fn *prepare_flush_fn) { if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && @@ -311,7 +311,7 @@ EXPORT_SYMBOL(blk_queue_ordered); * to the block layer by defining it through this call. 
* **/ -void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff) +void blk_queue_issue_flush_fn(struct request_queue *q, issue_flush_fn *iff) { q->issue_flush_fn = iff; } @@ -321,7 +321,7 @@ EXPORT_SYMBOL(blk_queue_issue_flush_fn); /* * Cache flushing for ordered writes handling */ -inline unsigned blk_ordered_cur_seq(request_queue_t *q) +inline unsigned blk_ordered_cur_seq(struct request_queue *q) { if (!q->ordseq) return 0; @@ -330,7 +330,7 @@ inline unsigned blk_ordered_cur_seq(request_queue_t *q) unsigned blk_ordered_req_seq(struct request *rq) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; BUG_ON(q->ordseq == 0); @@ -357,7 +357,7 @@ unsigned blk_ordered_req_seq(struct request *rq) return QUEUE_ORDSEQ_DONE; } -void blk_ordered_complete_seq(request_queue_t *q, unsigned seq, int error) +void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) { struct request *rq; int uptodate; @@ -401,7 +401,7 @@ static void post_flush_end_io(struct request *rq, int error) blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); } -static void queue_flush(request_queue_t *q, unsigned which) +static void queue_flush(struct request_queue *q, unsigned which) { struct request *rq; rq_end_io_fn *end_io; @@ -425,7 +425,7 @@ static void queue_flush(request_queue_t *q, unsigned which) elv_insert(q, rq, ELEVATOR_INSERT_FRONT); } -static inline struct request *start_ordered(request_queue_t *q, +static inline struct request *start_ordered(struct request_queue *q, struct request *rq) { q->bi_size = 0; @@ -476,7 +476,7 @@ static inline struct request *start_ordered(request_queue_t *q, return rq; } -int blk_do_ordered(request_queue_t *q, struct request **rqp) +int blk_do_ordered(struct request_queue *q, struct request **rqp) { struct request *rq = *rqp; int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); @@ -527,7 +527,7 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp) static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) { - request_queue_t *q = bio->bi_private; + struct request_queue *q = bio->bi_private; /* * This is dry run, restore bio_sector and size. We'll finish @@ -551,7 +551,7 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) static int ordered_bio_endio(struct request *rq, struct bio *bio, unsigned int nbytes, int error) { - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; bio_end_io_t *endio; void *private; @@ -588,7 +588,7 @@ static int ordered_bio_endio(struct request *rq, struct bio *bio, * blk_queue_bounce_limit to have lower memory pages allocated as bounce * buffers for doing I/O to pages residing above @page. **/ -void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr) +void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) { unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; int dma = 0; @@ -624,7 +624,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); * Enables a low level driver to set an upper limit on the size of * received requests. **/ -void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors) +void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) { if ((max_sectors << 9) < PAGE_CACHE_SIZE) { max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); @@ -651,7 +651,8 @@ EXPORT_SYMBOL(blk_queue_max_sectors); * physical data segments in a request. This would be the largest sized * scatter list the driver could handle. 
**/ -void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments) +void blk_queue_max_phys_segments(struct request_queue *q, + unsigned short max_segments) { if (!max_segments) { max_segments = 1; @@ -674,7 +675,8 @@ EXPORT_SYMBOL(blk_queue_max_phys_segments); * address/length pairs the host adapter can actually give as once * to the device. **/ -void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments) +void blk_queue_max_hw_segments(struct request_queue *q, + unsigned short max_segments) { if (!max_segments) { max_segments = 1; @@ -695,7 +697,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_segments); * Enables a low level driver to set an upper limit on the size of a * coalesced segment **/ -void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size) +void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) { if (max_size < PAGE_CACHE_SIZE) { max_size = PAGE_CACHE_SIZE; @@ -718,7 +720,7 @@ EXPORT_SYMBOL(blk_queue_max_segment_size); * even internal read-modify-write operations). Usually the default * of 512 covers most hardware. **/ -void blk_queue_hardsect_size(request_queue_t *q, unsigned short size) +void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) { q->hardsect_size = size; } @@ -735,7 +737,7 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); * @t: the stacking driver (top) * @b: the underlying device (bottom) **/ -void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) +void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { /* zero is "infinity" */ t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); @@ -756,7 +758,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits); * @q: the request queue for the device * @mask: the memory boundary mask **/ -void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask) +void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) { if (mask < PAGE_CACHE_SIZE - 1) { mask = PAGE_CACHE_SIZE - 1; @@ -778,7 +780,7 @@ EXPORT_SYMBOL(blk_queue_segment_boundary); * this is used when buiding direct io requests for the queue. * **/ -void blk_queue_dma_alignment(request_queue_t *q, int mask) +void blk_queue_dma_alignment(struct request_queue *q, int mask) { q->dma_alignment = mask; } @@ -796,7 +798,7 @@ EXPORT_SYMBOL(blk_queue_dma_alignment); * * no locks need be held. **/ -struct request *blk_queue_find_tag(request_queue_t *q, int tag) +struct request *blk_queue_find_tag(struct request_queue *q, int tag) { return blk_map_queue_find_tag(q->queue_tags, tag); } @@ -840,7 +842,7 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) * blk_cleanup_queue() will take care of calling this function, if tagging * has been used. So there's no need to call this directly. **/ -static void __blk_queue_free_tags(request_queue_t *q) +static void __blk_queue_free_tags(struct request_queue *q) { struct blk_queue_tag *bqt = q->queue_tags; @@ -877,7 +879,7 @@ EXPORT_SYMBOL(blk_free_tags); * This is used to disabled tagged queuing to a device, yet leave * queue in function. 
**/ -void blk_queue_free_tags(request_queue_t *q) +void blk_queue_free_tags(struct request_queue *q) { clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); } @@ -885,7 +887,7 @@ void blk_queue_free_tags(request_queue_t *q) EXPORT_SYMBOL(blk_queue_free_tags); static int -init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) +init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) { struct request **tag_index; unsigned long *tag_map; @@ -955,7 +957,7 @@ EXPORT_SYMBOL(blk_init_tags); * @depth: the maximum queue depth supported * @tags: the tag to use **/ -int blk_queue_init_tags(request_queue_t *q, int depth, +int blk_queue_init_tags(struct request_queue *q, int depth, struct blk_queue_tag *tags) { int rc; @@ -996,7 +998,7 @@ EXPORT_SYMBOL(blk_queue_init_tags); * Notes: * Must be called with the queue lock held. **/ -int blk_queue_resize_tags(request_queue_t *q, int new_depth) +int blk_queue_resize_tags(struct request_queue *q, int new_depth) { struct blk_queue_tag *bqt = q->queue_tags; struct request **tag_index; @@ -1059,7 +1061,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags); * Notes: * queue lock must be held. **/ -void blk_queue_end_tag(request_queue_t *q, struct request *rq) +void blk_queue_end_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; int tag = rq->tag; @@ -1111,7 +1113,7 @@ EXPORT_SYMBOL(blk_queue_end_tag); * Notes: * queue lock must be held. **/ -int blk_queue_start_tag(request_queue_t *q, struct request *rq) +int blk_queue_start_tag(struct request_queue *q, struct request *rq) { struct blk_queue_tag *bqt = q->queue_tags; int tag; @@ -1158,7 +1160,7 @@ EXPORT_SYMBOL(blk_queue_start_tag); * Notes: * queue lock must be held. **/ -void blk_queue_invalidate_tags(request_queue_t *q) +void blk_queue_invalidate_tags(struct request_queue *q) { struct blk_queue_tag *bqt = q->queue_tags; struct list_head *tmp, *n; @@ -1205,7 +1207,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) EXPORT_SYMBOL(blk_dump_rq_flags); -void blk_recount_segments(request_queue_t *q, struct bio *bio) +void blk_recount_segments(struct request_queue *q, struct bio *bio) { struct bio_vec *bv, *bvprv = NULL; int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster; @@ -1267,7 +1269,7 @@ new_hw_segment: } EXPORT_SYMBOL(blk_recount_segments); -static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, +static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) @@ -1288,7 +1290,7 @@ static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, return 0; } -static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, +static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) @@ -1308,7 +1310,8 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio, * map a request to scatterlist, return number of sg entries setup. 
Caller * must make sure sg can hold rq->nr_phys_segments entries */ -int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sg) { struct bio_vec *bvec, *bvprv; struct bio *bio; @@ -1361,7 +1364,7 @@ EXPORT_SYMBOL(blk_rq_map_sg); * specific ones if so desired */ -static inline int ll_new_mergeable(request_queue_t *q, +static inline int ll_new_mergeable(struct request_queue *q, struct request *req, struct bio *bio) { @@ -1382,7 +1385,7 @@ static inline int ll_new_mergeable(request_queue_t *q, return 1; } -static inline int ll_new_hw_segment(request_queue_t *q, +static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) { @@ -1406,7 +1409,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, return 1; } -int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) +int ll_back_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { unsigned short max_sectors; int len; @@ -1444,7 +1447,7 @@ int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) } EXPORT_SYMBOL(ll_back_merge_fn); -static int ll_front_merge_fn(request_queue_t *q, struct request *req, +static int ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { unsigned short max_sectors; @@ -1483,7 +1486,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, return ll_new_hw_segment(q, req, bio); } -static int ll_merge_requests_fn(request_queue_t *q, struct request *req, +static int ll_merge_requests_fn(struct request_queue *q, struct request *req, struct request *next) { int total_phys_segments; @@ -1539,7 +1542,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, * This is called with interrupts off and no requests on the queue and * with the queue lock held. */ -void blk_plug_device(request_queue_t *q) +void blk_plug_device(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1562,7 +1565,7 @@ EXPORT_SYMBOL(blk_plug_device); * remove the queue from the plugged list, if present. called with * queue lock held and interrupts disabled. */ -int blk_remove_plug(request_queue_t *q) +int blk_remove_plug(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1578,7 +1581,7 @@ EXPORT_SYMBOL(blk_remove_plug); /* * remove the plug and let it rip.. */ -void __generic_unplug_device(request_queue_t *q) +void __generic_unplug_device(struct request_queue *q) { if (unlikely(blk_queue_stopped(q))) return; @@ -1592,7 +1595,7 @@ EXPORT_SYMBOL(__generic_unplug_device); /** * generic_unplug_device - fire a request queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * Linux uses plugging to build bigger requests queues before letting @@ -1601,7 +1604,7 @@ EXPORT_SYMBOL(__generic_unplug_device); * gets unplugged, the request_fn defined for the queue is invoked and * transfers started. 
**/ -void generic_unplug_device(request_queue_t *q) +void generic_unplug_device(struct request_queue *q) { spin_lock_irq(q->queue_lock); __generic_unplug_device(q); @@ -1612,7 +1615,7 @@ EXPORT_SYMBOL(generic_unplug_device); static void blk_backing_dev_unplug(struct backing_dev_info *bdi, struct page *page) { - request_queue_t *q = bdi->unplug_io_data; + struct request_queue *q = bdi->unplug_io_data; /* * devices don't necessarily have an ->unplug_fn defined @@ -1627,7 +1630,8 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, static void blk_unplug_work(struct work_struct *work) { - request_queue_t *q = container_of(work, request_queue_t, unplug_work); + struct request_queue *q = + container_of(work, struct request_queue, unplug_work); blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -1637,7 +1641,7 @@ static void blk_unplug_work(struct work_struct *work) static void blk_unplug_timeout(unsigned long data) { - request_queue_t *q = (request_queue_t *)data; + struct request_queue *q = (struct request_queue *)data; blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, q->rq.count[READ] + q->rq.count[WRITE]); @@ -1647,14 +1651,14 @@ static void blk_unplug_timeout(unsigned long data) /** * blk_start_queue - restart a previously stopped queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * blk_start_queue() will clear the stop flag on the queue, and call * the request_fn for the queue if it was in a stopped state when * entered. Also see blk_stop_queue(). Queue lock must be held. **/ -void blk_start_queue(request_queue_t *q) +void blk_start_queue(struct request_queue *q) { WARN_ON(!irqs_disabled()); @@ -1677,7 +1681,7 @@ EXPORT_SYMBOL(blk_start_queue); /** * blk_stop_queue - stop a queue - * @q: The &request_queue_t in question + * @q: The &struct request_queue in question * * Description: * The Linux block layer assumes that a block driver will consume all @@ -1689,7 +1693,7 @@ EXPORT_SYMBOL(blk_start_queue); * the driver has signalled it's ready to go again. This happens by calling * blk_start_queue() to restart queue operations. Queue lock must be held. 
**/ -void blk_stop_queue(request_queue_t *q) +void blk_stop_queue(struct request_queue *q) { blk_remove_plug(q); set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); @@ -1746,7 +1750,7 @@ void blk_run_queue(struct request_queue *q) EXPORT_SYMBOL(blk_run_queue); /** - * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed + * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed * @kobj: the kobj belonging of the request queue to be released * * Description: @@ -1762,7 +1766,8 @@ EXPORT_SYMBOL(blk_run_queue); **/ static void blk_release_queue(struct kobject *kobj) { - request_queue_t *q = container_of(kobj, struct request_queue, kobj); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); struct request_list *rl = &q->rq; blk_sync_queue(q); @@ -1778,13 +1783,13 @@ static void blk_release_queue(struct kobject *kobj) kmem_cache_free(requestq_cachep, q); } -void blk_put_queue(request_queue_t *q) +void blk_put_queue(struct request_queue *q) { kobject_put(&q->kobj); } EXPORT_SYMBOL(blk_put_queue); -void blk_cleanup_queue(request_queue_t * q) +void blk_cleanup_queue(struct request_queue * q) { mutex_lock(&q->sysfs_lock); set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); @@ -1798,7 +1803,7 @@ void blk_cleanup_queue(request_queue_t * q) EXPORT_SYMBOL(blk_cleanup_queue); -static int blk_init_free_list(request_queue_t *q) +static int blk_init_free_list(struct request_queue *q) { struct request_list *rl = &q->rq; @@ -1817,7 +1822,7 @@ static int blk_init_free_list(request_queue_t *q) return 0; } -request_queue_t *blk_alloc_queue(gfp_t gfp_mask) +struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { return blk_alloc_queue_node(gfp_mask, -1); } @@ -1825,9 +1830,9 @@ EXPORT_SYMBOL(blk_alloc_queue); static struct kobj_type queue_ktype; -request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { - request_queue_t *q; + struct request_queue *q; q = kmem_cache_alloc_node(requestq_cachep, gfp_mask | __GFP_ZERO, node_id); @@ -1882,16 +1887,16 @@ EXPORT_SYMBOL(blk_alloc_queue_node); * when the block device is deactivated (such as at module unload). 
**/ -request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) +struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) { return blk_init_queue_node(rfn, lock, -1); } EXPORT_SYMBOL(blk_init_queue); -request_queue_t * +struct request_queue * blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { - request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id); + struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id); if (!q) return NULL; @@ -1940,7 +1945,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) } EXPORT_SYMBOL(blk_init_queue_node); -int blk_get_queue(request_queue_t *q) +int blk_get_queue(struct request_queue *q) { if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { kobject_get(&q->kobj); @@ -1952,7 +1957,7 @@ int blk_get_queue(request_queue_t *q) EXPORT_SYMBOL(blk_get_queue); -static inline void blk_free_request(request_queue_t *q, struct request *rq) +static inline void blk_free_request(struct request_queue *q, struct request *rq) { if (rq->cmd_flags & REQ_ELVPRIV) elv_put_request(q, rq); @@ -1960,7 +1965,7 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) } static struct request * -blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) +blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -1988,7 +1993,7 @@ blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) * ioc_batching returns true if the ioc is a valid batching request and * should be given priority access to a request. */ -static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) +static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) { if (!ioc) return 0; @@ -2009,7 +2014,7 @@ static inline int ioc_batching(request_queue_t *q, struct io_context *ioc) * is the behaviour we want though - once it gets a wakeup it should be given * a nice run. */ -static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) +static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) { if (!ioc || ioc_batching(q, ioc)) return; @@ -2018,7 +2023,7 @@ static void ioc_set_batching(request_queue_t *q, struct io_context *ioc) ioc->last_waited = jiffies; } -static void __freed_request(request_queue_t *q, int rw) +static void __freed_request(struct request_queue *q, int rw) { struct request_list *rl = &q->rq; @@ -2037,7 +2042,7 @@ static void __freed_request(request_queue_t *q, int rw) * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. */ -static void freed_request(request_queue_t *q, int rw, int priv) +static void freed_request(struct request_queue *q, int rw, int priv) { struct request_list *rl = &q->rq; @@ -2057,7 +2062,7 @@ static void freed_request(request_queue_t *q, int rw, int priv) * Returns NULL on failure, with queue_lock held. * Returns !NULL on success, with queue_lock *not held*. */ -static struct request *get_request(request_queue_t *q, int rw_flags, +static struct request *get_request(struct request_queue *q, int rw_flags, struct bio *bio, gfp_t gfp_mask) { struct request *rq = NULL; @@ -2162,7 +2167,7 @@ out: * * Called with q->queue_lock held, and returns with it unlocked. 
*/ -static struct request *get_request_wait(request_queue_t *q, int rw_flags, +static struct request *get_request_wait(struct request_queue *q, int rw_flags, struct bio *bio) { const int rw = rw_flags & 0x01; @@ -2204,7 +2209,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags, return rq; } -struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) +struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) { struct request *rq; @@ -2234,7 +2239,7 @@ EXPORT_SYMBOL(blk_get_request); * * The queue lock must be held with interrupts disabled. */ -void blk_start_queueing(request_queue_t *q) +void blk_start_queueing(struct request_queue *q) { if (!blk_queue_plugged(q)) q->request_fn(q); @@ -2253,7 +2258,7 @@ EXPORT_SYMBOL(blk_start_queueing); * more, when that condition happens we need to put the request back * on the queue. Must be called with queue lock held. */ -void blk_requeue_request(request_queue_t *q, struct request *rq) +void blk_requeue_request(struct request_queue *q, struct request *rq) { blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); @@ -2284,7 +2289,7 @@ EXPORT_SYMBOL(blk_requeue_request); * of the queue for things like a QUEUE_FULL message from a device, or a * host that is unable to accept a particular command. */ -void blk_insert_request(request_queue_t *q, struct request *rq, +void blk_insert_request(struct request_queue *q, struct request *rq, int at_head, void *data) { int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; @@ -2330,7 +2335,7 @@ static int __blk_rq_unmap_user(struct bio *bio) return ret; } -static int __blk_rq_map_user(request_queue_t *q, struct request *rq, +static int __blk_rq_map_user(struct request_queue *q, struct request *rq, void __user *ubuf, unsigned int len) { unsigned long uaddr; @@ -2403,8 +2408,8 @@ unmap_bio: * original bio must be passed back in to blk_rq_unmap_user() for proper * unmapping. */ -int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, - unsigned long len) +int blk_rq_map_user(struct request_queue *q, struct request *rq, + void __user *ubuf, unsigned long len) { unsigned long bytes_read = 0; struct bio *bio = NULL; @@ -2470,7 +2475,7 @@ EXPORT_SYMBOL(blk_rq_map_user); * original bio must be passed back in to blk_rq_unmap_user() for proper * unmapping. */ -int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, +int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, struct sg_iovec *iov, int iov_count, unsigned int len) { struct bio *bio; @@ -2540,7 +2545,7 @@ EXPORT_SYMBOL(blk_rq_unmap_user); * @len: length of user data * @gfp_mask: memory allocation flags */ -int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, +int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, unsigned int len, gfp_t gfp_mask) { struct bio *bio; @@ -2577,7 +2582,7 @@ EXPORT_SYMBOL(blk_rq_map_kern); * Insert a fully prepared request at the back of the io scheduler queue * for execution. Don't wait for completion. */ -void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, +void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, struct request *rq, int at_head, rq_end_io_fn *done) { @@ -2605,7 +2610,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); * Insert a fully prepared request at the back of the io scheduler queue * for execution and wait for completion. 
*/ -int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, +int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, struct request *rq, int at_head) { DECLARE_COMPLETION_ONSTACK(wait); @@ -2648,7 +2653,7 @@ EXPORT_SYMBOL(blk_execute_rq); */ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) { - request_queue_t *q; + struct request_queue *q; if (bdev->bd_disk == NULL) return -ENXIO; @@ -2684,7 +2689,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io) * queue lock is held and interrupts disabled, as we muck with the * request queue list. */ -static inline void add_request(request_queue_t * q, struct request * req) +static inline void add_request(struct request_queue * q, struct request * req) { drive_stat_acct(req, req->nr_sectors, 1); @@ -2730,7 +2735,7 @@ EXPORT_SYMBOL_GPL(disk_round_stats); /* * queue lock must be held */ -void __blk_put_request(request_queue_t *q, struct request *req) +void __blk_put_request(struct request_queue *q, struct request *req) { if (unlikely(!q)) return; @@ -2760,7 +2765,7 @@ EXPORT_SYMBOL_GPL(__blk_put_request); void blk_put_request(struct request *req) { unsigned long flags; - request_queue_t *q = req->q; + struct request_queue *q = req->q; /* * Gee, IDE calls in w/ NULL q. Fix IDE and remove the @@ -2798,7 +2803,7 @@ EXPORT_SYMBOL(blk_end_sync_rq); /* * Has to be called with the request spinlock acquired */ -static int attempt_merge(request_queue_t *q, struct request *req, +static int attempt_merge(struct request_queue *q, struct request *req, struct request *next) { if (!rq_mergeable(req) || !rq_mergeable(next)) @@ -2851,7 +2856,8 @@ static int attempt_merge(request_queue_t *q, struct request *req, return 1; } -static inline int attempt_back_merge(request_queue_t *q, struct request *rq) +static inline int attempt_back_merge(struct request_queue *q, + struct request *rq) { struct request *next = elv_latter_request(q, rq); @@ -2861,7 +2867,8 @@ static inline int attempt_back_merge(request_queue_t *q, struct request *rq) return 0; } -static inline int attempt_front_merge(request_queue_t *q, struct request *rq) +static inline int attempt_front_merge(struct request_queue *q, + struct request *rq) { struct request *prev = elv_former_request(q, rq); @@ -2905,7 +2912,7 @@ static void init_request_from_bio(struct request *req, struct bio *bio) req->start_time = jiffies; } -static int __make_request(request_queue_t *q, struct bio *bio) +static int __make_request(struct request_queue *q, struct bio *bio) { struct request *req; int el_ret, nr_sectors, barrier, err; @@ -3119,7 +3126,7 @@ static inline int should_fail_request(struct bio *bio) */ static inline void __generic_make_request(struct bio *bio) { - request_queue_t *q; + struct request_queue *q; sector_t maxsector; sector_t old_sector; int ret, nr_sectors = bio_sectors(bio); @@ -3312,7 +3319,7 @@ static void blk_recalc_rq_segments(struct request *rq) struct bio *bio, *prevbio = NULL; int nr_phys_segs, nr_hw_segs; unsigned int phys_size, hw_size; - request_queue_t *q = rq->q; + struct request_queue *q = rq->q; if (!rq->bio) return; @@ -3658,7 +3665,8 @@ void end_request(struct request *req, int uptodate) EXPORT_SYMBOL(end_request); -void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) +void blk_rq_bio_prep(struct request_queue *q, struct request *rq, + struct bio *bio) { /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ rq->cmd_flags |= (bio->bi_rw & 3); @@ -3701,7 +3709,7 @@ int __init 
blk_dev_init(void) sizeof(struct request), 0, SLAB_PANIC, NULL); requestq_cachep = kmem_cache_create("blkdev_queue", - sizeof(request_queue_t), 0, SLAB_PANIC, NULL); + sizeof(struct request_queue), 0, SLAB_PANIC, NULL); iocontext_cachep = kmem_cache_create("blkdev_ioc", sizeof(struct io_context), 0, SLAB_PANIC, NULL); @@ -4021,7 +4029,8 @@ static ssize_t queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct queue_sysfs_entry *entry = to_queue(attr); - request_queue_t *q = container_of(kobj, struct request_queue, kobj); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); ssize_t res; if (!entry->show) @@ -4041,7 +4050,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); - request_queue_t *q = container_of(kobj, struct request_queue, kobj); + struct request_queue *q = container_of(kobj, struct request_queue, kobj); ssize_t res; @@ -4072,7 +4081,7 @@ int blk_register_queue(struct gendisk *disk) { int ret; - request_queue_t *q = disk->queue; + struct request_queue *q = disk->queue; if (!q || !q->request_fn) return -ENXIO; @@ -4097,7 +4106,7 @@ int blk_register_queue(struct gendisk *disk) void blk_unregister_queue(struct gendisk *disk) { - request_queue_t *q = disk->queue; + struct request_queue *q = disk->queue; if (q && q->request_fn) { elv_unregister_queue(q);
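
Out-of-tree drivers of the same era see the identical edit at every function that takes a queue pointer: only the spelling of the type changes. A minimal sketch, assuming a hypothetical driver entry point named my_request_fn:

    /* before this commit (hypothetical driver code) */
    static void my_request_fn(request_queue_t *q);

    /* after: the structure is named directly, no typedef */
    static void my_request_fn(struct request_queue *q);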
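
Several of the converted helpers (blk_init_queue, blk_queue_hardsect_size, blk_queue_max_sectors, blk_cleanup_queue) form the usual queue setup path for a request-based driver. The sketch below shows that path with the new spelling; it is a rough 2.6.23-era illustration, the sketch_* names are invented, gendisk registration is omitted, and elv_next_request() is assumed from the elevator API of the same kernel rather than taken from this diff.

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/spinlock.h>
    #include <linux/blkdev.h>

    static DEFINE_SPINLOCK(sketch_lock);
    static struct request_queue *sketch_queue;

    /* request_fn runs with sketch_lock held; end_request() and
     * blk_fs_request() both appear in the diff above. */
    static void sketch_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    if (!blk_fs_request(rq)) {
                            end_request(rq, 0);     /* fail non-fs requests */
                            continue;
                    }
                    /* ... transfer rq->current_nr_sectors sectors at rq->sector ... */
                    end_request(rq, 1);
            }
    }

    static int __init sketch_init(void)
    {
            sketch_queue = blk_init_queue(sketch_request_fn, &sketch_lock);
            if (!sketch_queue)
                    return -ENOMEM;

            blk_queue_hardsect_size(sketch_queue, 512);     /* device sector size */
            blk_queue_max_sectors(sketch_queue, 256);       /* cap requests at 128 KiB */
            return 0;
    }

    static void __exit sketch_exit(void)
    {
            blk_cleanup_queue(sketch_queue);
    }

    module_init(sketch_init);
    module_exit(sketch_exit);
    MODULE_LICENSE("GPL");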
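
The tagged-queuing helpers converted here (blk_queue_init_tags, blk_queue_start_tag, blk_queue_end_tag) carry kernel-doc notes that the start/end calls require the queue lock. A rough usage sketch under those assumptions; issue_to_hardware() is a hypothetical driver helper and error handling is elided.

    /* setup: allocate a private tag map 32 entries deep (no shared map) */
    if (blk_queue_init_tags(q, 32, NULL))
            printk(KERN_WARNING "sketch: running untagged\n");

    /* dispatch path, called with q->queue_lock held */
    if (blk_queue_start_tag(q, rq) == 0) {
            /* tag assigned: rq->tag is valid and rq has left the queue */
            issue_to_hardware(rq);          /* hypothetical */
    } else {
            /* tag map exhausted: leave rq for a later pass */
    }

    /* completion path, also under q->queue_lock */
    blk_queue_end_tag(q, rq);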