author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-02-01 21:48:45 +1100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-02-01 21:48:45 +1100 |
commit | 24e1c13c93cbdd05e4b7ea921c0050b036555adc (patch) | |
tree | 60e5b54f1ce6db72507e6f20f19bffe5854793b8 | |
parent | 31fa5d2868cfa6b51e39989e2a2ab99ce4566cb2 (diff) | |
parent | 3bc217ffe6774e7971d6a7ce6350ce806ebab044 (diff) | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: kill swap_io_context()
as-iosched: fix inconsistent ioc->lock context
ide-cd: fix leftover data BUG
block: make elevator lib checkpatch compliant
cfq-iosched: make checkpatch compliant
block: make core bits checkpatch compliant
block: new end request handling interface should take unsigned byte counts
unexport add_disk_randomness
block/sunvdc.c:print_version() must be __devinit
splice: always updated atime in direct splice
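
Most of the series above is mechanical checkpatch cleanup (printk log levels, line lengths, whitespace, dropping the blank line before EXPORT_SYMBOL); the main functional changes are the as-iosched locking fix, the removal of swap_io_context(), the ide-cd completion fix, the switch of the end-request interface to unsigned byte counts, and the splice atime change. As a minimal sketch of the first item, this is how as_trim() reads after the merge, taken from the block/as-iosched.c hunk further down (as_put_io_context() likewise switches to spin_lock_irqsave(), presumably because that path can run with interrupts already disabled):

```c
/*
 * block/as-iosched.c after this merge: ioc->lock is now taken with
 * interrupts disabled, so the lock is always used in a consistent
 * (irq-safe) context.
 */
static void as_trim(struct io_context *ioc)
{
        spin_lock_irq(&ioc->lock);      /* was: spin_lock(&ioc->lock) */
        if (ioc->aic)
                free_as_io_context(ioc->aic);
        ioc->aic = NULL;
        spin_unlock_irq(&ioc->lock);
}
```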
mode | file | lines changed
---|---|---
-rw-r--r-- | block/as-iosched.c | 24
-rw-r--r-- | block/blk-barrier.c | 5
-rw-r--r-- | block/blk-core.c | 169
-rw-r--r-- | block/blk-exec.c | 1
-rw-r--r-- | block/blk-ioc.c | 9
-rw-r--r-- | block/blk-map.c | 10
-rw-r--r-- | block/blk-merge.c | 12
-rw-r--r-- | block/blk-settings.c | 61
-rw-r--r-- | block/blk-sysfs.c | 5
-rw-r--r-- | block/blk-tag.c | 12
-rw-r--r-- | block/cfq-iosched.c | 83
-rw-r--r-- | block/elevator.c | 57
-rw-r--r-- | drivers/block/sunvdc.c | 2
-rw-r--r-- | drivers/char/random.c | 2
-rw-r--r-- | drivers/ide/ide-cd.c | 2
-rw-r--r-- | fs/splice.c | 4
-rw-r--r-- | include/linux/blkdev.h | 16
17 files changed, 220 insertions(+), 254 deletions(-)
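
Two of the changes show up across the diff that follows: the blk_end_request() family now takes unsigned byte counts, and ide-cd completes requests with the real residual length instead of 0, so the leftover-data BUG() no longer triggers. The prototypes below are the post-merge ones from include/linux/blkdev.h; the driver function is a hypothetical illustration of a completion path using them, not code from this patch:

```c
/* include/linux/blkdev.h after this merge */
extern int blk_end_request(struct request *rq, int error,
                           unsigned int nr_bytes);
extern int __blk_end_request(struct request *rq, int error,
                             unsigned int nr_bytes);

/*
 * Hypothetical driver completion path (illustration only): finish the
 * remaining bytes on the request in one call, much as ide-cd now does
 * with rq->data_len.  __blk_end_request() must be called with the
 * queue lock held and returns nonzero if bytes are still pending.
 */
static void mydrv_complete_rq(struct request *rq, int error)
{
        unsigned long flags;

        spin_lock_irqsave(rq->q->queue_lock, flags);
        if (__blk_end_request(rq, error, rq->data_len))
                BUG();
        spin_unlock_irqrestore(rq->q->queue_lock, flags);
}
```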
diff --git a/block/as-iosched.c b/block/as-iosched.c index 96036846a001..8c3946787dbb 100644 --- a/block/as-iosched.c +++ b/block/as-iosched.c @@ -170,11 +170,11 @@ static void free_as_io_context(struct as_io_context *aic) static void as_trim(struct io_context *ioc) { - spin_lock(&ioc->lock); + spin_lock_irq(&ioc->lock); if (ioc->aic) free_as_io_context(ioc->aic); ioc->aic = NULL; - spin_unlock(&ioc->lock); + spin_unlock_irq(&ioc->lock); } /* Called when the task exits */ @@ -235,10 +235,12 @@ static void as_put_io_context(struct request *rq) aic = RQ_IOC(rq)->aic; if (rq_is_sync(rq) && aic) { - spin_lock(&aic->lock); + unsigned long flags; + + spin_lock_irqsave(&aic->lock, flags); set_bit(AS_TASK_IORUNNING, &aic->state); aic->last_end_request = jiffies; - spin_unlock(&aic->lock); + spin_unlock_irqrestore(&aic->lock, flags); } put_io_context(RQ_IOC(rq)); @@ -1266,22 +1268,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req, */ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { - struct io_context *rioc = RQ_IOC(req); - struct io_context *nioc = RQ_IOC(next); - list_move(&req->queuelist, &next->queuelist); rq_set_fifo_time(req, rq_fifo_time(next)); - /* - * Don't copy here but swap, because when anext is - * removed below, it must contain the unused context - */ - if (rioc != nioc) { - double_spin_lock(&rioc->lock, &nioc->lock, - rioc < nioc); - swap_io_context(&rioc, &nioc); - double_spin_unlock(&rioc->lock, &nioc->lock, - rioc < nioc); - } } } diff --git a/block/blk-barrier.c b/block/blk-barrier.c index 5f74fec327d5..6901eedeffce 100644 --- a/block/blk-barrier.c +++ b/block/blk-barrier.c @@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered, { if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && prepare_flush_fn == NULL) { - printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); + printk(KERN_ERR "%s: prepare_flush_fn required\n", + __FUNCTION__); return -EINVAL; } @@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered, return 0; } - EXPORT_SYMBOL(blk_queue_ordered); /* @@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector) bio_put(bio); return ret; } - EXPORT_SYMBOL(blkdev_issue_flush); diff --git a/block/blk-core.c b/block/blk-core.c index 8ff99440ee44..4afb39c82339 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -3,7 +3,8 @@ * Copyright (C) 1994, Karl Keyte: Added support for disk statistics * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> - * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000 + * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> + * - July2000 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 */ @@ -42,7 +43,7 @@ struct kmem_cache *request_cachep; /* * For queue allocation */ -struct kmem_cache *blk_requestq_cachep = NULL; +struct kmem_cache *blk_requestq_cachep; /* * Controlling structure to kblockd @@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio, error = -EIO; if (unlikely(nbytes > bio->bi_size)) { - printk("%s: want %u bytes done, only %u left\n", + printk(KERN_ERR "%s: want %u bytes done, %u left\n", __FUNCTION__, nbytes, bio->bi_size); nbytes = bio->bi_size; } @@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg) 
{ int bit; - printk("%s: dev %s: type=%x, flags=%x\n", msg, + printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, rq->cmd_flags); - printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, - rq->nr_sectors, - rq->current_nr_sectors); - printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); + printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", + (unsigned long long)rq->sector, + rq->nr_sectors, + rq->current_nr_sectors); + printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", + rq->bio, rq->biotail, + rq->buffer, rq->data, + rq->data_len); if (blk_pc_request(rq)) { - printk("cdb: "); + printk(KERN_INFO " cdb: "); for (bit = 0; bit < sizeof(rq->cmd); bit++) printk("%02x ", rq->cmd[bit]); printk("\n"); } } - EXPORT_SYMBOL(blk_dump_rq_flags); /* @@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q) blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); } } - EXPORT_SYMBOL(blk_plug_device); /* @@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q) del_timer(&q->unplug_timer); return 1; } - EXPORT_SYMBOL(blk_remove_plug); /* @@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q) kblockd_schedule_work(&q->unplug_work); } } - EXPORT_SYMBOL(blk_start_queue); /** @@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q) } EXPORT_SYMBOL(blk_put_queue); -void blk_cleanup_queue(struct request_queue * q) +void blk_cleanup_queue(struct request_queue *q) { mutex_lock(&q->sysfs_lock); set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); @@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q) blk_put_queue(q); } - EXPORT_SYMBOL(blk_cleanup_queue); static int blk_init_free_list(struct request_queue *q) @@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q) return 1; } - EXPORT_SYMBOL(blk_get_queue); static inline void blk_free_request(struct request_queue *q, struct request *rq) @@ -774,7 +773,7 @@ rq_starved: */ if (ioc_batching(q, ioc)) ioc->nr_batch_requests--; - + rq_init(q, rq); blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); @@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq) elv_requeue_request(q, rq); } - EXPORT_SYMBOL(blk_requeue_request); /** @@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq, blk_start_queueing(q); spin_unlock_irqrestore(q->queue_lock, flags); } - EXPORT_SYMBOL(blk_insert_request); /* @@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request); * queue lock is held and interrupts disabled, as we muck with the * request queue list. */ -static inline void add_request(struct request_queue * q, struct request * req) +static inline void add_request(struct request_queue *q, struct request *req) { drive_stat_acct(req, 1); @@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req) */ __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); } - + /* * disk_round_stats() - Round off the performance stats on a struct * disk_stats. 
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk) } disk->stamp = now; } - EXPORT_SYMBOL_GPL(disk_round_stats); /* @@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req) freed_request(q, rw, priv); } } - EXPORT_SYMBOL_GPL(__blk_put_request); void blk_put_request(struct request *req) @@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req) spin_unlock_irqrestore(q->queue_lock, flags); } } - EXPORT_SYMBOL(blk_put_request); void init_request_from_bio(struct request *req, struct bio *bio) @@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio) el_ret = elv_merge(q, &req, bio); switch (el_ret) { - case ELEVATOR_BACK_MERGE: - BUG_ON(!rq_mergeable(req)); + case ELEVATOR_BACK_MERGE: + BUG_ON(!rq_mergeable(req)); - if (!ll_back_merge_fn(q, req, bio)) - break; + if (!ll_back_merge_fn(q, req, bio)) + break; - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); - req->biotail->bi_next = bio; - req->biotail = bio; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; - req->ioprio = ioprio_best(req->ioprio, prio); - drive_stat_acct(req, 0); - if (!attempt_back_merge(q, req)) - elv_merged_request(q, req, el_ret); - goto out; + req->biotail->bi_next = bio; + req->biotail = bio; + req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); + drive_stat_acct(req, 0); + if (!attempt_back_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out; - case ELEVATOR_FRONT_MERGE: - BUG_ON(!rq_mergeable(req)); + case ELEVATOR_FRONT_MERGE: + BUG_ON(!rq_mergeable(req)); - if (!ll_front_merge_fn(q, req, bio)) - break; + if (!ll_front_merge_fn(q, req, bio)) + break; - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); - bio->bi_next = req->bio; - req->bio = bio; + bio->bi_next = req->bio; + req->bio = bio; - /* - * may not be valid. if the low level driver said - * it didn't need a bounce buffer then it better - * not touch req->buffer either... - */ - req->buffer = bio_data(bio); - req->current_nr_sectors = bio_cur_sectors(bio); - req->hard_cur_sectors = req->current_nr_sectors; - req->sector = req->hard_sector = bio->bi_sector; - req->nr_sectors = req->hard_nr_sectors += nr_sectors; - req->ioprio = ioprio_best(req->ioprio, prio); - drive_stat_acct(req, 0); - if (!attempt_front_merge(q, req)) - elv_merged_request(q, req, el_ret); - goto out; - - /* ELV_NO_MERGE: elevator says don't/can't merge. */ - default: - ; + /* + * may not be valid. if the low level driver said + * it didn't need a bounce buffer then it better + * not touch req->buffer either... + */ + req->buffer = bio_data(bio); + req->current_nr_sectors = bio_cur_sectors(bio); + req->hard_cur_sectors = req->current_nr_sectors; + req->sector = req->hard_sector = bio->bi_sector; + req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); + drive_stat_acct(req, 0); + if (!attempt_front_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out; + + /* ELV_NO_MERGE: elevator says don't/can't merge. 
*/ + default: + ; } get_rq: @@ -1350,7 +1344,7 @@ end_io: } if (unlikely(nr_sectors > q->max_hw_sectors)) { - printk("bio too big device %s (%u > %u)\n", + printk(KERN_ERR "bio too big device %s (%u > %u)\n", bdevname(bio->bi_bdev, b), bio_sectors(bio), q->max_hw_sectors); @@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio) } while (bio); current->bio_tail = NULL; /* deactivate */ } - EXPORT_SYMBOL(generic_make_request); /** @@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio) current->comm, task_pid_nr(current), (rw & WRITE) ? "WRITE" : "READ", (unsigned long long)bio->bi_sector, - bdevname(bio->bi_bdev,b)); + bdevname(bio->bi_bdev, b)); } } generic_make_request(bio); } - EXPORT_SYMBOL(submit_bio); /** @@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error, if (!blk_pc_request(req)) req->errors = 0; - if (error) { - if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) - printk("end_request: I/O error, dev %s, sector %llu\n", + if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { + printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", req->rq_disk ? req->rq_disk->disk_name : "?", (unsigned long long)req->sector); } @@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error, if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { blk_dump_rq_flags(req, "__end_that"); - printk("%s: bio idx %d >= vcnt %d\n", - __FUNCTION__, - bio->bi_idx, bio->bi_vcnt); + printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", + __FUNCTION__, bio->bi_idx, + bio->bi_vcnt); break; } @@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error, total_bytes += nbytes; nr_bytes -= nbytes; - if ((bio = req->bio)) { + bio = req->bio; + if (bio) { /* * end more in this run, or just return 'not-done' */ @@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h) local_irq_enable(); while (!list_empty(&local_list)) { - struct request *rq = list_entry(local_list.next, struct request, donelist); + struct request *rq; + rq = list_entry(local_list.next, struct request, donelist); list_del_init(&rq->donelist); rq->q->softirq_done_fn(rq); } } -static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action, - void *hcpu) +static int __cpuinit blk_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) { /* * If a CPU goes away, splice its entries to the current CPU @@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req) unsigned long flags; BUG_ON(!req->q->softirq_done_fn); - + local_irq_save(flags); cpu_list = &__get_cpu_var(blk_cpu_done); @@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req) local_irq_restore(flags); } - EXPORT_SYMBOL(blk_complete_request); - + /* * queue lock must be held */ @@ -1846,8 +1838,9 @@ EXPORT_SYMBOL(end_request); * 0 - we are done with this request * 1 - this request is not freed yet, it still has pending buffers. 
**/ -static int blk_end_io(struct request *rq, int error, int nr_bytes, - int bidi_bytes, int (drv_callback)(struct request *)) +static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, + unsigned int bidi_bytes, + int (drv_callback)(struct request *)) { struct request_queue *q = rq->q; unsigned long flags = 0UL; @@ -1889,7 +1882,7 @@ static int blk_end_io(struct request *rq, int error, int nr_bytes, * 0 - we are done with this request * 1 - still buffers pending for this request **/ -int blk_end_request(struct request *rq, int error, int nr_bytes) +int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { return blk_end_io(rq, error, nr_bytes, 0, NULL); } @@ -1908,7 +1901,7 @@ EXPORT_SYMBOL_GPL(blk_end_request); * 0 - we are done with this request * 1 - still buffers pending for this request **/ -int __blk_end_request(struct request *rq, int error, int nr_bytes) +int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) { if (blk_fs_request(rq) || blk_pc_request(rq)) { if (__end_that_request_first(rq, error, nr_bytes)) @@ -1937,8 +1930,8 @@ EXPORT_SYMBOL_GPL(__blk_end_request); * 0 - we are done with this request * 1 - still buffers pending for this request **/ -int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, - int bidi_bytes) +int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, + unsigned int bidi_bytes) { return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); } @@ -1969,7 +1962,8 @@ EXPORT_SYMBOL_GPL(blk_end_bidi_request); * this request still has pending buffers or * the driver doesn't want to finish this request yet. **/ -int blk_end_request_callback(struct request *rq, int error, int nr_bytes, +int blk_end_request_callback(struct request *rq, int error, + unsigned int nr_bytes, int (drv_callback)(struct request *)) { return blk_end_io(rq, error, nr_bytes, 0, drv_callback); @@ -2000,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work) { return queue_work(kblockd_workqueue, work); } - EXPORT_SYMBOL(kblockd_schedule_work); void kblockd_flush_work(struct work_struct *work) diff --git a/block/blk-exec.c b/block/blk-exec.c index ebfb44e959a9..391dd6224890 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, return err; } - EXPORT_SYMBOL(blk_execute_rq); diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 6d1675508eb5..80245dc30c75 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -176,15 +176,6 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc) } EXPORT_SYMBOL(copy_io_context); -void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) -{ - struct io_context *temp; - temp = *ioc1; - *ioc1 = *ioc2; - *ioc2 = temp; -} -EXPORT_SYMBOL(swap_io_context); - int __init blk_ioc_init(void) { iocontext_cachep = kmem_cache_create("blkdev_ioc", diff --git a/block/blk-map.c b/block/blk-map.c index 916cfc96ffa0..955d75c1a58f 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq, * direct dma. 
else, set up kernel bounce buffers */ uaddr = (unsigned long) ubuf; - if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) + if (!(uaddr & queue_dma_alignment(q)) && + !(len & queue_dma_alignment(q))) bio = bio_map_user(q, NULL, uaddr, len, reading); else bio = bio_copy_user(q, uaddr, len, reading); @@ -144,7 +145,6 @@ unmap_rq: blk_rq_unmap_user(bio); return ret; } - EXPORT_SYMBOL(blk_rq_map_user); /** @@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, /* we don't allow misaligned data like bio_map_user() does. If the * user is using sg, they're expected to know the alignment constraints * and respect them accordingly */ - bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); + bio = bio_map_user_iov(q, NULL, iov, iov_count, + rq_data_dir(rq) == READ); if (IS_ERR(bio)) return PTR_ERR(bio); @@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, rq->buffer = rq->data = NULL; return 0; } - EXPORT_SYMBOL(blk_rq_map_user_iov); /** @@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio) return ret; } - EXPORT_SYMBOL(blk_rq_unmap_user); /** @@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, rq->buffer = rq->data = NULL; return 0; } - EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/blk-merge.c b/block/blk-merge.c index 5023f0b08073..845ef8131108 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect) * size, something has gone terribly wrong */ if (rq->nr_sectors < rq->current_nr_sectors) { - printk("blk: request botched\n"); + printk(KERN_ERR "blk: request botched\n"); rq->nr_sectors = rq->current_nr_sectors; } } @@ -235,7 +235,6 @@ new_segment: return nsegs; } - EXPORT_SYMBOL(blk_rq_map_sg); static inline int ll_new_mergeable(struct request_queue *q, @@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) blk_recount_segments(q, bio); len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; - if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) && - !BIOVEC_VIRT_OVERSIZE(len)) { + if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) + && !BIOVEC_VIRT_OVERSIZE(len)) { int mergeable = ll_new_mergeable(q, req, bio); if (mergeable) { @@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req, return ll_new_hw_segment(q, req, bio); } -int ll_front_merge_fn(struct request_queue *q, struct request *req, +int ll_front_merge_fn(struct request_queue *q, struct request *req, struct bio *bio) { unsigned short max_sectors; @@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req, total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; if (blk_hw_contig_segment(q, req->biotail, next->bio)) { - int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size; + int len = req->biotail->bi_hw_back_size + + next->bio->bi_hw_front_size; /* * propagate the combined length to the end of the requests */ diff --git a/block/blk-settings.c b/block/blk-settings.c index 4df09a1b8f43..c8d0c5724098 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -10,8 +10,10 @@ #include "blk.h" -unsigned long blk_max_low_pfn, blk_max_pfn; +unsigned long blk_max_low_pfn; EXPORT_SYMBOL(blk_max_low_pfn); + +unsigned long blk_max_pfn; EXPORT_SYMBOL(blk_max_pfn); /** @@ -29,7 +31,6 @@ void 
blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) { q->prep_rq_fn = pfn; } - EXPORT_SYMBOL(blk_queue_prep_rq); /** @@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) { q->merge_bvec_fn = mbfn; } - EXPORT_SYMBOL(blk_queue_merge_bvec); void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) { q->softirq_done_fn = fn; } - EXPORT_SYMBOL(blk_queue_softirq_done); /** @@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done); * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling * blk_queue_bounce() to create a buffer in normal memory. **/ -void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) +void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) { /* * set defaults @@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); q->make_request_fn = mfn; - q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + q->backing_dev_info.ra_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; q->backing_dev_info.state = 0; q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; blk_queue_max_sectors(q, SAFE_MAX_SECTORS); @@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); } - EXPORT_SYMBOL(blk_queue_make_request); /** @@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request); **/ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) { - unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; + unsigned long b_pfn = dma_addr >> PAGE_SHIFT; int dma = 0; q->bounce_gfp = GFP_NOIO; @@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) /* Assume anything <= 4GB can be handled by IOMMU. Actually some IOMMUs can handle everything, but I don't know of a way to test this here. 
*/ - if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) + if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) dma = 1; q->bounce_pfn = max_low_pfn; #else - if (bounce_pfn < blk_max_low_pfn) + if (b_pfn < blk_max_low_pfn) dma = 1; - q->bounce_pfn = bounce_pfn; + q->bounce_pfn = b_pfn; #endif if (dma) { init_emergency_isa_pool(); q->bounce_gfp = GFP_NOIO | GFP_DMA; - q->bounce_pfn = bounce_pfn; + q->bounce_pfn = b_pfn; } } - EXPORT_SYMBOL(blk_queue_bounce_limit); /** @@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) { if ((max_sectors << 9) < PAGE_CACHE_SIZE) { max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); - printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, + max_sectors); } if (BLK_DEF_MAX_SECTORS > max_sectors) @@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors) q->max_hw_sectors = max_sectors; } } - EXPORT_SYMBOL(blk_queue_max_sectors); /** @@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q, { if (!max_segments) { max_segments = 1; - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, + max_segments); } q->max_phys_segments = max_segments; } - EXPORT_SYMBOL(blk_queue_max_phys_segments); /** @@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q, { if (!max_segments) { max_segments = 1; - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, + max_segments); } q->max_hw_segments = max_segments; } - EXPORT_SYMBOL(blk_queue_max_hw_segments); /** @@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) { if (max_size < PAGE_CACHE_SIZE) { max_size = PAGE_CACHE_SIZE; - printk("%s: set to minimum %d\n", __FUNCTION__, max_size); + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, + max_size); } q->max_segment_size = max_size; } - EXPORT_SYMBOL(blk_queue_max_segment_size); /** @@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) { q->hardsect_size = size; } - EXPORT_SYMBOL(blk_queue_hardsect_size); /* @@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size); void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { /* zero is "infinity" */ - t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); - t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); - t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); - t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); - t->max_segment_size = min(t->max_segment_size,b->max_segment_size); - t->hardsect_size = max(t->hardsect_size,b->hardsect_size); + t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments); + t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); + t->max_segment_size = min(t->max_segment_size, b->max_segment_size); + t->hardsect_size = max(t->hardsect_size, b->hardsect_size); if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); } - EXPORT_SYMBOL(blk_queue_stack_limits); /** @@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf, return 0; } - 
EXPORT_SYMBOL_GPL(blk_queue_dma_drain); /** @@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) { if (mask < PAGE_CACHE_SIZE - 1) { mask = PAGE_CACHE_SIZE - 1; - printk("%s: set to minimum %lx\n", __FUNCTION__, mask); + printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__, + mask); } q->seg_boundary_mask = mask; } - EXPORT_SYMBOL(blk_queue_segment_boundary); /** @@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask) { q->dma_alignment = mask; } - EXPORT_SYMBOL(blk_queue_dma_alignment); /** @@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask) if (mask > q->dma_alignment) q->dma_alignment = mask; } - EXPORT_SYMBOL(blk_queue_update_dma_alignment); int __init blk_settings_init(void) diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index bc28776ba76a..54d0db116153 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); - struct request_queue *q = container_of(kobj, struct request_queue, kobj); - + struct request_queue *q; ssize_t res; if (!entry->store) return -EIO; + + q = container_of(kobj, struct request_queue, kobj); mutex_lock(&q->sysfs_lock); if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { mutex_unlock(&q->sysfs_lock); diff --git a/block/blk-tag.c b/block/blk-tag.c index d1fd300e8aea..a8c37d4bbb32 100644 --- a/block/blk-tag.c +++ b/block/blk-tag.c @@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag) { return blk_map_queue_find_tag(q->queue_tags, tag); } - EXPORT_SYMBOL(blk_queue_find_tag); /** @@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q) { clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); } - EXPORT_SYMBOL(blk_queue_free_tags); static int @@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth, if (!tags) goto fail; } else if (q->queue_tags) { - if ((rc = blk_queue_resize_tags(q, depth))) + rc = blk_queue_resize_tags(q, depth); + if (rc) return rc; set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); return 0; @@ -203,7 +202,6 @@ fail: kfree(tags); return -ENOMEM; } - EXPORT_SYMBOL(blk_queue_init_tags); /** @@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth) kfree(tag_map); return 0; } - EXPORT_SYMBOL(blk_queue_resize_tags); /** @@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq) clear_bit_unlock(tag, bqt->tag_map); bqt->busy--; } - EXPORT_SYMBOL(blk_queue_end_tag); /** @@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) int tag; if (unlikely((rq->cmd_flags & REQ_QUEUED))) { - printk(KERN_ERR + printk(KERN_ERR "%s: request %p for device [%s] already tagged %d", __FUNCTION__, rq, rq->rq_disk ? 
rq->rq_disk->disk_name : "?", rq->tag); @@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq) bqt->busy++; return 0; } - EXPORT_SYMBOL(blk_queue_start_tag); /** @@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q) list_for_each_safe(tmp, n, &q->tag_busy_list) blk_requeue_request(q, list_entry_rq(tmp)); } - EXPORT_SYMBOL(blk_queue_invalidate_tags); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index f28d1fb30608..ca198e61fa65 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -15,11 +15,13 @@ /* * tunables */ -static const int cfq_quantum = 4; /* max queue in one round of service */ +/* max queue in one round of service */ +static const int cfq_quantum = 4; static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; -static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ -static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ - +/* maximum backwards seek, in KiB */ +static const int cfq_back_max = 16 * 1024; +/* penalty of a backwards seek */ +static const int cfq_back_penalty = 2; static const int cfq_slice_sync = HZ / 10; static int cfq_slice_async = HZ / 25; static const int cfq_slice_async_rq = 2; @@ -37,7 +39,8 @@ static int cfq_slice_idle = HZ / 125; #define CFQ_SLICE_SCALE (5) -#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) +#define RQ_CIC(rq) \ + ((struct cfq_io_context *) (rq)->elevator_private) #define RQ_CFQQ(rq) ((rq)->elevator_private2) static struct kmem_cache *cfq_pool; @@ -171,15 +174,15 @@ enum cfqq_state_flags { #define CFQ_CFQQ_FNS(name) \ static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ { \ - cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ + (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ { \ - cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ + (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ } \ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ { \ - return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ + return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ } CFQ_CFQQ_FNS(on_rr); @@ -1005,7 +1008,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, /* * follow expired path, else get first next available */ - if ((rq = cfq_check_fifo(cfqq)) == NULL) + rq = cfq_check_fifo(cfqq); + if (rq == NULL) rq = cfqq->next_rq; /* @@ -1294,28 +1298,28 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); switch (ioprio_class) { - default: - printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); - case IOPRIO_CLASS_NONE: - /* - * no prio set, place us in the middle of the BE classes - */ - cfqq->ioprio = task_nice_ioprio(tsk); - cfqq->ioprio_class = IOPRIO_CLASS_BE; - break; - case IOPRIO_CLASS_RT: - cfqq->ioprio = task_ioprio(ioc); - cfqq->ioprio_class = IOPRIO_CLASS_RT; - break; - case IOPRIO_CLASS_BE: - cfqq->ioprio = task_ioprio(ioc); - cfqq->ioprio_class = IOPRIO_CLASS_BE; - break; - case IOPRIO_CLASS_IDLE: - cfqq->ioprio_class = IOPRIO_CLASS_IDLE; - cfqq->ioprio = 7; - cfq_clear_cfqq_idle_window(cfqq); - break; + default: + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, place us in the middle of the BE classes + */ + cfqq->ioprio = task_nice_ioprio(tsk); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_RT: + cfqq->ioprio = task_ioprio(ioc); + cfqq->ioprio_class = 
IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + cfqq->ioprio = task_ioprio(ioc); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; + cfqq->ioprio = 7; + cfq_clear_cfqq_idle_window(cfqq); + break; } /* @@ -1427,7 +1431,7 @@ out: static struct cfq_queue ** cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) { - switch(ioprio_class) { + switch (ioprio_class) { case IOPRIO_CLASS_RT: return &cfqd->async_cfqq[0][ioprio]; case IOPRIO_CLASS_BE: @@ -2018,7 +2022,8 @@ static void cfq_idle_slice_timer(unsigned long data) spin_lock_irqsave(cfqd->queue->queue_lock, flags); - if ((cfqq = cfqd->active_queue) != NULL) { + cfqq = cfqd->active_queue; + if (cfqq) { timed_out = 0; /* @@ -2212,14 +2217,18 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \ return ret; \ } STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); -STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); -STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, + UINT_MAX, 1); +STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, + UINT_MAX, 1); STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); -STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, + UINT_MAX, 0); STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); -STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, + UINT_MAX, 0); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ diff --git a/block/elevator.c b/block/elevator.c index 8cd5775acd7a..bafbae0344d3 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -45,7 +45,8 @@ static LIST_HEAD(elv_list); */ static const int elv_hash_shift = 6; #define ELV_HASH_BLOCK(sec) ((sec) >> 3) -#define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) +#define ELV_HASH_FN(sec) \ + (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) #define ELV_HASH_ENTRIES (1 << elv_hash_shift) #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) @@ -224,15 +225,27 @@ int elevator_init(struct request_queue *q, char *name) q->end_sector = 0; q->boundary_rq = NULL; - if (name && !(e = elevator_get(name))) - return -EINVAL; + if (name) { + e = elevator_get(name); + if (!e) + return -EINVAL; + } - if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator))) - printk("I/O scheduler %s not found\n", chosen_elevator); + if (!e && *chosen_elevator) { + e = elevator_get(chosen_elevator); + if (!e) + printk(KERN_ERR "I/O scheduler %s not found\n", + chosen_elevator); + } - if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) { - printk("Default I/O scheduler not found, using no-op\n"); - e = elevator_get("noop"); + if (!e) { + e = elevator_get(CONFIG_DEFAULT_IOSCHED); + if (!e) { + printk(KERN_ERR + "Default I/O scheduler not found. 
" \ + "Using noop.\n"); + e = elevator_get("noop"); + } } eq = elevator_alloc(q, e); @@ -248,7 +261,6 @@ int elevator_init(struct request_queue *q, char *name) elevator_attach(q, eq, data); return ret; } - EXPORT_SYMBOL(elevator_init); void elevator_exit(elevator_t *e) @@ -261,7 +273,6 @@ void elevator_exit(elevator_t *e) kobject_put(&e->kobj); } - EXPORT_SYMBOL(elevator_exit); static void elv_activate_rq(struct request_queue *q, struct request *rq) @@ -353,7 +364,6 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq) rb_insert_color(&rq->rb_node, root); return NULL; } - EXPORT_SYMBOL(elv_rb_add); void elv_rb_del(struct rb_root *root, struct request *rq) @@ -362,7 +372,6 @@ void elv_rb_del(struct rb_root *root, struct request *rq) rb_erase(&rq->rb_node, root); RB_CLEAR_NODE(&rq->rb_node); } - EXPORT_SYMBOL(elv_rb_del); struct request *elv_rb_find(struct rb_root *root, sector_t sector) @@ -383,7 +392,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector) return NULL; } - EXPORT_SYMBOL(elv_rb_find); /* @@ -395,6 +403,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) { sector_t boundary; struct list_head *entry; + int stop_flags; if (q->last_merge == rq) q->last_merge = NULL; @@ -404,13 +413,13 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) q->nr_sorted--; boundary = q->end_sector; - + stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; list_for_each_prev(entry, &q->queue_head) { struct request *pos = list_entry_rq(entry); if (rq_data_dir(rq) != rq_data_dir(pos)) break; - if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) + if (pos->cmd_flags & stop_flags) break; if (rq->sector >= boundary) { if (pos->sector < boundary) @@ -425,7 +434,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq) list_add(&rq->queuelist, entry); } - EXPORT_SYMBOL(elv_dispatch_sort); /* @@ -446,7 +454,6 @@ void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) q->boundary_rq = rq; list_add_tail(&rq->queuelist, &q->queue_head); } - EXPORT_SYMBOL(elv_dispatch_add_tail); int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) @@ -665,7 +672,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, q->end_sector = rq_end_sector(rq); q->boundary_rq = rq; } - } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && + where == ELEVATOR_INSERT_SORT) where = ELEVATOR_INSERT_BACK; if (plug) @@ -673,7 +681,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where, elv_insert(q, rq, where); } - EXPORT_SYMBOL(__elv_add_request); void elv_add_request(struct request_queue *q, struct request *rq, int where, @@ -685,7 +692,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where, __elv_add_request(q, rq, where, plug); spin_unlock_irqrestore(q->queue_lock, flags); } - EXPORT_SYMBOL(elv_add_request); static inline struct request *__elv_next_request(struct request_queue *q) @@ -792,7 +798,6 @@ struct request *elv_next_request(struct request_queue *q) return rq; } - EXPORT_SYMBOL(elv_next_request); void elv_dequeue_request(struct request_queue *q, struct request *rq) @@ -810,7 +815,6 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq) if (blk_account_rq(rq)) q->in_flight++; } - EXPORT_SYMBOL(elv_dequeue_request); int elv_queue_empty(struct request_queue *q) @@ -825,7 +829,6 @@ int elv_queue_empty(struct 
request_queue *q) return 1; } - EXPORT_SYMBOL(elv_queue_empty); struct request *elv_latter_request(struct request_queue *q, struct request *rq) @@ -994,7 +997,8 @@ void elv_register(struct elevator_type *e) !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) def = " (default)"; - printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); + printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, + def); } EXPORT_SYMBOL_GPL(elv_register); @@ -1126,7 +1130,8 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name, } if (!elevator_switch(q, e)) - printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); + printk(KERN_ERR "elevator: switch to %s failed\n", + elevator_name); return count; } @@ -1160,7 +1165,6 @@ struct request *elv_rb_former_request(struct request_queue *q, return NULL; } - EXPORT_SYMBOL(elv_rb_former_request); struct request *elv_rb_latter_request(struct request_queue *q, @@ -1173,5 +1177,4 @@ struct request *elv_rb_latter_request(struct request_queue *q, return NULL; } - EXPORT_SYMBOL(elv_rb_latter_request); diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 66e30155b0ab..a8de037ecd4a 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -732,7 +732,7 @@ static struct vio_driver_ops vdc_vio_ops = { .handshake_complete = vdc_handshake_complete, }; -static void print_version(void) +static void __devinit print_version(void) { static int version_printed; diff --git a/drivers/char/random.c b/drivers/char/random.c index 5fee05661823..c511a831f0c0 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -667,8 +667,6 @@ void add_disk_randomness(struct gendisk *disk) add_timer_randomness(disk->random, 0x100 + MKDEV(disk->major, disk->first_minor)); } - -EXPORT_SYMBOL(add_disk_randomness); #endif #define EXTRACT_SIZE 10 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 74c6087ada38..bee05a3f52ae 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1722,7 +1722,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) */ if ((stat & DRQ_STAT) == 0) { spin_lock_irqsave(&ide_lock, flags); - if (__blk_end_request(rq, 0, 0)) + if (__blk_end_request(rq, 0, rq->data_len)) BUG(); HWGROUP(drive)->rq = NULL; spin_unlock_irqrestore(&ide_lock, flags); diff --git a/fs/splice.c b/fs/splice.c index 1577a7391d23..4ee49e86edde 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1033,9 +1033,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, done: pipe->nrbufs = pipe->curbuf = 0; - if (bytes > 0) - file_accessed(in); - + file_accessed(in); return bytes; out_release: diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e18d4192f6e8..90392a9d7a9c 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -39,7 +39,6 @@ void exit_io_context(void); struct io_context *get_io_context(gfp_t gfp_flags, int node); struct io_context *alloc_io_context(gfp_t gfp_flags, int node); void copy_io_context(struct io_context **pdst, struct io_context **psrc); -void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); struct request; typedef void (rq_end_io_fn)(struct request *, int); @@ -655,15 +654,18 @@ static inline void blk_run_address_space(struct address_space *mapping) * blk_end_request() for parts of the original function. * This prevents code duplication in drivers. 
*/ -extern int blk_end_request(struct request *rq, int error, int nr_bytes); -extern int __blk_end_request(struct request *rq, int error, int nr_bytes); -extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, - int bidi_bytes); +extern int blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int __blk_end_request(struct request *rq, int error, + unsigned int nr_bytes); +extern int blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes); extern void end_request(struct request *, int); extern void end_queued_request(struct request *, int); extern void end_dequeued_request(struct request *, int); -extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes, - int (drv_callback)(struct request *)); +extern int blk_end_request_callback(struct request *rq, int error, + unsigned int nr_bytes, + int (drv_callback)(struct request *)); extern void blk_complete_request(struct request *); /* |