 Documentation/block/biodoc.txt |  7
 block/cfq-iosched.c            | 33
 block/elevator.c               | 26
 block/ll_rw_blk.c              | 67
 block/scsi_ioctl.c             |  3
 drivers/block/cciss.c          |  3
 drivers/cdrom/cdrom.c          |  3
 drivers/scsi/scsi_lib.c        |  4
 include/linux/blkdev.h         | 14
 include/linux/elevator.h       |  3
 10 files changed, 107 insertions(+), 56 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index c6c9a9c10d7f..3adaace328a6 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -946,6 +946,13 @@ elevator_merged_fn		called when a request in the scheduler has been
 				scheduler for example, to reposition the request
 				if its sorting order has changed.
 
+elevator_allow_merge_fn		called whenever the block layer determines
+				that a bio can be merged into an existing
+				request safely. The io scheduler may still
+				want to stop a merge at this point if it
+				results in some sort of conflict internally,
+				this hook allows it to do that.
+
 elevator_dispatch_fn		fills the dispatch queue with ready requests.
 				I/O schedulers are free to postpone requests by
 				not filling the dispatch queue unless @force
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 533a2938ffd6..9fc5eafa6c0e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -568,6 +568,38 @@ cfq_merged_requests(request_queue_t *q, struct request *rq,
 	cfq_remove_request(next);
 }
 
+static int cfq_allow_merge(request_queue_t *q, struct request *rq,
+			   struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int rw = bio_data_dir(bio);
+	struct cfq_queue *cfqq;
+	pid_t key;
+
+	/*
+	 * If bio is async or a write, always allow merge
+	 */
+	if (!bio_sync(bio) || rw == WRITE)
+		return 1;
+
+	/*
+	 * bio is sync. if request is not, disallow.
+	 */
+	if (!rq_is_sync(rq))
+		return 0;
+
+	/*
+	 * Ok, both bio and request are sync. Allow merge if they are
+	 * from the same queue.
+	 */
+	key = cfq_queue_pid(current, rw, 1);
+	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
+	if (cfqq != RQ_CFQQ(rq))
+		return 0;
+
+	return 1;
+}
+
 static inline void
 __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -2125,6 +2157,7 @@ static struct elevator_type iosched_cfq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_allow_merge_fn =	cfq_allow_merge,
 		.elevator_dispatch_fn =		cfq_dispatch_requests,
 		.elevator_add_req_fn =		cfq_insert_request,
 		.elevator_activate_req_fn =	cfq_activate_request,
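
For orientation, here is a minimal sketch of what another io scheduler could do with the new hook. The scheduler name ("toy") is invented for illustration; the checks themselves and the return contract (0 vetoes the merge, non-zero allows it) come straight from the cfq_allow_merge() hunk above.

/*
 * Hypothetical scheduler hook, for illustration only. By the time this
 * is called, the block layer has already validated queue limits and
 * contiguity; returning 0 here only expresses a scheduler-internal
 * conflict, as cfq_allow_merge() does for sync/async mismatches.
 */
static int toy_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	/* never mix a sync bio into an async request */
	if (bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	return 1;
}
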
diff --git a/block/elevator.c b/block/elevator.c
index c0063f345c5d..62c7a3069d3a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -51,6 +51,21 @@ static const int elv_hash_shift = 6;
 #define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
 
 /*
+ * Query io scheduler to see if the current process issuing bio may be
+ * merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+	request_queue_t *q = rq->q;
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_allow_merge_fn)
+		return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+	return 1;
+}
+
+/*
  * can we safely merge with this request?
  */
 inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
@@ -65,12 +80,15 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 		return 0;
 
 	/*
-	 * same device and no special stuff set, merge is ok
+	 * must be same device and not a special request
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
-		return 1;
+	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+		return 0;
 
-	return 0;
+	if (!elv_iosched_allow_merge(rq, bio))
+		return 0;
+
+	return 1;
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
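
The rewrite inverts the device/special test into an early return (by De Morgan, the negation of "same disk && !special" is "different disk || special"), so the scheduler veto slots in as just one more bail-out before the final return 1. Read as a whole, the function ends up as below; note the first two checks sit above the hunk shown and are paraphrased from the surrounding source rather than taken from this diff.

/*
 * elv_rq_merge_ok() as it reads with this patch applied. The early
 * mergeable/direction checks are reconstructed, not shown in the hunk.
 */
inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/* different data direction, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/* new: give the io scheduler a chance to veto the merge */
	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
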
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 79807dbc306e..e07c079e07e6 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1405,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 	return 1;
 }
 
-static int ll_back_merge_fn(request_queue_t *q, struct request *req,
-			    struct bio *bio)
+int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio)
 {
 	unsigned short max_sectors;
 	int len;
@@ -1442,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 
 	return ll_new_hw_segment(q, req, bio);
 }
+EXPORT_SYMBOL(ll_back_merge_fn);
 
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 			     struct bio *bio)
@@ -1912,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	}
 
 	q->request_fn		= rfn;
-	q->back_merge_fn	= ll_back_merge_fn;
-	q->front_merge_fn	= ll_front_merge_fn;
-	q->merge_requests_fn	= ll_merge_requests_fn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
 	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
@@ -2350,40 +2347,29 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
 
-	if (IS_ERR(bio)) {
+	if (IS_ERR(bio))
 		return PTR_ERR(bio);
-	}
 
 	orig_bio = bio;
 	blk_queue_bounce(q, &bio);
+
 	/*
 	 * We link the bounce buffer in and could have to traverse it
 	 * later so we have to get a ref to prevent it from being freed
 	 */
 	bio_get(bio);
 
-	/*
-	 * for most (all? don't know of any) queues we could
-	 * skip grabbing the queue lock here. only drivers with
-	 * funky private ->back_merge_fn() function could be
-	 * problematic.
-	 */
-	spin_lock_irq(q->queue_lock);
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio)) {
+	else if (!ll_back_merge_fn(q, rq, bio)) {
 		ret = -EINVAL;
-		spin_unlock_irq(q->queue_lock);
 		goto unmap_bio;
 	} else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-		rq->nr_sectors += bio_sectors(bio);
-		rq->hard_nr_sectors = rq->nr_sectors;
 		rq->data_len += bio->bi_size;
 	}
-	spin_unlock_irq(q->queue_lock);
 
 	return bio->bi_size;
@@ -2419,6 +2405,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq,
 		    void __user *ubuf, unsigned long len)
 {
 	unsigned long bytes_read = 0;
+	struct bio *bio = NULL;
 	int ret;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -2445,6 +2432,8 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq,
 		ret = __blk_rq_map_user(q, rq, ubuf, map_len);
 		if (ret < 0)
 			goto unmap_rq;
+		if (!bio)
+			bio = rq->bio;
 		bytes_read += ret;
 		ubuf += ret;
 	}
@@ -2452,7 +2441,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
-	blk_rq_unmap_user(rq);
+	blk_rq_unmap_user(bio);
 	return ret;
 }
@@ -2509,27 +2498,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
  * blk_rq_unmap_user - unmap a request with user data
- * @rq:		rq to be unmapped
+ * @bio:	start of bio list
  *
  * Description:
- *    Unmap a rq previously mapped by blk_rq_map_user().
- *    rq->bio must be set to the original head of the request.
+ *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
+ *    supply the original rq->bio from the blk_rq_map_user() return, since
+ *    the io completion may have changed rq->bio.
  */
-int blk_rq_unmap_user(struct request *rq)
+int blk_rq_unmap_user(struct bio *bio)
 {
-	struct bio *bio, *mapped_bio;
+	struct bio *mapped_bio;
+	int ret = 0, ret2;
 
-	while ((bio = rq->bio)) {
-		if (bio_flagged(bio, BIO_BOUNCED))
+	while (bio) {
+		mapped_bio = bio;
+		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
 			mapped_bio = bio->bi_private;
-		else
-			mapped_bio = bio;
 
-		__blk_rq_unmap_user(mapped_bio);
-		rq->bio = bio->bi_next;
-		bio_put(bio);
+		ret2 = __blk_rq_unmap_user(mapped_bio);
+		if (ret2 && !ret)
+			ret = ret2;
+
+		mapped_bio = bio;
+		bio = bio->bi_next;
+		bio_put(mapped_bio);
 	}
-	return 0;
+
+	return ret;
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);
@@ -2822,7 +2817,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 	 * will have updated segment counts, update sector
 	 * counts here.
 	 */
-	if (!q->merge_requests_fn(q, req, next))
+	if (!ll_merge_requests_fn(q, req, next))
 		return 0;
 
 	/*
@@ -2939,7 +2934,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_BACK_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->back_merge_fn(q, req, bio))
+			if (!ll_back_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
@@ -2956,7 +2951,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		case ELEVATOR_FRONT_MERGE:
 			BUG_ON(!rq_mergeable(req));
-			if (!q->front_merge_fn(q, req, bio))
+			if (!ll_front_merge_fn(q, req, bio))
 				break;
 
 			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index f322b6a441d8..2528a0c0dec8 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -333,8 +333,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 		hdr->sb_len_wr = len;
 	}
 
-	rq->bio = bio;
-	if (blk_rq_unmap_user(rq))
+	if (blk_rq_unmap_user(bio))
 		ret = -EFAULT;
 
 	/* may not have succeeded, but output values written to control
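
The API change shifts responsibility for remembering the original bio list to the caller, because io completion may advance or clear rq->bio before unmapping happens. A sketch of the resulting calling convention, modeled on the sg_io() update above; the function name and surrounding logic are hypothetical, while blk_get_request(), blk_rq_map_user(), blk_execute_rq() and blk_rq_unmap_user() are the real interfaces.

/*
 * Illustrative driver path only: map a user buffer into a request,
 * execute it, then unmap using the saved bio list head.
 */
static int my_driver_ioctl(request_queue_t *q, struct gendisk *disk,
			   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;

	ret = blk_rq_map_user(q, rq, ubuf, len);
	if (ret)
		goto out;

	/*
	 * Save the bio list head now: completion may change rq->bio,
	 * so it can no longer be used for unmapping afterwards.
	 */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	if (blk_rq_unmap_user(bio))
		ret = -EFAULT;
out:
	blk_put_request(rq);
	return ret;
}
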
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index d719a5d8f435..9d2ddb209343 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1907,6 +1907,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
 			       "does not support reading geometry\n");
 			drv->heads = 255;
 			drv->sectors = 32;	// Sectors per track
+			drv->raid_level = RAID_UNKNOWN;
 		} else {
 			drv->heads = inq_buff->data_byte[6];
 			drv->sectors = inq_buff->data_byte[7];
@@ -2491,7 +2492,7 @@ static void do_cciss_request(request_queue_t *q)
 	c->Request.Type.Type = TYPE_CMD;	// It is a command.
 	c->Request.Type.Attribute = ATTR_SIMPLE;
 	c->Request.Type.Direction =
-	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+	    (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
 	c->Request.Timeout = 0;	// Don't time out
 	c->Request.CDB[0] =
 	    (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index e4a2f8f3a1d7..66d028d30439 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2139,8 +2139,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 			cdi->last_sense = s->sense_key;
 		}
 
-		rq->bio = bio;
-		if (blk_rq_unmap_user(rq))
+		if (blk_rq_unmap_user(bio))
 			ret = -EFAULT;
 
 		if (ret)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1748e27501cd..f02f48a882a9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -265,13 +265,11 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
 
 	if (!rq->bio)
 		blk_rq_bio_prep(q, rq, bio);
-	else if (!q->back_merge_fn(q, rq, bio))
+	else if (!ll_back_merge_fn(q, rq, bio))
 		return -EINVAL;
 	else {
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
-		rq->hard_nr_sectors += bio_sectors(bio);
-		rq->nr_sectors = rq->hard_nr_sectors;
 	}
 
 	return 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ea330d7b46c0..36a6eacefe20 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -331,10 +331,6 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef int (merge_request_fn) (request_queue_t *, struct request *,
-				struct bio *);
-typedef int (merge_requests_fn) (request_queue_t *, struct request *,
-				 struct request *);
 typedef void (request_fn_proc) (request_queue_t *q);
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
@@ -376,9 +372,6 @@ struct request_queue
 	struct request_list	rq;
 
 	request_fn_proc		*request_fn;
-	merge_request_fn	*back_merge_fn;
-	merge_request_fn	*front_merge_fn;
-	merge_requests_fn	*merge_requests_fn;
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unplug_fn		*unplug_fn;
@@ -649,6 +642,11 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 		struct gendisk *, struct scsi_ioctl_command __user *);
 
 /*
+ * Temporary export, until SCSI gets fixed up.
+ */
+extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+
+/*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -674,7 +672,7 @@ extern void __blk_stop_queue(request_queue_t *q);
 extern void blk_run_queue(request_queue_t *);
 extern void blk_start_queueing(request_queue_t *);
 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
-extern int blk_rq_unmap_user(struct request *);
+extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
 			       struct sg_iovec *, int, unsigned int);
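
With the per-queue merge hooks gone, a driver that appends bios to a request calls the exported ll_back_merge_fn() directly, as scsi_merge_bio() now does above. A sketch of that pattern; the helper name is hypothetical and the body mirrors the scsi_lib.c change. Note that, unlike the old ->back_merge_fn queue hook, no queue lock is taken around the call, and the caller no longer adjusts nr_sectors/hard_nr_sectors by hand.

/*
 * Illustrative helper: append bio to rq, letting the block layer
 * validate the back merge against queue limits.
 */
static int my_merge_bio(struct request *rq, struct bio *bio)
{
	request_queue_t *q = rq->q;

	/* segment counts must be recomputed after linking */
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
	}

	return 0;
}
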
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a24931d24404..e88fcbc77f8f 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -12,6 +12,8 @@ typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struc
 
 typedef void (elevator_merged_fn) (request_queue_t *, struct request *, int);
 
+typedef int (elevator_allow_merge_fn) (request_queue_t *, struct request *, struct bio *);
+
 typedef int (elevator_dispatch_fn) (request_queue_t *, int);
 
 typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
@@ -33,6 +35,7 @@ struct elevator_ops
 	elevator_merge_fn *elevator_merge_fn;
 	elevator_merged_fn *elevator_merged_fn;
 	elevator_merge_req_fn *elevator_merge_req_fn;
+	elevator_allow_merge_fn *elevator_allow_merge_fn;
 	elevator_dispatch_fn *elevator_dispatch_fn;
 	elevator_add_req_fn *elevator_add_req_fn;