author | Tejun Heo <htejun@gmail.com> | 2006-02-08 01:01:31 -0800
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-02-08 07:52:58 -0800
commit | 30e9656cc340035e102fea46e1908689494b042d (patch)
tree | 5fc623ccad5a1f5b09ebc4b7e8d7c6bec8e485ac /block
parent | e5ea0a9fca5612808839dd4bcc41c46fc02451f9 (diff)
[PATCH] block: implement elv_insert and use it (fix ordcolor flipping bug)
q->ordcolor must only be flipped on initial queueing of a hardbarrier
request.
Constructing the ordered sequence and requeueing used to pass through
__elv_add_request(), which flips q->ordcolor whenever it sees a barrier
request.
This patch separates elv_insert() out of __elv_add_request() and uses
elv_insert() when constructing the ordered sequence and when requeueing.
elv_insert() inserts the given request at the specified position and
does nothing else.
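The failure mode is easiest to see in isolation. Below is a minimal, standalone C sketch (hypothetical names, not kernel code): old_add_request() stands in for the pre-patch __elv_add_request() colour toggle, insert_only() stands in for elv_insert(), and struct queue / struct request are simplified stand-ins for request_queue_t and struct request. Requeueing a barrier through the toggling path flips the queue colour a second time, so the colour recorded on the request no longer matches q->ordcolor.

/*
 * Simplified model, not kernel code.  old_add_request() mimics the
 * pre-patch colour toggle in __elv_add_request(); insert_only() mimics
 * elv_insert(), which positions the request and does nothing else.
 */
#include <stdio.h>
#include <stdbool.h>

struct queue   { unsigned ordcolor; };
struct request { bool barrier; unsigned color; };

static void old_add_request(struct queue *q, struct request *rq)
{
	rq->color = q->ordcolor;	/* stamp the current colour on the request */
	if (rq->barrier)
		q->ordcolor ^= 1;	/* toggled on every pass -- the bug on requeue */
}

static void insert_only(struct queue *q, struct request *rq)
{
	(void)q;
	(void)rq;			/* insert at the requested position, touch no queue state */
}

int main(void)
{
	struct queue q = { .ordcolor = 0 };
	struct request rq = { .barrier = true };

	old_add_request(&q, &rq);	/* initial queueing: rq gets colour 0, queue flips to 1 */
	printf("after initial add:  rq.color=%u q.ordcolor=%u\n", rq.color, q.ordcolor);

	old_add_request(&q, &rq);	/* old requeue path: queue flips back, colours drift */
	printf("after old requeue:  rq.color=%u q.ordcolor=%u\n", rq.color, q.ordcolor);

	insert_only(&q, &rq);		/* fixed requeue path: queue state left untouched */
	printf("after elv_insert-style requeue: q.ordcolor=%u\n", q.ordcolor);
	return 0;
}

With the patch, only the initial pass through __elv_add_request() toggles the colour; requeues and ordered-sequence insertions go through elv_insert(), which leaves q->ordcolor alone.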
Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'block')
-rw-r--r-- | block/elevator.c | 70
-rw-r--r-- | block/ll_rw_blk.c | 4
2 files changed, 40 insertions, 34 deletions
diff --git a/block/elevator.c b/block/elevator.c
index 2fc269f69726..24b702d649a9 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -293,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -310,41 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (blk_barrier_rq(rq))
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -425,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index ee5ed98db4cd..03d9c82b0fe7 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);