From 4f024f3797c43cb4b73cd2c50cec728842d0e49e Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Fri, 11 Oct 2013 15:44:27 -0700
Subject: block: Abstract out bvec iterator
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Immutable biovecs are going to require an explicit iterator. To implement
immutable bvecs, a later patch is going to add a bi_bvec_done member to
this struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet
Cc: Jens Axboe
Cc: Geert Uytterhoeven
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: "Ed L. Cashin"
Cc: Nick Piggin
Cc: Lars Ellenberg
Cc: Jiri Kosina
Cc: Matthew Wilcox
Cc: Geoff Levand
Cc: Yehuda Sadeh
Cc: Sage Weil
Cc: Alex Elder
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris
Cc: Philip Kelleher
Cc: Rusty Russell
Cc: "Michael S. Tsirkin"
Cc: Konrad Rzeszutek Wilk
Cc: Jeremy Fitzhardinge
Cc: Neil Brown
Cc: Alasdair Kergon
Cc: Mike Snitzer
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky
Cc: Heiko Carstens
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh
Cc: Benny Halevy
Cc: "James E.J. Bottomley"
Cc: Greg Kroah-Hartman
Cc: "Nicholas A. Bellinger"
Cc: Alexander Viro
Cc: Chris Mason
Cc: "Theodore Ts'o"
Cc: Andreas Dilger
Cc: Jaegeuk Kim
Cc: Steven Whitehouse
Cc: Dave Kleikamp
Cc: Joern Engel
Cc: Prasad Joshi
Cc: Trond Myklebust
Cc: KONISHI Ryusuke
Cc: Mark Fasheh
Cc: Joel Becker
Cc: Ben Myers
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt
Cc: Frederic Weisbecker
Cc: Ingo Molnar
Cc: Len Brown
Cc: Pavel Machek
Cc: "Rafael J. Wysocki"
Cc: Herton Ronaldo Krzesinski
Cc: Ben Hutchings
Cc: Andrew Morton
Cc: Guo Chao
Cc: Tejun Heo
Cc: Asai Thambi S P
Cc: Selvan Mani
Cc: Sam Bradshaw
Cc: Wei Yongjun
Cc: "Roger Pau Monné"
Cc: Jan Beulich
Cc: Stefano Stabellini
Cc: Ian Campbell
Cc: Sebastian Ott
Cc: Christian Borntraeger
Cc: Minchan Kim
Cc: Jiang Liu
Cc: Nitin Gupta
Cc: Jerome Marchand
Cc: Joe Perches
Cc: Peng Tao
Cc: Andy Adamson
Cc: fanchaoting
Cc: Jie Liu
Cc: Sunil Mushran
Cc: "Martin K. Petersen"
Cc: Namjae Jeon
Cc: Pankaj Kumar
Cc: Dan Magenheimer
Cc: Mel Gorman
---
 block/blk-core.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

(limited to 'block/blk-core.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd0121212a..5c2ab2c74066 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);
 
 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
 		bio_endio(bio, error);
 }
 
@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
 	bio->bi_io_vec->bv_offset = 0;
 	bio->bi_io_vec->bv_len = len;
 
-	bio->bi_size = len;
+	bio->bi_iter.bi_size = len;
 	bio->bi_vcnt = 1;
 	bio->bi_phys_segments = 1;
 
@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 	req->biotail->bi_next = bio;
 	req->biotail = bio;
 
-	req->__data_len += bio->bi_size;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 	 * not touch req->buffer either...
 	 */
 	req->buffer = bio_data(bio);
-	req->__sector = bio->bi_sector;
-	req->__data_len += bio->bi_size;
+	req->__sector = bio->bi_iter.bi_sector;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
 
 	blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_FAILFAST_MASK;
 
 	req->errors = 0;
-	req->__sector = bio->bi_sector;
+	req->__sector = bio->bi_iter.bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;
 
-		bio->bi_sector += p->start_sect;
+		bio->bi_iter.bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;
 
 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
 				      bdev->bd_dev,
-				      bio->bi_sector - p->start_sect);
+				      bio->bi_iter.bi_sector - p->start_sect);
 	}
 }
@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 	/* Test device or partition size, when known. */
 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 
 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
 			/*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
 		       "generic_make_request: Trying to access "
 		       "nonexistent block-device %s (%Lu)\n",
 		       bdevname(bio->bi_bdev, b),
-		       (long long) bio->bi_sector);
+		       (long long) bio->bi_iter.bi_sector);
 		goto end_io;
 	}
 
@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
 	}
 
 	part = bio->bi_bdev->bd_part;
-	if (should_fail_request(part, bio->bi_size) ||
+	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
-				bio->bi_size))
+				bio->bi_iter.bi_size))
 		goto end_io;
 
 	/*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
 		if (rw & WRITE) {
 			count_vm_events(PGPGOUT, count);
 		} else {
-			task_io_account_read(bio->bi_size);
+			task_io_account_read(bio->bi_iter.bi_size);
 			count_vm_events(PGPGIN, count);
 		}
 
@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
 			printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
 			current->comm, task_pid_nr(current),
 				(rw & WRITE) ? "WRITE" : "READ",
-				(unsigned long long)bio->bi_sector,
+				(unsigned long long)bio->bi_iter.bi_sector,
 				bdevname(bio->bi_bdev, b),
 				count);
 		}
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
 		if ((bio->bi_rw & ff) != ff)
 			break;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 	}
 
 	/* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	total_bytes = 0;
 	while (req->bio) {
 		struct bio *bio = req->bio;
-		unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-		if (bio_bytes == bio->bi_size)
+		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;
 
 		req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->__data_len = bio->bi_size;
+	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;
 
 	if (bio->bi_bdev)
--
cgit v1.2.3

From 7988613b0e5b2638caf6cd493cc78e9595eba19c Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Sat, 23 Nov 2013 17:19:00 -0800
Subject: block: Convert bio_for_each_segment() to bvec_iter

More prep work for immutable biovecs - with immutable bvecs drivers won't
be able to use the biovec directly; they'll need to use helpers that take
into account bio->bi_iter.bi_bvec_done.

This updates callers for the new usage without changing the implementation
yet.

Signed-off-by: Kent Overstreet
Cc: Jens Axboe
Cc: Geert Uytterhoeven
Cc: Benjamin Herrenschmidt
Cc: Paul Mackerras
Cc: "Ed L. Cashin"
Cc: Nick Piggin
Cc: Lars Ellenberg
Cc: Jiri Kosina
Cc: Paul Clements
Cc: Jim Paris
Cc: Geoff Levand
Cc: Yehuda Sadeh
Cc: Sage Weil
Cc: Alex Elder
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris
Cc: Philip Kelleher
Cc: Konrad Rzeszutek Wilk
Cc: Jeremy Fitzhardinge
Cc: Neil Brown
Cc: Martin Schwidefsky
Cc: Heiko Carstens
Cc: linux390@de.ibm.com
Cc: Nagalakshmi Nandigama
Cc: Sreekanth Reddy
Cc: support@lsi.com
Cc: "James E.J. Bottomley"
Cc: Greg Kroah-Hartman
Cc: Alexander Viro
Cc: Steven Whitehouse
Cc: Herton Ronaldo Krzesinski
Cc: Tejun Heo
Cc: Andrew Morton
Cc: Guo Chao
Cc: Asai Thambi S P
Cc: Selvan Mani
Cc: Sam Bradshaw
Cc: Matthew Wilcox
Cc: Keith Busch
Cc: Stephen Hemminger
Cc: Quoc-Son Anh
Cc: Sebastian Ott
Cc: Nitin Gupta
Cc: Minchan Kim
Cc: Jerome Marchand
Cc: Seth Jennings
Cc: "Martin K. Petersen"
Cc: Mike Snitzer
Cc: Vivek Goyal
Cc: "Darrick J. Wong"
Wong" Cc: Chris Metcalf Cc: Jan Kara Cc: linux-m68k@lists.linux-m68k.org Cc: linuxppc-dev@lists.ozlabs.org Cc: drbd-user@lists.linbit.com Cc: nbd-general@lists.sourceforge.net Cc: cbe-oss-dev@lists.ozlabs.org Cc: xen-devel@lists.xensource.com Cc: virtualization@lists.linux-foundation.org Cc: linux-raid@vger.kernel.org Cc: linux-s390@vger.kernel.org Cc: DL-MPTFusionLinux@lsi.com Cc: linux-scsi@vger.kernel.org Cc: devel@driverdev.osuosl.org Cc: linux-fsdevel@vger.kernel.org Cc: cluster-devel@redhat.com Cc: linux-mm@kvack.org Acked-by: Geoff Levand --- block/blk-core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 5c2ab2c74066..5da8e900d3b1 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2746,10 +2746,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, void rq_flush_dcache_pages(struct request *rq) { struct req_iterator iter; - struct bio_vec *bvec; + struct bio_vec bvec; rq_for_each_segment(bvec, rq, iter) - flush_dcache_page(bvec->bv_page); + flush_dcache_page(bvec.bv_page); } EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); #endif -- cgit v1.2.3 From 43a5e4e21964a6efb4d14a34644ec7109d0ae891 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 26 Dec 2013 21:31:35 +0800 Subject: block: blk-mq: support draining mq queue blk_mq_drain_queue() is introduced so that we can drain mq queue inside blk_cleanup_queue(). Also don't accept new requests any more if queue is marked as dying. Cc: Jens Axboe Signed-off-by: Ming Lei Signed-off-by: Jens Axboe --- block/blk-core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'block/blk-core.c') diff --git a/block/blk-core.c b/block/blk-core.c index 5da8e900d3b1..accb7fc6ec94 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -38,6 +38,7 @@ #include "blk.h" #include "blk-cgroup.h" +#include "blk-mq.h" EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); @@ -497,8 +498,13 @@ void blk_cleanup_queue(struct request_queue *q) * Drain all requests queued before DYING marking. Set DEAD flag to * prevent that q->request_fn() gets invoked after draining finished. */ - spin_lock_irq(lock); - __blk_drain_queue(q, true); + if (q->mq_ops) { + blk_mq_drain_queue(q); + spin_lock_irq(lock); + } else { + spin_lock_irq(lock); + __blk_drain_queue(q, true); + } queue_flag_set(QUEUE_FLAG_DEAD, q); spin_unlock_irq(lock); -- cgit v1.2.3 From f04c1fe7619b2a60ee9e209cf3f9fcba2ce8f2a2 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 26 Dec 2013 21:31:36 +0800 Subject: block: blk-mq: make blk_sync_queue support mq This patch moves synchronization on mq->delay_work from blk_mq_free_queue() to blk_sync_queue(), so that blk_sync_queue can work on mq. 
Cc: Jens Axboe
Signed-off-by: Ming Lei
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'block/blk-core.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index accb7fc6ec94..c00e0bdeab4a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -246,7 +246,16 @@ EXPORT_SYMBOL(blk_stop_queue);
 void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
-	cancel_delayed_work_sync(&q->delay_work);
+
+	if (q->mq_ops) {
+		struct blk_mq_hw_ctx *hctx;
+		int i;
+
+		queue_for_each_hw_ctx(q, hctx, i)
+			cancel_delayed_work_sync(&hctx->delayed_work);
+	} else {
+		cancel_delayed_work_sync(&q->delay_work);
+	}
 }
 EXPORT_SYMBOL(blk_sync_queue);
--
cgit v1.2.3

From 6f5ba581c0d3ba0a76fe138123c1c2817ffcbeb1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Fri, 7 Feb 2014 10:22:37 -0800
Subject: blk-mq: divert __blk_put_request for MQ ops

__blk_put_request needs to call into the blk-mq code just like
blk_put_request. As we don't have the queue lock in this case, both end
up calling the same function.

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'block/blk-core.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..06636f3ad424 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1278,6 +1278,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	if (unlikely(!q))
 		return;
 
+	if (q->mq_ops) {
+		blk_mq_free_request(req);
+		return;
+	}
+
 	blk_pm_put_request(req);
 
 	elv_completed_request(q, req);
--
cgit v1.2.3

From 18741986a4b1dc4b1f171634c4191abc3b0fa023 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 10 Feb 2014 09:29:00 -0700
Subject: blk-mq: rework flush sequencing logic

Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path. This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by some
drivers.

To make life easier we also switch to dynamic allocation of ->flush_rq for
the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and
"blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig
Signed-off-by: Jens Axboe
---
 block/blk-core.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

(limited to 'block/blk-core.c')

diff --git a/block/blk-core.c b/block/blk-core.c
index 06636f3ad424..853f92749202 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -693,11 +693,20 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 	if (!uninit_q)
 		return NULL;
 
+	uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+	if (!uninit_q->flush_rq)
+		goto out_cleanup_queue;
+
 	q = blk_init_allocated_queue(uninit_q, rfn, lock);
 	if (!q)
-		blk_cleanup_queue(uninit_q);
-
+		goto out_free_flush_rq;
 	return q;
+
+out_free_flush_rq:
+	kfree(uninit_q->flush_rq);
+out_cleanup_queue:
+	blk_cleanup_queue(uninit_q);
+	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1127,7 +1136,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask, false);
+		return blk_mq_alloc_request(q, rw, gfp_mask);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }
--
cgit v1.2.3
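
For context, a minimal sketch of what a converted caller looks like once the
bvec iterator is abstracted out, assuming the post-series <linux/bio.h>
interfaces (struct bvec_iter holding bi_sector, bi_size, bi_idx and
bi_bvec_done, and the by-value bio_for_each_segment()); the helper name
example_count_bytes() is hypothetical and does not appear in any patch above:

	#include <linux/bio.h>
	#include <linux/printk.h>

	/*
	 * Sketch only, not taken from the patches above: with the iterator
	 * abstracted out, all position state lives in bio->bi_iter and
	 * bio_for_each_segment() hands back each segment as a struct bio_vec
	 * by value, so the underlying biovec array is never modified.
	 */
	static void example_count_bytes(struct bio *bio)
	{
		struct bio_vec bvec;	/* by value, not a pointer into bi_io_vec */
		struct bvec_iter iter;
		unsigned int bytes = 0;

		bio_for_each_segment(bvec, bio, iter)
			bytes += bvec.bv_len;

		pr_info("bio at sector %llu carries %u bytes (bi_size %u)\n",
			(unsigned long long)bio->bi_iter.bi_sector,
			bytes, bio->bi_iter.bi_size);
	}

Iterating by value rather than through a biovec pointer is what lets the rest
of the immutable-biovec series keep bi_io_vec untouched while a bio is split
or partially completed.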