Diffstat (limited to 'block')
-rw-r--r--  block/bio.c         |  8 ++++++++
-rw-r--r--  block/blk-cgroup.c  |  9 +++------
-rw-r--r--  block/blk-cgroup.h  | 21 +++++++++------------
-rw-r--r--  block/blk-merge.c   | 10 ++++++++++
-rw-r--r--  block/blk-mq.c      |  2 +-
-rw-r--r--  block/elevator.c    |  2 +-
6 files changed, 32 insertions, 20 deletions
diff --git a/block/bio.c b/block/bio.c
index 8c2e55e39a1b..0ec61c9e536c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 			goto done;
 		}
+
+		/*
+		 * If the queue doesn't support SG gaps and adding this
+		 * offset would create a gap, disallow it.
+		 */
+		if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+		    bvec_gap_to_prev(prev, offset))
+			return 0;
 	}
 
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 069bc202ffe3..b9f4cc494ece 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
 	blkg->q = q;
 	INIT_LIST_HEAD(&blkg->q_node);
 	blkg->blkcg = blkcg;
-	blkg->refcnt = 1;
+	atomic_set(&blkg->refcnt, 1);
 
 	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
 	if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
 
 	/* release the blkcg and parent blkg refs this blkg has been holding */
 	css_put(&blkg->blkcg->css);
-	if (blkg->parent) {
-		spin_lock_irq(blkg->q->queue_lock);
+	if (blkg->parent)
 		blkg_put(blkg->parent);
-		spin_unlock_irq(blkg->q->queue_lock);
-	}
 
 	blkg_free(blkg);
 }
@@ -1093,7 +1090,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
  * successful registration.  Returns 0 on success and -errno on failure.
  */
-int __init blkcg_policy_register(struct blkcg_policy *pol)
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
 	int i, ret;
 
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index cbb7f943f78a..d3fd7aa3d2a3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
 #include <linux/blkdev.h>
+#include <linux/atomic.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX		UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
 	struct request_list		rl;
 
 	/* reference count */
-	int				refcnt;
+	atomic_t			refcnt;
 
 	/* is this blkg online? protected by both blkcg and q locks */
 	bool				online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  * blkg_get - get a blkg reference
  * @blkg: blkg to get
  *
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-	lockdep_assert_held(blkg->q->queue_lock);
-	WARN_ON_ONCE(!blkg->refcnt);
-	blkg->refcnt++;
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	atomic_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-	lockdep_assert_held(blkg->q->queue_lock);
-	WARN_ON_ONCE(blkg->refcnt <= 0);
-	if (!--blkg->refcnt)
+	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+	if (atomic_dec_and_test(&blkg->refcnt))
 		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
 					const struct blkcg_policy *pol) { return 0; }
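Note: the blk-cgroup hunks above convert blkg->refcnt from a queue_lock-protected int into an atomic_t. That is what lets __blkg_release_rcu() drop the parent reference without taking queue_lock, and why the lockdep assertions disappear from blkg_get()/blkg_put(). A minimal, self-contained sketch of the resulting pattern follows; struct obj and its helpers are illustrative names, not code from this tree:

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative object using the same lockless-put scheme as blkcg_gq. */
struct obj {
	atomic_t	refcnt;
	struct rcu_head	rcu_head;
};

/* RCU callback: runs after a grace period, when no reader can hold @o. */
static void obj_release(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu_head);

	kfree(o);
}

static inline void obj_get(struct obj *o)
{
	/* A non-positive count means the caller's own reference is gone. */
	WARN_ON_ONCE(atomic_read(&o->refcnt) <= 0);
	atomic_inc(&o->refcnt);
}

static inline void obj_put(struct obj *o)
{
	WARN_ON_ONCE(atomic_read(&o->refcnt) <= 0);
	/* atomic_dec_and_test() serializes the final put; no lock needed. */
	if (atomic_dec_and_test(&o->refcnt))
		call_rcu(&o->rcu_head, obj_release);
}

Because the atomic op itself decides which put is last, the spin_lock_irq()/spin_unlock_irq() pair around blkg_put(blkg->parent) becomes unnecessary, which is exactly what the blk-cgroup.c hunk removes.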
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3bf0df0f4c2..54535831f1e1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
+	struct request_queue *q = rq->q;
+
 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
 		return false;
 
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	    !blk_write_same_mergeable(rq->bio, bio))
 		return false;
 
+	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+		struct bio_vec *bprev;
+
+		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+			return false;
+	}
+
 	return true;
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ef2dc7f01bf..ad69ef657e85 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -878,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
 	preempt_disable();
-	__blk_mq_run_hw_queue(hctx);
+	blk_mq_run_hw_queue(hctx, false);
 	preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
diff --git a/block/elevator.c b/block/elevator.c
index 34bded18910e..24c28b659bb3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -825,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-int __init elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
 	char *def = "";
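Note: the bio.c and blk-merge.c hunks both gate on bvec_gap_to_prev() when the queue sets QUEUE_FLAG_SG_GAPS, so bios whose scatterlist the controller could not express are rejected at page-add time and again at merge time. The helper itself is added to include/linux/blkdev.h elsewhere in this series; the sketch below gives its approximate logic under that assumption and is not the authoritative definition:

#include <linux/blk_types.h>
#include <linux/mm.h>

/*
 * Sketch of bvec_gap_to_prev(): two bio_vecs can share one contiguous
 * SG segment only if the previous vec ends exactly on a page boundary
 * and the next one starts at offset 0 within its page. Any other
 * combination leaves a hole in the DMA mapping, i.e. an SG gap.
 */
static inline bool sketch_bvec_gap_to_prev(struct bio_vec *bprv,
					   unsigned int offset)
{
	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}

With that test, __bio_add_page() refuses to append a page that would open a gap, and blk_rq_merge_ok() refuses request/bio merges whose boundary vectors would do the same.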