Diffstat (limited to 'block')
-rw-r--r--   block/bio.c           20
-rw-r--r--   block/blk-cgroup.c     6
-rw-r--r--   block/blk-merge.c     23
-rw-r--r--   block/blk-settings.c  22
4 files changed, 46 insertions, 25 deletions
diff --git a/block/bio.c b/block/bio.c
index 425d6d4a2f7a..515b5434fe2d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -742,8 +742,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
- bvec_gap_to_prev(prev, offset))
+ if (bvec_gap_to_prev(q, prev, offset))
return 0;
}
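
The open-coded QUEUE_FLAG_SG_GAPS test is gone; bvec_gap_to_prev() now takes the queue so it can consult the new virt_boundary_mask limit introduced in blk-settings.c below. The helper itself is defined outside this diff (in the blkdev.h headers), so the following is only a stand-alone model of its expected behaviour, a sketch under that assumption:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-alone model of the gap check this hunk relies on; the real
	 * bvec_gap_to_prev() lives outside this diff and reads the mask from
	 * q->limits.virt_boundary_mask. */
	struct vec { unsigned int offset, len; };

	static bool gap_to_prev(unsigned long virt_boundary_mask,
				const struct vec *prev, unsigned int next_offset)
	{
		if (!virt_boundary_mask)
			return false;	/* no boundary set: gaps are always allowed */
		/* a gap exists unless @prev ends on the boundary and the next
		 * vector starts on it */
		return (next_offset & virt_boundary_mask) ||
		       ((prev->offset + prev->len) & virt_boundary_mask);
	}

	int main(void)
	{
		struct vec prev = { .offset = 0, .len = 4096 };

		/* 4 KiB boundary, e.g. a controller that cannot resume a
		 * transfer in the middle of a page */
		printf("%d\n", gap_to_prev(0xfff, &prev, 0));	/* 0: mergeable  */
		printf("%d\n", gap_to_prev(0xfff, &prev, 512));	/* 1: gap, split */
		return 0;
	}
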
@@ -1776,8 +1775,9 @@ EXPORT_SYMBOL(bio_endio);
* Allocates and returns a new bio which represents @sectors from the start of
* @bio, and updates @bio to represent the remaining sectors.
*
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
*/
struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
@@ -1787,7 +1787,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
- split = bio_clone_fast(bio, gfp, bs);
+ /*
+ * Discards need a mutable bio_vec to accommodate the payload
+ * required by the DSM TRIM and UNMAP commands.
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ split = bio_clone_bioset(bio, gfp, bs);
+ else
+ split = bio_clone_fast(bio, gfp, bs);
+
if (!split)
return NULL;
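
The new comment spells out why there are two clone paths: a REQ_DISCARD bio may need its bio_vec rewritten later to carry the DSM TRIM/UNMAP payload, so it gets a full clone via bio_clone_bioset() instead of the bi_io_vec-sharing bio_clone_fast(). Callers do not have to care which path is taken; a hypothetical split-and-chain caller (the names below are illustrative, not from this patch) looks the same for data and discard bios:

	/* Illustrative only: carve the first max_sectors off an oversized bio,
	 * requeue the remainder and keep working on the front part. */
	static struct bio *split_if_needed(struct bio *bio, unsigned int max_sectors,
					   struct bio_set *bs)
	{
		struct bio *split;

		if (bio_sectors(bio) <= max_sectors)
			return bio;

		split = bio_split(bio, max_sectors, GFP_NOIO, bs);
		if (!split)
			return bio;		/* allocation failed; handling omitted */

		bio_chain(split, bio);		/* @bio completes only after @split */
		generic_make_request(bio);	/* send the remainder back to the top */
		return split;			/* caller continues with the front part */
	}
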
@@ -1954,6 +1962,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
bio->bi_css = blkcg_css;
return 0;
}
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
* bio_associate_current - associate a bio with %current
@@ -1984,6 +1993,7 @@ int bio_associate_current(struct bio *bio)
bio->bi_css = task_get_css(current, blkio_cgrp_id);
return 0;
}
+EXPORT_SYMBOL_GPL(bio_associate_current);
/**
* bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9da02c021ebe..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
return -EINVAL;
disk = get_gendisk(MKDEV(major, minor), &part);
- if (!disk || part)
+ if (!disk)
return -EINVAL;
+ if (part) {
+ put_disk(disk);
+ return -EINVAL;
+ }
rcu_read_lock();
spin_lock_irq(disk->queue->queue_lock);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 25ebac8560cf..d088cffb8105 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -82,8 +82,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
- prev && bvec_gap_to_prev(&bvprv, bv.bv_offset))
+ if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
goto split;
if (prev && blk_queue_cluster(q)) {
@@ -484,12 +483,12 @@ static bool req_no_special_merge(struct request *req)
return !q->mq_ops && req->special;
}
-static int req_gap_to_prev(struct request *req, struct request *next)
+static int req_gap_to_prev(struct request *req, struct bio *next)
{
struct bio *prev = req->biotail;
- return bvec_gap_to_prev(&prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bio->bi_io_vec[0].bv_offset);
+ return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+ next->bi_io_vec[0].bv_offset);
}
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
@@ -506,8 +505,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
if (req_no_special_merge(req) || req_no_special_merge(next))
return 0;
- if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
- req_gap_to_prev(req, next))
+ if (req_gap_to_prev(req, next->bio))
return 0;
/*
@@ -692,8 +690,6 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
- struct request_queue *q = rq->q;
-
if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
@@ -718,13 +714,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false;
/* Only check gaps if the bio carries data */
- if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && bio_has_data(bio)) {
- struct bio_vec *bprev;
-
- bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
- if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
- return false;
- }
+ if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
+ return false;
return true;
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9df73991b231..7d8f129a1516 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -89,6 +89,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->max_segments = BLK_MAX_SEGMENTS;
lim->max_integrity_segments = 0;
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+ lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
lim->chunk_sectors = 0;
@@ -220,8 +221,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Description:
* Enables a low level driver to set a hard upper limit,
* max_hw_sectors, on the size of requests. max_hw_sectors is set by
- * the device driver based upon the combined capabilities of I/O
- * controller and storage device.
+ * the device driver based upon the capabilities of the I/O
+ * controller.
*
* max_sectors is a soft limit imposed by the block layer for
* filesystem type requests. This value can be overridden on a
@@ -236,7 +237,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
__func__, max_hw_sectors);
}
- limits->max_sectors = limits->max_hw_sectors = max_hw_sectors;
+ limits->max_hw_sectors = max_hw_sectors;
+ limits->max_sectors = min_t(unsigned int, max_hw_sectors,
+ BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);
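
With this change a driver's hardware ceiling no longer leaks into the default soft limit: max_hw_sectors records what the controller can take, while max_sectors for ordinary filesystem I/O is capped at BLK_DEF_MAX_SECTORS and can still be raised through the max_sectors_kb sysfs attribute. A hypothetical driver setup (not from this patch) showing the resulting split:

	/* Hypothetical driver init: a controller that accepts 32 MiB per command. */
	static void example_set_limits(struct request_queue *q)
	{
		blk_queue_max_hw_sectors(q, 65536);	/* 65536 * 512 B = 32 MiB */
		/*
		 * Afterwards:
		 *   q->limits.max_hw_sectors == 65536 (hard ceiling for the driver)
		 *   q->limits.max_sectors    == min(65536, BLK_DEF_MAX_SECTORS),
		 * i.e. the block layer default unless max_sectors_kb is raised.
		 */
	}
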
@@ -530,6 +533,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
+ t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
+ b->virt_boundary_mask);
t->max_segments = min_not_zero(t->max_segments, b->max_segments);
t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
@@ -770,6 +775,17 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
+ * blk_queue_virt_boundary - set boundary rules for bio merging
+ * @q: the request queue for the device
+ * @mask: the memory boundary mask
+ **/
+void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
+{
+ q->limits.virt_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_virt_boundary);
+
+/**
* blk_queue_dma_alignment - set dma length and memory alignment
* @q: the request queue for the device
* @mask: alignment mask
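
blk_queue_virt_boundary() replaces QUEUE_FLAG_SG_GAPS with a proper queue limit: instead of a yes/no flag, a driver describes the boundary its scatter-gather elements must respect, and the merge and split paths above enforce it via bvec_gap_to_prev(). The driver conversions are not part of this block/ diff; as an illustrative example, a controller whose SG entries after the first must start on a 4 KiB boundary (and, except for the last, end on one) would register:

	/* Hypothetical driver setup, not part of this patch. */
	static void example_setup_queue(struct request_queue *q)
	{
		/* bios that would leave a gap inside a 4 KiB window are
		 * split rather than merged */
		blk_queue_virt_boundary(q, 4096 - 1);
	}
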