Diffstat (limited to 'block/bio.c')
-rw-r--r--  block/bio.c | 33
1 file changed, 18 insertions, 15 deletions
diff --git a/block/bio.c b/block/bio.c
index b3a79285c278..e726c0e280a8 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -253,6 +253,7 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
 	bio->bi_write_hint = 0;
 	bio->bi_write_stream = 0;
 	bio->bi_status = 0;
+	bio->bi_bvec_gap_bit = 0;
 	bio->bi_iter.bi_sector = 0;
 	bio->bi_iter.bi_size = 0;
 	bio->bi_iter.bi_idx = 0;
@@ -320,9 +321,13 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 	return parent;
 }
 
+/*
+ * This function should only be used as a flag and must never be called.
+ * If execution reaches here, it indicates a serious programming error.
+ */
 static void bio_chain_endio(struct bio *bio)
 {
-	bio_endio(__bio_chain_endio(bio));
+	BUG();
 }
 
 /**
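
The hunk above turns bio_chain_endio() into a pure sentinel: bio_chain() still installs it as ->bi_end_io so that chained bios can be recognized by pointer comparison, while the actual chain walking is expected to happen in the completion path itself, leaving the BUG() to catch any caller that still invokes the callback directly. That completion-side counterpart is not part of this diff, so the standalone C model below is only a sketch of the assumed pattern; the names (struct tbio, tbio_chain, tbio_endio, tbio_chain_endio) are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct bio: just a completion callback and a parent link. */
struct tbio {
	void (*end_io)(struct tbio *);
	struct tbio *parent;
};

/* Sentinel only: installed as ->end_io but never meant to run (mirrors the BUG() above). */
static void tbio_chain_endio(struct tbio *bio)
{
	fprintf(stderr, "sentinel end_io called: chaining invariant broken\n");
	abort();
}

/* Chain child to parent so the child's completion is forwarded to the parent. */
static void tbio_chain(struct tbio *child, struct tbio *parent)
{
	child->end_io = tbio_chain_endio;	/* pointer used purely as a marker */
	child->parent = parent;
}

/* Completion path: recognizes the sentinel by comparison and walks the chain iteratively. */
static void tbio_endio(struct tbio *bio)
{
	while (bio->end_io == tbio_chain_endio)
		bio = bio->parent;
	if (bio->end_io)
		bio->end_io(bio);
}

static void parent_done(struct tbio *bio)
{
	(void)bio;
	printf("parent completed\n");
}

int main(void)
{
	struct tbio parent = { .end_io = parent_done };
	struct tbio child = { 0 };

	tbio_chain(&child, &parent);
	tbio_endio(&child);	/* completes the parent without ever calling the sentinel */
	return 0;
}

The model deliberately leaves out the per-bio remaining counter and status propagation that __bio_chain_endio() handles in the kernel; the only point illustrated is that a callback pointer can serve as a type tag, so the function body itself becomes unreachable.
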
@@ -516,20 +521,18 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
 		return NULL;
 
-	if (opf & REQ_ALLOC_CACHE) {
-		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
-			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
-					gfp_mask, bs);
-			if (bio)
-				return bio;
-			/*
-			 * No cached bio available, bio returned below marked with
-			 * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache.
-			 */
-		} else {
-			opf &= ~REQ_ALLOC_CACHE;
-		}
-	}
+	if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
+		opf |= REQ_ALLOC_CACHE;
+		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
+				gfp_mask, bs);
+		if (bio)
+			return bio;
+		/*
+		 * No cached bio available, bio returned below marked with
+		 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
+		 */
+	} else
+		opf &= ~REQ_ALLOC_CACHE;
 
 	/*
 	 * submit_bio_noacct() converts recursion to iteration; this means if