author     Tejun Heo <tj@kernel.org>	2014-09-24 13:00:21 -0400
committer  Tejun Heo <tj@kernel.org>	2014-09-24 13:00:21 -0400
commit     d06efebf0c37d438fcf07057be00dd40fcfce08d (patch)
tree       31a0786d132aadf4cbb9725f3f444ef6e1052128 /drivers/md
parent     bb2e226b3bef596dd56be97df655d857b4603923 (diff)
parent     0a30288da1aec914e158c2d7a3482a85f632750f (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block into for-3.18
This is to receive 0a30288da1ae ("blk-mq, percpu_ref: implement a
kludge for SCSI blk-mq stall during probe"), which implements
__percpu_ref_kill_expedited() to work around the SCSI blk-mq stall.
The commit will be reverted and patches implementing the proper fix
will be added.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@lst.de>
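For context on the workaround being pulled in: blk-mq freezes a request queue by killing the percpu_ref that counts in-flight requests, and percpu_ref_kill() can only complete its switch to atomic mode after an RCU grace period; SCSI probing freezes and unfreezes many queues back to back, so those grace periods add up to a visible stall. Below is a minimal, illustrative sketch of that freeze pattern, not code from this merge; all my_* identifiers are made up, and initialization is omitted because the percpu_ref_init() signature was being reworked in this same development cycle.

/*
 * Illustrative sketch only (not part of this merge): the percpu_ref
 * pattern blk-mq uses to freeze a queue.  The my_* names are
 * hypothetical; the percpu_ref and completion APIs are real.
 */
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct my_queue {
	struct percpu_ref	usage;	/* counts requests in flight */
	struct completion	frozen;	/* fires once usage hits zero */
};

/* release callback: runs once the last reference is dropped */
static void my_queue_usage_release(struct percpu_ref *ref)
{
	struct my_queue *q = container_of(ref, struct my_queue, usage);

	complete(&q->frozen);
}

static int my_queue_enter(struct my_queue *q)
{
	/* fast path: per-cpu increment while the ref is live */
	return percpu_ref_tryget_live(&q->usage) ? 0 : -ENODEV;
}

static void my_queue_exit(struct my_queue *q)
{
	percpu_ref_put(&q->usage);
}

static void my_queue_freeze(struct my_queue *q)
{
	/*
	 * percpu_ref_kill() switches the ref to atomic mode, but the
	 * switch (and hence the release callback) can only happen after
	 * an RCU grace period.  Repeating this for many queues during
	 * SCSI probe is the stall that __percpu_ref_kill_expedited()
	 * papers over until the proper fix lands.
	 */
	percpu_ref_kill(&q->usage);
	wait_for_completion(&q->frozen);
}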
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-cache-target.c |  4
-rw-r--r--   drivers/md/dm-crypt.c        | 25
-rw-r--r--   drivers/md/raid10.c          |  7
-rw-r--r--   drivers/md/raid5.c           |  4
4 files changed, 30 insertions, 10 deletions
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1af40ee209e2..7130505c2425 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
 	struct cache *cache = mg->cache;
 
 	if (mg->writeback) {
-		cell_defer(cache, mg->old_ocell, false);
 		clear_dirty(cache, mg->old_oblock, mg->cblock);
+		cell_defer(cache, mg->old_ocell, false);
 		cleanup_migration(mg);
 		return;
 
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
 		}
 
 	} else {
+		clear_dirty(cache, mg->new_oblock, mg->cblock);
 		if (mg->requeue_holder)
 			cell_defer(cache, mg->new_ocell, true);
 		else {
 			bio_endio(mg->new_ocell->holder, 0);
 			cell_defer(cache, mg->new_ocell, false);
 		}
-		clear_dirty(cache, mg->new_oblock, mg->cblock);
 		cleanup_migration(mg);
 	}
 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2785007e0e46..cd15e0801228 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1688,6 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	unsigned int key_size, opt_params;
 	unsigned long long tmpll;
 	int ret;
+	size_t iv_size_padding;
 	struct dm_arg_set as;
 	const char *opt_string;
 	char dummy;
@@ -1724,20 +1725,32 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
 	cc->dmreq_start = sizeof(struct ablkcipher_request);
 	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-			   ~(crypto_tfm_ctx_alignment() - 1);
+	cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
+
+	if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+		/* Allocate the padding exactly */
+		iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
+				& crypto_ablkcipher_alignmask(any_tfm(cc));
+	} else {
+		/*
+		 * If the cipher requires greater alignment than kmalloc
+		 * alignment, we don't know the exact position of the
+		 * initialization vector. We must assume worst case.
+		 */
+		iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+	}
 
 	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-			sizeof(struct dm_crypt_request) + cc->iv_size);
+			sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
 	if (!cc->req_pool) {
 		ti->error = "Cannot allocate crypt request mempool";
 		goto bad;
 	}
 
 	cc->per_bio_data_size = ti->per_bio_data_size =
-		sizeof(struct dm_crypt_io) + cc->dmreq_start +
-		sizeof(struct dm_crypt_request) + cc->iv_size;
+		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
+		      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
+		      ARCH_KMALLOC_MINALIGN);
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b08c18871323..6703751d87d7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2953,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		 */
 		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
 			end_reshape(conf);
+			close_sync(conf);
 			return 0;
 		}
 
@@ -3081,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			}
 
 			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+			r10_bio->state = 0;
 			raise_barrier(conf, rb2 != NULL);
 			atomic_set(&r10_bio->remaining, 0);
 
@@ -3269,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		if (sync_blocks < max_sync)
 			max_sync = sync_blocks;
 		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+		r10_bio->state = 0;
 
 		r10_bio->mddev = mddev;
 		atomic_set(&r10_bio->remaining, 0);
@@ -4384,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 read_more:
 	/* Now schedule reads for blocks from sector_nr to last */
 	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
+	r10_bio->state = 0;
 	raise_barrier(conf, sectors_done != 0);
 	atomic_set(&r10_bio->remaining, 0);
 	r10_bio->mddev = mddev;
@@ -4398,6 +4402,7 @@ read_more:
 		 * on all the target devices.
 		 */
 		// FIXME
+		mempool_free(r10_bio, conf->r10buf_pool);
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		return sectors_done;
 	}
@@ -4410,7 +4415,7 @@ read_more:
 	read_bio->bi_private = r10_bio;
 	read_bio->bi_end_io = end_sync_read;
 	read_bio->bi_rw = READ;
-	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+	read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
 	read_bio->bi_iter.bi_size = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6234b2e84587..183588b11fc1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2922,7 +2922,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
 	     (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) &&
 	      !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
 	     (sh->raid_conf->level == 6 && s->failed && s->to_write &&
-	      s->to_write < sh->raid_conf->raid_disks - 2 &&
+	      s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 &&
 	      (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) {
 		/* we would like to get this block, possibly by computing it,
 		 * otherwise read it if the backing disk is insync
@@ -3817,6 +3817,8 @@ static void handle_stripe(struct stripe_head *sh)
 				set_bit(R5_Wantwrite, &dev->flags);
 				if (prexor)
 					continue;
+				if (s.failed > 1)
+					continue;
 				if (!test_bit(R5_Insync, &dev->flags) ||
 				    ((i == sh->pd_idx || i == sh->qd_idx) &&
 				      s.failed == 0))
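The dm-crypt hunk above sizes the per-request area so the IV that follows struct dm_crypt_request ends up aligned for the cipher. When kmalloc alignment already covers the cipher's alignmask, the padding is computed with the usual power-of-two trick: for alignment a = mask + 1, -offset & mask is exactly the number of bytes needed to round offset up to a multiple of a. Below is a small standalone illustration of that arithmetic, not kernel code; the concrete sizes and the 16-byte alignmask are made-up stand-ins.

/*
 * Standalone illustration of the iv_size_padding arithmetic used in the
 * dm-crypt hunk above.  The sizes and the alignmask are invented
 * stand-ins; only the "-offset & mask" identity is the point.
 */
#include <stdio.h>
#include <stddef.h>

/* Bytes of padding needed so (offset + padding) is a multiple of
 * (align_mask + 1); align_mask must be one less than a power of two. */
static size_t pad_to_align(size_t offset, size_t align_mask)
{
	return -offset & align_mask;
}

int main(void)
{
	size_t dmreq_start = 200;	/* stand-in for cc->dmreq_start */
	size_t dmreq_size  = 76;	/* stand-in for sizeof(struct dm_crypt_request) */
	size_t align_mask  = 15;	/* stand-in for the cipher's alignmask */

	size_t iv_size_padding = pad_to_align(dmreq_start + dmreq_size, align_mask);

	/* 276 needs 12 bytes of padding to reach 288, a multiple of 16 */
	printf("padding = %zu, IV offset = %zu\n",
	       iv_size_padding, dmreq_start + dmreq_size + iv_size_padding);
	return 0;
}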