| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 13:33:29 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-12 13:33:29 -0700 |
| commit | 0e04c641b199435f3779454055f6a7de258ecdfc | |
| tree | 9b79da0c52240bf4b96270ac12356ad75da6f739 /drivers/md/dm-thin.c | |
| parent | 7550cfab3d4053b54f16e2fe337affde71d1eb51 | |
| parent | 09869de57ed2728ae3c619803932a86cb0e2c4f8 | |
Merge tag 'dm-3.16-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
"This pull request is later than I'd have liked because I was waiting
for some performance data to help finally justify sending the
long-standing dm-crypt cpu scalability improvements upstream.
Unfortunately we came up short, so those dm-crypt changes will
continue to wait, but it seems we're not far off.
- Add dm_accept_partial_bio interface to DM core to allow DM targets
  to only process a portion of a bio, the remainder being sent in the
  next bio. This enables the old dm snapshot-origin target to only
  split write bios on chunk boundaries; read bios are now sent to the
  origin device unchanged (a rough usage sketch follows this quoted
  message).

- Add DM core support for disabling WRITE SAME if the underlying SCSI
  layer disables it due to command failure.

- Reduce lock contention in DM's bio-prison.

- A few small cleanups and fixes to dm-thin and dm-era"
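For readers new to the interface, here is a minimal sketch of how a target's map function might use the new dm_accept_partial_bio() hook to process a write only up to the next chunk boundary, leaving DM core to resubmit the remainder as a fresh bio. It is an illustration loosely modelled on the snapshot-origin change, not the actual target code: the example_map name, the example_origin structure and its split_boundary field are hypothetical.

```c
#include <linux/device-mapper.h>

/* Hypothetical per-target state; only dm_accept_partial_bio() comes from this series. */
struct example_origin {
	struct dm_dev *dev;
	unsigned split_boundary;	/* chunk size in sectors, power of two */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_origin *o = ti->private;
	unsigned remaining;

	bio->bi_bdev = o->dev->bdev;

	/* Reads (and empty flushes, omitted here) pass through untouched. */
	if (bio_rw(bio) != WRITE)
		return DM_MAPIO_REMAPPED;

	/* Sectors left before the next chunk boundary. */
	remaining = o->split_boundary -
		((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	/*
	 * Accept only the part of the bio up to the boundary; DM core will
	 * resubmit the rest as a new, smaller bio.
	 */
	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	return DM_MAPIO_REMAPPED;
}
```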
* tag 'dm-3.16-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
dm thin: update discard_granularity to reflect the thin-pool blocksize
dm bio prison: implement per bucket locking in the dm_bio_prison hash table
dm: remove symbol export for dm_set_device_limits
dm: disable WRITE SAME if it fails
dm era: check for a non-NULL metadata object before closing it
dm thin: return ENOSPC instead of EIO when error_if_no_space enabled
dm thin: cleanup noflush_work to use a proper completion
dm snapshot: do not split read bios sent to snapshot-origin target
dm snapshot: allocate a per-target structure for snapshot-origin target
dm: introduce dm_accept_partial_bio
dm: change sector_count member in clone_info from sector_t to unsigned
Diffstat (limited to 'drivers/md/dm-thin.c')
| -rw-r--r-- | drivers/md/dm-thin.c | 93 |
1 file changed, 60 insertions, 33 deletions
```diff
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 242ac2ea5f29..fc9c848a60c9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -310,13 +310,18 @@ static void cell_defer_no_holder_no_free(struct thin_c *tc,
 	wake_worker(pool);
 }
 
-static void cell_error(struct pool *pool,
-		       struct dm_bio_prison_cell *cell)
+static void cell_error_with_code(struct pool *pool,
+				 struct dm_bio_prison_cell *cell, int error_code)
 {
-	dm_cell_error(pool->prison, cell);
+	dm_cell_error(pool->prison, cell, error_code);
 	dm_bio_prison_free_cell(pool->prison, cell);
 }
 
+static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
+{
+	cell_error_with_code(pool, cell, -EIO);
+}
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -1027,7 +1032,7 @@ static void retry_on_resume(struct bio *bio)
 	spin_unlock_irqrestore(&tc->lock, flags);
 }
 
-static bool should_error_unserviceable_bio(struct pool *pool)
+static int should_error_unserviceable_bio(struct pool *pool)
 {
 	enum pool_mode m = get_pool_mode(pool);
 
@@ -1035,25 +1040,27 @@ static bool should_error_unserviceable_bio(struct pool *pool)
 	case PM_WRITE:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
-		return true;
+		return -EIO;
 
 	case PM_OUT_OF_DATA_SPACE:
-		return pool->pf.error_if_no_space;
+		return pool->pf.error_if_no_space ? -ENOSPC : 0;
 
 	case PM_READ_ONLY:
 	case PM_FAIL:
-		return true;
+		return -EIO;
 	default:
 		/* Shouldn't get here */
 		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
-		return true;
+		return -EIO;
 	}
 }
 
 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
 {
-	if (should_error_unserviceable_bio(pool))
-		bio_io_error(bio);
+	int error = should_error_unserviceable_bio(pool);
+
+	if (error)
+		bio_endio(bio, error);
 	else
 		retry_on_resume(bio);
 }
@@ -1062,18 +1069,21 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
 {
 	struct bio *bio;
 	struct bio_list bios;
+	int error;
 
-	if (should_error_unserviceable_bio(pool)) {
-		cell_error(pool, cell);
+	error = should_error_unserviceable_bio(pool);
+	if (error) {
+		cell_error_with_code(pool, cell, error);
 		return;
 	}
 
 	bio_list_init(&bios);
 	cell_release(pool, cell, &bios);
 
-	if (should_error_unserviceable_bio(pool))
+	error = should_error_unserviceable_bio(pool);
+	if (error)
 		while ((bio = bio_list_pop(&bios)))
-			bio_io_error(bio);
+			bio_endio(bio, error);
 	else
 		while ((bio = bio_list_pop(&bios)))
 			retry_on_resume(bio);
@@ -1610,47 +1620,63 @@ static void do_no_space_timeout(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
-struct noflush_work {
+struct pool_work {
 	struct work_struct worker;
-	struct thin_c *tc;
+	struct completion complete;
+};
+
+static struct pool_work *to_pool_work(struct work_struct *ws)
+{
+	return container_of(ws, struct pool_work, worker);
+}
 
-	atomic_t complete;
-	wait_queue_head_t wait;
+static void pool_work_complete(struct pool_work *pw)
+{
+	complete(&pw->complete);
+}
+
+static void pool_work_wait(struct pool_work *pw, struct pool *pool,
+			   void (*fn)(struct work_struct *))
+{
+	INIT_WORK_ONSTACK(&pw->worker, fn);
+	init_completion(&pw->complete);
+	queue_work(pool->wq, &pw->worker);
+	wait_for_completion(&pw->complete);
+}
+
+/*----------------------------------------------------------------*/
+
+struct noflush_work {
+	struct pool_work pw;
+	struct thin_c *tc;
 };
 
-static void complete_noflush_work(struct noflush_work *w)
+static struct noflush_work *to_noflush(struct work_struct *ws)
 {
-	atomic_set(&w->complete, 1);
-	wake_up(&w->wait);
+	return container_of(to_pool_work(ws), struct noflush_work, pw);
 }
 
 static void do_noflush_start(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = true;
 	requeue_io(w->tc);
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void do_noflush_stop(struct work_struct *ws)
 {
-	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	struct noflush_work *w = to_noflush(ws);
 	w->tc->requeue_mode = false;
-	complete_noflush_work(w);
+	pool_work_complete(&w->pw);
 }
 
 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
 	struct noflush_work w;
 
-	INIT_WORK_ONSTACK(&w.worker, fn);
 	w.tc = tc;
-	atomic_set(&w.complete, 0);
-	init_waitqueue_head(&w.wait);
-
-	queue_work(tc->pool->wq, &w.worker);
-
-	wait_event(w.wait, atomic_read(&w.complete));
+	pool_work_wait(&w.pw, tc->pool, fn);
 }
 
 /*----------------------------------------------------------------*/
@@ -3068,7 +3094,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 	 */
 	if (pt->adjusted_pf.discard_passdown) {
 		data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
-		limits->discard_granularity = data_limits->discard_granularity;
+		limits->discard_granularity = max(data_limits->discard_granularity,
+						  pool->sectors_per_block << SECTOR_SHIFT);
 	} else
 		limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
```
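The noflush_work hunks above replace an open-coded atomic_t plus wait-queue handshake with the generic on-stack work + struct completion idiom: the caller queues a work item that lives on its own stack and sleeps until the worker signals the completion. Below is a minimal, self-contained sketch of that idiom outside of DM, assuming a caller-supplied workqueue; the my_work / my_sync_call names and the int argument are purely illustrative.

```c
#include <linux/workqueue.h>
#include <linux/completion.h>

/* Hypothetical wrapper mirroring struct pool_work: a work item plus a completion. */
struct my_work {
	struct work_struct worker;
	struct completion done;
	int arg;			/* whatever state the worker needs */
};

static void my_work_fn(struct work_struct *ws)
{
	struct my_work *w = container_of(ws, struct my_work, worker);

	/* ... do the real work using w->arg ... */

	complete(&w->done);		/* wake the waiting caller */
}

/* Queue my_work_fn() on wq and block until it has finished running. */
static void my_sync_call(struct workqueue_struct *wq, int arg)
{
	struct my_work w;

	w.arg = arg;
	INIT_WORK_ONSTACK(&w.worker, my_work_fn);
	init_completion(&w.done);
	queue_work(wq, &w.worker);
	wait_for_completion(&w.done);
	destroy_work_on_stack(&w.worker);	/* pairs with INIT_WORK_ONSTACK */
}
```

Compared with the old atomic_set()/wake_up()/wait_event() dance, the completion carries the wakeup and ordering guarantees itself, which is what the "use a proper completion" commit title refers to.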
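As a quick worked example of the set_discard_limits() hunk: with a hypothetical thin-pool block size of 128 sectors (128 << 9 = 64 KiB) sitting on a data device that advertises a 4 KiB discard granularity, the pool used to pass the 4 KiB value straight through when discard passdown was enabled; it now reports max(4 KiB, 64 KiB) = 64 KiB, so the advertised discard_granularity never falls below the thin-pool blocksize.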
