author     Joe Thornber <ejt@redhat.com>      2014-11-24 14:06:22 +0000
committer  Mike Snitzer <snitzer@redhat.com>  2014-12-01 11:30:10 -0500
commit     3e2e1c3098fcc02369f0eea822d0a7914b691567 (patch)
tree       d7ccfeacc50f7e3b730daad45f3ad11c70570153 /drivers/md
parent     2572629a1318eb9e13e70fa59756d7bcfb80319e (diff)
dm cache: when reloading a discard bitset allow for a different discard block size
The discard block size can change if the origin changes size or if an
old DM cache is upgraded from using a discard block size that was equal
to the cache block size.

To fix this, an extent of discarded blocks is established for the
purpose of translating the old discard block size to the new in-core
discard block size and setting the corresponding bits. The old
(potentially huge) discard bitset is left on-disk until it is
re-written using the new in-core information on the next successful DM
cache shutdown.

Fixes: 7ae34e777896 ("dm cache: improve discard support")
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
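To make the rounding in this translation concrete, here is a rough standalone sketch (plain userspace C with made-up block sizes, not the kernel code in the diff below): the start of a discarded extent is rounded up and its end rounded down to the new discard block size, so a bit is only set for new-size blocks that are completely covered by old discarded blocks.

/*
 * Sketch only: mimics the rounding done by set_discard_range() with
 * ordinary integer arithmetic and invented example sizes.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t old_block_size = 128;  /* sectors per on-disk discard block (example) */
        uint64_t new_block_size = 512;  /* sectors per in-core discard block (example) */
        uint64_t discard_begin = 3, discard_end = 21; /* half-open run, in old blocks */

        /* Convert the run to sectors. */
        uint64_t b = discard_begin * old_block_size;
        uint64_t e = discard_end * old_block_size;

        /* Round the begin up and the end down to the new block size. */
        b = (b + new_block_size - 1) / new_block_size;
        e = e / new_block_size;

        printf("mark new discard blocks [%llu, %llu) as discarded\n",
               (unsigned long long)b, (unsigned long long)e);
        return 0;
}

With these numbers, old blocks [3, 21) span sectors [384, 2688), of which only new blocks [1, 5) are fully covered; the partially covered sectors at either end are conservatively left not discarded, matching the comment added in the patch.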
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/dm-cache-target.c   94
1 file changed, 87 insertions(+), 7 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 41e7cfdb450d..2c66315553f2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2817,17 +2817,86 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
         return 0;
 }
 
+/*
+ * The discard block size in the on disk metadata is not
+ * necessarily the same as we're currently using. So we have to
+ * be careful to only set the discarded attribute if we know it
+ * covers a complete block of the new size.
+ */
+struct discard_load_info {
+        struct cache *cache;
+
+        /*
+         * These blocks are sized using the on disk dblock size, rather
+         * than the current one.
+         */
+        dm_block_t block_size;
+        dm_block_t discard_begin, discard_end;
+};
+
+static void discard_load_info_init(struct cache *cache,
+                                   struct discard_load_info *li)
+{
+        li->cache = cache;
+        li->discard_begin = li->discard_end = 0;
+}
+
+static void set_discard_range(struct discard_load_info *li)
+{
+        sector_t b, e;
+
+        if (li->discard_begin == li->discard_end)
+                return;
+
+        /*
+         * Convert to sectors.
+         */
+        b = li->discard_begin * li->block_size;
+        e = li->discard_end * li->block_size;
+
+        /*
+         * Then convert back to the current dblock size.
+         */
+        b = dm_sector_div_up(b, li->cache->discard_block_size);
+        sector_div(e, li->cache->discard_block_size);
+
+        /*
+         * The origin may have shrunk, so we need to check we're still in
+         * bounds.
+         */
+        if (e > from_dblock(li->cache->discard_nr_blocks))
+                e = from_dblock(li->cache->discard_nr_blocks);
+
+        for (; b < e; b++)
+                set_discard(li->cache, to_dblock(b));
+}
+
 static int load_discard(void *context, sector_t discard_block_size,
                         dm_dblock_t dblock, bool discard)
 {
-        struct cache *cache = context;
+        struct discard_load_info *li = context;
 
-        /* FIXME: handle mis-matched block size */
+        li->block_size = discard_block_size;
 
-        if (discard)
-                set_discard(cache, dblock);
-        else
-                clear_discard(cache, dblock);
+        if (discard) {
+                if (from_dblock(dblock) == li->discard_end)
+                        /*
+                         * We're already in a discard range, just extend it.
+                         */
+                        li->discard_end = li->discard_end + 1ULL;
+
+                else {
+                        /*
+                         * Emit the old range and start a new one.
+                         */
+                        set_discard_range(li);
+                        li->discard_begin = from_dblock(dblock);
+                        li->discard_end = li->discard_begin + 1ULL;
+                }
+        } else {
+                set_discard_range(li);
+                li->discard_begin = li->discard_end = 0;
+        }
 
         return 0;
 }
@@ -2911,11 +2980,22 @@ static int cache_preresume(struct dm_target *ti)
         }
 
         if (!cache->loaded_discards) {
-                r = dm_cache_load_discards(cache->cmd, load_discard, cache);
+                struct discard_load_info li;
+
+                /*
+                 * The discard bitset could have been resized, or the
+                 * discard block size changed. To be safe we start by
+                 * setting every dblock to not discarded.
+                 */
+                clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+
+                discard_load_info_init(cache, &li);
+                r = dm_cache_load_discards(cache->cmd, load_discard, &li);
                 if (r) {
                         DMERR("could not load origin discards");
                         return r;
                 }
+                set_discard_range(&li);
 
                 cache->loaded_discards = true;
         }
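
For readers skimming the callback above, the run coalescing that load_discard() performs can be mimicked by a small standalone sketch (plain userspace C with an invented bit pattern; the kernel hands each finished extent to set_discard_range() instead of printing it, and cache_preresume() flushes the final extent after the load completes).

/*
 * Sketch only: consecutive discarded dblocks grow the current
 * [begin, end) extent; the extent is emitted as soon as a gap
 * (or the end of the bitset) is reached.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static void emit_range(uint64_t begin, uint64_t end)
{
        if (begin != end)
                printf("discarded extent [%llu, %llu)\n",
                       (unsigned long long)begin, (unsigned long long)end);
}

int main(void)
{
        /* Example on-disk discard bits; the kernel reads these from metadata. */
        bool bits[] = { true, true, false, true, true, true, false };
        uint64_t begin = 0, end = 0;

        for (uint64_t i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
                if (bits[i]) {
                        if (i == end) {
                                end++;                  /* extend the current run */
                        } else {
                                emit_range(begin, end); /* emit the old run */
                                begin = i;
                                end = i + 1;            /* ...and start a new one */
                        }
                } else {
                        emit_range(begin, end);
                        begin = end = 0;
                }
        }
        emit_range(begin, end); /* flush the last run, as cache_preresume() does */
        return 0;
}

Coalescing into whole extents first is what makes the translation workable: when the new in-core discard blocks are larger than the old on-disk ones, a single old block can never fully cover a new block on its own, so translating the bits one at a time would set nothing.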