author     Kent Overstreet <kent.overstreet@linux.dev>  2023-11-09 14:22:46 -0500
committer  Kent Overstreet <kent.overstreet@linux.dev>  2024-07-14 19:00:13 -0400
commit     1d16c605cc55ef26f0c65b362665a6c99080ccbc (patch)
tree       c7f7f6b6209ac290185dbea26a1f9d59661ac95f /fs/bcachefs/replicas.c
parent     5d9667d1d6eaca3f6cd3c63cd6a0f309147c7f5c (diff)
bcachefs: Disk space accounting rewrite
Main part of the disk accounting rewrite.

This is a wholesale rewrite of the existing disk space accounting, which
relies on percpu counters that are sharded by journal buffer, rolled up
and added to each journal write.

With the new scheme, every set of counters is a distinct key in the
accounting btree; this fixes scaling limitations of the old scheme, where
counters took up space in each journal entry and required multiple percpu
counters.

Now, in-memory accounting requires a single set of percpu counters - not
one set per in-flight journal buffer - and in the future we'll probably
also have counters that don't use in-memory percpu counters at all, since
they're not strictly required.

An accounting update is now a normal btree update, using the btree write
buffer path. At transaction commit time, we apply accounting updates to
the in-memory counters, which are percpu counters indexed in an eytzinger
tree by the accounting key.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
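To make the new model concrete, here is a minimal userspace sketch of the
idea: one set of counters per accounting key, looked up in a sorted
in-memory table and updated in place at "commit" time. All names here
(acct_key, acct_counters, acct_apply, ...) are illustrative only and are
not the bcachefs API; a plain sorted array with binary search stands in
for the eytzinger-ordered array, and plain int64_t values stand in for the
percpu counters.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accounting key: in bcachefs this would identify the counter
 * type (e.g. per-device or per-replicas-entry); here it is just two fields. */
struct acct_key { uint32_t type; uint32_t dev; };

/* One set of counters per key.  The real code uses percpu counters; a plain
 * array of 64-bit values stands in for them here. */
struct acct_counters { struct acct_key k; int64_t v[3]; };

struct acct_table {
	struct acct_counters *e;	/* sorted by key (stand-in for eytzinger order) */
	size_t nr;
};

static int acct_key_cmp(const struct acct_key *l, const struct acct_key *r)
{
	if (l->type != r->type)
		return l->type < r->type ? -1 : 1;
	if (l->dev != r->dev)
		return l->dev < r->dev ? -1 : 1;
	return 0;
}

/* Plain binary search, standing in for the eytzinger lookup described above: */
static struct acct_counters *acct_lookup(struct acct_table *t, struct acct_key k)
{
	size_t lo = 0, hi = t->nr;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		int c = acct_key_cmp(&t->e[mid].k, &k);

		if (!c)
			return &t->e[mid];
		if (c < 0)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}

/* "Transaction commit": apply a delta from an accounting update to the
 * in-memory counters for that key. */
static int acct_apply(struct acct_table *t, struct acct_key k,
		      const int64_t *delta, unsigned nr)
{
	struct acct_counters *c = acct_lookup(t, k);

	if (!c)
		return -1;
	for (unsigned i = 0; i < nr && i < 3; i++)
		c->v[i] += delta[i];
	return 0;
}

int main(void)
{
	struct acct_counters counters[] = {
		{ .k = { .type = 1, .dev = 0 } },
		{ .k = { .type = 1, .dev = 1 } },
	};
	struct acct_table t = { .e = counters, .nr = 2 };
	int64_t delta[3] = { 1, 512, 0 };	/* e.g. one bucket, 512 sectors */

	acct_apply(&t, (struct acct_key){ .type = 1, .dev = 1 }, delta, 3);
	printf("dev 1: %lld sectors\n", (long long)counters[1].v[1]);
	return 0;
}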
Diffstat (limited to 'fs/bcachefs/replicas.c')
-rw-r--r--  fs/bcachefs/replicas.c  42
1 file changed, 13 insertions(+), 29 deletions(-)
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 57a1f09cca09..9cf1d118f146 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -243,23 +243,25 @@ static bool __replicas_has_entry(struct bch_replicas_cpu *r,
return __replicas_entry_idx(r, search) >= 0;
}
-bool bch2_replicas_marked(struct bch_fs *c,
+bool bch2_replicas_marked_locked(struct bch_fs *c,
struct bch_replicas_entry_v1 *search)
{
- bool marked;
-
- if (!search->nr_devs)
- return true;
-
verify_replicas_entry(search);
+ return !search->nr_devs ||
+ (__replicas_has_entry(&c->replicas, search) &&
+ (likely((!c->replicas_gc.entries)) ||
+ __replicas_has_entry(&c->replicas_gc, search)));
+}
+
+bool bch2_replicas_marked(struct bch_fs *c,
+ struct bch_replicas_entry_v1 *search)
+{
percpu_down_read(&c->mark_lock);
- marked = __replicas_has_entry(&c->replicas, search) &&
- (likely((!c->replicas_gc.entries)) ||
- __replicas_has_entry(&c->replicas_gc, search));
+ bool ret = bch2_replicas_marked_locked(c, search);
percpu_up_read(&c->mark_lock);
- return marked;
+ return ret;
}
static void __replicas_table_update(struct bch_fs_usage *dst,
@@ -457,20 +459,6 @@ int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
? 0 : bch2_mark_replicas_slowpath(c, r);
}
-/* replicas delta list: */
-
-int bch2_replicas_delta_list_mark(struct bch_fs *c,
- struct replicas_delta_list *r)
-{
- struct replicas_delta *d = r->d;
- struct replicas_delta *top = (void *) r->d + r->used;
- int ret = 0;
-
- for (d = r->d; !ret && d != top; d = replicas_delta_next(d))
- ret = bch2_mark_replicas(c, &d->r);
- return ret;
-}
-
/*
* Old replicas_gc mechanism: only used for journal replicas entries now, should
* die at some point:
@@ -1046,8 +1034,6 @@ void bch2_fs_replicas_exit(struct bch_fs *c)
kfree(c->usage_base);
kfree(c->replicas.entries);
kfree(c->replicas_gc.entries);
-
- mempool_exit(&c->replicas_delta_pool);
}
int bch2_fs_replicas_init(struct bch_fs *c)
@@ -1056,7 +1042,5 @@ int bch2_fs_replicas_init(struct bch_fs *c)
&c->replicas_journal_res,
reserve_journal_replicas(c, &c->replicas));
- return mempool_init_kmalloc_pool(&c->replicas_delta_pool, 1,
- REPLICAS_DELTA_LIST_MAX) ?:
- replicas_table_update(c, &c->replicas);
+ return replicas_table_update(c, &c->replicas);
}
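A note on the bch2_replicas_marked_locked()/bch2_replicas_marked() split
introduced above: the _locked variant lets code that already holds
c->mark_lock do the check without taking the lock again, while
bch2_replicas_marked() keeps the old behaviour of taking mark_lock itself.
A minimal sketch of the resulting caller pattern follows; the caller name
is hypothetical (not part of this patch) and it assumes the usual bcachefs
headers and types.

/* Hypothetical caller: it already needs mark_lock for other work, so it
 * uses the new _locked variant inside the critical section instead of
 * calling bch2_replicas_marked(), which would try to take mark_lock a
 * second time. */
static bool example_check_under_mark_lock(struct bch_fs *c,
					  struct bch_replicas_entry_v1 *e)
{
	bool ok;

	percpu_down_read(&c->mark_lock);
	/* ... other work that requires mark_lock ... */
	ok = bch2_replicas_marked_locked(c, e);
	percpu_up_read(&c->mark_lock);

	return ok;
}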