summaryrefslogtreecommitdiff
path: root/fs/bcachefs/alloc_background.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@linux.dev>2023-11-09 14:22:46 -0500
committerKent Overstreet <kent.overstreet@linux.dev>2024-07-14 19:00:13 -0400
commit1d16c605cc55ef26f0c65b362665a6c99080ccbc (patch)
treec7f7f6b6209ac290185dbea26a1f9d59661ac95f /fs/bcachefs/alloc_background.c
parent5d9667d1d6eaca3f6cd3c63cd6a0f309147c7f5c (diff)
bcachefs: Disk space accounting rewrite
Main part of the disk accounting rewrite. This is a wholesale rewrite of the existing disk space accounting, which relies on percpu counters that are sharded by journal buffer, and rolled up and added to each journal write. With the new scheme, every set of counters is a distinct key in the accounting btree; this fixes scaling limitations of the old scheme, where counters took up space in each journal entry and required multiple percpu counters. Now, in-memory accounting requires a single set of percpu counters - not multiple for each in-flight journal buffer - and in the future we'll probably also have counters that don't use in-memory percpu counters, they're not strictly required. An accounting update is now a normal btree update, using the btree write buffer path. At transaction commit time, we apply accounting updates to the in-memory counters, which are percpu counters indexed in an eytzinger tree by the accounting key. Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r--fs/bcachefs/alloc_background.c79
1 file changed, 67 insertions, 12 deletions
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 2af0f0a631f6..3df1099750af 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -15,6 +15,7 @@
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
+#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
@@ -760,6 +761,61 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
return ret;
}
/*
 * Apply deltas to the per-device, per-data-type accounting counters
 * (buckets, sectors, fragmented sectors) by emitting a disk accounting
 * update keyed by (dev, data_type) into the accounting btree.
 *
 * The disk_accounting_pos initializer relies on designated-initializer
 * zero-fill for the unnamed remainder of the key.
 *
 * NOTE(review): @flags is accepted but not used here and is not
 * forwarded to bch2_disk_accounting_mod() — confirm this is intentional.
 */
static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
						    enum bch_data_type data_type,
						    s64 delta_buckets,
						    s64 delta_sectors,
						    s64 delta_fragmented, unsigned flags)
{
	struct disk_accounting_pos acc = {
		.type = BCH_DISK_ACCOUNTING_dev_data_type,
		.dev_data_type.dev = ca->dev_idx,
		.dev_data_type.data_type = data_type,
	};
	/* counter triple: { buckets, sectors, fragmented } */
	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };

	return bch2_disk_accounting_mod(trans, &acc, d, 3);
}
+
/*
 * Translate a change to a bucket's alloc key (old -> new) into per-device
 * accounting updates.
 *
 * Emits up to three accounting mods:
 *  - if the bucket's data_type changed, move one bucket (and its sector /
 *    fragmented-sector counts) from the old type to the new type;
 *  - otherwise, if only the sector count changed, adjust sectors and
 *    fragmented sectors for the (unchanged) data type;
 *  - independently, track unstriped sectors under BCH_DATA_unstriped.
 *
 * Returns 0 on success or the first error from
 * bch2_dev_data_type_accounting_mod().
 */
int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
				   const struct bch_alloc_v4 *old,
				   const struct bch_alloc_v4 *new,
				   unsigned flags)
{
	s64 old_sectors = bch2_bucket_sectors(*old);
	s64 new_sectors = bch2_bucket_sectors(*new);
	if (old->data_type != new->data_type) {
		/*
		 * Bucket changed type: credit the new type with one bucket
		 * plus its counts, debit the old type by the same.
		 */
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	} else if (old_sectors != new_sectors) {
		/* Same type: only the sector/fragmented deltas change. */
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
					 0,
					 new_sectors - old_sectors,
					 bch2_bucket_sectors_fragmented(ca, *new) -
					 bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	}

	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
	if (old_unstriped != new_unstriped) {
		/*
		 * !!x - !!y yields -1/0/+1: the bucket count for
		 * BCH_DATA_unstriped changes only when the bucket
		 * transitions between having and not having unstriped
		 * sectors.
		 */
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
					 !!new_unstriped - !!old_unstriped,
					 new_unstriped - old_unstriped,
					 0,
					 flags);
		if (ret)
			return ret;
	}

	return 0;
}
+
int bch2_trigger_alloc(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
@@ -835,18 +891,17 @@ int bch2_trigger_alloc(struct btree_trans *trans,
goto err;
}
- /*
- * need to know if we're getting called from the invalidate path or
- * not:
- */
-
if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
old_a->cached_sectors) {
- ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
- -((s64) old_a->cached_sectors));
+ ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
+ -((s64) old_a->cached_sectors));
if (ret)
goto err;
}
+
+ ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
+ if (ret)
+ goto err;
}
if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
@@ -886,19 +941,17 @@ int bch2_trigger_alloc(struct btree_trans *trans,
}
}
- percpu_down_read(&c->mark_lock);
if (new_a->gen != old_a->gen) {
+ rcu_read_lock();
u8 *gen = bucket_gen(ca, new.k->p.offset);
if (unlikely(!gen)) {
- percpu_up_read(&c->mark_lock);
+ rcu_read_unlock();
goto invalid_bucket;
}
*gen = new_a->gen;
+ rcu_read_unlock();
}
- bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
- percpu_up_read(&c->mark_lock);
-
#define eval_state(_a, expr) ({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr) !eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a) (!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)
@@ -946,6 +999,8 @@ int bch2_trigger_alloc(struct btree_trans *trans,
bucket_unlock(g);
percpu_up_read(&c->mark_lock);
+
+ bch2_dev_usage_update(c, ca, old_a, new_a);
}
err:
printbuf_exit(&buf);