author    Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 19:38:47 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-11-02 19:38:47 -1000
commit    ecae0bd5173b1014f95a14a8dfbe40ec10367dcf (patch)
tree      f571213ef1a35354ea79f0240a180fdb4111b290 /mm/damon
parent    bc3012f4e3a9765de81f454cb8f9bb16aafc6ff5 (diff)
parent    9732336006764e2ee61225387e3c70eae9139035 (diff)
Merge tag 'mm-stable-2023-11-01-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
 "Many singleton patches against the MM code. The patch series which are included in this merge do the following:

  - Kemeng Shi has contributed some compaction maintenance work in the series 'Fixes and cleanups to compaction'

  - Joel Fernandes has a patchset ('Optimize mremap during mutual alignment within PMD') which fixes an obscure issue with mremap()'s pagetable handling during a subsequent exec(), based upon an implementation which Linus suggested

  - More DAMON/DAMOS maintenance and feature work from SeongJae Park in the following patch series:

	mm/damon: misc fixups for documents, comments and its tracepoint
	mm/damon: add a tracepoint for damos apply target regions
	mm/damon: provide pseudo-moving sum based access rate
	mm/damon: implement DAMOS apply intervals
	mm/damon/core-test: Fix memory leaks in core-test
	mm/damon/sysfs-schemes: Do DAMOS tried regions update for only one apply interval

  - In the series 'Do not try to access unaccepted memory' Adrian Hunter provides some fixups for the recently-added 'unaccepted memory' feature, increasing the feature's checking coverage: 'Plug a few gaps where RAM is exposed without checking if it is unaccepted memory'

  - In the series 'cleanups for lockless slab shrink' Qi Zheng has done some maintenance work in preparation for the lockless slab shrinking code

  - Qi Zheng has redone the earlier (and reverted) attempt to make slab shrinking lockless in the series 'use refcount+RCU method to implement lockless slab shrink'

  - David Hildenbrand contributes some maintenance work for the rmap code in the series 'Anon rmap cleanups'

  - Kefeng Wang does more folio conversions and some maintenance work in the migration code. Series 'mm: migrate: more folio conversion and unification'

  - Matthew Wilcox has fixed an issue in the buffer_head code which was causing long stalls under some heavy memory/IO loads. Some cleanups were added on the way. Series 'Add and use bdev_getblk()'

  - In the series 'Use nth_page() in place of direct struct page manipulation' Zi Yan has fixed a potential issue with the direct manipulation of hugetlb page frames

  - The series 'mm: hugetlb: Skip initialization of gigantic tail struct pages if freed by HVO' has improved our handling of gigantic pages in the hugetlb vmemmap optimization code. This provides significant boot time improvements when significant amounts of gigantic pages are in use

  - Matthew Wilcox has sent the series 'Small hugetlb cleanups' - code rationalization and folio conversions in the hugetlb code

  - Yin Fengwei has improved mlock()'s handling of large folios in the series 'support large folio for mlock'

  - In the series 'Expose swapcache stat for memcg v1' Liu Shixin has added statistics for memcg v1 users which are available (and useful) under memcg v2

  - Florent Revest has enhanced the MDWE (Memory-Deny-Write-Executable) prctl so that userspace may direct the kernel to not automatically propagate the denial to child processes. The series is named 'MDWE without inheritance'

  - Kefeng Wang has provided the series 'mm: convert numa balancing functions to use a folio' which does what it says

  - In the series 'mm/ksm: add fork-exec support for prctl' Stefan Roesch makes it possible for a process to propagate KSM treatment across exec()

  - Huang Ying has enhanced memory tiering's calculation of memory distances. This is used to permit the dax/kmem driver to use 'high bandwidth memory' in addition to Optane Data Center Persistent Memory Modules (DCPMM). The series is named 'memory tiering: calculate abstract distance based on ACPI HMAT'

  - In the series 'Smart scanning mode for KSM' Stefan Roesch has optimized KSM by teaching it to retain and use some historical information from previous scans

  - Yosry Ahmed has fixed some inconsistencies in memcg statistics in the series 'mm: memcg: fix tracking of pending stats updates values'

  - In the series 'Implement IOCTL to get and optionally clear info about PTEs' Peter Xu has added an ioctl to /proc/<pid>/pagemap which permits us to atomically read-then-clear page softdirty state. This is mainly used by CRIU

  - Hugh Dickins contributed the series 'shmem,tmpfs: general maintenance', a bunch of relatively minor maintenance tweaks to this code

  - Matthew Wilcox has increased the use of the VMA lock over file-backed page faults in the series 'Handle more faults under the VMA lock'. Some rationalizations of the fault path became possible as a result

  - In the series 'mm/rmap: convert page_move_anon_rmap() to folio_move_anon_rmap()' David Hildenbrand has implemented some cleanups and folio conversions

  - In the series 'various improvements to the GUP interface' Lorenzo Stoakes has simplified and improved the GUP interface with an eye to providing groundwork for future improvements

  - Andrey Konovalov has sent along the series 'kasan: assorted fixes and improvements' which does those things

  - Some page allocator maintenance work from Kemeng Shi in the series 'Two minor cleanups to break_down_buddy_pages'

  - In the series 'New selftest for mm' Breno Leitao has developed another MM self test which tickles a race we had between madvise() and page faults

  - In the series 'Add folio_end_read' Matthew Wilcox provides cleanups and an optimization to the core pagecache code

  - Nhat Pham has added memcg accounting for hugetlb memory in the series 'hugetlb memcg accounting'

  - Cleanups and rationalizations to the pagemap code from Lorenzo Stoakes, in the series 'Abstract vma_merge() and split_vma()'

  - Audra Mitchell has fixed issues in the procfs page_owner code's new timestamping feature which was causing some misbehaviours, in the series 'Fix page_owner's use of free timestamps'

  - Lorenzo Stoakes has fixed the handling of new mappings of sealed files in the series 'permit write-sealed memfd read-only shared mappings'

  - Mike Kravetz has optimized the hugetlb vmemmap optimization in the series 'Batch hugetlb vmemmap modification operations'

  - Some buffer_head folio conversions and cleanups from Matthew Wilcox in the series 'Finish the create_empty_buffers() transition'

  - As a page allocator performance optimization Huang Ying has added automatic tuning to the allocator's per-cpu-pages feature, in the series 'mm: PCP high auto-tuning'

  - Roman Gushchin has contributed the patchset 'mm: improve performance of accounted kernel memory allocations' which improves their performance by ~30% as measured by a micro-benchmark

  - folio conversions from Kefeng Wang in the series 'mm: convert page cpupid functions to folios'

  - Some kmemleak fixups in Liu Shixin's series 'Some bugfix about kmemleak'

  - Qi Zheng has improved our handling of memoryless nodes by keeping them off the allocation fallback list. This is done in the series 'handle memoryless nodes more appropriately'

  - khugepaged conversions from Vishal Moola in the series 'Some khugepaged folio conversions'"

[ bcachefs conflicts with the dynamically allocated shrinkers have been resolved as per Stephen Rothwell in https://lore.kernel.org/all/20230913093553.4290421e@canb.auug.org.au/ with help from Qi Zheng. The clone3 test filtering conflict was half-arsed by yours truly ]

* tag 'mm-stable-2023-11-01-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (406 commits)
  mm/damon/sysfs: update monitoring target regions for online input commit
  mm/damon/sysfs: remove requested targets when online-commit inputs
  selftests: add a sanity check for zswap
  Documentation: maple_tree: fix word spelling error
  mm/vmalloc: fix the unchecked dereference warning in vread_iter()
  zswap: export compression failure stats
  Documentation: ubsan: drop "the" from article title
  mempolicy: migration attempt to match interleave nodes
  mempolicy: mmap_lock is not needed while migrating folios
  mempolicy: alloc_pages_mpol() for NUMA policy without vma
  mm: add page_rmappable_folio() wrapper
  mempolicy: remove confusing MPOL_MF_LAZY dead code
  mempolicy: mpol_shared_policy_init() without pseudo-vma
  mempolicy trivia: use pgoff_t in shared mempolicy tree
  mempolicy trivia: slightly more consistent naming
  mempolicy trivia: delete those ancient pr_debug()s
  mempolicy: fix migrate_pages(2) syscall return nr_failed
  kernfs: drop shared NUMA mempolicy hooks
  hugetlbfs: drop shared NUMA mempolicy pretence
  mm/damon/sysfs-test: add a unit test for damon_sysfs_set_targets()
  ...
Diffstat (limited to 'mm/damon')
-rw-r--r--  mm/damon/Kconfig            12
-rw-r--r--  mm/damon/core-test.h        29
-rw-r--r--  mm/damon/core.c            295
-rw-r--r--  mm/damon/dbgfs.c             3
-rw-r--r--  mm/damon/lru_sort.c          6
-rw-r--r--  mm/damon/ops-common.c        5
-rw-r--r--  mm/damon/paddr.c            11
-rw-r--r--  mm/damon/reclaim.c           2
-rw-r--r--  mm/damon/sysfs-common.h      2
-rw-r--r--  mm/damon/sysfs-schemes.c   133
-rw-r--r--  mm/damon/sysfs-test.h       86
-rw-r--r--  mm/damon/sysfs.c           123
-rw-r--r--  mm/damon/vaddr.c            22
13 files changed, 592 insertions, 137 deletions
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 436c6b4cb5ec..29f43fbc2eff 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -59,6 +59,18 @@ config DAMON_SYSFS
This builds the sysfs interface for DAMON. The user space can use
the interface for arbitrary data access monitoring.
+config DAMON_SYSFS_KUNIT_TEST
+ bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
+ depends on DAMON_SYSFS && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the DAMON sysfs interface Kunit test suite.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation.
+
+ If unsure, say N.
+
config DAMON_DBGFS
bool "DAMON debugfs interface (DEPRECATED!)"
depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 6cc8b245586d..649adf91ebc5 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -30,7 +30,7 @@ static void damon_test_regions(struct kunit *test)
damon_add_region(r, t);
KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));
- damon_del_region(r, t);
+ damon_destroy_region(r, t);
KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));
damon_free_target(t);
@@ -94,6 +94,7 @@ static void damon_test_aggregate(struct kunit *test)
for (ir = 0; ir < 3; ir++) {
r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
r->nr_accesses = accesses[it][ir];
+ r->nr_accesses_bp = accesses[it][ir] * 10000;
damon_add_region(r, t);
}
it++;
@@ -147,9 +148,11 @@ static void damon_test_merge_two(struct kunit *test)
t = damon_new_target();
r = damon_new_region(0, 100);
r->nr_accesses = 10;
+ r->nr_accesses_bp = 100000;
damon_add_region(r, t);
r2 = damon_new_region(100, 300);
r2->nr_accesses = 20;
+ r2->nr_accesses_bp = 200000;
damon_add_region(r2, t);
damon_merge_two_regions(t, r, r2);
@@ -196,6 +199,7 @@ static void damon_test_merge_regions_of(struct kunit *test)
for (i = 0; i < ARRAY_SIZE(sa); i++) {
r = damon_new_region(sa[i], ea[i]);
r->nr_accesses = nrs[i];
+ r->nr_accesses_bp = nrs[i] * 10000;
damon_add_region(r, t);
}
@@ -265,6 +269,8 @@ static void damon_test_ops_registration(struct kunit *test)
/* Check double-registration failure again */
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
+
+ damon_destroy_ctx(c);
}
static void damon_test_set_regions(struct kunit *test)
@@ -297,6 +303,7 @@ static void damon_test_update_monitoring_result(struct kunit *test)
struct damon_region *r = damon_new_region(3, 7);
r->nr_accesses = 15;
+ r->nr_accesses_bp = 150000;
r->age = 20;
new_attrs = (struct damon_attrs){
@@ -316,6 +323,8 @@ static void damon_test_update_monitoring_result(struct kunit *test)
damon_update_monitoring_result(r, &old_attrs, &new_attrs);
KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
KUNIT_EXPECT_EQ(test, r->age, 20);
+
+ damon_free_region(r);
}
static void damon_test_set_attrs(struct kunit *test)
@@ -339,6 +348,23 @@ static void damon_test_set_attrs(struct kunit *test)
invalid_attrs = valid_attrs;
invalid_attrs.aggr_interval = 4999;
KUNIT_EXPECT_EQ(test, damon_set_attrs(c, &invalid_attrs), -EINVAL);
+
+ damon_destroy_ctx(c);
+}
+
+static void damon_test_moving_sum(struct kunit *test)
+{
+ unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
+ unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
+ unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
+ 45000, 40000, 35000, 30000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(new_values); i++) {
+ mvsum = damon_moving_sum(mvsum, nomvsum, len_window,
+ new_values[i]);
+ KUNIT_EXPECT_EQ(test, mvsum, expects[i]);
+ }
}
static void damos_test_new_filter(struct kunit *test)
@@ -425,6 +451,7 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_set_regions),
KUNIT_CASE(damon_test_update_monitoring_result),
KUNIT_CASE(damon_test_set_attrs),
+ KUNIT_CASE(damon_test_moving_sum),
KUNIT_CASE(damos_test_new_filter),
KUNIT_CASE(damos_test_filter_out),
{},
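
As a quick cross-check of the expected values in damon_test_moving_sum() above: the pseudo moving sum introduced by this merge in core.c (next file) is simply mvsum - nomvsum / len_window + new_value, so the first step is 50000 - 50000/10 + 10000 = 55000. The following standalone user-space sketch (a hypothetical moving_sum() helper mirroring that formula, not the kernel code) reproduces the whole expectation table:

/*
 * Illustration only: mirrors the damon_moving_sum() formula and the
 * values used by damon_test_moving_sum() above.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int moving_sum(unsigned int mvsum, unsigned int nomvsum,
			       unsigned int len_window, unsigned int new_value)
{
	/* drop the assumed per-slot past value, add the new one */
	return mvsum - nomvsum / len_window + new_value;
}

int main(void)
{
	unsigned int mvsum = 50000, nomvsum = 50000, len_window = 10;
	unsigned int new_values[] = {10000, 0, 10000, 0, 0, 0, 10000, 0, 0, 0};
	unsigned int expects[] = {55000, 50000, 55000, 50000, 45000, 40000,
				  45000, 40000, 35000, 30000};

	for (int i = 0; i < 10; i++) {
		mvsum = moving_sum(mvsum, nomvsum, len_window, new_values[i]);
		assert(mvsum == expects[i]);
		printf("step %d: mvsum = %u\n", i, mvsum);
	}
	return 0;
}
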
diff --git a/mm/damon/core.c b/mm/damon/core.c
index bcd2bd9d6c10..aa2dc7087cd9 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -128,6 +128,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
region->ar.start = start;
region->ar.end = end;
region->nr_accesses = 0;
+ region->nr_accesses_bp = 0;
INIT_LIST_HEAD(&region->list);
region->age = 0;
@@ -312,7 +313,9 @@ static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
}
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
- enum damos_action action, struct damos_quota *quota,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
struct damos_watermarks *wmarks)
{
struct damos *scheme;
@@ -322,6 +325,13 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
return NULL;
scheme->pattern = *pattern;
scheme->action = action;
+ scheme->apply_interval_us = apply_interval_us;
+ /*
+ * next_apply_sis will be set when kdamond starts. While kdamond is
+ * running, it will also be updated when it is added to the DAMON context,
+ * or damon_attrs are updated.
+ */
+ scheme->next_apply_sis = 0;
INIT_LIST_HEAD(&scheme->filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -334,9 +344,21 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
return scheme;
}
+static void damos_set_next_apply_sis(struct damos *s, struct damon_ctx *ctx)
+{
+ unsigned long sample_interval = ctx->attrs.sample_interval ?
+ ctx->attrs.sample_interval : 1;
+ unsigned long apply_interval = s->apply_interval_us ?
+ s->apply_interval_us : ctx->attrs.aggr_interval;
+
+ s->next_apply_sis = ctx->passed_sample_intervals +
+ apply_interval / sample_interval;
+}
+
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s)
{
list_add_tail(&s->list, &ctx->schemes);
+ damos_set_next_apply_sis(s, ctx);
}
static void damon_del_scheme(struct damos *s)
@@ -427,8 +449,10 @@ struct damon_ctx *damon_new_ctx(void)
ctx->attrs.aggr_interval = 100 * 1000;
ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
- ktime_get_coarse_ts64(&ctx->last_aggregation);
- ctx->last_ops_update = ctx->last_aggregation;
+ ctx->passed_sample_intervals = 0;
+ /* These will be set from kdamond_init_intervals_sis() */
+ ctx->next_aggregation_sis = 0;
+ ctx->next_ops_update_sis = 0;
mutex_init(&ctx->kdamond_lock);
@@ -476,20 +500,14 @@ static unsigned int damon_age_for_new_attrs(unsigned int age,
static unsigned int damon_accesses_bp_to_nr_accesses(
unsigned int accesses_bp, struct damon_attrs *attrs)
{
- unsigned int max_nr_accesses =
- attrs->aggr_interval / attrs->sample_interval;
-
- return accesses_bp * max_nr_accesses / 10000;
+ return accesses_bp * damon_max_nr_accesses(attrs) / 10000;
}
/* convert nr_accesses to access ratio in bp (per 10,000) */
static unsigned int damon_nr_accesses_to_accesses_bp(
unsigned int nr_accesses, struct damon_attrs *attrs)
{
- unsigned int max_nr_accesses =
- attrs->aggr_interval / attrs->sample_interval;
-
- return nr_accesses * 10000 / max_nr_accesses;
+ return nr_accesses * 10000 / damon_max_nr_accesses(attrs);
}
static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
@@ -506,6 +524,7 @@ static void damon_update_monitoring_result(struct damon_region *r,
{
r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
old_attrs, new_attrs);
+ r->nr_accesses_bp = r->nr_accesses * 10000;
r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
}
@@ -541,13 +560,21 @@ static void damon_update_monitoring_results(struct damon_ctx *ctx,
* @ctx: monitoring context
* @attrs: monitoring attributes
*
- * This function should not be called while the kdamond is running.
+ * This function should be called while the kdamond is not running, or an
+ * access check results aggregation is not ongoing (e.g., from
+ * &struct damon_callback->after_aggregation or
+ * &struct damon_callback->after_wmarks_check callbacks).
+ *
* Every time interval is in micro-seconds.
*
* Return: 0 on success, negative error code otherwise.
*/
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
+ unsigned long sample_interval = attrs->sample_interval ?
+ attrs->sample_interval : 1;
+ struct damos *s;
+
if (attrs->min_nr_regions < 3)
return -EINVAL;
if (attrs->min_nr_regions > attrs->max_nr_regions)
@@ -555,8 +582,17 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
if (attrs->sample_interval > attrs->aggr_interval)
return -EINVAL;
+ ctx->next_aggregation_sis = ctx->passed_sample_intervals +
+ attrs->aggr_interval / sample_interval;
+ ctx->next_ops_update_sis = ctx->passed_sample_intervals +
+ attrs->ops_update_interval / sample_interval;
+
damon_update_monitoring_results(ctx, attrs);
ctx->attrs = *attrs;
+
+ damon_for_each_scheme(s, ctx)
+ damos_set_next_apply_sis(s, ctx);
+
return 0;
}
@@ -729,38 +765,6 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs)
}
/*
- * damon_check_reset_time_interval() - Check if a time interval is elapsed.
- * @baseline: the time to check whether the interval has elapsed since
- * @interval: the time interval (microseconds)
- *
- * See whether the given time interval has passed since the given baseline
- * time. If so, it also updates the baseline to current time for next check.
- *
- * Return: true if the time interval has passed, or false otherwise.
- */
-static bool damon_check_reset_time_interval(struct timespec64 *baseline,
- unsigned long interval)
-{
- struct timespec64 now;
-
- ktime_get_coarse_ts64(&now);
- if ((timespec64_to_ns(&now) - timespec64_to_ns(baseline)) <
- interval * 1000)
- return false;
- *baseline = now;
- return true;
-}
-
-/*
- * Check whether it is time to flush the aggregated information
- */
-static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
-{
- return damon_check_reset_time_interval(&ctx->last_aggregation,
- ctx->attrs.aggr_interval);
-}
-
-/*
* Reset the aggregated monitoring results ('nr_accesses' of each region).
*/
static void kdamond_reset_aggregated(struct damon_ctx *c)
@@ -772,7 +776,7 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
struct damon_region *r;
damon_for_each_region(r, t) {
- trace_damon_aggregated(t, ti, r, damon_nr_regions(t));
+ trace_damon_aggregated(ti, r, damon_nr_regions(t));
r->last_nr_accesses = r->nr_accesses;
r->nr_accesses = 0;
}
@@ -786,12 +790,13 @@ static void damon_split_region_at(struct damon_target *t,
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
unsigned long sz;
+ unsigned int nr_accesses = r->nr_accesses_bp / 10000;
sz = damon_sz_region(r);
return s->pattern.min_sz_region <= sz &&
sz <= s->pattern.max_sz_region &&
- s->pattern.min_nr_accesses <= r->nr_accesses &&
- r->nr_accesses <= s->pattern.max_nr_accesses &&
+ s->pattern.min_nr_accesses <= nr_accesses &&
+ nr_accesses <= s->pattern.max_nr_accesses &&
s->pattern.min_age_region <= r->age &&
r->age <= s->pattern.max_age_region;
}
@@ -946,6 +951,33 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
struct timespec64 begin, end;
unsigned long sz_applied = 0;
int err = 0;
+ /*
+ * We plan to support multiple contexts per kdamond, as the DAMON sysfs
+ * interface implies with its 'nr_contexts' file. Nevertheless, only a
+ * single context per kdamond is supported for now. So, we can simply use
+ * context index '0' here.
+ */
+ unsigned int cidx = 0;
+ struct damos *siter; /* schemes iterator */
+ unsigned int sidx = 0;
+ struct damon_target *titer; /* targets iterator */
+ unsigned int tidx = 0;
+ bool do_trace = false;
+
+ /* get indices for trace_damos_before_apply() */
+ if (trace_damos_before_apply_enabled()) {
+ damon_for_each_scheme(siter, c) {
+ if (siter == s)
+ break;
+ sidx++;
+ }
+ damon_for_each_target(titer, c) {
+ if (titer == t)
+ break;
+ tidx++;
+ }
+ do_trace = true;
+ }
if (c->ops.apply_scheme) {
if (quota->esz && quota->charged_sz + sz > quota->esz) {
@@ -960,8 +992,11 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
ktime_get_coarse_ts64(&begin);
if (c->callback.before_damos_apply)
err = c->callback.before_damos_apply(c, t, r, s);
- if (!err)
+ if (!err) {
+ trace_damos_before_apply(cidx, sidx, tidx, r,
+ damon_nr_regions(t), do_trace);
sz_applied = c->ops.apply_scheme(c, t, r, s);
+ }
ktime_get_coarse_ts64(&end);
quota->total_charged_ns += timespec64_to_ns(&end) -
timespec64_to_ns(&begin);
@@ -1079,14 +1114,29 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
struct damon_target *t;
struct damon_region *r, *next_r;
struct damos *s;
+ unsigned long sample_interval = c->attrs.sample_interval ?
+ c->attrs.sample_interval : 1;
+ bool has_schemes_to_apply = false;
damon_for_each_scheme(s, c) {
+ if (c->passed_sample_intervals != s->next_apply_sis)
+ continue;
+
+ s->next_apply_sis +=
+ (s->apply_interval_us ? s->apply_interval_us :
+ c->attrs.aggr_interval) / sample_interval;
+
if (!s->wmarks.activated)
continue;
+ has_schemes_to_apply = true;
+
damos_adjust_quota(c, s);
}
+ if (!has_schemes_to_apply)
+ return;
+
damon_for_each_target(t, c) {
damon_for_each_region_safe(r, next_r, t)
damon_do_apply_schemes(c, t, r);
@@ -1103,6 +1153,7 @@ static void damon_merge_two_regions(struct damon_target *t,
l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) /
(sz_l + sz_r);
+ l->nr_accesses_bp = l->nr_accesses * 10000;
l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r);
l->ar.end = r->ar.end;
damon_destroy_region(r, t);
@@ -1174,6 +1225,7 @@ static void damon_split_region_at(struct damon_target *t,
new->age = r->age;
new->last_nr_accesses = r->last_nr_accesses;
+ new->nr_accesses_bp = r->nr_accesses_bp;
damon_insert_region(new, r, damon_next_region(r), t);
}
@@ -1241,18 +1293,6 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
}
/*
- * Check whether it is time to check and apply the operations-related data
- * structures.
- *
- * Returns true if it is.
- */
-static bool kdamond_need_update_operations(struct damon_ctx *ctx)
-{
- return damon_check_reset_time_interval(&ctx->last_ops_update,
- ctx->attrs.ops_update_interval);
-}
-
-/*
* Check whether current monitoring should be stopped
*
* The monitoring is stopped when either the user requested to stop, or all
@@ -1280,12 +1320,10 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
{
- struct sysinfo i;
-
switch (metric) {
case DAMOS_WMARK_FREE_MEM_RATE:
- si_meminfo(&i);
- return i.freeram * 1000 / i.totalram;
+ return global_zone_page_state(NR_FREE_PAGES) * 1000 /
+ totalram_pages();
default:
break;
}
@@ -1363,6 +1401,25 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
return -EBUSY;
}
+static void kdamond_init_intervals_sis(struct damon_ctx *ctx)
+{
+ unsigned long sample_interval = ctx->attrs.sample_interval ?
+ ctx->attrs.sample_interval : 1;
+ unsigned long apply_interval;
+ struct damos *scheme;
+
+ ctx->passed_sample_intervals = 0;
+ ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval;
+ ctx->next_ops_update_sis = ctx->attrs.ops_update_interval /
+ sample_interval;
+
+ damon_for_each_scheme(scheme, ctx) {
+ apply_interval = scheme->apply_interval_us ?
+ scheme->apply_interval_us : ctx->attrs.aggr_interval;
+ scheme->next_apply_sis = apply_interval / sample_interval;
+ }
+}
+
/*
* The monitoring daemon that runs as a kernel thread
*/
@@ -1376,6 +1433,8 @@ static int kdamond_fn(void *data)
pr_debug("kdamond (%d) starts\n", current->pid);
+ kdamond_init_intervals_sis(ctx);
+
if (ctx->ops.init)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
@@ -1384,6 +1443,17 @@ static int kdamond_fn(void *data)
sz_limit = damon_region_sz_limit(ctx);
while (!kdamond_need_stop(ctx)) {
+ /*
+ * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could
+ * be changed from after_wmarks_check() or after_aggregation()
+ * callbacks. Read the values here, and use those for this
+ * iteration. That is, new values set via damon_set_attrs() are
+ * respected from the next iteration.
+ */
+ unsigned long next_aggregation_sis = ctx->next_aggregation_sis;
+ unsigned long next_ops_update_sis = ctx->next_ops_update_sis;
+ unsigned long sample_interval = ctx->attrs.sample_interval;
+
if (kdamond_wait_activation(ctx))
break;
@@ -1393,27 +1463,44 @@ static int kdamond_fn(void *data)
ctx->callback.after_sampling(ctx))
break;
- kdamond_usleep(ctx->attrs.sample_interval);
+ kdamond_usleep(sample_interval);
+ ctx->passed_sample_intervals++;
if (ctx->ops.check_accesses)
max_nr_accesses = ctx->ops.check_accesses(ctx);
- if (kdamond_aggregate_interval_passed(ctx)) {
+ if (ctx->passed_sample_intervals == next_aggregation_sis) {
kdamond_merge_regions(ctx,
max_nr_accesses / 10,
sz_limit);
if (ctx->callback.after_aggregation &&
ctx->callback.after_aggregation(ctx))
break;
- if (!list_empty(&ctx->schemes))
- kdamond_apply_schemes(ctx);
+ }
+
+ /*
+ * do kdamond_apply_schemes() after kdamond_merge_regions() if
+ * possible, to reduce overhead
+ */
+ if (!list_empty(&ctx->schemes))
+ kdamond_apply_schemes(ctx);
+
+ sample_interval = ctx->attrs.sample_interval ?
+ ctx->attrs.sample_interval : 1;
+ if (ctx->passed_sample_intervals == next_aggregation_sis) {
+ ctx->next_aggregation_sis = next_aggregation_sis +
+ ctx->attrs.aggr_interval / sample_interval;
+
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
if (ctx->ops.reset_aggregated)
ctx->ops.reset_aggregated(ctx);
}
- if (kdamond_need_update_operations(ctx)) {
+ if (ctx->passed_sample_intervals == next_ops_update_sis) {
+ ctx->next_ops_update_sis = next_ops_update_sis +
+ ctx->attrs.ops_update_interval /
+ sample_interval;
if (ctx->ops.update)
ctx->ops.update(ctx);
sz_limit = damon_region_sz_limit(ctx);
@@ -1517,6 +1604,76 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
return damon_set_regions(t, &addr_range, 1);
}
+/*
+ * damon_moving_sum() - Calculate an inferred moving sum value.
+ * @mvsum: Inferred sum of the last @len_window values.
+ * @nomvsum: Non-moving sum of the last discrete @len_window window values.
+ * @len_window: The number of last values to take care of.
+ * @new_value: New value that will be added to the pseudo moving sum.
+ *
+ * Moving sum (moving average * window size) is good for handling noise, but
+ * the cost of keeping past values can be high for arbitrary window size. This
+ * function implements a lightweight pseudo moving sum function that doesn't
+ * keep the past window values.
+ *
+ * It simply assumes there was no noise in the past, and derives the assumed
+ * no-noise past value to drop from @nomvsum and @len_window. @nomvsum is a
+ * non-moving sum of the last window. For example, if @len_window is 10 and we
+ * have 25 values, @nomvsum is the sum of the 11th to 20th values of the 25
+ * values. Hence, this function simply drops @nomvsum / @len_window from the
+ * given @mvsum and adds @new_value.
+ *
+ * For example, if @len_window is 10 and @nomvsum is 50, the last 10 values of
+ * the last window could vary, e.g., 0, 10, 0, 10, 0, 10, 0, 0, 0, 20. For
+ * calculating the next moving sum with a new value, we should drop 0 from 50
+ * and add the new value. However, this function assumes it got the value 5
+ * for each of the last ten times. Based on that assumption, when the next
+ * value is measured, it drops the assumed past value, 5, from the current sum
+ * and adds the new value to get the updated pseudo-moving sum.
+ *
+ * This means the value could have errors, but the error disappears at every
+ * @len_window-aligned call. For example, if @len_window is 10, the pseudo
+ * moving sum with the 11th to 19th values would have an error, but the sum
+ * with the 20th value will not have the error.
+ *
+ * Return: Pseudo-moving sum after getting @new_value.
+ */
+static unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
+ unsigned int len_window, unsigned int new_value)
+{
+ return mvsum - nomvsum / len_window + new_value;
+}
+
+/**
+ * damon_update_region_access_rate() - Update the access rate of a region.
+ * @r: The DAMON region to update for its access check result.
+ * @accessed: Whether the region was accessed during the last sampling interval.
+ * @attrs: The damon_attrs of the DAMON context.
+ *
+ * Update the access rate of a region with the region's last sampling interval
+ * access check result.
+ *
+ * Usually this will be called by &damon_operations->check_accesses callback.
+ */
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs)
+{
+ unsigned int len_window = 1;
+
+ /*
+ * sample_interval can be zero, but cannot be larger than
+ * aggr_interval, owing to validation of damon_set_attrs().
+ */
+ if (attrs->sample_interval)
+ len_window = damon_max_nr_accesses(attrs);
+ r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
+ r->last_nr_accesses * 10000, len_window,
+ accessed ? 10000 : 0);
+
+ if (accessed)
+ r->nr_accesses++;
+}
+
static int __init damon_init(void)
{
damon_region_cache = KMEM_CACHE(damon_region, 0);
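
For a concrete sense of the sample-interval-count ('sis') bookkeeping that replaces the timespec-based interval checks in this file: every interval is converted into a number of sampling intervals, and an event fires once ctx->passed_sample_intervals reaches the precomputed target. A minimal user-space sketch of that arithmetic follows; the names are hypothetical, a 5 ms sampling interval is assumed purely for illustration, and only the math mirrors damos_set_next_apply_sis() and kdamond_init_intervals_sis().

/*
 * Illustration only: the sample-interval-count arithmetic, not the
 * kernel code itself.
 */
#include <stdio.h>

struct attrs {
	unsigned long sample_interval_us;
	unsigned long aggr_interval_us;
};

static unsigned long next_sis(unsigned long passed_sample_intervals,
			      unsigned long interval_us, const struct attrs *a)
{
	unsigned long sample = a->sample_interval_us ? a->sample_interval_us : 1;

	/* an apply interval of 0 falls back to the aggregation interval */
	if (!interval_us)
		interval_us = a->aggr_interval_us;
	return passed_sample_intervals + interval_us / sample;
}

int main(void)
{
	struct attrs a = {
		.sample_interval_us = 5000,	/* assumed: 5 ms */
		.aggr_interval_us = 100 * 1000,	/* 100 ms, as in damon_new_ctx() */
	};

	/* aggregation fires every 20 sampling intervals */
	printf("next aggregation: sis %lu\n", next_sis(0, a.aggr_interval_us, &a));
	/* a scheme with apply_interval_us == 0 follows the aggregation interval */
	printf("next apply (default): sis %lu\n", next_sis(0, 0, &a));
	/* a 2 s apply interval fires every 400 sampling intervals */
	printf("next apply (2s): sis %lu\n", next_sis(0, 2 * 1000 * 1000, &a));
	return 0;
}
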
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 124f0f8c97b7..dc0ea1fc30ca 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -278,7 +278,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
goto fail;
pos += parsed;
- scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
+ scheme = damon_new_scheme(&pattern, action, 0, &quota,
+ &wmarks);
if (!scheme)
goto fail;
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 7b8fce2f67a8..f2e5f9431892 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -158,6 +158,8 @@ static struct damos *damon_lru_sort_new_scheme(
pattern,
/* (de)prioritize on LRU-lists */
action,
+ /* for each aggregation interval */
+ 0,
/* under the quota. */
&quota,
/* (De)activate this according to the watermarks. */
@@ -193,9 +195,7 @@ static int damon_lru_sort_apply_parameters(void)
if (err)
return err;
- /* aggr_interval / sample_interval is the maximum nr_accesses */
- hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
- damon_lru_sort_mon_attrs.sample_interval *
+ hot_thres = damon_max_nr_accesses(&damon_lru_sort_mon_attrs) *
hot_thres_access_freq / 1000;
scheme = damon_lru_sort_new_hot_scheme(hot_thres);
if (!scheme)
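
For reference, damon_max_nr_accesses() used here is simply aggr_interval / sample_interval, i.e. the number of access samples per aggregation interval. As a worked example with assumed values (5 ms sampling, 100 ms aggregation, hot_thres_access_freq = 500 per-thousand): hot_thres = (100000 / 5000) * 500 / 1000 = 20 * 500 / 1000 = 10 accesses per aggregation interval.
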
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index ac1c3fa80f98..d25d99cb5f2b 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -73,7 +73,6 @@ void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s)
{
- unsigned int max_nr_accesses;
int freq_subscore;
unsigned int age_in_sec;
int age_in_log, age_subscore;
@@ -81,8 +80,8 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
unsigned int age_weight = s->quota.weight_age;
int hotness;
- max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
+ freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE /
+ damon_max_nr_accesses(&c->attrs);
age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 909db25efb35..081e2a325778 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -148,7 +148,8 @@ out:
return accessed;
}
-static void __damon_pa_check_access(struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r,
+ struct damon_attrs *attrs)
{
static unsigned long last_addr;
static unsigned long last_folio_sz = PAGE_SIZE;
@@ -157,14 +158,12 @@ static void __damon_pa_check_access(struct damon_region *r)
/* If the region is in the last checked page, reuse the result */
if (ALIGN_DOWN(last_addr, last_folio_sz) ==
ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
- if (last_accessed)
- r->nr_accesses++;
+ damon_update_region_access_rate(r, last_accessed, attrs);
return;
}
last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
- if (last_accessed)
- r->nr_accesses++;
+ damon_update_region_access_rate(r, last_accessed, attrs);
last_addr = r->sampling_addr;
}
@@ -177,7 +176,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
- __damon_pa_check_access(r);
+ __damon_pa_check_access(r, &ctx->attrs);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 648d2a85523a..ab974e477d2f 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -142,6 +142,8 @@ static struct damos *damon_reclaim_new_scheme(void)
&pattern,
/* page out those, as soon as found */
DAMOS_PAGEOUT,
+ /* for each aggregation interval */
+ 0,
/* under the quota. */
&damon_reclaim_quota,
/* (De)activate this according to the watermarks. */
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index fd482a0639b4..5ff081226e28 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -49,6 +49,8 @@ int damon_sysfs_schemes_update_regions_start(
struct damon_sysfs_schemes *sysfs_schemes,
struct damon_ctx *ctx, bool total_bytes_only);
+bool damos_sysfs_regions_upd_done(void);
+
int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
int damon_sysfs_schemes_clear_regions(
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 527e7d17eb3b..45bd0fd4a8b1 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -31,7 +31,7 @@ static struct damon_sysfs_scheme_region *damon_sysfs_scheme_region_alloc(
return NULL;
sysfs_region->kobj = (struct kobject){};
sysfs_region->ar = region->ar;
- sysfs_region->nr_accesses = region->nr_accesses;
+ sysfs_region->nr_accesses = region->nr_accesses_bp / 10000;
sysfs_region->age = region->age;
INIT_LIST_HEAD(&sysfs_region->list);
return sysfs_region;
@@ -113,11 +113,47 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = {
* scheme regions directory
*/
+/*
+ * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update
+ * status
+ * @DAMOS_TRIED_REGIONS_UPD_IDLE: Waiting for next request.
+ * @DAMOS_TRIED_REGIONS_UPD_STARTED: Update started.
+ * @DAMOS_TRIED_REGIONS_UPD_FINISHED: Update finished.
+ *
+ * Each DAMON-based operation scheme (&struct damos) has its own apply
+ * interval, and we need to expose the scheme's tried regions based on only a
+ * single snapshot. For this, we keep the tried regions update status for each
+ * scheme. The status becomes 'idle' at the beginning.
+ *
+ * Once a tried regions update request is received, the request handling
+ * start function (damon_sysfs_schemes_update_regions_start()) sets the status
+ * of all schemes to 'idle' again, and registers the ->before_damos_apply()
+ * and ->after_sampling() callbacks.
+ *
+ * Then, the first followup ->before_damos_apply() callback
+ * (damon_sysfs_before_damos_apply()) sets the status to 'started'. The first
+ * ->after_sampling() callback (damon_sysfs_after_sampling()) after that is
+ * invoked only after the scheme has been completely applied to the given
+ * snapshot. Hence that callback recognizes the situation from the 'started'
+ * status and sets the status to 'finished'. Then,
+ * damon_sysfs_before_damos_apply() recognizes the situation from the
+ * 'finished' status and does nothing.
+ *
+ * Finally, the tried regions request handling finisher function
+ * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks.
+ */
+enum damos_sysfs_regions_upd_status {
+ DAMOS_TRIED_REGIONS_UPD_IDLE,
+ DAMOS_TRIED_REGIONS_UPD_STARTED,
+ DAMOS_TRIED_REGIONS_UPD_FINISHED,
+};
+
struct damon_sysfs_scheme_regions {
struct kobject kobj;
struct list_head regions_list;
int nr_regions;
unsigned long total_bytes;
+ enum damos_sysfs_regions_upd_status upd_status;
};
static struct damon_sysfs_scheme_regions *
@@ -130,6 +166,7 @@ damon_sysfs_scheme_regions_alloc(void)
INIT_LIST_HEAD(&regions->regions_list);
regions->nr_regions = 0;
regions->total_bytes = 0;
+ regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE;
return regions;
}
@@ -1121,6 +1158,7 @@ struct damon_sysfs_scheme {
struct kobject kobj;
enum damos_action action;
struct damon_sysfs_access_pattern *access_pattern;
+ unsigned long apply_interval_us;
struct damon_sysfs_quotas *quotas;
struct damon_sysfs_watermarks *watermarks;
struct damon_sysfs_scheme_filters *filters;
@@ -1141,7 +1179,7 @@ static const char * const damon_sysfs_damos_action_strs[] = {
};
static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
- enum damos_action action)
+ enum damos_action action, unsigned long apply_interval_us)
{
struct damon_sysfs_scheme *scheme = kmalloc(sizeof(*scheme),
GFP_KERNEL);
@@ -1150,6 +1188,7 @@ static struct damon_sysfs_scheme *damon_sysfs_scheme_alloc(
return NULL;
scheme->kobj = (struct kobject){};
scheme->action = action;
+ scheme->apply_interval_us = apply_interval_us;
return scheme;
}
@@ -1353,6 +1392,25 @@ static ssize_t action_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
+static ssize_t apply_interval_us_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme *scheme = container_of(kobj,
+ struct damon_sysfs_scheme, kobj);
+
+ return sysfs_emit(buf, "%lu\n", scheme->apply_interval_us);
+}
+
+static ssize_t apply_interval_us_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme *scheme = container_of(kobj,
+ struct damon_sysfs_scheme, kobj);
+ int err = kstrtoul(buf, 0, &scheme->apply_interval_us);
+
+ return err ? err : count;
+}
+
static void damon_sysfs_scheme_release(struct kobject *kobj)
{
kfree(container_of(kobj, struct damon_sysfs_scheme, kobj));
@@ -1361,8 +1419,12 @@ static void damon_sysfs_scheme_release(struct kobject *kobj)
static struct kobj_attribute damon_sysfs_scheme_action_attr =
__ATTR_RW_MODE(action, 0600);
+static struct kobj_attribute damon_sysfs_scheme_apply_interval_us_attr =
+ __ATTR_RW_MODE(apply_interval_us, 0600);
+
static struct attribute *damon_sysfs_scheme_attrs[] = {
&damon_sysfs_scheme_action_attr.attr,
+ &damon_sysfs_scheme_apply_interval_us_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);
@@ -1413,7 +1475,11 @@ static int damon_sysfs_schemes_add_dirs(struct damon_sysfs_schemes *schemes,
schemes->schemes_arr = schemes_arr;
for (i = 0; i < nr_schemes; i++) {
- scheme = damon_sysfs_scheme_alloc(DAMOS_STAT);
+ /*
+ * apply_interval_us of 0 means the same as the aggregation interval
+ * (i.e., the behavior before apply_interval_us was introduced)
+ */
+ scheme = damon_sysfs_scheme_alloc(DAMOS_STAT, 0);
if (!scheme) {
damon_sysfs_schemes_rm_dirs(schemes);
return -ENOMEM;
@@ -1610,8 +1676,8 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
- scheme = damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
- &wmarks);
+ scheme = damon_new_scheme(&pattern, sysfs_scheme->action,
+ sysfs_scheme->apply_interval_us, &quota, &wmarks);
if (!scheme)
return NULL;
@@ -1641,6 +1707,7 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
scheme->pattern.max_age_region = access_pattern->age->max;
scheme->action = sysfs_scheme->action;
+ scheme->apply_interval_us = sysfs_scheme->apply_interval_us;
scheme->quota.ms = sysfs_quotas->ms;
scheme->quota.sz = sysfs_quotas->sz;
@@ -1747,6 +1814,10 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
return 0;
sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions;
+ if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED)
+ return 0;
+ if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE)
+ sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED;
sysfs_regions->total_bytes += r->ar.end - r->ar.start;
if (damos_regions_upd_total_bytes_only)
return 0;
@@ -1763,6 +1834,29 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx,
return 0;
}
+/*
+ * DAMON callback that is called after each access sampling. While this
+ * callback is registered, damon_sysfs_lock should be held to ensure the
+ * regions directories exist.
+ */
+static int damon_sysfs_after_sampling(struct damon_ctx *ctx)
+{
+ struct damon_sysfs_schemes *sysfs_schemes =
+ damon_sysfs_schemes_for_damos_callback;
+ struct damon_sysfs_scheme_regions *sysfs_regions;
+ int i;
+
+ for (i = 0; i < sysfs_schemes->nr; i++) {
+ sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+ if (sysfs_regions->upd_status ==
+ DAMOS_TRIED_REGIONS_UPD_STARTED)
+ sysfs_regions->upd_status =
+ DAMOS_TRIED_REGIONS_UPD_FINISHED;
+ }
+
+ return 0;
+}
+
/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_clear_regions(
struct damon_sysfs_schemes *sysfs_schemes,
@@ -1786,6 +1880,16 @@ int damon_sysfs_schemes_clear_regions(
return 0;
}
+static void damos_tried_regions_init_upd_status(
+ struct damon_sysfs_schemes *sysfs_schemes)
+{
+ int i;
+
+ for (i = 0; i < sysfs_schemes->nr; i++)
+ sysfs_schemes->schemes_arr[i]->tried_regions->upd_status =
+ DAMOS_TRIED_REGIONS_UPD_IDLE;
+}
+
/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */
int damon_sysfs_schemes_update_regions_start(
struct damon_sysfs_schemes *sysfs_schemes,
@@ -1793,11 +1897,29 @@ int damon_sysfs_schemes_update_regions_start(
{
damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx);
damon_sysfs_schemes_for_damos_callback = sysfs_schemes;
+ damos_tried_regions_init_upd_status(sysfs_schemes);
damos_regions_upd_total_bytes_only = total_bytes_only;
ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply;
+ ctx->callback.after_sampling = damon_sysfs_after_sampling;
return 0;
}
+bool damos_sysfs_regions_upd_done(void)
+{
+ struct damon_sysfs_schemes *sysfs_schemes =
+ damon_sysfs_schemes_for_damos_callback;
+ struct damon_sysfs_scheme_regions *sysfs_regions;
+ int i;
+
+ for (i = 0; i < sysfs_schemes->nr; i++) {
+ sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions;
+ if (sysfs_regions->upd_status !=
+ DAMOS_TRIED_REGIONS_UPD_FINISHED)
+ return false;
+ }
+ return true;
+}
+
/*
* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock. Caller
* should unlock damon_sysfs_lock which held before
@@ -1807,6 +1929,7 @@ int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx)
{
damon_sysfs_schemes_for_damos_callback = NULL;
ctx->callback.before_damos_apply = NULL;
+ ctx->callback.after_sampling = NULL;
damon_sysfs_schemes_region_idx = 0;
return 0;
}
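
The update-status state machine documented in the damos_sysfs_regions_upd_status comment above boils down to three transitions: the update-start handler resets every scheme to 'idle', the first ->before_damos_apply() for a scheme moves it to 'started', and the first ->after_sampling() after that moves 'started' schemes to 'finished', after which further regions for that scheme are ignored until damos_sysfs_regions_upd_done() reports all schemes finished. A minimal user-space sketch of just those transitions (hypothetical names, not the kernel code):

/* Illustration only: the IDLE -> STARTED -> FINISHED transitions. */
#include <stdio.h>

enum upd_status { UPD_IDLE, UPD_STARTED, UPD_FINISHED };

/* ->before_damos_apply(): record regions only until this scheme finished */
static int upd_before_apply(enum upd_status *st)
{
	if (*st == UPD_FINISHED)
		return 0;		/* snapshot already complete: skip */
	if (*st == UPD_IDLE)
		*st = UPD_STARTED;
	return 1;			/* caller records this region */
}

/* ->after_sampling(): a started scheme has now seen one full snapshot */
static void upd_after_sampling(enum upd_status *st)
{
	if (*st == UPD_STARTED)
		*st = UPD_FINISHED;
}

int main(void)
{
	enum upd_status st = UPD_IDLE;	/* update_regions_start() resets to idle */

	printf("record? %d\n", upd_before_apply(&st));	/* 1: idle -> started */
	printf("record? %d\n", upd_before_apply(&st));	/* 1: still started */
	upd_after_sampling(&st);			/* started -> finished */
	printf("record? %d\n", upd_before_apply(&st));	/* 0: later snapshots skipped */
	printf("done? %d\n", st == UPD_FINISHED);	/* the upd_done() condition */
	return 0;
}
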
diff --git a/mm/damon/sysfs-test.h b/mm/damon/sysfs-test.h
new file mode 100644
index 000000000000..73bdce2452c1
--- /dev/null
+++ b/mm/damon/sysfs-test.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Data Access Monitor Unit Tests
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#ifdef CONFIG_DAMON_SYSFS_KUNIT_TEST
+
+#ifndef _DAMON_SYSFS_TEST_H
+#define _DAMON_SYSFS_TEST_H
+
+#include <kunit/test.h>
+
+static unsigned int nr_damon_targets(struct damon_ctx *ctx)
+{
+ struct damon_target *t;
+ unsigned int nr_targets = 0;
+
+ damon_for_each_target(t, ctx)
+ nr_targets++;
+
+ return nr_targets;
+}
+
+static int __damon_sysfs_test_get_any_pid(int min, int max)
+{
+ struct pid *pid;
+ int i;
+
+ for (i = min; i <= max; i++) {
+ pid = find_get_pid(i);
+ if (pid) {
+ put_pid(pid);
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void damon_sysfs_test_set_targets(struct kunit *test)
+{
+ struct damon_sysfs_targets *sysfs_targets;
+ struct damon_sysfs_target *sysfs_target;
+ struct damon_ctx *ctx;
+
+ sysfs_targets = damon_sysfs_targets_alloc();
+ sysfs_targets->nr = 1;
+ sysfs_targets->targets_arr = kmalloc_array(1,
+ sizeof(*sysfs_targets->targets_arr), GFP_KERNEL);
+
+ sysfs_target = damon_sysfs_target_alloc();
+ sysfs_target->pid = __damon_sysfs_test_get_any_pid(12, 100);
+ sysfs_target->regions = damon_sysfs_regions_alloc();
+ sysfs_targets->targets_arr[0] = sysfs_target;
+
+ ctx = damon_new_ctx();
+
+ damon_sysfs_set_targets(ctx, sysfs_targets);
+ KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
+
+ sysfs_target->pid = __damon_sysfs_test_get_any_pid(
+ sysfs_target->pid + 1, 200);
+ damon_sysfs_set_targets(ctx, sysfs_targets);
+ KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(ctx));
+
+ damon_destroy_ctx(ctx);
+ kfree(sysfs_targets->targets_arr);
+ kfree(sysfs_targets);
+ kfree(sysfs_target);
+}
+
+static struct kunit_case damon_sysfs_test_cases[] = {
+ KUNIT_CASE(damon_sysfs_test_set_targets),
+ {},
+};
+
+static struct kunit_suite damon_sysfs_test_suite = {
+ .name = "damon-sysfs",
+ .test_cases = damon_sysfs_test_cases,
+};
+kunit_test_suite(damon_sysfs_test_suite);
+
+#endif /* _DAMON_SYSFS_TEST_H */
+
+#endif /* CONFIG_DAMON_SYSFS_KUNIT_TEST */
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index f60e56150feb..e27846708b5a 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1150,58 +1150,73 @@ destroy_targets_out:
return err;
}
-/*
- * Search a target in a context that corresponds to the sysfs target input.
- *
- * Return: pointer to the target if found, NULL if not found, or negative
- * error code if the search failed.
- */
-static struct damon_target *damon_sysfs_existing_target(
- struct damon_sysfs_target *sys_target, struct damon_ctx *ctx)
+static int damon_sysfs_update_target_pid(struct damon_target *target, int pid)
{
- struct pid *pid;
- struct damon_target *t;
+ struct pid *pid_new;
- if (!damon_target_has_pid(ctx)) {
- /* Up to only one target for paddr could exist */
- damon_for_each_target(t, ctx)
- return t;
- return NULL;
+ pid_new = find_get_pid(pid);
+ if (!pid_new)
+ return -EINVAL;
+
+ if (pid_new == target->pid) {
+ put_pid(pid_new);
+ return 0;
}
- /* ops.id should be DAMON_OPS_VADDR or DAMON_OPS_FVADDR */
- pid = find_get_pid(sys_target->pid);
- if (!pid)
- return ERR_PTR(-EINVAL);
- damon_for_each_target(t, ctx) {
- if (t->pid == pid) {
- put_pid(pid);
- return t;
- }
+ put_pid(target->pid);
+ target->pid = pid_new;
+ return 0;
+}
+
+static int damon_sysfs_update_target(struct damon_target *target,
+ struct damon_ctx *ctx,
+ struct damon_sysfs_target *sys_target)
+{
+ int err;
+
+ if (damon_target_has_pid(ctx)) {
+ err = damon_sysfs_update_target_pid(target, sys_target->pid);
+ if (err)
+ return err;
}
- put_pid(pid);
- return NULL;
+
+ /*
+ * Do monitoring target region boundary update only if one or more
+ * regions are set by the user. This makes it easier to keep the current
+ * monitoring results and ranges, especially for ops like 'vaddr' that
+ * dynamically update the monitoring target regions.
+ */
+ if (sys_target->regions->nr)
+ err = damon_sysfs_set_regions(target, sys_target->regions);
+ return err;
}
static int damon_sysfs_set_targets(struct damon_ctx *ctx,
struct damon_sysfs_targets *sysfs_targets)
{
- int i, err;
+ struct damon_target *t, *next;
+ int i = 0, err;
/* Multiple physical address space monitoring targets makes no sense */
if (ctx->ops.id == DAMON_OPS_PADDR && sysfs_targets->nr > 1)
return -EINVAL;
- for (i = 0; i < sysfs_targets->nr; i++) {
+ damon_for_each_target_safe(t, next, ctx) {
+ if (i < sysfs_targets->nr) {
+ damon_sysfs_update_target(t, ctx,
+ sysfs_targets->targets_arr[i]);
+ } else {
+ if (damon_target_has_pid(ctx))
+ put_pid(t->pid);
+ damon_destroy_target(t);
+ }
+ i++;
+ }
+
+ for (; i < sysfs_targets->nr; i++) {
struct damon_sysfs_target *st = sysfs_targets->targets_arr[i];
- struct damon_target *t = damon_sysfs_existing_target(st, ctx);
-
- if (IS_ERR(t))
- return PTR_ERR(t);
- if (!t)
- err = damon_sysfs_add_target(st, ctx);
- else
- err = damon_sysfs_set_regions(t, st->regions);
+
+ err = damon_sysfs_add_target(st, ctx);
if (err)
return err;
}
@@ -1336,12 +1351,13 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
/*
* damon_sysfs_cmd_request_callback() - DAMON callback for handling requests.
- * @c: The DAMON context of the callback.
+ * @c: The DAMON context of the callback.
+ * @active: Whether @c is not deactivated due to watermarks.
*
* This function is periodically called back from the kdamond thread for @c.
* Then, it checks if there is a waiting DAMON sysfs request and handles it.
*/
-static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
+static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active)
{
struct damon_sysfs_kdamond *kdamond;
bool total_bytes_only = false;
@@ -1373,6 +1389,13 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
goto keep_lock_out;
}
} else {
+ /*
+ * Continue regions updating if DAMON is still
+ * active and the update for all schemes is not
+ * finished.
+ */
+ if (active && !damos_sysfs_regions_upd_done())
+ goto keep_lock_out;
err = damon_sysfs_upd_schemes_regions_stop(kdamond);
damon_sysfs_schemes_regions_updating = false;
}
@@ -1392,6 +1415,24 @@ keep_lock_out:
return err;
}
+static int damon_sysfs_after_wmarks_check(struct damon_ctx *c)
+{
+ /*
+ * after_wmarks_check() is called back while the context is deactivated
+ * by watermarks.
+ */
+ return damon_sysfs_cmd_request_callback(c, false);
+}
+
+static int damon_sysfs_after_aggregation(struct damon_ctx *c)
+{
+ /*
+ * after_aggregation() is called back only while the context is not
+ * deactivated by watermarks.
+ */
+ return damon_sysfs_cmd_request_callback(c, true);
+}
+
static struct damon_ctx *damon_sysfs_build_ctx(
struct damon_sysfs_context *sys_ctx)
{
@@ -1407,8 +1448,8 @@ static struct damon_ctx *damon_sysfs_build_ctx(
return ERR_PTR(err);
}
- ctx->callback.after_wmarks_check = damon_sysfs_cmd_request_callback;
- ctx->callback.after_aggregation = damon_sysfs_cmd_request_callback;
+ ctx->callback.after_wmarks_check = damon_sysfs_after_wmarks_check;
+ ctx->callback.after_aggregation = damon_sysfs_after_aggregation;
ctx->callback.before_terminate = damon_sysfs_before_terminate;
return ctx;
}
@@ -1810,3 +1851,5 @@ out:
return err;
}
subsys_initcall(damon_sysfs_init);
+
+#include "sysfs-test.h"
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index cf8a9fc5c9d1..a4d1f63c5b23 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -558,23 +558,27 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
* r the region to be checked
*/
static void __damon_va_check_access(struct mm_struct *mm,
- struct damon_region *r, bool same_target)
+ struct damon_region *r, bool same_target,
+ struct damon_attrs *attrs)
{
static unsigned long last_addr;
static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;
+ if (!mm) {
+ damon_update_region_access_rate(r, false, attrs);
+ return;
+ }
+
/* If the region is in the last checked page, reuse the result */
if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
- if (last_accessed)
- r->nr_accesses++;
+ damon_update_region_access_rate(r, last_accessed, attrs);
return;
}
last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
- if (last_accessed)
- r->nr_accesses++;
+ damon_update_region_access_rate(r, last_accessed, attrs);
last_addr = r->sampling_addr;
}
@@ -589,15 +593,15 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
mm = damon_get_mm(t);
- if (!mm)
- continue;
same_target = false;
damon_for_each_region(r, t) {
- __damon_va_check_access(mm, r, same_target);
+ __damon_va_check_access(mm, r, same_target,
+ &ctx->attrs);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
same_target = true;
}
- mmput(mm);
+ if (mm)
+ mmput(mm);
}
return max_nr_accesses;