summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
authorDavid Carlier <devnexen@gmail.com>2026-02-14 07:32:05 +0000
committerTejun Heo <tj@kernel.org>2026-02-16 21:01:18 -1000
commitcabd76bbc03617e55c25f0b06167aa5e0b911a36 (patch)
tree9859a1e8daf39945445ddf154b33dd6088313590 /tools
parent0b82cc331d2e23537670878c62c19ee3f4147a93 (diff)
tools/sched_ext: scx_flatcg: fix potential stack overflow from VLA in fcg_read_stats
fcg_read_stats() had a VLA allocating 21 * nr_cpus * 8 bytes on the stack, risking stack overflow on large CPU counts (nr_cpus can be up to 512). Fix by using a single heap allocation with the correct size, reusing it across all stat indices, and freeing it at the end. Signed-off-by: David Carlier <devnexen@gmail.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'tools')
-rw-r--r--tools/sched_ext/scx_flatcg.c13
1 file changed, 9 insertions, 4 deletions
diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c
index bea76d060201..a8446509949e 100644
--- a/tools/sched_ext/scx_flatcg.c
+++ b/tools/sched_ext/scx_flatcg.c
@@ -102,22 +102,27 @@ static float read_cpu_util(__u64 *last_sum, __u64 *last_idle)
static void fcg_read_stats(struct scx_flatcg *skel, __u64 *stats)
{
- __u64 cnts[FCG_NR_STATS][skel->rodata->nr_cpus];
+ __u64 *cnts;
__u32 idx;
+ cnts = calloc(skel->rodata->nr_cpus, sizeof(__u64));
+ if (!cnts)
+ return;
+
memset(stats, 0, sizeof(stats[0]) * FCG_NR_STATS);
- memset(cnts, 0, sizeof(cnts));
for (idx = 0; idx < FCG_NR_STATS; idx++) {
int ret, cpu;
ret = bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats),
- &idx, cnts[idx]);
+ &idx, cnts);
if (ret < 0)
continue;
for (cpu = 0; cpu < skel->rodata->nr_cpus; cpu++)
- stats[idx] += cnts[idx][cpu];
+ stats[idx] += cnts[cpu];
}
+
+ free(cnts);
}
int main(int argc, char **argv)