author    Ingo Molnar <mingo@kernel.org>    2025-05-28 10:08:53 +0200
committer Ingo Molnar <mingo@kernel.org>    2025-06-13 08:47:17 +0200
commit    311bb3f7b78e944e831ffb07cb58455b47bf2269
tree      a54470ca5428116a4117124ed9a106a79255b091
parent    c215dff7f80caab4a25160f0ded369eba5cb9190
sched: Clean up and standardize #if/#else/#endif markers in sched/pelt.[ch]
- Use the standard #ifdef marker format for larger blocks, where appropriate:

    #if CONFIG_FOO
    ...
    #else /* !CONFIG_FOO: */
    ...
    #endif /* !CONFIG_FOO */

- Fix whitespace noise and other inconsistencies.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20250528080924.2273858-13-mingo@kernel.org
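For illustration, a minimal sketch of the marker convention this patch applies; CONFIG_FOO and foo_update() are placeholders, not identifiers from pelt.[ch]:

    #ifdef CONFIG_FOO
    /* Real implementation, built only when CONFIG_FOO is enabled: */
    extern int foo_update(struct rq *rq);
    #else /* !CONFIG_FOO: */
    /* Inline stub, so callers compile unchanged without CONFIG_FOO: */
    static inline int foo_update(struct rq *rq) { return 0; }
    #endif /* !CONFIG_FOO */

The marker comment names the branch that follows it: the #else marker carries a trailing colon because code comes after it, while the closing #endif marker does not.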
-rw-r--r--    kernel/sched/pelt.c     4
-rw-r--r--    kernel/sched/pelt.h    12
2 files changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 09be6a83e45a..fa83bbaf4f3e 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -414,7 +414,7 @@ int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
return 0;
}
-#endif
+#endif /* CONFIG_SCHED_HW_PRESSURE */
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
@@ -467,7 +467,7 @@ int update_irq_load_avg(struct rq *rq, u64 running)
return ret;
}
-#endif
+#endif /* CONFIG_HAVE_SCHED_AVG_IRQ */
/*
* Load avg and utilization metrics need to be updated periodically and before
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 19592077452e..a5d4933e6b70 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -20,7 +20,7 @@ static inline u64 hw_load_avg(struct rq *rq)
{
return READ_ONCE(rq->avg_hw.load_avg);
}
-#else
+#else /* !CONFIG_SCHED_HW_PRESSURE: */
static inline int
update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
{
@@ -31,7 +31,7 @@ static inline u64 hw_load_avg(struct rq *rq)
{
return 0;
}
-#endif
+#endif /* !CONFIG_SCHED_HW_PRESSURE */
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
@@ -179,15 +179,15 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
-#else
+#else /* !CONFIG_CFS_BANDWIDTH: */
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
return rq_clock_pelt(rq_of(cfs_rq));
}
-#endif
+#endif /* !CONFIG_CFS_BANDWIDTH */
-#else
+#else /* !CONFIG_SMP: */
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
@@ -236,6 +236,6 @@ static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
-#endif
+#endif /* !CONFIG_SMP */
#endif /* _KERNEL_SCHED_PELT_H */
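Note on the last pelt.h hunks: the bare #else rewritten to "#else /* !CONFIG_SMP: */" has no matching #if in the visible diff context because it closes the branch of an outer #ifdef CONFIG_SMP block that wraps most of the header, with the CONFIG_CFS_BANDWIDTH section nested inside the SMP branch. A rough sketch of that nesting, with bodies elided (an outline, not the literal file contents):

    #ifdef CONFIG_SMP
    /* PELT update functions and helpers ... */
    #ifdef CONFIG_CFS_BANDWIDTH
    /* throttle-aware cfs_rq_clock_pelt() ... */
    #else /* !CONFIG_CFS_BANDWIDTH: */
    /* plain cfs_rq_clock_pelt() ... */
    #endif /* !CONFIG_CFS_BANDWIDTH */
    #else /* !CONFIG_SMP: */
    /* inline no-op stubs for !SMP builds ... */
    #endif /* !CONFIG_SMP */

This is exactly the situation the named markers are for: without them, a reader of a narrow diff hunk cannot tell which condition a bare #else or #endif belongs to.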