From 22dc02f81cddd19528fc1d4fbd7404defbf736c5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 24 Jul 2023 21:30:54 +0200
Subject: Revert "sched/fair: Move unused stub functions to header"

Revert commit 7aa55f2a5902 ("sched/fair: Move unused stub functions to
header"), for while it has the right Changelog, the actual patch content
is a revert of the previous 4 patches:

  f7df852ad6db ("sched: Make task_vruntime_update() prototype visible")
  c0bdfd72fbfb ("sched/fair: Hide unused init_cfs_bandwidth() stub")
  378be384e01f ("sched: Add schedule_user() declaration")
  d55ebae3f312 ("sched: Hide unused sched_update_scaling()")

So in effect this is a revert of a revert, and it re-applies those
patches.

Fixes: 7aa55f2a5902 ("sched/fair: Move unused stub functions to header")
Reported-by: Arnd Bergmann
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Borislav Petkov (AMD)
---
 kernel/sched/fair.c  | 6 +++---
 kernel/sched/sched.h | 2 ++
 2 files changed, 5 insertions(+), 3 deletions(-)

(limited to 'kernel')

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b3e25be58e2b..695f8e5c9e77 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -684,7 +684,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 /**************************************************************
  * Scheduling class statistics methods:
  */
-
+#ifdef CONFIG_SMP
 int sched_update_scaling(void)
 {
 	unsigned int factor = get_update_sysctl_factor();
@@ -702,6 +702,7 @@ int sched_update_scaling(void)
 	return 0;
 }
 #endif
+#endif
 
 /*
  * delta /= w
@@ -6186,9 +6187,8 @@ static inline int throttled_lb_pair(struct task_group *tg,
 	return 0;
 }
 
-void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
+void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
 static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 #endif
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e93e006a942b..44b540ad37b7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1245,6 +1245,7 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
 
 bool cfs_prio_less(const struct task_struct *a, const struct task_struct *b,
 		   bool fi);
+void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
 /*
  * Helpers to check if the CPU's core cookie matches with the task's cookie
@@ -2398,6 +2399,7 @@ static inline struct cpuidle_state *idle_get_state(struct rq *rq)
 #endif
 
 extern void schedule_idle(void);
+asmlinkage void schedule_user(void);
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
-- 
cgit v1.2.3


From 8874a414f8f706daf1de467cbf2550988ebec09d Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 3 Aug 2023 10:26:19 +0200
Subject: x86/qspinlock-paravirt: Fix missing-prototype warning

__pv_queued_spin_unlock_slowpath() is defined in a header file as
a global function, and designed to be called from inline asm, but
there is no prototype visible in the definition:

  kernel/locking/qspinlock_paravirt.h:493:1: error: no previous \
      prototype for '__pv_queued_spin_unlock_slowpath' [-Werror=missing-prototypes]

Add this to the x86 header that contains the inline asm calling it,
and ensure this gets included before the definition, rather than
after it.
Signed-off-by: Arnd Bergmann
Signed-off-by: Borislav Petkov (AMD)
Link: https://lore.kernel.org/r/20230803082619.1369127-8-arnd@kernel.org
---
 kernel/locking/qspinlock_paravirt.h | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'kernel')

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 6afc249ce697..6a0184e9c234 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -485,6 +485,16 @@ gotlock:
 	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
 }
 
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
+ * function close to each other sharing consecutive instruction cachelines.
+ * Alternatively, architecture specific version of __pv_queued_spin_unlock()
+ * can be defined.
+ */
+#include <asm/qspinlock_paravirt.h>
+
 /*
  * PV versions of the unlock fastpath and slowpath functions to be used
  * instead of queued_spin_unlock().
@@ -533,16 +543,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	pv_kick(node->cpu);
 }
 
-/*
- * Include the architecture specific callee-save thunk of the
- * __pv_queued_spin_unlock(). This thunk is put together with
- * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
- * function close to each other sharing consecutive instruction cachelines.
- * Alternatively, architecture specific version of __pv_queued_spin_unlock()
- * can be defined.
- */
-#include <asm/qspinlock_paravirt.h>
-
 #ifndef __pv_queued_spin_unlock
 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-- 
cgit v1.2.3
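
The first patch above re-applies a stub-hiding pattern worth spelling out:
a non-static function that is only needed under a given Kconfig option
should have both its prototype and its definition under the same guard,
otherwise configs that do not build the callers see a global definition
with no prior declaration and -Wmissing-prototypes fires. A minimal
sketch of that pattern, with hypothetical file and function names
(update_scaling() and the files below are illustrative, not the actual
kernel code):

  /* scaling.h -- hypothetical header */
  #ifdef CONFIG_SMP
  int update_scaling(void);	/* prototype exists only when the code is built */
  #endif

  /* scaling.c -- hypothetical definition site */
  #include "scaling.h"

  #ifdef CONFIG_SMP
  /*
   * Guarded identically to the prototype, so every build that compiles
   * this definition has already seen a declaration for it.
   */
  int update_scaling(void)
  {
  	return 0;
  }
  #endif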
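
The second patch addresses the other common way -Wmissing-prototypes
fires: a global function that is only ever called from inline asm, so no
C caller ever forced a declaration into scope. The fix is to make the
prototype visible before the definition. A minimal sketch, again with
hypothetical names rather than the actual kernel identifiers:

  /* thunk.h -- hypothetical stand-in for the arch header with the asm caller */
  #ifndef THUNK_H
  #define THUNK_H
  void unlock_slowpath(int *lock);	/* prototype the definition below will see */
  #endif

  /* locks.c -- hypothetical definition site */
  #include "thunk.h"	/* must be included BEFORE the definition, as in the patch */

  /*
   * Compiled with gcc -Wmissing-prototypes, this definition is clean only
   * because the declaration above is already in scope; without the include
   * (or with it placed after this function) the warning fires even though
   * the code is correct.
   */
  void unlock_slowpath(int *lock)
  {
  	*lock = 0;	/* placeholder body */
  }

That ordering requirement is why the patch moves the include block from
below __pv_queued_spin_unlock_slowpath() to above it: a prototype only
suppresses the warning if the compiler has seen it by the time it
reaches the definition.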