summaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorWaiman Long <longman@redhat.com>2026-02-21 13:54:17 -0500
committerTejun Heo <tj@kernel.org>2026-02-23 10:42:11 -1000
commit6df415aa46ec10d607da5063d88492a7c7762074 (patch)
tree397e8609732e6fa9c02e3ad8baa1f8e66e442ad6 /kernel
parent3bfe47967191f42d17510713b31a47d9284b8c5a (diff)
cgroup/cpuset: Defer housekeeping_update() calls from CPU hotplug to workqueue
The cpuset_handle_hotplug() may need to invoke housekeeping_update(), for instance, when an isolated partition is invalidated because its last active CPU has been put offline. As we are going to enable dynamic update to the nohz_full housekeeping cpumask (HK_TYPE_KERNEL_NOISE) soon with the help of CPU hotplug, allowing the CPU hotplug path to call into housekeeping_update() directly from update_isolation_cpumasks() will likely cause deadlock. So we have to defer any call to housekeeping_update() until after the CPU hotplug operation has finished. This is now done via the workqueue where the update_hk_sched_domains() function will be invoked via the hk_sd_workfn(). A concurrent cpuset control file write may have executed the required update_hk_sched_domains() function before the work function is called. So the work function call may become a no-op when it is invoked. Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cgroup/cpuset.c31
1 files changed, 26 insertions, 5 deletions
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 14b07a283a2c..aa915e9b588f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1324,6 +1324,16 @@ static void update_hk_sched_domains(void)
rebuild_sched_domains_locked();
}
+/*
+ * Work function to invoke update_hk_sched_domains()
+ */
+static void hk_sd_workfn(struct work_struct *work)
+{
+ cpuset_full_lock();
+ update_hk_sched_domains();
+ cpuset_full_unlock();
+}
+
/**
* rm_siblings_excl_cpus - Remove exclusive CPUs that are used by sibling cpusets
* @parent: Parent cpuset containing all siblings
@@ -3796,6 +3806,7 @@ unlock:
*/
static void cpuset_handle_hotplug(void)
{
+ static DECLARE_WORK(hk_sd_work, hk_sd_workfn);
static cpumask_t new_cpus;
static nodemask_t new_mems;
bool cpus_updated, mems_updated;
@@ -3878,11 +3889,21 @@ static void cpuset_handle_hotplug(void)
}
- if (update_housekeeping || force_sd_rebuild) {
- mutex_lock(&cpuset_mutex);
- update_hk_sched_domains();
- mutex_unlock(&cpuset_mutex);
- }
+ /*
+ * Queue a work to call housekeeping_update() & rebuild_sched_domains()
+ * There will be a slight delay before the HK_TYPE_DOMAIN housekeeping
+ * cpumask can correctly reflect what is in isolated_cpus.
+ *
+ * We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
+ * is still pending. Before the pending bit is cleared, the work data
+ * is copied out and work item dequeued. So it is possible to queue
+ * the work again before the hk_sd_workfn() is invoked to process the
+ * previously queued work. Since hk_sd_workfn() doesn't use the work
+ * item at all, this is not a problem.
+ */
+ if (update_housekeeping || force_sd_rebuild)
+ queue_work(system_unbound_wq, &hk_sd_work);
+
free_tmpmasks(ptmp);
}