author     Jan Kara <jack@suse.cz>                      2025-09-12 12:38:36 +0200
committer  Christian Brauner <brauner@kernel.org>       2025-09-19 13:11:05 +0200
commit     66c14dccd810d42ec5c73bb8a9177489dfd62278 (patch)
tree       f7a32b05d3b14eebf91d6097a9d1dc819fddf977
parent     e1b849cfa6b61f1c866a908c9e8dd9b5aaab820b (diff)
writeback: Avoid softlockup when switching many inodes
process_inode_switch_wbs_work() can be switching over 100 inodes to a
different cgroup. Since switching an inode requires counting all dirty and
under-writeback pages in its address space, this can take a significant
amount of time. Add the possibility to reschedule after processing each
inode to avoid softlockups.
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
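The fix follows a common pattern for long loops run under spinlocks: keep the
cursor outside the loop, and whenever work remains and a reschedule is due,
drop both list_locks, call cond_resched(), and jump back to re-take them.
Below is a minimal userspace C sketch of that pattern, not kernel code:
pthread mutexes and sched_yield() stand in for the wb list_locks and
cond_resched(), and all item/lock/function names are illustrative
assumptions rather than identifiers from fs/fs-writeback.c.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct item { int dirty; };

static pthread_mutex_t old_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t new_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for need_resched(): pretend a yield is due every 100 items. */
static bool yield_due(size_t processed)
{
	return processed > 0 && processed % 100 == 0;
}

/*
 * Walk a NULL-terminated array under both locks. The cursor lives outside
 * the loop (like the patch hoisting "inodep = isw->inodes" out of the for
 * statement), so after dropping the locks and yielding we resume where we
 * left off instead of rescanning from the start.
 */
static size_t switch_items(struct item **itemp)
{
	size_t processed = 0, nr_switched = 0;

relock:
	pthread_mutex_lock(&old_lock);
	pthread_mutex_lock(&new_lock);

	while (*itemp) {
		if ((*itemp)->dirty)		/* stand-in for inode_do_switch_wbs() */
			nr_switched++;
		itemp++;
		processed++;
		/* Work remains and a yield is due: drop locks, yield, re-take. */
		if (*itemp && yield_due(processed)) {
			pthread_mutex_unlock(&new_lock);
			pthread_mutex_unlock(&old_lock);
			sched_yield();		/* stand-in for cond_resched() */
			goto relock;
		}
	}

	pthread_mutex_unlock(&new_lock);
	pthread_mutex_unlock(&old_lock);
	return nr_switched;
}

int main(void)
{
	struct item a = { .dirty = 1 }, b = { .dirty = 0 }, c = { .dirty = 1 };
	struct item *items[] = { &a, &b, &c, NULL };

	printf("switched %zu items\n", switch_items(items));
	return 0;
}

Because the goto does not reset the cursor, every re-acquisition of the locks
continues with the next unprocessed item, which is exactly why the patch moves
the inodep initialization out of the for statement.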
-rw-r--r--   fs/fs-writeback.c | 11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index b0e9092ccf04..36ef1a796d4b 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -500,6 +500,7 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
 	 */
 	down_read(&bdi->wb_switch_rwsem);
 
+	inodep = isw->inodes;
 	/*
 	 * By the time control reaches here, RCU grace period has passed
 	 * since I_WB_SWITCH assertion and all wb stat update transactions
@@ -510,6 +511,7 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
 	 * gives us exclusion against all wb related operations on @inode
 	 * including IO list manipulations and stat updates.
 	 */
+relock:
 	if (old_wb < new_wb) {
 		spin_lock(&old_wb->list_lock);
 		spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
@@ -518,10 +520,17 @@ static void process_inode_switch_wbs(struct bdi_writeback *new_wb,
 		spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
 	}
 
-	for (inodep = isw->inodes; *inodep; inodep++) {
+	while (*inodep) {
 		WARN_ON_ONCE((*inodep)->i_wb != old_wb);
 		if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
 			nr_switched++;
+		inodep++;
+		if (*inodep && need_resched()) {
+			spin_unlock(&new_wb->list_lock);
+			spin_unlock(&old_wb->list_lock);
+			cond_resched();
+			goto relock;
+		}
 	}
 
 	spin_unlock(&new_wb->list_lock);
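One detail worth noting in the hunk above: every time the locks are re-taken
at the relock label, old_wb->list_lock and new_wb->list_lock are acquired in
pointer order (lower address first), so two concurrent switches working on
the same pair of wbs in opposite roles cannot ABBA-deadlock. Below is a
minimal sketch of that ordering rule using hypothetical pthread-based
helpers, not the kernel API.

#include <pthread.h>
#include <stdint.h>

/*
 * Acquire two mutexes in a globally consistent (address) order so that two
 * threads locking the same pair in opposite roles cannot deadlock. This
 * mirrors the "if (old_wb < new_wb)" ordering in the hunk above; the
 * pthread helpers themselves are illustrative, not kernel code.
 */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* Unlock order does not matter for deadlock avoidance. */
	pthread_mutex_unlock(b);
	pthread_mutex_unlock(a);
}

In the kernel code the same rule is enforced with spin_lock() plus
spin_lock_nested(SINGLE_DEPTH_NESTING), the nesting annotation being there so
lockdep accepts taking two locks of the same lock class.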