author | Tejun Heo <tj@kernel.org> | 2013-06-12 21:04:54 -0700
---|---|---
committer | Tejun Heo <tj@kernel.org> | 2013-06-13 10:55:18 -0700
commit | ddd69148bdc45e5e3e55bfde3571daecd5a96d75 (patch)
tree | 0d248de63b78acf91668a96a1f6cc77f49f2946c /kernel/cgroup.c
parent | 54766d4a1d3d6f84ff8fa475cd8f165c0a0000eb (diff)
cgroup: drop unnecessary RCU dancing from __put_css_set()
__put_css_set() does RCU read access on @cgrp across dropping
@cgrp->count so that it can continue accessing @cgrp even if the count
reached zero and destruction of the cgroup commenced. Given that both
sides - __css_put() and cgroup_destroy_locked() - are cold paths, this
is unnecessary. Just making cgroup_destroy_locked() grab css_set_lock
while checking @cgrp->count is enough.
Remove the RCU read locking from __put_css_set() and make
cgroup_destroy_locked() read-lock css_set_lock when checking
@cgrp->count. This will also allow removing @cgrp->count.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
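To make the reasoning above concrete, here is a minimal user-space sketch of the locking pattern the patch relies on. Everything in it (`struct obj`, `obj_put()`, `obj_destroy()`, `obj_lock`) is an illustrative stand-in, not kernel code; it only models how holding the same rwlock across the refcount drop on the put side and the emptiness check on the destroy side makes the RCU read section unnecessary.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int count;		/* cf. cgrp->count */
	bool releasable;
};

/* cf. css_set_lock */
static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Put side: drop the reference while holding obj_lock, like __put_css_set(). */
static void obj_put(struct obj *o)
{
	pthread_rwlock_wrlock(&obj_lock);
	/* o cannot be freed here: the destroy side also needs obj_lock. */
	if (atomic_fetch_sub(&o->count, 1) == 1)
		o->releasable = true;	/* stands in for check_for_release() */
	pthread_rwlock_unlock(&obj_lock);
}

/* Destroy side: commit to teardown only after checking the count under the lock. */
static int obj_destroy(struct obj *o)
{
	bool empty;

	pthread_rwlock_rdlock(&obj_lock);
	empty = (atomic_load(&o->count) == 0);
	pthread_rwlock_unlock(&obj_lock);
	if (!empty)
		return -1;		/* cf. -EBUSY in cgroup_destroy_locked() */

	free(o);
	return 0;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->count, 1);
	obj_put(o);			/* drops the last reference */
	printf("destroy returned %d\n", obj_destroy(o));
	return 0;
}
```

In the patch itself the same structure appears: __put_css_set() already runs under css_set_lock (the new comment in the first hunk relies on exactly that), so cgroup_destroy_locked() only needs to take the read side of css_set_lock around its count/children check, and the RCU read locking can go.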
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- | kernel/cgroup.c | 20 |
1 file changed, 10 insertions, 10 deletions
```diff
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 84efb344fdf6..1a68241ca835 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit)
 		list_del(&link->cset_link);
 		list_del(&link->cgrp_link);
 
-		/*
-		 * We may not be holding cgroup_mutex, and if cgrp->count is
-		 * dropped to 0 the cgroup can be destroyed at any time, hence
-		 * rcu_read_lock is used to keep it alive.
-		 */
-		rcu_read_lock();
+		/* @cgrp can't go away while we're holding css_set_lock */
 		if (atomic_dec_and_test(&cgrp->count) &&
 		    notify_on_release(cgrp)) {
 			if (taskexit)
 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
 		}
-		rcu_read_unlock();
 
 		kfree(link);
 	}
@@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct cgroup *parent = cgrp->parent;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);
 
-	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
+	/*
+	 * css_set_lock prevents @cgrp from being removed while
+	 * __put_css_set() is in progress.
+	 */
+	read_lock(&css_set_lock);
+	empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children);
+	read_unlock(&css_set_lock);
+	if (!empty)
 		return -EBUSY;
 
 	/*
@@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 
 static void check_for_release(struct cgroup *cgrp)
 {
-	/* All of these checks rely on RCU to keep the cgroup
-	 * structure alive */
 	if (cgroup_is_releasable(cgrp) &&
 	    !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
 		/*
```