| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-01 09:06:26 +0100 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2017-02-01 09:06:26 +0100 |
| commit | 45bf86e7731606a475b7f989486de23b0784bfe7 (patch) | |
| tree | dac8a776783d6322bedf346060fb6000251c8b40 /kernel/ucount.c | |
| parent | 059c7a5a748d4e7481d8b1b4cf0e182cb81496ad (diff) | |
| parent | 18566acac18f5784347bc5fe636a26897d1c963b (diff) | |
Merge remote-tracking branch 'airlied/drm-next' into drm-misc-next
Backmerge to resync and also so that Ville can apply a cleanup patch
from Takashi.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'kernel/ucount.c')
| -rw-r--r-- | kernel/ucount.c | 14 |
|---|---|---|

1 file changed, 8 insertions, 6 deletions
```diff
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..4bbd38ec3788 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
 	struct ucounts *ucounts, *new;
 
-	spin_lock(&ucounts_lock);
+	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
 	if (!ucounts) {
-		spin_unlock(&ucounts_lock);
+		spin_unlock_irq(&ucounts_lock);
 
 		new = kzalloc(sizeof(*new), GFP_KERNEL);
 		if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 		new->uid = uid;
 		atomic_set(&new->count, 0);
 
-		spin_lock(&ucounts_lock);
+		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
 		if (ucounts) {
 			kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
 	}
 	if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
 		ucounts = NULL;
-	spin_unlock(&ucounts_lock);
+	spin_unlock_irq(&ucounts_lock);
 	return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+	unsigned long flags;
+
 	if (atomic_dec_and_test(&ucounts->count)) {
-		spin_lock(&ucounts_lock);
+		spin_lock_irqsave(&ucounts_lock, flags);
 		hlist_del_init(&ucounts->node);
-		spin_unlock(&ucounts_lock);
+		spin_unlock_irqrestore(&ucounts_lock, flags);
 		kfree(ucounts);
 	}
 }
```
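The change to kernel/ucount.c picked up by this backmerge converts the plain `spin_lock()`/`spin_unlock()` pairs on `ucounts_lock` to IRQ-disabling variants: `spin_lock_irq()` in `get_ucounts()`, and `spin_lock_irqsave()`/`spin_unlock_irqrestore()` with a saved `flags` value in `put_ucounts()`. The usual Linux convention behind that split is that the `_irq` form is used where interrupts are known to be enabled, while the `_irqsave` form preserves the caller's interrupt state when it is not known. Below is a minimal sketch of that pattern in a hypothetical module; `demo_lock`, `demo_get` and `demo_put` are illustrative names, not part of this commit.

```c
#include <linux/spinlock.h>

/* A lock that may also be contended from interrupt context. */
static DEFINE_SPINLOCK(demo_lock);

/*
 * Process-context path where interrupts are known to be enabled:
 * spin_lock_irq()/spin_unlock_irq() disable and unconditionally
 * re-enable local interrupts around the critical section.
 */
static void demo_get(void)
{
	spin_lock_irq(&demo_lock);
	/* ... look up or insert an entry under the lock ... */
	spin_unlock_irq(&demo_lock);
}

/*
 * Path whose callers may already have interrupts disabled:
 * spin_lock_irqsave() records the current interrupt state in
 * 'flags' and spin_unlock_irqrestore() restores exactly that state.
 */
static void demo_put(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... remove the entry under the lock ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
```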