author     Christian Borntraeger <borntraeger@de.ibm.com>   2014-07-25 14:23:29 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>      2014-08-01 11:16:23 +0200
commit     55e4283c3eb1d850893f645dd695c9c75d5fa1fc (patch)
tree       73b921638494fa3923c94a3930b48bb4f3b20242 /arch/s390
parent     a94fa154291bb11b1537ac2ff1ae2fd28428e054 (diff)
KVM: s390/mm: Fix page table locking vs. split pmd lock
commit ec66ad66a0de87866be347b5ecc83bd46427f53b (s390/mm: enable
split page table lock for PMD level) activated the split pmd lock
for s390. Turns out that we missed one place: We also have to take
the pmd lock instead of the page table lock when we reallocate the
page tables (==> changing entries in the PMD) during sie enablement.
Cc: stable@vger.kernel.org # 3.15+
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
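For illustration only (not part of the patch): a minimal sketch of the locking pattern the fix switches to, assuming split PMD locks are enabled as in the commit referenced above. pmd_lock(), pmd_clear() and spin_unlock() are existing kernel interfaces; the helper example_clear_pmd() is hypothetical.

#include <linux/mm.h>		/* pmd_lock() */
#include <linux/spinlock.h>

/*
 * Hypothetical helper, for illustration only: clear a PMD entry under the
 * correct lock (TLB flushing omitted for brevity).  With split PMD locks
 * enabled, pmd_lock() returns the per-page-table-page spinlock; without
 * them it falls back to mm->page_table_lock, so the same code is correct
 * either way.
 */
static void example_clear_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);	/* not spin_lock(&mm->page_table_lock) */
	pmd_clear(pmd);			/* the PMD entry update is serialized */
	spin_unlock(ptl);
}

Taking mm->page_table_lock here would no longer serialize against other PMD walkers once the split lock is active, which is exactly the race the patch below closes.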
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/mm/pgtable.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37b8241ec784..f90ad8592b36 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
 {
 	unsigned long next, *table, *new;
 	struct page *page;
+	spinlock_t *ptl;
 	pmd_t *pmd;
 
 	pmd = pmd_offset(pud, addr);
@@ -1296,7 +1297,7 @@ again:
 		if (!new)
 			return -ENOMEM;
 
-		spin_lock(&mm->page_table_lock);
+		ptl = pmd_lock(mm, pmd);
 		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
 			/* Nuke pmd entry pointing to the "short" page table */
 			pmdp_flush_lazy(mm, addr, pmd);
@@ -1310,7 +1311,7 @@ again:
 			page_table_free_rcu(tlb, table);
 			new = NULL;
 		}
-		spin_unlock(&mm->page_table_lock);
+		spin_unlock(ptl);
 		if (new) {
 			page_table_free_pgste(new);
 			goto again;