KVM: s390/mm: Fix page table locking vs. split pmd lock
author: Christian Borntraeger <borntraeger@de.ibm.com>
Fri, 25 Jul 2014 12:23:29 +0000 (14:23 +0200)
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 1 Aug 2014 09:16:23 +0000 (11:16 +0200)
commit ec66ad66a0de87866be347b5ecc83bd46427f53b ("s390/mm: enable
split page table lock for PMD level") activated the split pmd lock
for s390. It turns out that we missed one place: we also have to take
the pmd lock instead of the page table lock when we reallocate the
page tables (i.e. change entries in the PMD) during SIE enablement.
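For illustration, a minimal sketch of the locking pattern this fix moves
to (not part of the patch; the helper name is hypothetical): any code that
modifies a PMD entry must hold the lock returned by pmd_lock(), which is
the per-page split lock when split PMD locks are enabled and falls back to
mm->page_table_lock otherwise.

#include <linux/mm.h>

/*
 * Hypothetical example: clear one pmd entry under the proper lock.
 * pmd_lock() picks the split pmd lock if configured, otherwise it
 * degenerates to mm->page_table_lock, so callers no longer take
 * mm->page_table_lock directly.
 */
static void example_clear_pmd(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_lock(mm, pmd);
	if (!pmd_none(*pmd))
		pmd_clear(pmd);
	spin_unlock(ptl);
}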

Cc: stable@vger.kernel.org # 3.15+
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/mm/pgtable.c

index 37b8241..f90ad85 100644
@@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
 {
        unsigned long next, *table, *new;
        struct page *page;
+       spinlock_t *ptl;
        pmd_t *pmd;
 
        pmd = pmd_offset(pud, addr);
@@ -1296,7 +1297,7 @@ again:
                if (!new)
                        return -ENOMEM;
 
-               spin_lock(&mm->page_table_lock);
+               ptl = pmd_lock(mm, pmd);
                if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
                        /* Nuke pmd entry pointing to the "short" page table */
                        pmdp_flush_lazy(mm, addr, pmd);
@@ -1310,7 +1311,7 @@ again:
                        page_table_free_rcu(tlb, table);
                        new = NULL;
                }
-               spin_unlock(&mm->page_table_lock);
+               spin_unlock(ptl);
                if (new) {
                        page_table_free_pgste(new);
                        goto again;