X-Git-Url: http://git.cascardo.info/?p=cascardo%2Flinux.git;a=blobdiff_plain;f=mm%2Fmemory.c;h=649e7d440bd763fa647d47722abb6471cb75ab79;hp=c3b9097251c5769959b044e74ed1da6de35392e6;hb=60815cf2e05057db5b78e398d9734c493560b11e;hpb=60d7ef3fd34dca2eb7ef4c997ccf1ef76a9ba148

diff --git a/mm/memory.c b/mm/memory.c
index c3b9097251c5..649e7d440bd7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,9 +235,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-	if (!tlb->end)
-		return;
-
 	tlb_flush(tlb);
 	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -259,6 +256,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+	if (!tlb->end)
+		return;
+
 	tlb_flush_mmu_tlbonly(tlb);
 	tlb_flush_mmu_free(tlb);
 }
@@ -2996,6 +2996,12 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (set_page_dirty(fault_page))
 		dirtied = 1;
+	/*
+	 * Take a local copy of the address_space - page.mapping may be zeroed
+	 * by truncate after unlock_page(). The address_space itself remains
+	 * pinned by vma->vm_file's reference. We rely on unlock_page()'s
+	 * release semantics to prevent the compiler from undoing this copying.
+	 */
 	mapping = fault_page->mapping;
 	unlock_page(fault_page);
 	if ((dirtied || vma->vm_ops->page_mkwrite) && mapping) {
@@ -3189,7 +3195,16 @@ static int handle_pte_fault(struct mm_struct *mm,
 	pte_t entry;
 	spinlock_t *ptl;
 
-	entry = ACCESS_ONCE(*pte);
+	/*
+	 * some architectures can have larger ptes than wordsize,
+	 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
+	 * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
+	 * The code below just needs a consistent view for the ifs and
+	 * we later double check anyway with the ptl lock held. So here
+	 * a barrier will do.
+	 */
+	entry = *pte;
+	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
 			if (vma->vm_ops) {
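
Note (not part of the patch): the last hunk relies on a "racy snapshot, then revalidate under the lock" pattern - the lockless read of *pte may be torn when pte_t is wider than the machine word, so it is only used to choose a path, and barrier() keeps the compiler from re-reading *pte; the authoritative check happens later under the page-table lock. The userspace sketch below illustrates only that ordering; slot, slot_lock and handle_slot are made-up names and none of this is kernel code.

/* Userspace analogue of the snapshot-then-revalidate pattern above.
 * Build e.g. with: gcc -m32 -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static uint64_t slot;                      /* stand-in for a wide pte */
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_slot(void)
{
	uint64_t snap;

	/*
	 * Lockless snapshot: on a 32-bit target this 64-bit load may be
	 * torn, so 'snap' is only used to pick a path, never trusted.
	 */
	snap = slot;
	barrier();       /* keep the compiler from re-reading 'slot' below */

	if (snap == 0)
		return 0;        /* fast path: nothing to do (possibly stale view) */

	pthread_mutex_lock(&slot_lock);
	if (slot != snap) {              /* revalidate under the lock */
		pthread_mutex_unlock(&slot_lock);
		return -1;               /* lost a race; caller would retry */
	}
	slot = 0;                        /* the real update, done safely */
	pthread_mutex_unlock(&slot_lock);
	return 1;
}

int main(void)
{
	slot = 42;
	printf("handle_slot() -> %d\n", handle_slot());
	return 0;
}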