ARM: KVM: introduce kvm_p*d_addr_end
author	Marc Zyngier <marc.zyngier@arm.com>
Tue, 18 Feb 2014 14:29:03 +0000 (14:29 +0000)
committer	Marc Zyngier <marc.zyngier@arm.com>
Mon, 3 Mar 2014 01:15:22 +0000 (01:15 +0000)
The use of p*d_addr_end with stage-2 translation is slightly dodgy,
as the IPA is 40 bits wide, while the p*d_addr_end helpers all take
an unsigned long (arm64 is fine with that, as unsigned long is 64 bits
there).

The fix is to introduce 64-bit clean versions of the same helpers,
and use them in the stage-2 page table code.
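
A minimal standalone sketch (userspace C, not kernel code) of the
problem, assuming an arbitrary example PGDIR_SIZE rather than the real
stage-2 geometry: the first helper keeps its boundary in a 32-bit
variable, as the generic p*d_addr_end macros effectively do on 32-bit
ARM, while the second mirrors the 64-bit clean kvm_pgd_addr_end()
added by this patch.

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SIZE	(1ULL << 30)		/* example only: 1GB per pgd entry */
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Boundary held in 32 bits, as with unsigned long on 32-bit ARM */
static uint64_t pgd_addr_end_generic(uint64_t addr, uint64_t end)
{
	uint32_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;	/* truncates */

	return (boundary - 1 < end - 1) ? boundary : end;
}

/* 64-bit clean variant, mirroring kvm_pgd_addr_end() from the patch */
static uint64_t kvm_pgd_addr_end_sketch(uint64_t addr, uint64_t end)
{
	uint64_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	uint64_t addr = 0xc0000000ULL;		/* just below 4GB */
	uint64_t end  = 0x140000000ULL;		/* 40-bit IPA range end, above 4GB */

	/* wraps to 0: the walk would jump backwards instead of crossing 4GB */
	printf("32-bit boundary: 0x%llx\n",
	       (unsigned long long)pgd_addr_end_generic(addr, end));
	/* returns the real 4GB boundary */
	printf("64-bit boundary: 0x%llx\n",
	       (unsigned long long)kvm_pgd_addr_end_sketch(addr, end));
	return 0;
}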

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h

index 6d0f3d3..891afe7 100644
@@ -114,6 +114,19 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
        pmd_val(*pmd) |= L_PMD_S2_RDWR;
 }
 
+/* Open coded p*d_addr_end that can deal with 64bit addresses */
+#define kvm_pgd_addr_end(addr, end)                                    \
+({     u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
+#define kvm_pud_addr_end(addr,end)             (end)
+
+#define kvm_pmd_addr_end(addr, end)                                    \
+({     u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
+       (__boundary - 1 < (end) - 1)? __boundary: (end);                \
+})
+
 struct kvm;
 
 static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
index fc71a8d..c1c08b2 100644
@@ -145,7 +145,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                pgd = pgdp + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
-                       addr = pud_addr_end(addr, end);
+                       addr = kvm_pud_addr_end(addr, end);
                        continue;
                }
 
@@ -155,13 +155,13 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                         * move on.
                         */
                        clear_pud_entry(kvm, pud, addr);
-                       addr = pud_addr_end(addr, end);
+                       addr = kvm_pud_addr_end(addr, end);
                        continue;
                }
 
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
-                       addr = pmd_addr_end(addr, end);
+                       addr = kvm_pmd_addr_end(addr, end);
                        continue;
                }
 
@@ -176,10 +176,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                 */
                if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
-                       next = pmd_addr_end(addr, end);
+                       next = kvm_pmd_addr_end(addr, end);
                        if (page_empty(pmd) && !page_empty(pud)) {
                                clear_pud_entry(kvm, pud, addr);
-                               next = pud_addr_end(addr, end);
+                               next = kvm_pud_addr_end(addr, end);
                        }
                }
 
index 6eaf69b..00c0cc8 100644
@@ -121,6 +121,10 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
        pmd_val(*pmd) |= PMD_S2_RDWR;
 }
 
+#define kvm_pgd_addr_end(addr, end)    pgd_addr_end(addr, end)
+#define kvm_pud_addr_end(addr, end)    pud_addr_end(addr, end)
+#define kvm_pmd_addr_end(addr, end)    pmd_addr_end(addr, end)
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)   __flush_dcache_area((a), (l))