diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 5a24d3c..510bdfb 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -45,16 +45,12 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
         * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
         * we can use tlbiel as long as we mark all other physical
         * cores as potentially having stale TLB entries for this lpid.
-        * If we're not using MMU notifiers, we never take pages away
-        * from the guest, so we can use tlbiel if requested.
         * Otherwise, don't use tlbiel.
         */
        if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
                global = 0;
-       else if (kvm->arch.using_mmu_notifiers)
-               global = 1;
        else
-               global = !(flags & H_LOCAL);
+               global = 1;
 
        if (!global) {
                /* any other core might now have stale TLB entries... */
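With MMU notifiers now unconditional, the H_LOCAL hint can no longer be honoured, so anything beyond the single-vcore case becomes a global invalidation. Callers feed the result straight into do_tlbies(), as in this pattern from kvmppc_do_h_remove() later in the diff:

	rb = compute_tlbie_rb(v, pte1, pte_index);
	do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);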
@@ -154,10 +150,10 @@ static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva,
        return kvmppc_read_update_linux_pte(ptep, writing, hugepage_shift);
 }
 
-static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
+static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
 {
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
-       hpte[0] = hpte_v;
+       hpte[0] = cpu_to_be64(hpte_v);
 }
 
 long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
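unlock_hpte() now pairs with a big-endian-aware try_lock_hpte() (defined in arch/powerpc/include/asm/kvm_book3s_64.h). A minimal sketch of the lock/unlock discipline used throughout this file; the intermediate bit-twiddling is illustrative only:

	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;	/* work in CPU byte order */
	/* ... examine or update the entry ... */
	unlock_hpte(hpte, v);		/* release barrier + big-endian store */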
@@ -166,11 +162,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 {
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
-       unsigned long *hpte;
+       __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
-       unsigned long *physp, pte_size;
+       unsigned long pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
@@ -198,9 +194,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
-               /* PPC970 can't do emulated MMIO */
-               if (!cpu_has_feature(CPU_FTR_ARCH_206))
-                       return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
@@ -213,37 +206,20 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];
 
-       if (!kvm->arch.using_mmu_notifiers) {
-               physp = memslot->arch.slot_phys;
-               if (!physp)
-                       return H_PARAMETER;
-               physp += slot_fn;
-               if (realmode)
-                       physp = real_vmalloc_addr(physp);
-               pa = *physp;
-               if (!pa)
-                       return H_TOO_HARD;
-               is_io = pa & (HPTE_R_I | HPTE_R_W);
-               pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
-               pa &= PAGE_MASK;
+       /* Translate to host virtual address */
+       hva = __gfn_to_hva_memslot(memslot, gfn);
+
+       /* Look up the Linux PTE for the backing page */
+       pte_size = psize;
+       pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+       if (pte_present(pte) && !pte_numa(pte)) {
+               if (writing && !pte_write(pte))
+                       /* make the actual HPTE be read-only */
+                       ptel = hpte_make_readonly(ptel);
+               is_io = hpte_cache_bits(pte_val(pte));
+               pa = pte_pfn(pte) << PAGE_SHIFT;
+               pa |= hva & (pte_size - 1);
                pa |= gpa & ~PAGE_MASK;
-       } else {
-               /* Translate to host virtual address */
-               hva = __gfn_to_hva_memslot(memslot, gfn);
-
-               /* Look up the Linux PTE for the backing page */
-               pte_size = psize;
-               pte = lookup_linux_pte_and_update(pgdir, hva, writing,
-                                                 &pte_size);
-               if (pte_present(pte) && !pte_numa(pte)) {
-                       if (writing && !pte_write(pte))
-                               /* make the actual HPTE be read-only */
-                               ptel = hpte_make_readonly(ptel);
-                       is_io = hpte_cache_bits(pte_val(pte));
-                       pa = pte_pfn(pte) << PAGE_SHIFT;
-                       pa |= hva & (pte_size - 1);
-                       pa |= gpa & ~PAGE_MASK;
-               }
        }
 
        if (pte_size < psize)
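The hva-to-PTE lookup above is only safe against concurrent unmaps because kvmppc_do_h_enter() samples kvm->mmu_notifier_seq before translating and re-checks it under the rmap lock (see the mmu_notifier_retry() hunk below). A sketch of that pattern, assuming the sequence number is sampled earlier in this function, outside the hunks shown:

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	/* ... gfn -> hva -> Linux PTE translation as above ... */
	lock_rmap(rmap);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* an invalidation raced with us: enter a non-present HPTE */
		pteh |= HPTE_V_ABSENT;
		pteh &= ~HPTE_V_VALID;
	}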
@@ -275,9 +251,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
-                       if ((*hpte & HPTE_V_VALID) == 0 &&
+                       if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
@@ -292,11 +268,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
+                               u64 pte;
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
-                               if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
+                               pte = be64_to_cpu(*hpte);
+                               if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
-                               *hpte &= ~HPTE_V_HVLOCK;
+                               *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                hpte += 2;
                        }
                        if (i == 8)
@@ -304,14 +282,17 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
                pte_index += i;
        } else {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
+                       u64 pte;
+
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
-                       if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
-                               *hpte &= ~HPTE_V_HVLOCK;
+                       pte = be64_to_cpu(*hpte);
+                       if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
+                               *hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                return H_PTEG_FULL;
                        }
                }
@@ -332,8 +313,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
-               if (kvm->arch.using_mmu_notifiers &&
-                   mmu_notifier_retry(kvm, mmu_seq)) {
+               if (mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
@@ -347,11 +327,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
        }
 
-       hpte[1] = ptel;
+       hpte[1] = cpu_to_be64(ptel);
 
        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
-       hpte[0] = pteh;
+       hpte[0] = cpu_to_be64(pteh);
        asm volatile("ptesync" : : : "memory");
 
        *pte_idx_ret = pte_index;
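The store ordering here is what keeps lock-free readers safe: the payload dword goes first, eieio orders it ahead of the dword carrying the valid bit, and ptesync makes the entry visible to the hardware table walker. The same stores, annotated:

	hpte[1] = cpu_to_be64(ptel);	/* payload dword first */
	eieio();			/* order dword 1 before dword 0 */
	hpte[0] = cpu_to_be64(pteh);	/* publish: valid/absent bit last */
	asm volatile("ptesync" : : : "memory");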
@@ -390,61 +370,11 @@ static inline int try_lock_tlbie(unsigned int *lock)
        return old == 0;
 }
 
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
-                         long npages, int global, bool need_sync)
-{
-       long i;
-
-       if (global) {
-               while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
-                       cpu_relax();
-               if (need_sync)
-                       asm volatile("ptesync" : : : "memory");
-               for (i = 0; i < npages; ++i) {
-                       unsigned long rb = rbvalues[i];
-
-                       if (rb & 1)             /* large page */
-                               asm volatile("tlbie %0,1" : :
-                                            "r" (rb & 0x0000fffffffff000ul));
-                       else
-                               asm volatile("tlbie %0,0" : :
-                                            "r" (rb & 0x0000fffffffff000ul));
-               }
-               asm volatile("eieio; tlbsync; ptesync" : : : "memory");
-               kvm->arch.tlbie_lock = 0;
-       } else {
-               if (need_sync)
-                       asm volatile("ptesync" : : : "memory");
-               for (i = 0; i < npages; ++i) {
-                       unsigned long rb = rbvalues[i];
-
-                       if (rb & 1)             /* large page */
-                               asm volatile("tlbiel %0,1" : :
-                                            "r" (rb & 0x0000fffffffff000ul));
-                       else
-                               asm volatile("tlbiel %0,0" : :
-                                            "r" (rb & 0x0000fffffffff000ul));
-               }
-               asm volatile("ptesync" : : : "memory");
-       }
-}
-
 static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
 {
        long i;
 
-       if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-               /* PPC970 tlbie instruction is a bit different */
-               do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
-               return;
-       }
        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
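On ARCH_206 and later processors the large-page bit is encoded in RB rather than in the instruction, so one tlbie form covers both page sizes and the PPC970 variant above can go. A sketch of the surviving global path, assuming it matches the unchanged remainder of do_tlbies() below this hunk:

	for (i = 0; i < npages; ++i)
		asm volatile(PPC_TLBIE(%1, %0) : :
			     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	kvm->arch.tlbie_lock = 0;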
@@ -468,30 +398,35 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
 {
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;
+       u64 pte;
 
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
-       if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
-           ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
-           ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
-               hpte[0] &= ~HPTE_V_HVLOCK;
+       pte = be64_to_cpu(hpte[0]);
+       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
+           ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
+               hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }
 
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       v = hpte[0] & ~HPTE_V_HVLOCK;
+       v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
-               hpte[0] &= ~HPTE_V_VALID;
-               rb = compute_tlbie_rb(v, hpte[1], pte_index);
+               u64 pte1;
+
+               pte1 = be64_to_cpu(hpte[1]);
+               hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
+               rb = compute_tlbie_rb(v, pte1, pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /* Read PTE low word after tlbie to get final R/C values */
-               remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
+               remove_revmap_chain(kvm, pte_index, rev, v, pte1);
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
@@ -514,12 +449,14 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
-       unsigned long *hp, *hptes[4], tlbrb[4];
+       __be64 *hp, *hptes[4];
+       unsigned long tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];
+       u64 hp0;
 
        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
@@ -542,8 +479,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                                ret = H_PARAMETER;
                                break;
                        }
-                       hp = (unsigned long *)
-                               (kvm->arch.hpt_virt + (pte_index << 4));
+                       hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
@@ -552,23 +488,24 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                                        cpu_relax();
                        }
                        found = 0;
-                       if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
+                       hp0 = be64_to_cpu(hp[0]);
+                       if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
-                                       if (!(hp[0] & args[j + 1]))
+                                       if (!(hp0 & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
-                                       if ((hp[0] & ~0x7fUL) == args[j + 1])
+                                       if ((hp0 & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
-                               hp[0] &= ~HPTE_V_HVLOCK;
+                               hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }
@@ -577,7 +514,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        note_hpte_modification(kvm, rev);
 
-                       if (!(hp[0] & HPTE_V_VALID)) {
+                       if (!(hp0 & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
@@ -585,8 +522,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                                continue;
                        }
 
-                       hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
-                       tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+                       /* leave it locked */
+                       hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
+                       tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
+                               be64_to_cpu(hp[1]), pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
@@ -605,7 +544,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
-                       remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
+                       remove_revmap_chain(kvm, pte_index, rev,
+                               be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        hp[0] = 0;
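Note that the final hp[0] = 0 both invalidates the entry and drops HPTE_V_HVLOCK in a single store, so remove_revmap_chain() must read both dwords while the lock is still held. The per-entry teardown order, annotated:

	remove_revmap_chain(kvm, pte_index, rev,
			    be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
	rcbits = rev->guest_rpte & (HPTE_R_R | HPTE_R_C);
	args[j] |= rcbits << (56 - 5);	/* hand R/C back to the guest */
	hp[0] = 0;			/* invalidate + unlock in one store */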
@@ -620,23 +560,25 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long va)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long *hpte;
+       __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;
+       u64 pte;
 
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
 
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
-       if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
-           ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
-               hpte[0] &= ~HPTE_V_HVLOCK;
+       pte = be64_to_cpu(hpte[0]);
+       if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
+           ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
+               hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                return H_NOT_FOUND;
        }
 
-       v = hpte[0];
+       v = pte;
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
@@ -650,40 +592,29 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }
-       r = (hpte[1] & ~mask) | bits;
 
        /* Update HPTE */
        if (v & HPTE_V_VALID) {
-               rb = compute_tlbie_rb(v, r, pte_index);
-               hpte[0] = v & ~HPTE_V_VALID;
-               do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
-                * If the host has this page as readonly but the guest
-                * wants to make it read/write, reduce the permissions.
-                * Checking the host permissions involves finding the
-                * memslot and then the Linux PTE for the page.
+                * If the page is valid, don't let it transition from
+                * readonly to writable.  If it should be writable, we'll
+                * take a trap and let the page fault code sort it out.
                 */
-               if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
-                       unsigned long psize, gfn, hva;
-                       struct kvm_memory_slot *memslot;
-                       pgd_t *pgdir = vcpu->arch.pgdir;
-                       pte_t pte;
-
-                       psize = hpte_page_size(v, r);
-                       gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
-                       memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
-                       if (memslot) {
-                               hva = __gfn_to_hva_memslot(memslot, gfn);
-                               pte = lookup_linux_pte_and_update(pgdir, hva,
-                                                                 1, &psize);
-                               if (pte_present(pte) && !pte_write(pte))
-                                       r = hpte_make_readonly(r);
-                       }
+               pte = be64_to_cpu(hpte[1]);
+               r = (pte & ~mask) | bits;
+               if (hpte_is_writable(r) && !hpte_is_writable(pte))
+                       r = hpte_make_readonly(r);
+               /* If the PTE is changing, invalidate it first */
+               if (r != pte) {
+                       rb = compute_tlbie_rb(v, r, pte_index);
+                       hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+                                             HPTE_V_ABSENT);
+                       do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
+                                 true);
+                       hpte[1] = cpu_to_be64(r);
                }
        }
-       hpte[1] = r;
-       eieio();
-       hpte[0] = v & ~HPTE_V_HVLOCK;
+       unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
 }
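The rewritten H_PROTECT never upgrades a valid HPTE from read-only to writable in place: the new bits are demoted and the upgrade is deferred to a protection fault, where the full page-fault path can consult the Linux PTE safely. The demotion check in isolation:

	if (hpte_is_writable(r) && !hpte_is_writable(pte))
		r = hpte_make_readonly(r);	/* let the fault path upgrade */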
@@ -692,7 +623,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long *hpte, v, r;
+       __be64 *hpte;
+       unsigned long v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;
 
@@ -704,9 +636,9 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
-               v = hpte[0] & ~HPTE_V_HVLOCK;
-               r = hpte[1];
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+               r = be64_to_cpu(hpte[1]);
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
@@ -721,25 +653,27 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
        return H_SUCCESS;
 }
 
-void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
 {
        unsigned long rb;
 
-       hptep[0] &= ~HPTE_V_VALID;
-       rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
+       hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
+       rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+                             pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
 }
 EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
 
-void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
+void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                           unsigned long pte_index)
 {
        unsigned long rb;
        unsigned char rbyte;
 
-       rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
-       rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
+       rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
+                             pte_index);
+       rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        do_tlbies(kvm, &rb, 1, 1, false);
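The single-byte store works because the HPT is big-endian by architecture: HPTE_R_R is bit 8 of the second dword, which lands in byte 14 of the 16-byte entry regardless of host endianness, so the reference bit can be cleared without a read-modify-write of the whole dword. A sketch of the invariant; the BUILD_BUG_ON is an assumption check added for illustration:

	BUILD_BUG_ON(HPTE_R_R != 0x100ul);	/* bit 8 => byte 14 in BE layout */
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	*((char *)hptep + 14) = rbyte;		/* one-byte store clears R */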
@@ -765,7 +699,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long mask, val;
        unsigned long v, r;
 
@@ -797,11 +731,11 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
        val |= avpn;
 
        for (;;) {
-               hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
+               hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
 
                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
-                       v = hpte[i] & ~HPTE_V_HVLOCK;
+                       v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
 
                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
@@ -810,8 +744,8 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
-                       v = hpte[i] & ~HPTE_V_HVLOCK;
-                       r = hpte[i+1];
+                       v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
+                       r = be64_to_cpu(hpte[i+1]);
 
                        /*
                         * Check the HPTE again, including base page size
@@ -822,7 +756,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                                return (hash << 3) + (i >> 1);
 
                        /* Unlock and move on */
-                       hpte[i] = v;
+                       hpte[i] = cpu_to_be64(v);
                }
 
                if (val & HPTE_V_SECONDARY)
@@ -851,7 +785,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
-       unsigned long *hpte;
+       __be64 *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;
@@ -867,9 +801,9 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
-       hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
-       v = hpte[0] & ~HPTE_V_HVLOCK;
-       r = hpte[1];
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+       v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
+       r = be64_to_cpu(hpte[1]);
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;