/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif
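/*
 * Illustrative usage sketch (not taken verbatim from a caller): svcpu_get()
 * hands back the per-CPU shadow vcpu with preemption disabled, and every
 * get must be paired with an svcpu_put() once the fields have been accessed:
 *
 *	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 *	... read or update svcpu fields ...
 *	svcpu_put(svcpu);
 */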
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED
static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte swap all data we apply on the PTE we're implicitly correct
	 * again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}
static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}
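/*
 * Illustrative locking pattern for the three helpers above, modelled on the
 * real-mode HPTE handlers (hptep and new_hpte_v are placeholder names): spin
 * until the lock bit is obtained, update the entry, then release it;
 * unlock_hpte() adds the release barrier so the new HPTE contents are
 * visible before the lock bit clears:
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	... modify the HPTE ...
 *	unlock_hpte(hptep, new_hpte_v);
 */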
static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int b_psize = MMU_PAGE_4K, a_psize = MMU_PAGE_4K;
	unsigned int penc;
	unsigned long rb = 0, va_low, sllp;
	unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (v & HPTE_V_LARGE) {
		for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[b_psize].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, b_psize);
			if (a_psize != -1)
				break;
		}
	}
	/*
	 * Ignore the top 14 bits of va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has the lower 23 bits ignored.
	 * For base page size 4K we need bits 14..65 (so we need to
	 * collect an extra 11 bits); for others we need 14..14+i.
	 */
	/* This covers 14..54 bits of va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/*  B field */
	/*
	 * AVA in v had cleared lower 23 bits. We need to derive
	 * that from pteg index
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * get the vpn bits from va_low using reverse of hashing.
	 * In v we have va with 23 bits dropped and then left shifted
	 * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need to
	 * right shift it with (SID_SHIFT - (23 - 7))
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	switch (b_psize) {
	case MMU_PAGE_4K:
		sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
		rb |= sllp << 5;	/*  AP field */
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
		break;
	default:
	{
		int aval_shift;
		/*
		 * remaining bits of AVA/LP fields
		 * Also contain the rr bits of LP
		 */
		rb |= (va_low << mmu_psize_defs[b_psize].shift) & 0x7ff000;
		/*
		 * Now clear not needed LP bits based on actual psize
		 */
		rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
		/*
		 * AVAL field 58..77 - base_page_shift bits of va
		 * we have space for 58..64 bits, Missing bits should
		 * be zero filled. +1 is to take care of L bit shift
		 */
		aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		penc = mmu_psize_defs[b_psize].penc[a_psize];
		rb |= penc << 12;	/* LP field */
		break;
	}
	}
	rb |= (v >> 54) & 0x300;		/* B field */
	return rb;
}
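/*
 * The value built here is the RB operand for the tlbie instruction used to
 * invalidate this HPTE; the callers are the HV real-mode MMU paths (e.g.
 * book3s_hv_rm_mmu.c).
 */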
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	int size, a_psize;
	/* Look at the 8 bit LP value */
	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	/* only handle 4k, 64k and 16M pages for now */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;
	else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {
			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_psize = __hpte_actual_psize(lp, size);
			if (a_psize != -1) {
				if (is_base_size)
					return 1ul << mmu_psize_defs[size].shift;
				return 1ul << mmu_psize_defs[a_psize].shift;
			}
		}
	}
	return 0;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
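/*
 * hpte_page_size() reports the actual page size of the mapping, while
 * hpte_base_page_size() reports the segment's base page size.  With MPSS,
 * for example, a 16MB page mapped in a 64K base-page segment gives 16MB
 * from the former and 64K from the latter.
 */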
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}
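/*
 * Example: for a 16MB page (psize = 0x1000000) with a 4K PAGE_SHIFT, the low
 * 24 bits of the real address are cleared and the result is shifted right by
 * 12, yielding the 4K frame number of the start of the large page.
 */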
static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}
static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}
static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
{
	unsigned int wimg = ptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!io_type)
		return wimg == HPTE_R_M;

	return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
}
/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * wait until _PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If pte is not present return None */
		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
						      pte_val(old_pte),
						      pte_val(new_pte))) {
			break;
		}
	}
	return new_pte;
}
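/*
 * The __cmpxchg_u64() only succeeds if the PTE is still the value that was
 * read at the top of the loop; if another CPU changed it meanwhile, the loop
 * rereads the PTE and retries, so no concurrent update is lost.
 */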
/* Return HPTE cache control bits corresponding to Linux pte bits */
static inline unsigned long hpte_cache_bits(unsigned long pte_val)
{
#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
	return pte_val & (HPTE_R_W | HPTE_R_I);
#else
	return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
		((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
#endif
}
static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}
static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}
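/*
 * The AMR holds one two-bit field per storage key, with key 0 in the two
 * most significant bits: skey == 0 reads (amr >> 62) & 3, and skey == 31
 * reads the two least significant bits.
 */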
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}
static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}
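/*
 * lock_rmap() spins with a plain test_bit() before retrying the atomic
 * test_and_set_bit_lock(), so a waiting CPU keeps the cache line shared
 * instead of bouncing it on every failed attempt.
 */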
static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
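/*
 * Example: with a 4K PAGE_SIZE and a 16MB backing page size, mask is 0xfff,
 * so the memslot's base_gfn and npages must both be multiples of 4096 for
 * the slot to be usable with 16MB guest pages.
 */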
/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}
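/*
 * So a 4K segment gets encoding 0, a 64K segment gets SLB_VSID_L |
 * SLB_VSID_LP_01, and a 16M segment gets SLB_VSID_L alone.
 */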
static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}
/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_notrace(kvm->memslots[0]);
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */