initramfs: Escape colons in depfile
[cascardo/linux.git] / arch / powerpc / kvm / book3s_hv.c
1 /*
2  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
3  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
4  *
5  * Authors:
6  *    Paul Mackerras <paulus@au1.ibm.com>
7  *    Alexander Graf <agraf@suse.de>
8  *    Kevin Wolf <mail@kevin-wolf.de>
9  *
10  * Description: KVM functions specific to running on Book 3S
11  * processors in hypervisor mode (specifically POWER7 and later).
12  *
13  * This file is derived from arch/powerpc/kvm/book3s.c,
14  * by Alexander Graf <agraf@suse.de>.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License, version 2, as
18  * published by the Free Software Foundation.
19  */
20
21 #include <linux/kvm_host.h>
22 #include <linux/err.h>
23 #include <linux/slab.h>
24 #include <linux/preempt.h>
25 #include <linux/sched.h>
26 #include <linux/delay.h>
27 #include <linux/export.h>
28 #include <linux/fs.h>
29 #include <linux/anon_inodes.h>
30 #include <linux/cpu.h>
31 #include <linux/cpumask.h>
32 #include <linux/spinlock.h>
33 #include <linux/page-flags.h>
34 #include <linux/srcu.h>
35 #include <linux/miscdevice.h>
36 #include <linux/debugfs.h>
37
38 #include <asm/reg.h>
39 #include <asm/cputable.h>
40 #include <asm/cacheflush.h>
41 #include <asm/tlbflush.h>
42 #include <asm/uaccess.h>
43 #include <asm/io.h>
44 #include <asm/kvm_ppc.h>
45 #include <asm/kvm_book3s.h>
46 #include <asm/mmu_context.h>
47 #include <asm/lppaca.h>
48 #include <asm/processor.h>
49 #include <asm/cputhreads.h>
50 #include <asm/page.h>
51 #include <asm/hvcall.h>
52 #include <asm/switch_to.h>
53 #include <asm/smp.h>
54 #include <asm/dbell.h>
55 #include <asm/hmi.h>
56 #include <linux/gfp.h>
57 #include <linux/vmalloc.h>
58 #include <linux/highmem.h>
59 #include <linux/hugetlb.h>
60 #include <linux/module.h>
61
62 #include "book3s.h"
63
64 #define CREATE_TRACE_POINTS
65 #include "trace_hv.h"
66
67 /* #define EXIT_DEBUG */
68 /* #define EXIT_DEBUG_SIMPLE */
69 /* #define EXIT_DEBUG_INT */
70
71 /* Used to indicate that a guest page fault needs to be handled */
72 #define RESUME_PAGE_FAULT       (RESUME_GUEST | RESUME_FLAG_ARCH1)
73
74 /* Used as a "null" value for timebase values */
75 #define TB_NIL  (~(u64)0)
76
77 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
78
79 static int dynamic_mt_modes = 6;
80 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
81 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
82 static int target_smt_mode;
83 module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
84 MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
85
86 #ifdef CONFIG_KVM_XICS
87 static struct kernel_param_ops module_param_ops = {
88         .set = param_set_int,
89         .get = param_get_int,
90 };
91
92 module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
93                                                         S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
95 #endif
96
97 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
98 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
99
100 static bool kvmppc_ipi_thread(int cpu)
101 {
102         /* On POWER8 for IPIs to threads in the same core, use msgsnd */
103         if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
104                 preempt_disable();
105                 if (cpu_first_thread_sibling(cpu) ==
106                     cpu_first_thread_sibling(smp_processor_id())) {
107                         unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
108                         msg |= cpu_thread_in_core(cpu);
109                         smp_mb();
110                         __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
111                         preempt_enable();
112                         return true;
113                 }
114                 preempt_enable();
115         }
116
117 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
118         if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
119                 xics_wake_cpu(cpu);
120                 return true;
121         }
122 #endif
123
124         return false;
125 }
126
127 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
128 {
129         int cpu;
130         struct swait_queue_head *wqp;
131
132         wqp = kvm_arch_vcpu_wq(vcpu);
133         if (swait_active(wqp)) {
134                 swake_up(wqp);
135                 ++vcpu->stat.halt_wakeup;
136         }
137
138         if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
139                 return;
140
141         /* CPU points to the first thread of the core */
142         cpu = vcpu->cpu;
143         if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
144                 smp_send_reschedule(cpu);
145 }
146
147 /*
148  * We use the vcpu_load/put functions to measure stolen time.
149  * Stolen time is counted as time when either the vcpu is able to
150  * run as part of a virtual core, but the task running the vcore
151  * is preempted or sleeping, or when the vcpu needs something done
152  * in the kernel by the task running the vcpu, but that task is
153  * preempted or sleeping.  Those two things have to be counted
154  * separately, since one of the vcpu tasks will take on the job
155  * of running the core, and the other vcpu tasks in the vcore will
156  * sleep waiting for it to do that, but that sleep shouldn't count
157  * as stolen time.
158  *
159  * Hence we accumulate stolen time when the vcpu can run as part of
160  * a vcore using vc->stolen_tb, and the stolen time when the vcpu
161  * needs its task to do other things in the kernel (for example,
162  * service a page fault) in busy_stolen.  We don't accumulate
163  * stolen time for a vcore when it is inactive, or for a vcpu
164  * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
165  * a misnomer; it means that the vcpu task is not executing in
166  * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
167  * the kernel.  We don't have any way of dividing up that time
168  * between time that the vcpu is genuinely stopped, time that
169  * the task is actively working on behalf of the vcpu, and time
170  * that the task is preempted, so we don't count any of it as
171  * stolen.
172  *
173  * Updates to busy_stolen are protected by arch.tbacct_lock;
174  * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
175  * lock.  The stolen times are measured in units of timebase ticks.
176  * (Note that the != TB_NIL checks below are purely defensive;
177  * they should never fail.)
178  */
179
180 static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
181 {
182         unsigned long flags;
183
184         spin_lock_irqsave(&vc->stoltb_lock, flags);
185         vc->preempt_tb = mftb();
186         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
187 }
188
189 static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
190 {
191         unsigned long flags;
192
193         spin_lock_irqsave(&vc->stoltb_lock, flags);
194         if (vc->preempt_tb != TB_NIL) {
195                 vc->stolen_tb += mftb() - vc->preempt_tb;
196                 vc->preempt_tb = TB_NIL;
197         }
198         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
199 }
200
201 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
202 {
203         struct kvmppc_vcore *vc = vcpu->arch.vcore;
204         unsigned long flags;
205
206         /*
207          * We can test vc->runner without taking the vcore lock,
208          * because only this task ever sets vc->runner to this
209          * vcpu, and once it is set to this vcpu, only this task
210          * ever sets it to NULL.
211          */
212         if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
213                 kvmppc_core_end_stolen(vc);
214
215         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
216         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
217             vcpu->arch.busy_preempt != TB_NIL) {
218                 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
219                 vcpu->arch.busy_preempt = TB_NIL;
220         }
221         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
222 }
223
224 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
225 {
226         struct kvmppc_vcore *vc = vcpu->arch.vcore;
227         unsigned long flags;
228
229         if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
230                 kvmppc_core_start_stolen(vc);
231
232         spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
233         if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
234                 vcpu->arch.busy_preempt = mftb();
235         spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
236 }
237
238 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
239 {
240         /*
241          * Check for illegal transactional state bit combination
242          * and if we find it, force the TS field to a safe state.
243          */
244         if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
245                 msr &= ~MSR_TS_MASK;
246         vcpu->arch.shregs.msr = msr;
247         kvmppc_end_cede(vcpu);
248 }
249
250 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
251 {
252         vcpu->arch.pvr = pvr;
253 }
254
255 static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
256 {
257         unsigned long pcr = 0;
258         struct kvmppc_vcore *vc = vcpu->arch.vcore;
259
260         if (arch_compat) {
261                 switch (arch_compat) {
262                 case PVR_ARCH_205:
263                         /*
264                          * If an arch bit is set in PCR, all the defined
265                          * higher-order arch bits also have to be set.
266                          */
267                         pcr = PCR_ARCH_206 | PCR_ARCH_205;
268                         break;
269                 case PVR_ARCH_206:
270                 case PVR_ARCH_206p:
271                         pcr = PCR_ARCH_206;
272                         break;
273                 case PVR_ARCH_207:
274                         break;
275                 default:
276                         return -EINVAL;
277                 }
278
279                 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
280                         /* POWER7 can't emulate POWER8 */
281                         if (!(pcr & PCR_ARCH_206))
282                                 return -EINVAL;
283                         pcr &= ~PCR_ARCH_206;
284                 }
285         }
286
287         spin_lock(&vc->lock);
288         vc->arch_compat = arch_compat;
289         vc->pcr = pcr;
290         spin_unlock(&vc->lock);
291
292         return 0;
293 }
294
295 static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
296 {
297         int r;
298
299         pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
300         pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
301                vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
302         for (r = 0; r < 16; ++r)
303                 pr_err("r%2d = %.16lx  r%d = %.16lx\n",
304                        r, kvmppc_get_gpr(vcpu, r),
305                        r+16, kvmppc_get_gpr(vcpu, r+16));
306         pr_err("ctr = %.16lx  lr  = %.16lx\n",
307                vcpu->arch.ctr, vcpu->arch.lr);
308         pr_err("srr0 = %.16llx srr1 = %.16llx\n",
309                vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
310         pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
311                vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
312         pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
313                vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
314         pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
315                vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
316         pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
317         pr_err("fault dar = %.16lx dsisr = %.8x\n",
318                vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
319         pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
320         for (r = 0; r < vcpu->arch.slb_max; ++r)
321                 pr_err("  ESID = %.16llx VSID = %.16llx\n",
322                        vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
323         pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
324                vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
325                vcpu->arch.last_inst);
326 }
327
328 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
329 {
330         struct kvm_vcpu *ret;
331
332         mutex_lock(&kvm->lock);
333         ret = kvm_get_vcpu_by_id(kvm, id);
334         mutex_unlock(&kvm->lock);
335         return ret;
336 }
337
338 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
339 {
340         vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
341         vpa->yield_count = cpu_to_be32(1);
342 }
343
344 static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
345                    unsigned long addr, unsigned long len)
346 {
347         /* check address is cacheline aligned */
348         if (addr & (L1_CACHE_BYTES - 1))
349                 return -EINVAL;
350         spin_lock(&vcpu->arch.vpa_update_lock);
351         if (v->next_gpa != addr || v->len != len) {
352                 v->next_gpa = addr;
353                 v->len = addr ? len : 0;
354                 v->update_pending = 1;
355         }
356         spin_unlock(&vcpu->arch.vpa_update_lock);
357         return 0;
358 }
359
360 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
361 struct reg_vpa {
362         u32 dummy;
363         union {
364                 __be16 hword;
365                 __be32 word;
366         } length;
367 };
368
369 static int vpa_is_registered(struct kvmppc_vpa *vpap)
370 {
371         if (vpap->update_pending)
372                 return vpap->next_gpa != 0;
373         return vpap->pinned_addr != NULL;
374 }
375
376 static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
377                                        unsigned long flags,
378                                        unsigned long vcpuid, unsigned long vpa)
379 {
380         struct kvm *kvm = vcpu->kvm;
381         unsigned long len, nb;
382         void *va;
383         struct kvm_vcpu *tvcpu;
384         int err;
385         int subfunc;
386         struct kvmppc_vpa *vpap;
387
388         tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
389         if (!tvcpu)
390                 return H_PARAMETER;
391
392         subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
393         if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
394             subfunc == H_VPA_REG_SLB) {
395                 /* Registering new area - address must be cache-line aligned */
396                 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
397                         return H_PARAMETER;
398
399                 /* convert logical addr to kernel addr and read length */
400                 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
401                 if (va == NULL)
402                         return H_PARAMETER;
403                 if (subfunc == H_VPA_REG_VPA)
404                         len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
405                 else
406                         len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
407                 kvmppc_unpin_guest_page(kvm, va, vpa, false);
408
409                 /* Check length */
410                 if (len > nb || len < sizeof(struct reg_vpa))
411                         return H_PARAMETER;
412         } else {
413                 vpa = 0;
414                 len = 0;
415         }
416
417         err = H_PARAMETER;
418         vpap = NULL;
419         spin_lock(&tvcpu->arch.vpa_update_lock);
420
421         switch (subfunc) {
422         case H_VPA_REG_VPA:             /* register VPA */
423                 if (len < sizeof(struct lppaca))
424                         break;
425                 vpap = &tvcpu->arch.vpa;
426                 err = 0;
427                 break;
428
429         case H_VPA_REG_DTL:             /* register DTL */
430                 if (len < sizeof(struct dtl_entry))
431                         break;
432                 len -= len % sizeof(struct dtl_entry);
433
434                 /* Check that they have previously registered a VPA */
435                 err = H_RESOURCE;
436                 if (!vpa_is_registered(&tvcpu->arch.vpa))
437                         break;
438
439                 vpap = &tvcpu->arch.dtl;
440                 err = 0;
441                 break;
442
443         case H_VPA_REG_SLB:             /* register SLB shadow buffer */
444                 /* Check that they have previously registered a VPA */
445                 err = H_RESOURCE;
446                 if (!vpa_is_registered(&tvcpu->arch.vpa))
447                         break;
448
449                 vpap = &tvcpu->arch.slb_shadow;
450                 err = 0;
451                 break;
452
453         case H_VPA_DEREG_VPA:           /* deregister VPA */
454                 /* Check they don't still have a DTL or SLB buf registered */
455                 err = H_RESOURCE;
456                 if (vpa_is_registered(&tvcpu->arch.dtl) ||
457                     vpa_is_registered(&tvcpu->arch.slb_shadow))
458                         break;
459
460                 vpap = &tvcpu->arch.vpa;
461                 err = 0;
462                 break;
463
464         case H_VPA_DEREG_DTL:           /* deregister DTL */
465                 vpap = &tvcpu->arch.dtl;
466                 err = 0;
467                 break;
468
469         case H_VPA_DEREG_SLB:           /* deregister SLB shadow buffer */
470                 vpap = &tvcpu->arch.slb_shadow;
471                 err = 0;
472                 break;
473         }
474
475         if (vpap) {
476                 vpap->next_gpa = vpa;
477                 vpap->len = len;
478                 vpap->update_pending = 1;
479         }
480
481         spin_unlock(&tvcpu->arch.vpa_update_lock);
482
483         return err;
484 }
485
486 static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
487 {
488         struct kvm *kvm = vcpu->kvm;
489         void *va;
490         unsigned long nb;
491         unsigned long gpa;
492
493         /*
494          * We need to pin the page pointed to by vpap->next_gpa,
495          * but we can't call kvmppc_pin_guest_page under the lock
496          * as it does get_user_pages() and down_read().  So we
497          * have to drop the lock, pin the page, then get the lock
498          * again and check that a new area didn't get registered
499          * in the meantime.
500          */
501         for (;;) {
502                 gpa = vpap->next_gpa;
503                 spin_unlock(&vcpu->arch.vpa_update_lock);
504                 va = NULL;
505                 nb = 0;
506                 if (gpa)
507                         va = kvmppc_pin_guest_page(kvm, gpa, &nb);
508                 spin_lock(&vcpu->arch.vpa_update_lock);
509                 if (gpa == vpap->next_gpa)
510                         break;
511                 /* sigh... unpin that one and try again */
512                 if (va)
513                         kvmppc_unpin_guest_page(kvm, va, gpa, false);
514         }
515
516         vpap->update_pending = 0;
517         if (va && nb < vpap->len) {
518                 /*
519                  * If it's now too short, it must be that userspace
520                  * has changed the mappings underlying guest memory,
521                  * so unregister the region.
522                  */
523                 kvmppc_unpin_guest_page(kvm, va, gpa, false);
524                 va = NULL;
525         }
526         if (vpap->pinned_addr)
527                 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
528                                         vpap->dirty);
529         vpap->gpa = gpa;
530         vpap->pinned_addr = va;
531         vpap->dirty = false;
532         if (va)
533                 vpap->pinned_end = va + vpap->len;
534 }
535
536 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
537 {
538         if (!(vcpu->arch.vpa.update_pending ||
539               vcpu->arch.slb_shadow.update_pending ||
540               vcpu->arch.dtl.update_pending))
541                 return;
542
543         spin_lock(&vcpu->arch.vpa_update_lock);
544         if (vcpu->arch.vpa.update_pending) {
545                 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
546                 if (vcpu->arch.vpa.pinned_addr)
547                         init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
548         }
549         if (vcpu->arch.dtl.update_pending) {
550                 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
551                 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
552                 vcpu->arch.dtl_index = 0;
553         }
554         if (vcpu->arch.slb_shadow.update_pending)
555                 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
556         spin_unlock(&vcpu->arch.vpa_update_lock);
557 }
558
559 /*
560  * Return the accumulated stolen time for the vcore up until `now'.
561  * The caller should hold the vcore lock.
562  */
563 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
564 {
565         u64 p;
566         unsigned long flags;
567
568         spin_lock_irqsave(&vc->stoltb_lock, flags);
569         p = vc->stolen_tb;
570         if (vc->vcore_state != VCORE_INACTIVE &&
571             vc->preempt_tb != TB_NIL)
572                 p += now - vc->preempt_tb;
573         spin_unlock_irqrestore(&vc->stoltb_lock, flags);
574         return p;
575 }
576
577 static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
578                                     struct kvmppc_vcore *vc)
579 {
580         struct dtl_entry *dt;
581         struct lppaca *vpa;
582         unsigned long stolen;
583         unsigned long core_stolen;
584         u64 now;
585
586         dt = vcpu->arch.dtl_ptr;
587         vpa = vcpu->arch.vpa.pinned_addr;
588         now = mftb();
589         core_stolen = vcore_stolen_time(vc, now);
590         stolen = core_stolen - vcpu->arch.stolen_logged;
591         vcpu->arch.stolen_logged = core_stolen;
592         spin_lock_irq(&vcpu->arch.tbacct_lock);
593         stolen += vcpu->arch.busy_stolen;
594         vcpu->arch.busy_stolen = 0;
595         spin_unlock_irq(&vcpu->arch.tbacct_lock);
596         if (!dt || !vpa)
597                 return;
598         memset(dt, 0, sizeof(struct dtl_entry));
599         dt->dispatch_reason = 7;
600         dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
601         dt->timebase = cpu_to_be64(now + vc->tb_offset);
602         dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
603         dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
604         dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
605         ++dt;
606         if (dt == vcpu->arch.dtl.pinned_end)
607                 dt = vcpu->arch.dtl.pinned_addr;
608         vcpu->arch.dtl_ptr = dt;
609         /* order writing *dt vs. writing vpa->dtl_idx */
610         smp_wmb();
611         vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
612         vcpu->arch.dtl.dirty = true;
613 }
614
615 static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
616 {
617         if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
618                 return true;
619         if ((!vcpu->arch.vcore->arch_compat) &&
620             cpu_has_feature(CPU_FTR_ARCH_207S))
621                 return true;
622         return false;
623 }
624
625 static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
626                              unsigned long resource, unsigned long value1,
627                              unsigned long value2)
628 {
629         switch (resource) {
630         case H_SET_MODE_RESOURCE_SET_CIABR:
631                 if (!kvmppc_power8_compatible(vcpu))
632                         return H_P2;
633                 if (value2)
634                         return H_P4;
635                 if (mflags)
636                         return H_UNSUPPORTED_FLAG_START;
637                 /* Guests can't breakpoint the hypervisor */
638                 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
639                         return H_P3;
640                 vcpu->arch.ciabr  = value1;
641                 return H_SUCCESS;
642         case H_SET_MODE_RESOURCE_SET_DAWR:
643                 if (!kvmppc_power8_compatible(vcpu))
644                         return H_P2;
645                 if (mflags)
646                         return H_UNSUPPORTED_FLAG_START;
647                 if (value2 & DABRX_HYP)
648                         return H_P4;
649                 vcpu->arch.dawr  = value1;
650                 vcpu->arch.dawrx = value2;
651                 return H_SUCCESS;
652         default:
653                 return H_TOO_HARD;
654         }
655 }
656
657 static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
658 {
659         struct kvmppc_vcore *vcore = target->arch.vcore;
660
661         /*
662          * We expect to have been called by the real mode handler
663          * (kvmppc_rm_h_confer()) which would have directly returned
664          * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
665          * have useful work to do and should not confer) so we don't
666          * recheck that here.
667          */
668
669         spin_lock(&vcore->lock);
670         if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
671             vcore->vcore_state != VCORE_INACTIVE &&
672             vcore->runner)
673                 target = vcore->runner;
674         spin_unlock(&vcore->lock);
675
676         return kvm_vcpu_yield_to(target);
677 }
678
679 static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
680 {
681         int yield_count = 0;
682         struct lppaca *lppaca;
683
684         spin_lock(&vcpu->arch.vpa_update_lock);
685         lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
686         if (lppaca)
687                 yield_count = be32_to_cpu(lppaca->yield_count);
688         spin_unlock(&vcpu->arch.vpa_update_lock);
689         return yield_count;
690 }
691
692 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
693 {
694         unsigned long req = kvmppc_get_gpr(vcpu, 3);
695         unsigned long target, ret = H_SUCCESS;
696         int yield_count;
697         struct kvm_vcpu *tvcpu;
698         int idx, rc;
699
700         if (req <= MAX_HCALL_OPCODE &&
701             !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
702                 return RESUME_HOST;
703
704         switch (req) {
705         case H_CEDE:
706                 break;
707         case H_PROD:
708                 target = kvmppc_get_gpr(vcpu, 4);
709                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
710                 if (!tvcpu) {
711                         ret = H_PARAMETER;
712                         break;
713                 }
714                 tvcpu->arch.prodded = 1;
715                 smp_mb();
716                 if (vcpu->arch.ceded) {
717                         if (swait_active(&vcpu->wq)) {
718                                 swake_up(&vcpu->wq);
719                                 vcpu->stat.halt_wakeup++;
720                         }
721                 }
722                 break;
723         case H_CONFER:
724                 target = kvmppc_get_gpr(vcpu, 4);
725                 if (target == -1)
726                         break;
727                 tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
728                 if (!tvcpu) {
729                         ret = H_PARAMETER;
730                         break;
731                 }
732                 yield_count = kvmppc_get_gpr(vcpu, 5);
733                 if (kvmppc_get_yield_count(tvcpu) != yield_count)
734                         break;
735                 kvm_arch_vcpu_yield_to(tvcpu);
736                 break;
737         case H_REGISTER_VPA:
738                 ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
739                                         kvmppc_get_gpr(vcpu, 5),
740                                         kvmppc_get_gpr(vcpu, 6));
741                 break;
742         case H_RTAS:
743                 if (list_empty(&vcpu->kvm->arch.rtas_tokens))
744                         return RESUME_HOST;
745
746                 idx = srcu_read_lock(&vcpu->kvm->srcu);
747                 rc = kvmppc_rtas_hcall(vcpu);
748                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
749
750                 if (rc == -ENOENT)
751                         return RESUME_HOST;
752                 else if (rc == 0)
753                         break;
754
755                 /* Send the error out to userspace via KVM_RUN */
756                 return rc;
757         case H_LOGICAL_CI_LOAD:
758                 ret = kvmppc_h_logical_ci_load(vcpu);
759                 if (ret == H_TOO_HARD)
760                         return RESUME_HOST;
761                 break;
762         case H_LOGICAL_CI_STORE:
763                 ret = kvmppc_h_logical_ci_store(vcpu);
764                 if (ret == H_TOO_HARD)
765                         return RESUME_HOST;
766                 break;
767         case H_SET_MODE:
768                 ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
769                                         kvmppc_get_gpr(vcpu, 5),
770                                         kvmppc_get_gpr(vcpu, 6),
771                                         kvmppc_get_gpr(vcpu, 7));
772                 if (ret == H_TOO_HARD)
773                         return RESUME_HOST;
774                 break;
775         case H_XIRR:
776         case H_CPPR:
777         case H_EOI:
778         case H_IPI:
779         case H_IPOLL:
780         case H_XIRR_X:
781                 if (kvmppc_xics_enabled(vcpu)) {
782                         ret = kvmppc_xics_hcall(vcpu, req);
783                         break;
784                 }
785                 return RESUME_HOST;
786         case H_PUT_TCE:
787                 ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
788                                                 kvmppc_get_gpr(vcpu, 5),
789                                                 kvmppc_get_gpr(vcpu, 6));
790                 if (ret == H_TOO_HARD)
791                         return RESUME_HOST;
792                 break;
793         case H_PUT_TCE_INDIRECT:
794                 ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
795                                                 kvmppc_get_gpr(vcpu, 5),
796                                                 kvmppc_get_gpr(vcpu, 6),
797                                                 kvmppc_get_gpr(vcpu, 7));
798                 if (ret == H_TOO_HARD)
799                         return RESUME_HOST;
800                 break;
801         case H_STUFF_TCE:
802                 ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
803                                                 kvmppc_get_gpr(vcpu, 5),
804                                                 kvmppc_get_gpr(vcpu, 6),
805                                                 kvmppc_get_gpr(vcpu, 7));
806                 if (ret == H_TOO_HARD)
807                         return RESUME_HOST;
808                 break;
809         default:
810                 return RESUME_HOST;
811         }
812         kvmppc_set_gpr(vcpu, 3, ret);
813         vcpu->arch.hcall_needed = 0;
814         return RESUME_GUEST;
815 }
816
817 static int kvmppc_hcall_impl_hv(unsigned long cmd)
818 {
819         switch (cmd) {
820         case H_CEDE:
821         case H_PROD:
822         case H_CONFER:
823         case H_REGISTER_VPA:
824         case H_SET_MODE:
825         case H_LOGICAL_CI_LOAD:
826         case H_LOGICAL_CI_STORE:
827 #ifdef CONFIG_KVM_XICS
828         case H_XIRR:
829         case H_CPPR:
830         case H_EOI:
831         case H_IPI:
832         case H_IPOLL:
833         case H_XIRR_X:
834 #endif
835                 return 1;
836         }
837
838         /* See if it's in the real-mode table */
839         return kvmppc_hcall_impl_hv_realmode(cmd);
840 }
841
842 static int kvmppc_emulate_debug_inst(struct kvm_run *run,
843                                         struct kvm_vcpu *vcpu)
844 {
845         u32 last_inst;
846
847         if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
848                                         EMULATE_DONE) {
849                 /*
850                  * Fetch failed, so return to guest and
851                  * try executing it again.
852                  */
853                 return RESUME_GUEST;
854         }
855
856         if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
857                 run->exit_reason = KVM_EXIT_DEBUG;
858                 run->debug.arch.address = kvmppc_get_pc(vcpu);
859                 return RESUME_HOST;
860         } else {
861                 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
862                 return RESUME_GUEST;
863         }
864 }
865
866 static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
867                                  struct task_struct *tsk)
868 {
869         int r = RESUME_HOST;
870
871         vcpu->stat.sum_exits++;
872
873         /*
874          * This can happen if an interrupt occurs in the last stages
875          * of guest entry or the first stages of guest exit (i.e. after
876          * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
877          * and before setting it to KVM_GUEST_MODE_HOST_HV).
878          * That can happen due to a bug, or due to a machine check
879          * occurring at just the wrong time.
880          */
881         if (vcpu->arch.shregs.msr & MSR_HV) {
882                 printk(KERN_EMERG "KVM trap in HV mode!\n");
883                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
884                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
885                         vcpu->arch.shregs.msr);
886                 kvmppc_dump_regs(vcpu);
887                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
888                 run->hw.hardware_exit_reason = vcpu->arch.trap;
889                 return RESUME_HOST;
890         }
891         run->exit_reason = KVM_EXIT_UNKNOWN;
892         run->ready_for_interrupt_injection = 1;
893         switch (vcpu->arch.trap) {
894         /* We're good on these - the host merely wanted to get our attention */
895         case BOOK3S_INTERRUPT_HV_DECREMENTER:
896                 vcpu->stat.dec_exits++;
897                 r = RESUME_GUEST;
898                 break;
899         case BOOK3S_INTERRUPT_EXTERNAL:
900         case BOOK3S_INTERRUPT_H_DOORBELL:
901                 vcpu->stat.ext_intr_exits++;
902                 r = RESUME_GUEST;
903                 break;
904         /* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
905         case BOOK3S_INTERRUPT_HMI:
906         case BOOK3S_INTERRUPT_PERFMON:
907                 r = RESUME_GUEST;
908                 break;
909         case BOOK3S_INTERRUPT_MACHINE_CHECK:
910                 /*
911                  * Deliver a machine check interrupt to the guest.
912                  * We have to do this, even if the host has handled the
913                  * machine check, because machine checks use SRR0/1 and
914                  * the interrupt might have trashed guest state in them.
915                  */
916                 kvmppc_book3s_queue_irqprio(vcpu,
917                                             BOOK3S_INTERRUPT_MACHINE_CHECK);
918                 r = RESUME_GUEST;
919                 break;
920         case BOOK3S_INTERRUPT_PROGRAM:
921         {
922                 ulong flags;
923                 /*
924                  * Normally program interrupts are delivered directly
925                  * to the guest by the hardware, but we can get here
926                  * as a result of a hypervisor emulation interrupt
927                  * (e40) getting turned into a 700 by BML RTAS.
928                  */
929                 flags = vcpu->arch.shregs.msr & 0x1f0000ull;
930                 kvmppc_core_queue_program(vcpu, flags);
931                 r = RESUME_GUEST;
932                 break;
933         }
934         case BOOK3S_INTERRUPT_SYSCALL:
935         {
936                 /* hcall - punt to userspace */
937                 int i;
938
939                 /* hypercall with MSR_PR has already been handled in rmode,
940                  * and never reaches here.
941                  */
942
943                 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
944                 for (i = 0; i < 9; ++i)
945                         run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
946                 run->exit_reason = KVM_EXIT_PAPR_HCALL;
947                 vcpu->arch.hcall_needed = 1;
948                 r = RESUME_HOST;
949                 break;
950         }
951         /*
952          * We get these next two if the guest accesses a page which it thinks
953          * it has mapped but which is not actually present, either because
954          * it is for an emulated I/O device or because the corresonding
955          * host page has been paged out.  Any other HDSI/HISI interrupts
956          * have been handled already.
957          */
958         case BOOK3S_INTERRUPT_H_DATA_STORAGE:
959                 r = RESUME_PAGE_FAULT;
960                 break;
961         case BOOK3S_INTERRUPT_H_INST_STORAGE:
962                 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
963                 vcpu->arch.fault_dsisr = 0;
964                 r = RESUME_PAGE_FAULT;
965                 break;
966         /*
967          * This occurs if the guest executes an illegal instruction.
968          * If the guest debug is disabled, generate a program interrupt
969          * to the guest. If guest debug is enabled, we need to check
970          * whether the instruction is a software breakpoint instruction.
971          * Accordingly return to Guest or Host.
972          */
973         case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
974                 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
975                         vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
976                                 swab32(vcpu->arch.emul_inst) :
977                                 vcpu->arch.emul_inst;
978                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
979                         r = kvmppc_emulate_debug_inst(run, vcpu);
980                 } else {
981                         kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
982                         r = RESUME_GUEST;
983                 }
984                 break;
985         /*
986          * This occurs if the guest (kernel or userspace), does something that
987          * is prohibited by HFSCR.  We just generate a program interrupt to
988          * the guest.
989          */
990         case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
991                 kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
992                 r = RESUME_GUEST;
993                 break;
994         default:
995                 kvmppc_dump_regs(vcpu);
996                 printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
997                         vcpu->arch.trap, kvmppc_get_pc(vcpu),
998                         vcpu->arch.shregs.msr);
999                 run->hw.hardware_exit_reason = vcpu->arch.trap;
1000                 r = RESUME_HOST;
1001                 break;
1002         }
1003
1004         return r;
1005 }
1006
1007 static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
1008                                             struct kvm_sregs *sregs)
1009 {
1010         int i;
1011
1012         memset(sregs, 0, sizeof(struct kvm_sregs));
1013         sregs->pvr = vcpu->arch.pvr;
1014         for (i = 0; i < vcpu->arch.slb_max; i++) {
1015                 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
1016                 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1017         }
1018
1019         return 0;
1020 }
1021
1022 static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
1023                                             struct kvm_sregs *sregs)
1024 {
1025         int i, j;
1026
1027         /* Only accept the same PVR as the host's, since we can't spoof it */
1028         if (sregs->pvr != vcpu->arch.pvr)
1029                 return -EINVAL;
1030
1031         j = 0;
1032         for (i = 0; i < vcpu->arch.slb_nr; i++) {
1033                 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
1034                         vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
1035                         vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
1036                         ++j;
1037                 }
1038         }
1039         vcpu->arch.slb_max = j;
1040
1041         return 0;
1042 }
1043
1044 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
1045                 bool preserve_top32)
1046 {
1047         struct kvm *kvm = vcpu->kvm;
1048         struct kvmppc_vcore *vc = vcpu->arch.vcore;
1049         u64 mask;
1050
1051         mutex_lock(&kvm->lock);
1052         spin_lock(&vc->lock);
1053         /*
1054          * If ILE (interrupt little-endian) has changed, update the
1055          * MSR_LE bit in the intr_msr for each vcpu in this vcore.
1056          */
1057         if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
1058                 struct kvm_vcpu *vcpu;
1059                 int i;
1060
1061                 kvm_for_each_vcpu(i, vcpu, kvm) {
1062                         if (vcpu->arch.vcore != vc)
1063                                 continue;
1064                         if (new_lpcr & LPCR_ILE)
1065                                 vcpu->arch.intr_msr |= MSR_LE;
1066                         else
1067                                 vcpu->arch.intr_msr &= ~MSR_LE;
1068                 }
1069         }
1070
1071         /*
1072          * Userspace can only modify DPFD (default prefetch depth),
1073          * ILE (interrupt little-endian) and TC (translation control).
1074          * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
1075          */
1076         mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
1077         if (cpu_has_feature(CPU_FTR_ARCH_207S))
1078                 mask |= LPCR_AIL;
1079
1080         /* Broken 32-bit version of LPCR must not clear top bits */
1081         if (preserve_top32)
1082                 mask &= 0xFFFFFFFF;
1083         vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
1084         spin_unlock(&vc->lock);
1085         mutex_unlock(&kvm->lock);
1086 }
1087
1088 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1089                                  union kvmppc_one_reg *val)
1090 {
1091         int r = 0;
1092         long int i;
1093
1094         switch (id) {
1095         case KVM_REG_PPC_DEBUG_INST:
1096                 *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
1097                 break;
1098         case KVM_REG_PPC_HIOR:
1099                 *val = get_reg_val(id, 0);
1100                 break;
1101         case KVM_REG_PPC_DABR:
1102                 *val = get_reg_val(id, vcpu->arch.dabr);
1103                 break;
1104         case KVM_REG_PPC_DABRX:
1105                 *val = get_reg_val(id, vcpu->arch.dabrx);
1106                 break;
1107         case KVM_REG_PPC_DSCR:
1108                 *val = get_reg_val(id, vcpu->arch.dscr);
1109                 break;
1110         case KVM_REG_PPC_PURR:
1111                 *val = get_reg_val(id, vcpu->arch.purr);
1112                 break;
1113         case KVM_REG_PPC_SPURR:
1114                 *val = get_reg_val(id, vcpu->arch.spurr);
1115                 break;
1116         case KVM_REG_PPC_AMR:
1117                 *val = get_reg_val(id, vcpu->arch.amr);
1118                 break;
1119         case KVM_REG_PPC_UAMOR:
1120                 *val = get_reg_val(id, vcpu->arch.uamor);
1121                 break;
1122         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1123                 i = id - KVM_REG_PPC_MMCR0;
1124                 *val = get_reg_val(id, vcpu->arch.mmcr[i]);
1125                 break;
1126         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1127                 i = id - KVM_REG_PPC_PMC1;
1128                 *val = get_reg_val(id, vcpu->arch.pmc[i]);
1129                 break;
1130         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1131                 i = id - KVM_REG_PPC_SPMC1;
1132                 *val = get_reg_val(id, vcpu->arch.spmc[i]);
1133                 break;
1134         case KVM_REG_PPC_SIAR:
1135                 *val = get_reg_val(id, vcpu->arch.siar);
1136                 break;
1137         case KVM_REG_PPC_SDAR:
1138                 *val = get_reg_val(id, vcpu->arch.sdar);
1139                 break;
1140         case KVM_REG_PPC_SIER:
1141                 *val = get_reg_val(id, vcpu->arch.sier);
1142                 break;
1143         case KVM_REG_PPC_IAMR:
1144                 *val = get_reg_val(id, vcpu->arch.iamr);
1145                 break;
1146         case KVM_REG_PPC_PSPB:
1147                 *val = get_reg_val(id, vcpu->arch.pspb);
1148                 break;
1149         case KVM_REG_PPC_DPDES:
1150                 *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
1151                 break;
1152         case KVM_REG_PPC_DAWR:
1153                 *val = get_reg_val(id, vcpu->arch.dawr);
1154                 break;
1155         case KVM_REG_PPC_DAWRX:
1156                 *val = get_reg_val(id, vcpu->arch.dawrx);
1157                 break;
1158         case KVM_REG_PPC_CIABR:
1159                 *val = get_reg_val(id, vcpu->arch.ciabr);
1160                 break;
1161         case KVM_REG_PPC_CSIGR:
1162                 *val = get_reg_val(id, vcpu->arch.csigr);
1163                 break;
1164         case KVM_REG_PPC_TACR:
1165                 *val = get_reg_val(id, vcpu->arch.tacr);
1166                 break;
1167         case KVM_REG_PPC_TCSCR:
1168                 *val = get_reg_val(id, vcpu->arch.tcscr);
1169                 break;
1170         case KVM_REG_PPC_PID:
1171                 *val = get_reg_val(id, vcpu->arch.pid);
1172                 break;
1173         case KVM_REG_PPC_ACOP:
1174                 *val = get_reg_val(id, vcpu->arch.acop);
1175                 break;
1176         case KVM_REG_PPC_WORT:
1177                 *val = get_reg_val(id, vcpu->arch.wort);
1178                 break;
1179         case KVM_REG_PPC_VPA_ADDR:
1180                 spin_lock(&vcpu->arch.vpa_update_lock);
1181                 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
1182                 spin_unlock(&vcpu->arch.vpa_update_lock);
1183                 break;
1184         case KVM_REG_PPC_VPA_SLB:
1185                 spin_lock(&vcpu->arch.vpa_update_lock);
1186                 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
1187                 val->vpaval.length = vcpu->arch.slb_shadow.len;
1188                 spin_unlock(&vcpu->arch.vpa_update_lock);
1189                 break;
1190         case KVM_REG_PPC_VPA_DTL:
1191                 spin_lock(&vcpu->arch.vpa_update_lock);
1192                 val->vpaval.addr = vcpu->arch.dtl.next_gpa;
1193                 val->vpaval.length = vcpu->arch.dtl.len;
1194                 spin_unlock(&vcpu->arch.vpa_update_lock);
1195                 break;
1196         case KVM_REG_PPC_TB_OFFSET:
1197                 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
1198                 break;
1199         case KVM_REG_PPC_LPCR:
1200         case KVM_REG_PPC_LPCR_64:
1201                 *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
1202                 break;
1203         case KVM_REG_PPC_PPR:
1204                 *val = get_reg_val(id, vcpu->arch.ppr);
1205                 break;
1206 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1207         case KVM_REG_PPC_TFHAR:
1208                 *val = get_reg_val(id, vcpu->arch.tfhar);
1209                 break;
1210         case KVM_REG_PPC_TFIAR:
1211                 *val = get_reg_val(id, vcpu->arch.tfiar);
1212                 break;
1213         case KVM_REG_PPC_TEXASR:
1214                 *val = get_reg_val(id, vcpu->arch.texasr);
1215                 break;
1216         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1217                 i = id - KVM_REG_PPC_TM_GPR0;
1218                 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
1219                 break;
1220         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1221         {
1222                 int j;
1223                 i = id - KVM_REG_PPC_TM_VSR0;
1224                 if (i < 32)
1225                         for (j = 0; j < TS_FPRWIDTH; j++)
1226                                 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
1227                 else {
1228                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1229                                 val->vval = vcpu->arch.vr_tm.vr[i-32];
1230                         else
1231                                 r = -ENXIO;
1232                 }
1233                 break;
1234         }
1235         case KVM_REG_PPC_TM_CR:
1236                 *val = get_reg_val(id, vcpu->arch.cr_tm);
1237                 break;
1238         case KVM_REG_PPC_TM_LR:
1239                 *val = get_reg_val(id, vcpu->arch.lr_tm);
1240                 break;
1241         case KVM_REG_PPC_TM_CTR:
1242                 *val = get_reg_val(id, vcpu->arch.ctr_tm);
1243                 break;
1244         case KVM_REG_PPC_TM_FPSCR:
1245                 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
1246                 break;
1247         case KVM_REG_PPC_TM_AMR:
1248                 *val = get_reg_val(id, vcpu->arch.amr_tm);
1249                 break;
1250         case KVM_REG_PPC_TM_PPR:
1251                 *val = get_reg_val(id, vcpu->arch.ppr_tm);
1252                 break;
1253         case KVM_REG_PPC_TM_VRSAVE:
1254                 *val = get_reg_val(id, vcpu->arch.vrsave_tm);
1255                 break;
1256         case KVM_REG_PPC_TM_VSCR:
1257                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1258                         *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
1259                 else
1260                         r = -ENXIO;
1261                 break;
1262         case KVM_REG_PPC_TM_DSCR:
1263                 *val = get_reg_val(id, vcpu->arch.dscr_tm);
1264                 break;
1265         case KVM_REG_PPC_TM_TAR:
1266                 *val = get_reg_val(id, vcpu->arch.tar_tm);
1267                 break;
1268 #endif
1269         case KVM_REG_PPC_ARCH_COMPAT:
1270                 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
1271                 break;
1272         default:
1273                 r = -EINVAL;
1274                 break;
1275         }
1276
1277         return r;
1278 }
1279
1280 static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
1281                                  union kvmppc_one_reg *val)
1282 {
1283         int r = 0;
1284         long int i;
1285         unsigned long addr, len;
1286
1287         switch (id) {
1288         case KVM_REG_PPC_HIOR:
1289                 /* Only allow this to be set to zero */
1290                 if (set_reg_val(id, *val))
1291                         r = -EINVAL;
1292                 break;
1293         case KVM_REG_PPC_DABR:
1294                 vcpu->arch.dabr = set_reg_val(id, *val);
1295                 break;
1296         case KVM_REG_PPC_DABRX:
1297                 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
1298                 break;
1299         case KVM_REG_PPC_DSCR:
1300                 vcpu->arch.dscr = set_reg_val(id, *val);
1301                 break;
1302         case KVM_REG_PPC_PURR:
1303                 vcpu->arch.purr = set_reg_val(id, *val);
1304                 break;
1305         case KVM_REG_PPC_SPURR:
1306                 vcpu->arch.spurr = set_reg_val(id, *val);
1307                 break;
1308         case KVM_REG_PPC_AMR:
1309                 vcpu->arch.amr = set_reg_val(id, *val);
1310                 break;
1311         case KVM_REG_PPC_UAMOR:
1312                 vcpu->arch.uamor = set_reg_val(id, *val);
1313                 break;
1314         case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
1315                 i = id - KVM_REG_PPC_MMCR0;
1316                 vcpu->arch.mmcr[i] = set_reg_val(id, *val);
1317                 break;
1318         case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
1319                 i = id - KVM_REG_PPC_PMC1;
1320                 vcpu->arch.pmc[i] = set_reg_val(id, *val);
1321                 break;
1322         case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
1323                 i = id - KVM_REG_PPC_SPMC1;
1324                 vcpu->arch.spmc[i] = set_reg_val(id, *val);
1325                 break;
1326         case KVM_REG_PPC_SIAR:
1327                 vcpu->arch.siar = set_reg_val(id, *val);
1328                 break;
1329         case KVM_REG_PPC_SDAR:
1330                 vcpu->arch.sdar = set_reg_val(id, *val);
1331                 break;
1332         case KVM_REG_PPC_SIER:
1333                 vcpu->arch.sier = set_reg_val(id, *val);
1334                 break;
1335         case KVM_REG_PPC_IAMR:
1336                 vcpu->arch.iamr = set_reg_val(id, *val);
1337                 break;
1338         case KVM_REG_PPC_PSPB:
1339                 vcpu->arch.pspb = set_reg_val(id, *val);
1340                 break;
1341         case KVM_REG_PPC_DPDES:
1342                 vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
1343                 break;
1344         case KVM_REG_PPC_DAWR:
1345                 vcpu->arch.dawr = set_reg_val(id, *val);
1346                 break;
1347         case KVM_REG_PPC_DAWRX:
1348                 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
1349                 break;
1350         case KVM_REG_PPC_CIABR:
1351                 vcpu->arch.ciabr = set_reg_val(id, *val);
1352                 /* Don't allow setting breakpoints in hypervisor code */
1353                 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
1354                         vcpu->arch.ciabr &= ~CIABR_PRIV;        /* disable */
1355                 break;
1356         case KVM_REG_PPC_CSIGR:
1357                 vcpu->arch.csigr = set_reg_val(id, *val);
1358                 break;
1359         case KVM_REG_PPC_TACR:
1360                 vcpu->arch.tacr = set_reg_val(id, *val);
1361                 break;
1362         case KVM_REG_PPC_TCSCR:
1363                 vcpu->arch.tcscr = set_reg_val(id, *val);
1364                 break;
1365         case KVM_REG_PPC_PID:
1366                 vcpu->arch.pid = set_reg_val(id, *val);
1367                 break;
1368         case KVM_REG_PPC_ACOP:
1369                 vcpu->arch.acop = set_reg_val(id, *val);
1370                 break;
1371         case KVM_REG_PPC_WORT:
1372                 vcpu->arch.wort = set_reg_val(id, *val);
1373                 break;
1374         case KVM_REG_PPC_VPA_ADDR:
1375                 addr = set_reg_val(id, *val);
1376                 r = -EINVAL;
1377                 if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
1378                               vcpu->arch.dtl.next_gpa))
1379                         break;
1380                 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
1381                 break;
1382         case KVM_REG_PPC_VPA_SLB:
1383                 addr = val->vpaval.addr;
1384                 len = val->vpaval.length;
1385                 r = -EINVAL;
1386                 if (addr && !vcpu->arch.vpa.next_gpa)
1387                         break;
1388                 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
1389                 break;
1390         case KVM_REG_PPC_VPA_DTL:
1391                 addr = val->vpaval.addr;
1392                 len = val->vpaval.length;
1393                 r = -EINVAL;
1394                 if (addr && (len < sizeof(struct dtl_entry) ||
1395                              !vcpu->arch.vpa.next_gpa))
1396                         break;
1397                 len -= len % sizeof(struct dtl_entry);
1398                 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
1399                 break;
1400         case KVM_REG_PPC_TB_OFFSET:
1401                 /* round up to multiple of 2^24 */
1402                 vcpu->arch.vcore->tb_offset =
1403                         ALIGN(set_reg_val(id, *val), 1UL << 24);
1404                 break;
1405         case KVM_REG_PPC_LPCR:
1406                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
1407                 break;
1408         case KVM_REG_PPC_LPCR_64:
1409                 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
1410                 break;
1411         case KVM_REG_PPC_PPR:
1412                 vcpu->arch.ppr = set_reg_val(id, *val);
1413                 break;
1414 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1415         case KVM_REG_PPC_TFHAR:
1416                 vcpu->arch.tfhar = set_reg_val(id, *val);
1417                 break;
1418         case KVM_REG_PPC_TFIAR:
1419                 vcpu->arch.tfiar = set_reg_val(id, *val);
1420                 break;
1421         case KVM_REG_PPC_TEXASR:
1422                 vcpu->arch.texasr = set_reg_val(id, *val);
1423                 break;
1424         case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
1425                 i = id - KVM_REG_PPC_TM_GPR0;
1426                 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
1427                 break;
1428         case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
1429         {
1430                 int j;
1431                 i = id - KVM_REG_PPC_TM_VSR0;
1432                 if (i < 32)
1433                         for (j = 0; j < TS_FPRWIDTH; j++)
1434                                 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
1435                 else
1436                         if (cpu_has_feature(CPU_FTR_ALTIVEC))
1437                                 vcpu->arch.vr_tm.vr[i-32] = val->vval;
1438                         else
1439                                 r = -ENXIO;
1440                 break;
1441         }
1442         case KVM_REG_PPC_TM_CR:
1443                 vcpu->arch.cr_tm = set_reg_val(id, *val);
1444                 break;
1445         case KVM_REG_PPC_TM_LR:
1446                 vcpu->arch.lr_tm = set_reg_val(id, *val);
1447                 break;
1448         case KVM_REG_PPC_TM_CTR:
1449                 vcpu->arch.ctr_tm = set_reg_val(id, *val);
1450                 break;
1451         case KVM_REG_PPC_TM_FPSCR:
1452                 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
1453                 break;
1454         case KVM_REG_PPC_TM_AMR:
1455                 vcpu->arch.amr_tm = set_reg_val(id, *val);
1456                 break;
1457         case KVM_REG_PPC_TM_PPR:
1458                 vcpu->arch.ppr_tm = set_reg_val(id, *val);
1459                 break;
1460         case KVM_REG_PPC_TM_VRSAVE:
1461                 vcpu->arch.vrsave_tm = set_reg_val(id, *val);
1462                 break;
1463         case KVM_REG_PPC_TM_VSCR:
1464                 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1465                         vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
1466                 else
1467                         r = -ENXIO;
1468                 break;
1469         case KVM_REG_PPC_TM_DSCR:
1470                 vcpu->arch.dscr_tm = set_reg_val(id, *val);
1471                 break;
1472         case KVM_REG_PPC_TM_TAR:
1473                 vcpu->arch.tar_tm = set_reg_val(id, *val);
1474                 break;
1475 #endif
1476         case KVM_REG_PPC_ARCH_COMPAT:
1477                 r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
1478                 break;
1479         default:
1480                 r = -EINVAL;
1481                 break;
1482         }
1483
1484         return r;
1485 }
1486
1487 static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
1488 {
1489         struct kvmppc_vcore *vcore;
1490
1491         vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
1492
1493         if (vcore == NULL)
1494                 return NULL;
1495
1496         INIT_LIST_HEAD(&vcore->runnable_threads);
1497         spin_lock_init(&vcore->lock);
1498         spin_lock_init(&vcore->stoltb_lock);
1499         init_swait_queue_head(&vcore->wq);
1500         vcore->preempt_tb = TB_NIL;
1501         vcore->lpcr = kvm->arch.lpcr;
1502         vcore->first_vcpuid = core * threads_per_subcore;
1503         vcore->kvm = kvm;
1504         INIT_LIST_HEAD(&vcore->preempt_list);
1505
1506         return vcore;
1507 }
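/*
 * With the usual threads_per_subcore of 8 on an unsplit POWER8 host,
 * vcore N therefore covers vcpu ids 8N .. 8N+7, and each vcpu's ptid
 * within its vcore is simply vcpu_id - first_vcpuid (computed in
 * kvmppc_core_vcpu_create_hv() below).
 */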
1508
1509 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1510 static struct debugfs_timings_element {
1511         const char *name;
1512         size_t offset;
1513 } timings[] = {
1514         {"rm_entry",    offsetof(struct kvm_vcpu, arch.rm_entry)},
1515         {"rm_intr",     offsetof(struct kvm_vcpu, arch.rm_intr)},
1516         {"rm_exit",     offsetof(struct kvm_vcpu, arch.rm_exit)},
1517         {"guest",       offsetof(struct kvm_vcpu, arch.guest_time)},
1518         {"cede",        offsetof(struct kvm_vcpu, arch.cede_time)},
1519 };
1520
1521 #define N_TIMINGS       (sizeof(timings) / sizeof(timings[0]))
1522
1523 struct debugfs_timings_state {
1524         struct kvm_vcpu *vcpu;
1525         unsigned int    buflen;
1526         char            buf[N_TIMINGS * 100];
1527 };
1528
1529 static int debugfs_timings_open(struct inode *inode, struct file *file)
1530 {
1531         struct kvm_vcpu *vcpu = inode->i_private;
1532         struct debugfs_timings_state *p;
1533
1534         p = kzalloc(sizeof(*p), GFP_KERNEL);
1535         if (!p)
1536                 return -ENOMEM;
1537
1538         kvm_get_kvm(vcpu->kvm);
1539         p->vcpu = vcpu;
1540         file->private_data = p;
1541
1542         return nonseekable_open(inode, file);
1543 }
1544
1545 static int debugfs_timings_release(struct inode *inode, struct file *file)
1546 {
1547         struct debugfs_timings_state *p = file->private_data;
1548
1549         kvm_put_kvm(p->vcpu->kvm);
1550         kfree(p);
1551         return 0;
1552 }
1553
1554 static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
1555                                     size_t len, loff_t *ppos)
1556 {
1557         struct debugfs_timings_state *p = file->private_data;
1558         struct kvm_vcpu *vcpu = p->vcpu;
1559         char *s, *buf_end;
1560         struct kvmhv_tb_accumulator tb;
1561         u64 count;
1562         loff_t pos;
1563         ssize_t n;
1564         int i, loops;
1565         bool ok;
1566
1567         if (!p->buflen) {
1568                 s = p->buf;
1569                 buf_end = s + sizeof(p->buf);
1570                 for (i = 0; i < N_TIMINGS; ++i) {
1571                         struct kvmhv_tb_accumulator *acc;
1572
1573                         acc = (struct kvmhv_tb_accumulator *)
1574                                 ((unsigned long)vcpu + timings[i].offset);
1575                         ok = false;
1576                         for (loops = 0; loops < 1000; ++loops) {
1577                                 count = acc->seqcount;
1578                                 if (!(count & 1)) {
1579                                         smp_rmb();
1580                                         tb = *acc;
1581                                         smp_rmb();
1582                                         if (count == acc->seqcount) {
1583                                                 ok = true;
1584                                                 break;
1585                                         }
1586                                 }
1587                                 udelay(1);
1588                         }
1589                         if (!ok)
1590                                 snprintf(s, buf_end - s, "%s: stuck\n",
1591                                         timings[i].name);
1592                         else
1593                                 snprintf(s, buf_end - s,
1594                                         "%s: %llu %llu %llu %llu\n",
1595                                         timings[i].name, count / 2,
1596                                         tb_to_ns(tb.tb_total),
1597                                         tb_to_ns(tb.tb_min),
1598                                         tb_to_ns(tb.tb_max));
1599                         s += strlen(s);
1600                 }
1601                 p->buflen = s - p->buf;
1602         }
1603
1604         pos = *ppos;
1605         if (pos >= p->buflen)
1606                 return 0;
1607         if (len > p->buflen - pos)
1608                 len = p->buflen - pos;
1609         n = copy_to_user(buf, p->buf + pos, len);
1610         if (n) {
1611                 if (n == len)
1612                         return -EFAULT;
1613                 len -= n;
1614         }
1615         *ppos = pos + len;
1616         return len;
1617 }
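/*
 * A minimal stand-alone sketch of the lockless snapshot scheme the loop
 * above relies on: the writer makes seqcount odd before touching the
 * accumulator and even again afterwards, so a reader that sees the same
 * even count before and after copying knows its copy is consistent.
 * The struct and function names below are illustrative only, assuming a
 * writer along these lines (the real updates are done on the guest
 * entry/exit path, not shown here).
 */
struct sample_acc {
	u64	seqcount;	/* even: stable, odd: update in progress */
	u64	value;
};

static void sample_write(struct sample_acc *acc, u64 v)
{
	acc->seqcount++;	/* now odd: readers will retry */
	smp_wmb();
	acc->value = v;
	smp_wmb();
	acc->seqcount++;	/* even again: snapshot is stable */
}

static bool sample_read(struct sample_acc *acc, struct sample_acc *out)
{
	u64 count;
	int loops;

	for (loops = 0; loops < 1000; ++loops) {
		count = acc->seqcount;
		if (!(count & 1)) {		/* no update in flight */
			smp_rmb();
			*out = *acc;
			smp_rmb();
			if (count == acc->seqcount)
				return true;	/* copy is consistent */
		}
		udelay(1);
	}
	return false;				/* writer appears stuck */
}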
1618
1619 static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
1620                                      size_t len, loff_t *ppos)
1621 {
1622         return -EACCES;
1623 }
1624
1625 static const struct file_operations debugfs_timings_ops = {
1626         .owner   = THIS_MODULE,
1627         .open    = debugfs_timings_open,
1628         .release = debugfs_timings_release,
1629         .read    = debugfs_timings_read,
1630         .write   = debugfs_timings_write,
1631         .llseek  = generic_file_llseek,
1632 };
1633
1634 /* Create a debugfs directory for the vcpu */
1635 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1636 {
1637         char buf[16];
1638         struct kvm *kvm = vcpu->kvm;
1639
1640         snprintf(buf, sizeof(buf), "vcpu%u", id);
1641         if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
1642                 return;
1643         vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
1644         if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
1645                 return;
1646         vcpu->arch.debugfs_timings =
1647                 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
1648                                     vcpu, &debugfs_timings_ops);
1649 }
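/*
 * Each vcpu thus gets a read-only "timings" file under its debugfs
 * directory; every line produced by debugfs_timings_read() above has the
 * form
 *
 *	<name>: <samples> <total_ns> <min_ns> <max_ns>
 *
 * for example "rm_entry: 4096 524288 90 410" (values illustrative only),
 * or "<name>: stuck" if a consistent snapshot could not be obtained.
 */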
1650
1651 #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1652 static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1653 {
1654 }
1655 #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1656
1657 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1658                                                    unsigned int id)
1659 {
1660         struct kvm_vcpu *vcpu;
1661         int err = -EINVAL;
1662         int core;
1663         struct kvmppc_vcore *vcore;
1664
1665         core = id / threads_per_subcore;
1666         if (core >= KVM_MAX_VCORES)
1667                 goto out;
1668
1669         err = -ENOMEM;
1670         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1671         if (!vcpu)
1672                 goto out;
1673
1674         err = kvm_vcpu_init(vcpu, kvm, id);
1675         if (err)
1676                 goto free_vcpu;
1677
1678         vcpu->arch.shared = &vcpu->arch.shregs;
1679 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1680         /*
1681          * The shared struct is never shared on HV,
1682          * so we can always use host endianness
1683          */
1684 #ifdef __BIG_ENDIAN__
1685         vcpu->arch.shared_big_endian = true;
1686 #else
1687         vcpu->arch.shared_big_endian = false;
1688 #endif
1689 #endif
1690         vcpu->arch.mmcr[0] = MMCR0_FC;
1691         vcpu->arch.ctrl = CTRL_RUNLATCH;
1692         /* default to host PVR, since we can't spoof it */
1693         kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
1694         spin_lock_init(&vcpu->arch.vpa_update_lock);
1695         spin_lock_init(&vcpu->arch.tbacct_lock);
1696         vcpu->arch.busy_preempt = TB_NIL;
1697         vcpu->arch.intr_msr = MSR_SF | MSR_ME;
1698
1699         kvmppc_mmu_book3s_hv_init(vcpu);
1700
1701         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
1702
1703         init_waitqueue_head(&vcpu->arch.cpu_run);
1704
1705         mutex_lock(&kvm->lock);
1706         vcore = kvm->arch.vcores[core];
1707         if (!vcore) {
1708                 vcore = kvmppc_vcore_create(kvm, core);
1709                 kvm->arch.vcores[core] = vcore;
1710                 kvm->arch.online_vcores++;
1711         }
1712         mutex_unlock(&kvm->lock);
1713
1714         if (!vcore)
1715                 goto free_vcpu;
1716
1717         spin_lock(&vcore->lock);
1718         ++vcore->num_threads;
1719         spin_unlock(&vcore->lock);
1720         vcpu->arch.vcore = vcore;
1721         vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
1722         vcpu->arch.thread_cpu = -1;
1723
1724         vcpu->arch.cpu_type = KVM_CPU_3S_64;
1725         kvmppc_sanity_check(vcpu);
1726
1727         debugfs_vcpu_init(vcpu, id);
1728
1729         return vcpu;
1730
1731 free_vcpu:
1732         kmem_cache_free(kvm_vcpu_cache, vcpu);
1733 out:
1734         return ERR_PTR(err);
1735 }
1736
1737 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
1738 {
1739         if (vpa->pinned_addr)
1740                 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
1741                                         vpa->dirty);
1742 }
1743
1744 static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
1745 {
1746         spin_lock(&vcpu->arch.vpa_update_lock);
1747         unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
1748         unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
1749         unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
1750         spin_unlock(&vcpu->arch.vpa_update_lock);
1751         kvm_vcpu_uninit(vcpu);
1752         kmem_cache_free(kvm_vcpu_cache, vcpu);
1753 }
1754
1755 static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
1756 {
1757         /* Indicate we want to get back into the guest */
1758         return 1;
1759 }
1760
1761 static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
1762 {
1763         unsigned long dec_nsec, now;
1764
1765         now = get_tb();
1766         if (now > vcpu->arch.dec_expires) {
1767                 /* decrementer has already gone negative */
1768                 kvmppc_core_queue_dec(vcpu);
1769                 kvmppc_core_prepare_to_enter(vcpu);
1770                 return;
1771         }
1772         dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
1773                    / tb_ticks_per_sec;
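	/*
	 * With the 512 MHz timebase typical of POWER7/POWER8 hosts this
	 * works out to roughly 1.95 ns per timebase tick, e.g. 512 ticks
	 * of remaining decrementer become a 1000 ns hrtimer delay; the
	 * exact rate always comes from tb_ticks_per_sec.
	 */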
1774         hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
1775                       HRTIMER_MODE_REL);
1776         vcpu->arch.timer_running = 1;
1777 }
1778
1779 static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
1780 {
1781         vcpu->arch.ceded = 0;
1782         if (vcpu->arch.timer_running) {
1783                 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1784                 vcpu->arch.timer_running = 0;
1785         }
1786 }
1787
1788 extern void __kvmppc_vcore_entry(void);
1789
1790 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
1791                                    struct kvm_vcpu *vcpu)
1792 {
1793         u64 now;
1794
1795         if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
1796                 return;
1797         spin_lock_irq(&vcpu->arch.tbacct_lock);
1798         now = mftb();
1799         vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
1800                 vcpu->arch.stolen_logged;
1801         vcpu->arch.busy_preempt = now;
1802         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
1803         spin_unlock_irq(&vcpu->arch.tbacct_lock);
1804         --vc->n_runnable;
1805         list_del(&vcpu->arch.run_list);
1806 }
1807
1808 static int kvmppc_grab_hwthread(int cpu)
1809 {
1810         struct paca_struct *tpaca;
1811         long timeout = 10000;
1812
1813         tpaca = &paca[cpu];
1814
1815         /* Ensure the thread won't go into the kernel if it wakes */
1816         tpaca->kvm_hstate.kvm_vcpu = NULL;
1817         tpaca->kvm_hstate.kvm_vcore = NULL;
1818         tpaca->kvm_hstate.napping = 0;
1819         smp_wmb();
1820         tpaca->kvm_hstate.hwthread_req = 1;
1821
1822         /*
1823          * If the thread is already executing in the kernel (e.g. handling
1824          * a stray interrupt), wait for it to get back to nap mode.
1825          * The smp_mb() is to ensure that our setting of hwthread_req
1826          * is visible before we look at hwthread_state, so if this
1827          * races with the code at system_reset_pSeries and the thread
1828          * misses our setting of hwthread_req, we are sure to see its
1829          * setting of hwthread_state, and vice versa.
1830          */
1831         smp_mb();
1832         while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
1833                 if (--timeout <= 0) {
1834                         pr_err("KVM: couldn't grab cpu %d\n", cpu);
1835                         return -EBUSY;
1836                 }
1837                 udelay(1);
1838         }
1839         return 0;
1840 }
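/*
 * The ordering argument in the comment above is the classic store-buffer
 * pattern, sketched here with illustrative names and assuming the other
 * thread mirrors the store / full barrier / load sequence:
 *
 *	this function			system reset / nap path
 *	-------------			-----------------------
 *	hwthread_req = 1;		hwthread_state = IN_KERNEL;
 *	smp_mb();			smp_mb();
 *	r1 = hwthread_state;		r2 = hwthread_req;
 *
 * With full barriers on both sides it cannot happen that r1 misses the
 * store to hwthread_state *and* r2 misses the store to hwthread_req.
 */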
1841
1842 static void kvmppc_release_hwthread(int cpu)
1843 {
1844         struct paca_struct *tpaca;
1845
1846         tpaca = &paca[cpu];
1847         tpaca->kvm_hstate.hwthread_req = 0;
1848         tpaca->kvm_hstate.kvm_vcpu = NULL;
1849         tpaca->kvm_hstate.kvm_vcore = NULL;
1850         tpaca->kvm_hstate.kvm_split_mode = NULL;
1851 }
1852
1853 static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
1854 {
1855         int cpu;
1856         struct paca_struct *tpaca;
1857         struct kvmppc_vcore *mvc = vc->master_vcore;
1858
1859         cpu = vc->pcpu;
1860         if (vcpu) {
1861                 if (vcpu->arch.timer_running) {
1862                         hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
1863                         vcpu->arch.timer_running = 0;
1864                 }
1865                 cpu += vcpu->arch.ptid;
1866                 vcpu->cpu = mvc->pcpu;
1867                 vcpu->arch.thread_cpu = cpu;
1868         }
1869         tpaca = &paca[cpu];
1870         tpaca->kvm_hstate.kvm_vcpu = vcpu;
1871         tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
1872         /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
1873         smp_wmb();
1874         tpaca->kvm_hstate.kvm_vcore = mvc;
1875         if (cpu != smp_processor_id())
1876                 kvmppc_ipi_thread(cpu);
1877 }
1878
1879 static void kvmppc_wait_for_nap(void)
1880 {
1881         int cpu = smp_processor_id();
1882         int i, loops;
1883
1884         for (loops = 0; loops < 1000000; ++loops) {
1885                 /*
1886                  * Check if all threads are finished.
1887                  * We set the vcore pointer when starting a thread
1888                  * and the thread clears it when finished, so we look
1889                  * for any threads that still have a non-NULL vcore ptr.
1890                  */
1891                 for (i = 1; i < threads_per_subcore; ++i)
1892                         if (paca[cpu + i].kvm_hstate.kvm_vcore)
1893                                 break;
1894                 if (i == threads_per_subcore) {
1895                         HMT_medium();
1896                         return;
1897                 }
1898                 HMT_low();
1899         }
1900         HMT_medium();
1901         for (i = 1; i < threads_per_subcore; ++i)
1902                 if (paca[cpu + i].kvm_hstate.kvm_vcore)
1903                         pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
1904 }
1905
1906 /*
1907  * Check that we are on thread 0 and that any other threads in
1908  * this core are off-line.  Then grab the threads so they can't
1909  * enter the kernel.
1910  */
1911 static int on_primary_thread(void)
1912 {
1913         int cpu = smp_processor_id();
1914         int thr;
1915
1916         /* Are we on a primary subcore? */
1917         if (cpu_thread_in_subcore(cpu))
1918                 return 0;
1919
1920         thr = 0;
1921         while (++thr < threads_per_subcore)
1922                 if (cpu_online(cpu + thr))
1923                         return 0;
1924
1925         /* Grab all hw threads so they can't go into the kernel */
1926         for (thr = 1; thr < threads_per_subcore; ++thr) {
1927                 if (kvmppc_grab_hwthread(cpu + thr)) {
1928                         /* Couldn't grab one; let the others go */
1929                         do {
1930                                 kvmppc_release_hwthread(cpu + thr);
1931                         } while (--thr > 0);
1932                         return 0;
1933                 }
1934         }
1935         return 1;
1936 }
1937
1938 /*
1939  * A list of virtual cores for each physical CPU.
1940  * These are vcores that could run but their runner VCPU tasks are
1941  * (or may be) preempted.
1942  */
1943 struct preempted_vcore_list {
1944         struct list_head        list;
1945         spinlock_t              lock;
1946 };
1947
1948 static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
1949
1950 static void init_vcore_lists(void)
1951 {
1952         int cpu;
1953
1954         for_each_possible_cpu(cpu) {
1955                 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
1956                 spin_lock_init(&lp->lock);
1957                 INIT_LIST_HEAD(&lp->list);
1958         }
1959 }
1960
1961 static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
1962 {
1963         struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
1964
1965         vc->vcore_state = VCORE_PREEMPT;
1966         vc->pcpu = smp_processor_id();
1967         if (vc->num_threads < threads_per_subcore) {
1968                 spin_lock(&lp->lock);
1969                 list_add_tail(&vc->preempt_list, &lp->list);
1970                 spin_unlock(&lp->lock);
1971         }
1972
1973         /* Start accumulating stolen time */
1974         kvmppc_core_start_stolen(vc);
1975 }
1976
1977 static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
1978 {
1979         struct preempted_vcore_list *lp;
1980
1981         kvmppc_core_end_stolen(vc);
1982         if (!list_empty(&vc->preempt_list)) {
1983                 lp = &per_cpu(preempted_vcores, vc->pcpu);
1984                 spin_lock(&lp->lock);
1985                 list_del_init(&vc->preempt_list);
1986                 spin_unlock(&lp->lock);
1987         }
1988         vc->vcore_state = VCORE_INACTIVE;
1989 }
1990
1991 /*
1992  * This stores information about the virtual cores currently
1993  * assigned to a physical core.
1994  */
1995 struct core_info {
1996         int             n_subcores;
1997         int             max_subcore_threads;
1998         int             total_threads;
1999         int             subcore_threads[MAX_SUBCORES];
2000         struct kvm      *subcore_vm[MAX_SUBCORES];
2001         struct list_head vcs[MAX_SUBCORES];
2002 };
2003
2004 /*
2005  * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2006  * respectively in 2-way micro-threading (split-core) mode.
2007  */
2008 static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
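/*
 * Correspondingly, in 4-way mode (subcore_size = 2) the four subcores
 * start at threads 0, 4, 2 and 6, i.e. they use thread pairs 0-1, 4-5,
 * 2-3 and 6-7 respectively.
 */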
2009
2010 static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2011 {
2012         int sub;
2013
2014         memset(cip, 0, sizeof(*cip));
2015         cip->n_subcores = 1;
2016         cip->max_subcore_threads = vc->num_threads;
2017         cip->total_threads = vc->num_threads;
2018         cip->subcore_threads[0] = vc->num_threads;
2019         cip->subcore_vm[0] = vc->kvm;
2020         for (sub = 0; sub < MAX_SUBCORES; ++sub)
2021                 INIT_LIST_HEAD(&cip->vcs[sub]);
2022         list_add_tail(&vc->preempt_list, &cip->vcs[0]);
2023 }
2024
2025 static bool subcore_config_ok(int n_subcores, int n_threads)
2026 {
2027         /* Can only dynamically split if unsplit to begin with */
2028         if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2029                 return false;
2030         if (n_subcores > MAX_SUBCORES)
2031                 return false;
2032         if (n_subcores > 1) {
2033                 if (!(dynamic_mt_modes & 2))
2034                         n_subcores = 4;
2035                 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2036                         return false;
2037         }
2038
2039         return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
2040 }
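/*
 * For example, with the default dynamic_mt_modes = 6 a request for three
 * 2-thread subcores is accepted (4-way split allowed, 3 * 2 <= 8 threads);
 * with dynamic_mt_modes = 2 the same request fails because it would need
 * a 4-way split; and with dynamic_mt_modes = 4 even a 2-subcore request is
 * promoted to a 4-way split, so it only fits if n_threads <= 2.
 */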
2041
2042 static void init_master_vcore(struct kvmppc_vcore *vc)
2043 {
2044         vc->master_vcore = vc;
2045         vc->entry_exit_map = 0;
2046         vc->in_guest = 0;
2047         vc->napping_threads = 0;
2048         vc->conferring_threads = 0;
2049 }
2050
2051 /*
2052  * See if the existing subcores can be split into 3 (or fewer) subcores
2053  * of at most two threads each, so we can fit in another vcore.  This
2054  * assumes there are at most two subcores and at most 6 threads in total.
2055  */
2056 static bool can_split_piggybacked_subcores(struct core_info *cip)
2057 {
2058         int sub, new_sub;
2059         int large_sub = -1;
2060         int thr;
2061         int n_subcores = cip->n_subcores;
2062         struct kvmppc_vcore *vc, *vcnext;
2063         struct kvmppc_vcore *master_vc = NULL;
2064
2065         for (sub = 0; sub < cip->n_subcores; ++sub) {
2066                 if (cip->subcore_threads[sub] <= 2)
2067                         continue;
2068                 if (large_sub >= 0)
2069                         return false;
2070                 large_sub = sub;
2071                 vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
2072                                       preempt_list);
2073                 if (vc->num_threads > 2)
2074                         return false;
2075                 n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
2076         }
2077         if (large_sub < 0 || !subcore_config_ok(n_subcores + 1, 2))
2078                 return false;
2079
2080         /*
2081          * Seems feasible, so go through and move vcores to new subcores.
2082          * Note that when we have two or more vcores in one subcore,
2083          * all those vcores must have only one thread each.
2084          */
2085         new_sub = cip->n_subcores;
2086         thr = 0;
2087         sub = large_sub;
2088         list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) {
2089                 if (thr >= 2) {
2090                         list_del(&vc->preempt_list);
2091                         list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]);
2092                         /* vc->num_threads must be 1 */
2093                         if (++cip->subcore_threads[new_sub] == 1) {
2094                                 cip->subcore_vm[new_sub] = vc->kvm;
2095                                 init_master_vcore(vc);
2096                                 master_vc = vc;
2097                                 ++cip->n_subcores;
2098                         } else {
2099                                 vc->master_vcore = master_vc;
2100                                 ++new_sub;
2101                         }
2102                 }
2103                 thr += vc->num_threads;
2104         }
2105         cip->subcore_threads[large_sub] = 2;
2106         cip->max_subcore_threads = 2;
2107
2108         return true;
2109 }
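/*
 * For example, a subcore holding three single-thread vcores keeps its
 * first two vcores (its thread count is trimmed to 2) while the third
 * vcore becomes the master of a newly added one-thread subcore, leaving
 * room for the vcore the caller is trying to fit in.
 */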
2110
2111 static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2112 {
2113         int n_threads = vc->num_threads;
2114         int sub;
2115
2116         if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2117                 return false;
2118
2119         if (n_threads < cip->max_subcore_threads)
2120                 n_threads = cip->max_subcore_threads;
2121         if (subcore_config_ok(cip->n_subcores + 1, n_threads)) {
2122                 cip->max_subcore_threads = n_threads;
2123         } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 &&
2124                    vc->num_threads <= 2) {
2125                 /*
2126                  * We may be able to fit another subcore in by
2127                  * splitting an existing subcore with 3 or 4
2128                  * threads into two 2-thread subcores, or one
2129                  * with 5 or 6 threads into three subcores.
2130                  * We can only do this if those subcores have
2131                  * piggybacked virtual cores.
2132                  */
2133                 if (!can_split_piggybacked_subcores(cip))
2134                         return false;
2135         } else {
2136                 return false;
2137         }
2138
2139         sub = cip->n_subcores;
2140         ++cip->n_subcores;
2141         cip->total_threads += vc->num_threads;
2142         cip->subcore_threads[sub] = vc->num_threads;
2143         cip->subcore_vm[sub] = vc->kvm;
2144         init_master_vcore(vc);
2145         list_del(&vc->preempt_list);
2146         list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
2147
2148         return true;
2149 }
2150
2151 static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
2152                                   struct core_info *cip, int sub)
2153 {
2154         struct kvmppc_vcore *vc;
2155         int n_thr;
2156
2157         vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
2158                               preempt_list);
2159
2160         /* require same VM and same per-core reg values */
2161         if (pvc->kvm != vc->kvm ||
2162             pvc->tb_offset != vc->tb_offset ||
2163             pvc->pcr != vc->pcr ||
2164             pvc->lpcr != vc->lpcr)
2165                 return false;
2166
2167         /* P8 guest with > 1 thread per core would see wrong TIR value */
2168         if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
2169             (vc->num_threads > 1 || pvc->num_threads > 1))
2170                 return false;
2171
2172         n_thr = cip->subcore_threads[sub] + pvc->num_threads;
2173         if (n_thr > cip->max_subcore_threads) {
2174                 if (!subcore_config_ok(cip->n_subcores, n_thr))
2175                         return false;
2176                 cip->max_subcore_threads = n_thr;
2177         }
2178
2179         cip->total_threads += pvc->num_threads;
2180         cip->subcore_threads[sub] = n_thr;
2181         pvc->master_vcore = vc;
2182         list_del(&pvc->preempt_list);
2183         list_add_tail(&pvc->preempt_list, &cip->vcs[sub]);
2184
2185         return true;
2186 }
2187
2188 /*
2189  * Work out whether it is possible to piggyback the execution of
2190  * vcore *pvc onto the execution of the other vcores described in *cip.
2191  */
2192 static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2193                           int target_threads)
2194 {
2195         int sub;
2196
2197         if (cip->total_threads + pvc->num_threads > target_threads)
2198                 return false;
2199         for (sub = 0; sub < cip->n_subcores; ++sub)
2200                 if (cip->subcore_threads[sub] &&
2201                     can_piggyback_subcore(pvc, cip, sub))
2202                         return true;
2203
2204         if (can_dynamic_split(pvc, cip))
2205                 return true;
2206
2207         return false;
2208 }
2209
2210 static void prepare_threads(struct kvmppc_vcore *vc)
2211 {
2212         struct kvm_vcpu *vcpu, *vnext;
2213
2214         list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2215                                  arch.run_list) {
2216                 if (signal_pending(vcpu->arch.run_task))
2217                         vcpu->arch.ret = -EINTR;
2218                 else if (vcpu->arch.vpa.update_pending ||
2219                          vcpu->arch.slb_shadow.update_pending ||
2220                          vcpu->arch.dtl.update_pending)
2221                         vcpu->arch.ret = RESUME_GUEST;
2222                 else
2223                         continue;
2224                 kvmppc_remove_runnable(vc, vcpu);
2225                 wake_up(&vcpu->arch.cpu_run);
2226         }
2227 }
2228
2229 static void collect_piggybacks(struct core_info *cip, int target_threads)
2230 {
2231         struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2232         struct kvmppc_vcore *pvc, *vcnext;
2233
2234         spin_lock(&lp->lock);
2235         list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2236                 if (!spin_trylock(&pvc->lock))
2237                         continue;
2238                 prepare_threads(pvc);
2239                 if (!pvc->n_runnable) {
2240                         list_del_init(&pvc->preempt_list);
2241                         if (pvc->runner == NULL) {
2242                                 pvc->vcore_state = VCORE_INACTIVE;
2243                                 kvmppc_core_end_stolen(pvc);
2244                         }
2245                         spin_unlock(&pvc->lock);
2246                         continue;
2247                 }
2248                 if (!can_piggyback(pvc, cip, target_threads)) {
2249                         spin_unlock(&pvc->lock);
2250                         continue;
2251                 }
2252                 kvmppc_core_end_stolen(pvc);
2253                 pvc->vcore_state = VCORE_PIGGYBACK;
2254                 if (cip->total_threads >= target_threads)
2255                         break;
2256         }
2257         spin_unlock(&lp->lock);
2258 }
2259
2260 static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
2261 {
2262         int still_running = 0;
2263         u64 now;
2264         long ret;
2265         struct kvm_vcpu *vcpu, *vnext;
2266
2267         spin_lock(&vc->lock);
2268         now = get_tb();
2269         list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2270                                  arch.run_list) {
2271                 /* cancel pending dec exception if dec is positive */
2272                 if (now < vcpu->arch.dec_expires &&
2273                     kvmppc_core_pending_dec(vcpu))
2274                         kvmppc_core_dequeue_dec(vcpu);
2275
2276                 trace_kvm_guest_exit(vcpu);
2277
2278                 ret = RESUME_GUEST;
2279                 if (vcpu->arch.trap)
2280                         ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
2281                                                     vcpu->arch.run_task);
2282
2283                 vcpu->arch.ret = ret;
2284                 vcpu->arch.trap = 0;
2285
2286                 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2287                         if (vcpu->arch.pending_exceptions)
2288                                 kvmppc_core_prepare_to_enter(vcpu);
2289                         if (vcpu->arch.ceded)
2290                                 kvmppc_set_timer(vcpu);
2291                         else
2292                                 ++still_running;
2293                 } else {
2294                         kvmppc_remove_runnable(vc, vcpu);
2295                         wake_up(&vcpu->arch.cpu_run);
2296                 }
2297         }
2298         list_del_init(&vc->preempt_list);
2299         if (!is_master) {
2300                 if (still_running > 0) {
2301                         kvmppc_vcore_preempt(vc);
2302                 } else if (vc->runner) {
2303                         vc->vcore_state = VCORE_PREEMPT;
2304                         kvmppc_core_start_stolen(vc);
2305                 } else {
2306                         vc->vcore_state = VCORE_INACTIVE;
2307                 }
2308                 if (vc->n_runnable > 0 && vc->runner == NULL) {
2309                         /* make sure there's a candidate runner awake */
2310                         vcpu = list_first_entry(&vc->runnable_threads,
2311                                                 struct kvm_vcpu, arch.run_list);
2312                         wake_up(&vcpu->arch.cpu_run);
2313                 }
2314         }
2315         spin_unlock(&vc->lock);
2316 }
2317
2318 /*
2319  * Clear core from the list of active host cores as we are about to
2320  * enter the guest. Only do this if it is the primary thread of the
2321  * whole core (not just of a subcore) that is entering the guest.
2322  */
2323 static inline void kvmppc_clear_host_core(int cpu)
2324 {
2325         int core;
2326
2327         if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
2328                 return;
2329         /*
2330          * Memory barrier can be omitted here as we will do a smp_wmb()
2331          * later in kvmppc_start_thread, and we need to ensure that this
2332          * state is visible to other CPUs only after we enter the guest.
2333          */
2334         core = cpu >> threads_shift;
2335         kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
2336 }
2337
2338 /*
2339  * Advertise this core as an active host core since we exited the guest.
2340  * Only need to do this if it is the primary thread of the core that is
2341  * exiting.
2342  */
2343 static inline void kvmppc_set_host_core(int cpu)
2344 {
2345         int core;
2346
2347         if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
2348                 return;
2349
2350         /*
2351          * Memory barrier can be omitted here because we do a spin_unlock
2352          * immediately after this which provides the memory barrier.
2353          */
2354         core = cpu >> threads_shift;
2355         kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
2356 }
2357
2358 /*
2359  * Run a set of guest threads on a physical core.
2360  * Called with vc->lock held.
2361  */
2362 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
2363 {
2364         struct kvm_vcpu *vcpu, *vnext;
2365         int i;
2366         int srcu_idx;
2367         struct core_info core_info;
2368         struct kvmppc_vcore *pvc, *vcnext;
2369         struct kvm_split_mode split_info, *sip;
2370         int split, subcore_size, active;
2371         int sub;
2372         bool thr0_done;
2373         unsigned long cmd_bit, stat_bit;
2374         int pcpu, thr;
2375         int target_threads;
2376
2377         /*
2378          * Remove from the list any threads that have a signal pending
2379          * or need a VPA update done
2380          */
2381         prepare_threads(vc);
2382
2383         /* if the runner is no longer runnable, let the caller pick a new one */
2384         if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
2385                 return;
2386
2387         /*
2388          * Initialize *vc.
2389          */
2390         init_master_vcore(vc);
2391         vc->preempt_tb = TB_NIL;
2392
2393         /*
2394          * Make sure we are running on primary threads, and that secondary
2395          * threads are offline.  Also check if the number of threads in this
2396          * guest is greater than the current number of host threads per subcore.
2397          */
2398         if ((threads_per_core > 1) &&
2399             ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
2400                 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
2401                                          arch.run_list) {
2402                         vcpu->arch.ret = -EBUSY;
2403                         kvmppc_remove_runnable(vc, vcpu);
2404                         wake_up(&vcpu->arch.cpu_run);
2405                 }
2406                 goto out;
2407         }
2408
2409         /*
2410          * See if we could run any other vcores on the physical core
2411          * along with this one.
2412          */
2413         init_core_info(&core_info, vc);
2414         pcpu = smp_processor_id();
2415         target_threads = threads_per_subcore;
2416         if (target_smt_mode && target_smt_mode < target_threads)
2417                 target_threads = target_smt_mode;
2418         if (vc->num_threads < target_threads)
2419                 collect_piggybacks(&core_info, target_threads);
2420
2421         /* Decide on micro-threading (split-core) mode */
2422         subcore_size = threads_per_subcore;
2423         cmd_bit = stat_bit = 0;
2424         split = core_info.n_subcores;
2425         sip = NULL;
2426         if (split > 1) {
2427                 /* threads_per_subcore must be MAX_SMT_THREADS (8) here */
2428                 if (split == 2 && (dynamic_mt_modes & 2)) {
2429                         cmd_bit = HID0_POWER8_1TO2LPAR;
2430                         stat_bit = HID0_POWER8_2LPARMODE;
2431                 } else {
2432                         split = 4;
2433                         cmd_bit = HID0_POWER8_1TO4LPAR;
2434                         stat_bit = HID0_POWER8_4LPARMODE;
2435                 }
2436                 subcore_size = MAX_SMT_THREADS / split;
2437                 sip = &split_info;
2438                 memset(&split_info, 0, sizeof(split_info));
2439                 split_info.rpr = mfspr(SPRN_RPR);
2440                 split_info.pmmar = mfspr(SPRN_PMMAR);
2441                 split_info.ldbar = mfspr(SPRN_LDBAR);
2442                 split_info.subcore_size = subcore_size;
2443                 for (sub = 0; sub < core_info.n_subcores; ++sub)
2444                         split_info.master_vcs[sub] =
2445                                 list_first_entry(&core_info.vcs[sub],
2446                                         struct kvmppc_vcore, preempt_list);
2447                 /* order writes to split_info before kvm_split_mode pointer */
2448                 smp_wmb();
2449         }
2450         pcpu = smp_processor_id();
2451         for (thr = 0; thr < threads_per_subcore; ++thr)
2452                 paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
2453
2454         /* Initiate micro-threading (split-core) if required */
2455         if (cmd_bit) {
2456                 unsigned long hid0 = mfspr(SPRN_HID0);
2457
2458                 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
2459                 mb();
2460                 mtspr(SPRN_HID0, hid0);
2461                 isync();
2462                 for (;;) {
2463                         hid0 = mfspr(SPRN_HID0);
2464                         if (hid0 & stat_bit)
2465                                 break;
2466                         cpu_relax();
2467                 }
2468         }
2469
2470         kvmppc_clear_host_core(pcpu);
2471
2472         /* Start all the threads */
2473         active = 0;
2474         for (sub = 0; sub < core_info.n_subcores; ++sub) {
2475                 thr = subcore_thread_map[sub];
2476                 thr0_done = false;
2477                 active |= 1 << thr;
2478                 list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
2479                         pvc->pcpu = pcpu + thr;
2480                         list_for_each_entry(vcpu, &pvc->runnable_threads,
2481                                             arch.run_list) {
2482                                 kvmppc_start_thread(vcpu, pvc);
2483                                 kvmppc_create_dtl_entry(vcpu, pvc);
2484                                 trace_kvm_guest_enter(vcpu);
2485                                 if (!vcpu->arch.ptid)
2486                                         thr0_done = true;
2487                                 active |= 1 << (thr + vcpu->arch.ptid);
2488                         }
2489                         /*
2490                          * We need to start the first thread of each subcore
2491                          * even if it doesn't have a vcpu.
2492                          */
2493                         if (pvc->master_vcore == pvc && !thr0_done)
2494                                 kvmppc_start_thread(NULL, pvc);
2495                         thr += pvc->num_threads;
2496                 }
2497         }
2498
2499         /*
2500          * Ensure that split_info.do_nap is set after setting
2501          * the vcore pointer in the PACA of the secondaries.
2502          */
2503         smp_mb();
2504         if (cmd_bit)
2505                 split_info.do_nap = 1;  /* ask secondaries to nap when done */
2506
2507         /*
2508          * When doing micro-threading, poke the inactive threads as well.
2509          * This gets them to the nap instruction after kvm_do_nap,
2510          * which reduces the time taken to unsplit later.
2511          */
2512         if (split > 1)
2513                 for (thr = 1; thr < threads_per_subcore; ++thr)
2514                         if (!(active & (1 << thr)))
2515                                 kvmppc_ipi_thread(pcpu + thr);
2516
2517         vc->vcore_state = VCORE_RUNNING;
2518         preempt_disable();
2519
2520         trace_kvmppc_run_core(vc, 0);
2521
2522         for (sub = 0; sub < core_info.n_subcores; ++sub)
2523                 list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
2524                         spin_unlock(&pvc->lock);
2525
2526         guest_enter();
2527
2528         srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2529
2530         __kvmppc_vcore_entry();
2531
2532         srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2533
2534         spin_lock(&vc->lock);
2535         /* prevent other vcpu threads from doing kvmppc_start_thread() now */
2536         vc->vcore_state = VCORE_EXITING;
2537
2538         /* wait for secondary threads to finish writing their state to memory */
2539         kvmppc_wait_for_nap();
2540
2541         /* Return to whole-core mode if we split the core earlier */
2542         if (split > 1) {
2543                 unsigned long hid0 = mfspr(SPRN_HID0);
2544                 unsigned long loops = 0;
2545
2546                 hid0 &= ~HID0_POWER8_DYNLPARDIS;
2547                 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
2548                 mb();
2549                 mtspr(SPRN_HID0, hid0);
2550                 isync();
2551                 for (;;) {
2552                         hid0 = mfspr(SPRN_HID0);
2553                         if (!(hid0 & stat_bit))
2554                                 break;
2555                         cpu_relax();
2556                         ++loops;
2557                 }
2558                 split_info.do_nap = 0;
2559         }
2560
2561         /* Let secondaries go back to the offline loop */
2562         for (i = 0; i < threads_per_subcore; ++i) {
2563                 kvmppc_release_hwthread(pcpu + i);
2564                 if (sip && sip->napped[i])
2565                         kvmppc_ipi_thread(pcpu + i);
2566         }
2567
2568         kvmppc_set_host_core(pcpu);
2569
2570         spin_unlock(&vc->lock);
2571
2572         /* make sure updates to secondary vcpu structs are visible now */
2573         smp_mb();
2574         guest_exit();
2575
2576         for (sub = 0; sub < core_info.n_subcores; ++sub)
2577                 list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
2578                                          preempt_list)
2579                         post_guest_process(pvc, pvc == vc);
2580
2581         spin_lock(&vc->lock);
2582         preempt_enable();
2583
2584  out:
2585         vc->vcore_state = VCORE_INACTIVE;
2586         trace_kvmppc_run_core(vc, 1);
2587 }
2588
2589 /*
2590  * Wait for some other vcpu thread to execute us, and
2591  * wake us up when we need to handle something in the host.
2592  */
2593 static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
2594                                  struct kvm_vcpu *vcpu, int wait_state)
2595 {
2596         DEFINE_WAIT(wait);
2597
2598         prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
2599         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2600                 spin_unlock(&vc->lock);
2601                 schedule();
2602                 spin_lock(&vc->lock);
2603         }
2604         finish_wait(&vcpu->arch.cpu_run, &wait);
2605 }
2606
2607 /*
2608  * All the vcpus in this vcore are idle, so wait for a decrementer
2609  * or external interrupt to one of the vcpus.  vc->lock is held.
2610  */
2611 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
2612 {
2613         struct kvm_vcpu *vcpu;
2614         int do_sleep = 1;
2615         DECLARE_SWAITQUEUE(wait);
2616
2617         prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
2618
2619         /*
2620          * Check one last time for pending exceptions and ceded state after
2621          * we put ourselves on the wait queue
2622          */
2623         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
2624                 if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
2625                         do_sleep = 0;
2626                         break;
2627                 }
2628         }
2629
2630         if (!do_sleep) {
2631                 finish_swait(&vc->wq, &wait);
2632                 return;
2633         }
2634
2635         vc->vcore_state = VCORE_SLEEPING;
2636         trace_kvmppc_vcore_blocked(vc, 0);
2637         spin_unlock(&vc->lock);
2638         schedule();
2639         finish_swait(&vc->wq, &wait);
2640         spin_lock(&vc->lock);
2641         vc->vcore_state = VCORE_INACTIVE;
2642         trace_kvmppc_vcore_blocked(vc, 1);
2643 }
2644
2645 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2646 {
2647         int n_ceded;
2648         struct kvmppc_vcore *vc;
2649         struct kvm_vcpu *v, *vn;
2650
2651         trace_kvmppc_run_vcpu_enter(vcpu);
2652
2653         kvm_run->exit_reason = 0;
2654         vcpu->arch.ret = RESUME_GUEST;
2655         vcpu->arch.trap = 0;
2656         kvmppc_update_vpas(vcpu);
2657
2658         /*
2659          * Synchronize with other threads in this virtual core
2660          */
2661         vc = vcpu->arch.vcore;
2662         spin_lock(&vc->lock);
2663         vcpu->arch.ceded = 0;
2664         vcpu->arch.run_task = current;
2665         vcpu->arch.kvm_run = kvm_run;
2666         vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
2667         vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
2668         vcpu->arch.busy_preempt = TB_NIL;
2669         list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
2670         ++vc->n_runnable;
2671
2672         /*
2673          * This happens the first time this is called for a vcpu.
2674          * If the vcore is already running, we may be able to start
2675          * this thread straight away and have it join in.
2676          */
2677         if (!signal_pending(current)) {
2678                 if (vc->vcore_state == VCORE_PIGGYBACK) {
2679                         struct kvmppc_vcore *mvc = vc->master_vcore;
2680                         if (spin_trylock(&mvc->lock)) {
2681                                 if (mvc->vcore_state == VCORE_RUNNING &&
2682                                     !VCORE_IS_EXITING(mvc)) {
2683                                         kvmppc_create_dtl_entry(vcpu, vc);
2684                                         kvmppc_start_thread(vcpu, vc);
2685                                         trace_kvm_guest_enter(vcpu);
2686                                 }
2687                                 spin_unlock(&mvc->lock);
2688                         }
2689                 } else if (vc->vcore_state == VCORE_RUNNING &&
2690                            !VCORE_IS_EXITING(vc)) {
2691                         kvmppc_create_dtl_entry(vcpu, vc);
2692                         kvmppc_start_thread(vcpu, vc);
2693                         trace_kvm_guest_enter(vcpu);
2694                 } else if (vc->vcore_state == VCORE_SLEEPING) {
2695                         swake_up(&vc->wq);
2696                 }
2697
2698         }
2699
2700         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2701                !signal_pending(current)) {
2702                 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2703                         kvmppc_vcore_end_preempt(vc);
2704
2705                 if (vc->vcore_state != VCORE_INACTIVE) {
2706                         kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
2707                         continue;
2708                 }
2709                 list_for_each_entry_safe(v, vn, &vc->runnable_threads,
2710                                          arch.run_list) {
2711                         kvmppc_core_prepare_to_enter(v);
2712                         if (signal_pending(v->arch.run_task)) {
2713                                 kvmppc_remove_runnable(vc, v);
2714                                 v->stat.signal_exits++;
2715                                 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
2716                                 v->arch.ret = -EINTR;
2717                                 wake_up(&v->arch.cpu_run);
2718                         }
2719                 }
2720                 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2721                         break;
2722                 n_ceded = 0;
2723                 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
2724                         if (!v->arch.pending_exceptions)
2725                                 n_ceded += v->arch.ceded;
2726                         else
2727                                 v->arch.ceded = 0;
2728                 }
2729                 vc->runner = vcpu;
2730                 if (n_ceded == vc->n_runnable) {
2731                         kvmppc_vcore_blocked(vc);
2732                 } else if (need_resched()) {
2733                         kvmppc_vcore_preempt(vc);
2734                         /* Let something else run */
2735                         cond_resched_lock(&vc->lock);
2736                         if (vc->vcore_state == VCORE_PREEMPT)
2737                                 kvmppc_vcore_end_preempt(vc);
2738                 } else {
2739                         kvmppc_run_core(vc);
2740                 }
2741                 vc->runner = NULL;
2742         }
2743
2744         while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
2745                (vc->vcore_state == VCORE_RUNNING ||
2746                 vc->vcore_state == VCORE_EXITING ||
2747                 vc->vcore_state == VCORE_PIGGYBACK))
2748                 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
2749
2750         if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
2751                 kvmppc_vcore_end_preempt(vc);
2752
2753         if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2754                 kvmppc_remove_runnable(vc, vcpu);
2755                 vcpu->stat.signal_exits++;
2756                 kvm_run->exit_reason = KVM_EXIT_INTR;
2757                 vcpu->arch.ret = -EINTR;
2758         }
2759
2760         if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
2761                 /* Wake up some vcpu to run the core */
2762                 v = list_first_entry(&vc->runnable_threads,
2763                                      struct kvm_vcpu, arch.run_list);
2764                 wake_up(&v->arch.cpu_run);
2765         }
2766
2767         trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
2768         spin_unlock(&vc->lock);
2769         return vcpu->arch.ret;
2770 }
2771
2772 static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
2773 {
2774         int r;
2775         int srcu_idx;
2776
2777         if (!vcpu->arch.sane) {
2778                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2779                 return -EINVAL;
2780         }
2781
2782         kvmppc_core_prepare_to_enter(vcpu);
2783
2784         /* No need to go into the guest when all we'll do is come back out */
2785         if (signal_pending(current)) {
2786                 run->exit_reason = KVM_EXIT_INTR;
2787                 return -EINTR;
2788         }
2789
2790         atomic_inc(&vcpu->kvm->arch.vcpus_running);
2791         /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
2792         smp_mb();
2793
2794         /* On the first time here, set up HTAB and VRMA */
2795         if (!vcpu->kvm->arch.hpte_setup_done) {
2796                 r = kvmppc_hv_setup_htab_rma(vcpu);
2797                 if (r)
2798                         goto out;
2799         }
2800
2801         flush_all_to_thread(current);
2802
2803         vcpu->arch.wqp = &vcpu->arch.vcore->wq;
2804         vcpu->arch.pgdir = current->mm->pgd;
2805         vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
2806
2807         do {
2808                 r = kvmppc_run_vcpu(run, vcpu);
2809
2810                 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
2811                     !(vcpu->arch.shregs.msr & MSR_PR)) {
2812                         trace_kvm_hcall_enter(vcpu);
2813                         r = kvmppc_pseries_do_hcall(vcpu);
2814                         trace_kvm_hcall_exit(vcpu, r);
2815                         kvmppc_core_prepare_to_enter(vcpu);
2816                 } else if (r == RESUME_PAGE_FAULT) {
2817                         srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2818                         r = kvmppc_book3s_hv_page_fault(run, vcpu,
2819                                 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
2820                         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2821                 }
2822         } while (is_kvmppc_resume_guest(r));
2823
2824  out:
2825         vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
2826         atomic_dec(&vcpu->kvm->arch.vcpus_running);
2827         return r;
2828 }
2829
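/*
 * Fill in one segment page size entry reported to userspace and advance
 * the cursor, but only if the host MMU supports the given Linux page size.
 */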
2830 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
2831                                      int linux_psize)
2832 {
2833         struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
2834
2835         if (!def->shift)
2836                 return;
2837         (*sps)->page_shift = def->shift;
2838         (*sps)->slb_enc = def->sllp;
2839         (*sps)->enc[0].page_shift = def->shift;
2840         (*sps)->enc[0].pte_enc = def->penc[linux_psize];
2841         /*
2842          * Add 16MB MPSS support if the host supports it
2843          */
2844         if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
2845                 (*sps)->enc[1].page_shift = 24;
2846                 (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
2847         }
2848         (*sps)++;
2849 }
2850
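/* Report the segment and page sizes supported by HV KVM to userspace. */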
2851 static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
2852                                          struct kvm_ppc_smmu_info *info)
2853 {
2854         struct kvm_ppc_one_seg_page_size *sps;
2855
2856         info->flags = KVM_PPC_PAGE_SIZES_REAL;
2857         if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
2858                 info->flags |= KVM_PPC_1T_SEGMENTS;
2859         info->slb_size = mmu_slb_size;
2860
2861         /* We only support these sizes for now, and no multi-size segments */
2862         sps = &info->sps[0];
2863         kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
2864         kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
2865         kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
2866
2867         return 0;
2868 }
2869
2870 /*
2871  * Get (and clear) the dirty memory log for a memory slot.
2872  */
2873 static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
2874                                          struct kvm_dirty_log *log)
2875 {
2876         struct kvm_memslots *slots;
2877         struct kvm_memory_slot *memslot;
2878         int r;
2879         unsigned long n;
2880
2881         mutex_lock(&kvm->slots_lock);
2882
2883         r = -EINVAL;
2884         if (log->slot >= KVM_USER_MEM_SLOTS)
2885                 goto out;
2886
2887         slots = kvm_memslots(kvm);
2888         memslot = id_to_memslot(slots, log->slot);
2889         r = -ENOENT;
2890         if (!memslot->dirty_bitmap)
2891                 goto out;
2892
2893         n = kvm_dirty_bitmap_bytes(memslot);
2894         memset(memslot->dirty_bitmap, 0, n);
2895
2896         r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
2897         if (r)
2898                 goto out;
2899
2900         r = -EFAULT;
2901         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
2902                 goto out;
2903
2904         r = 0;
2905 out:
2906         mutex_unlock(&kvm->slots_lock);
2907         return r;
2908 }
2909
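/* Free a memslot's rmap array unless it is shared with "dont". */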
2910 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
2911                                         struct kvm_memory_slot *dont)
2912 {
2913         if (!dont || free->arch.rmap != dont->arch.rmap) {
2914                 vfree(free->arch.rmap);
2915                 free->arch.rmap = NULL;
2916         }
2917 }
2918
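/* Allocate the reverse-map (rmap) array, one entry per guest page. */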
2919 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
2920                                          unsigned long npages)
2921 {
2922         slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
2923         if (!slot->arch.rmap)
2924                 return -ENOMEM;
2925
2926         return 0;
2927 }
2928
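/* Nothing to do to prepare a memory region for an HV guest. */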
2929 static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
2930                                         struct kvm_memory_slot *memslot,
2931                                         const struct kvm_userspace_memory_region *mem)
2932 {
2933         return 0;
2934 }
2935
2936 static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
2937                                 const struct kvm_userspace_memory_region *mem,
2938                                 const struct kvm_memory_slot *old,
2939                                 const struct kvm_memory_slot *new)
2940 {
2941         unsigned long npages = mem->memory_size >> PAGE_SHIFT;
2942         struct kvm_memslots *slots;
2943         struct kvm_memory_slot *memslot;
2944
2945         if (npages && old->npages) {
2946                 /*
2947                  * If modifying a memslot, reset all the rmap dirty bits.
2948                  * If this is a new memslot, we don't need to do anything
2949                  * since the rmap array starts out as all zeroes,
2950                  * i.e. no pages are dirty.
2951                  */
2952                 slots = kvm_memslots(kvm);
2953                 memslot = id_to_memslot(slots, mem->slot);
2954                 kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
2955         }
2956 }
2957
2958 /*
2959  * Update LPCR values in kvm->arch and in vcores.
2960  * Caller must hold kvm->lock.
2961  */
2962 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
2963 {
2964         long int i;
2965         u32 cores_done = 0;
2966
2967         if ((kvm->arch.lpcr & mask) == lpcr)
2968                 return;
2969
2970         kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
2971
2972         for (i = 0; i < KVM_MAX_VCORES; ++i) {
2973                 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2974                 if (!vc)
2975                         continue;
2976                 spin_lock(&vc->lock);
2977                 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
2978                 spin_unlock(&vc->lock);
2979                 if (++cores_done >= kvm->arch.online_vcores)
2980                         break;
2981         }
2982 }
2983
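/* No per-vcpu MMU state to tear down in HV mode. */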
2984 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
2985 {
2986         return;
2987 }
2988
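/*
 * One-time setup of the guest's hashed page table and virtual real mode
 * area (VRMA), serialized by kvm->lock so only one vcpu does the work.
 */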
2989 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
2990 {
2991         int err = 0;
2992         struct kvm *kvm = vcpu->kvm;
2993         unsigned long hva;
2994         struct kvm_memory_slot *memslot;
2995         struct vm_area_struct *vma;
2996         unsigned long lpcr = 0, senc;
2997         unsigned long psize, porder;
2998         int srcu_idx;
2999
3000         mutex_lock(&kvm->lock);
3001         if (kvm->arch.hpte_setup_done)
3002                 goto out;       /* another vcpu beat us to it */
3003
3004         /* Allocate hashed page table (if not done already) and reset it */
3005         if (!kvm->arch.hpt_virt) {
3006                 err = kvmppc_alloc_hpt(kvm, NULL);
3007                 if (err) {
3008                         pr_err("KVM: Couldn't alloc HPT\n");
3009                         goto out;
3010                 }
3011         }
3012
3013         /* Look up the memslot for guest physical address 0 */
3014         srcu_idx = srcu_read_lock(&kvm->srcu);
3015         memslot = gfn_to_memslot(kvm, 0);
3016
3017         /* We must have some memory at 0 by now */
3018         err = -EINVAL;
3019         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
3020                 goto out_srcu;
3021
3022         /* Look up the VMA for the start of this memory slot */
3023         hva = memslot->userspace_addr;
3024         down_read(&current->mm->mmap_sem);
3025         vma = find_vma(current->mm, hva);
3026         if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
3027                 goto up_out;
3028
3029         psize = vma_kernel_pagesize(vma);
3030         porder = __ilog2(psize);
3031
3032         up_read(&current->mm->mmap_sem);
3033
3034         /* We can handle 4k, 64k or 16M pages in the VRMA */
3035         err = -EINVAL;
3036         if (!(psize == 0x1000 || psize == 0x10000 ||
3037               psize == 0x1000000))
3038                 goto out_srcu;
3039
3040         /* Update VRMASD field in the LPCR */
3041         senc = slb_pgsize_encoding(psize);
3042         kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
3043                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3044         /* the -4 is to account for senc values starting at 0x10 */
3045         lpcr = senc << (LPCR_VRMASD_SH - 4);
3046
3047         /* Create HPTEs in the hash page table for the VRMA */
3048         kvmppc_map_vrma(vcpu, memslot, porder);
3049
3050         kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
3051
3052         /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
3053         smp_wmb();
3054         kvm->arch.hpte_setup_done = 1;
3055         err = 0;
3056  out_srcu:
3057         srcu_read_unlock(&kvm->srcu, srcu_idx);
3058  out:
3059         mutex_unlock(&kvm->lock);
3060         return err;
3061
3062  up_out:
3063         up_read(&current->mm->mmap_sem);
3064         goto out_srcu;
3065 }
3066
3067 #ifdef CONFIG_KVM_XICS
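/*
 * CPU hotplug notifier: keep the per-core "in host" state used by
 * real-mode KVM in sync as CPUs come online and go offline.
 */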
3068 static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
3069                         void *hcpu)
3070 {
3071         unsigned long cpu = (long)hcpu;
3072
3073         switch (action) {
3074         case CPU_UP_PREPARE:
3075         case CPU_UP_PREPARE_FROZEN:
3076                 kvmppc_set_host_core(cpu);
3077                 break;
3078
3079 #ifdef CONFIG_HOTPLUG_CPU
3080         case CPU_DEAD:
3081         case CPU_DEAD_FROZEN:
3082         case CPU_UP_CANCELED:
3083         case CPU_UP_CANCELED_FROZEN:
3084                 kvmppc_clear_host_core(cpu);
3085                 break;
3086 #endif
3087         default:
3088                 break;
3089         }
3090
3091         return NOTIFY_OK;
3092 }
3093
3094 static struct notifier_block kvmppc_cpu_notifier = {
3095             .notifier_call = kvmppc_cpu_notify,
3096 };
3097
3098 /*
3099  * Allocate a per-core structure for managing state about which cores are
3100  * running in the host versus the guest and for exchanging data between
3101  * real-mode KVM and CPUs running in the host.
3102  * This is only done for the first VM.
3103  * The allocated structure stays even if all VMs have stopped.
3104  * It is only freed when the kvm-hv module is unloaded.
3105  * It's OK for this routine to fail; we just don't support host
3106  * core operations like redirecting H_IPI wakeups.
3107  */
3108 void kvmppc_alloc_host_rm_ops(void)
3109 {
3110         struct kvmppc_host_rm_ops *ops;
3111         unsigned long l_ops;
3112         int cpu, core;
3113         int size;
3114
3115         /* Not the first time here? */
3116         if (kvmppc_host_rm_ops_hv != NULL)
3117                 return;
3118
3119         ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
3120         if (!ops)
3121                 return;
3122
3123         size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
3124         ops->rm_core = kzalloc(size, GFP_KERNEL);
3125
3126         if (!ops->rm_core) {
3127                 kfree(ops);
3128                 return;
3129         }
3130
3131         get_online_cpus();
3132
3133         for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
3134                 if (!cpu_online(cpu))
3135                         continue;
3136
3137                 core = cpu >> threads_shift;
3138                 ops->rm_core[core].rm_state.in_host = 1;
3139         }
3140
3141         ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
3142
3143         /*
3144          * Make the contents of the kvmppc_host_rm_ops structure visible
3145          * to other CPUs before we assign it to the global variable.
3146          * Do an atomic assignment (no locks used here), but if someone
3147          * beats us to it, just free our copy and return.
3148          */
3149         smp_wmb();
3150         l_ops = (unsigned long) ops;
3151
3152         if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
3153                 put_online_cpus();
3154                 kfree(ops->rm_core);
3155                 kfree(ops);
3156                 return;
3157         }
3158
3159         register_cpu_notifier(&kvmppc_cpu_notifier);
3160
3161         put_online_cpus();
3162 }
3163
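/* Free the real-mode ops structure allocated above, if any. */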
3164 void kvmppc_free_host_rm_ops(void)
3165 {
3166         if (kvmppc_host_rm_ops_hv) {
3167                 unregister_cpu_notifier(&kvmppc_cpu_notifier);
3168                 kfree(kvmppc_host_rm_ops_hv->rm_core);
3169                 kfree(kvmppc_host_rm_ops_hv);
3170                 kvmppc_host_rm_ops_hv = NULL;
3171         }
3172 }
3173 #endif
3174
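/*
 * VM creation: allocate an LPID, compute the initial LPCR and VRMA SLB
 * encoding, enable the default hcalls and create a debugfs directory.
 */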
3175 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
3176 {
3177         unsigned long lpcr, lpid;
3178         char buf[32];
3179
3180         /* Allocate the guest's logical partition ID */
3181
3182         lpid = kvmppc_alloc_lpid();
3183         if ((long)lpid < 0)
3184                 return -ENOMEM;
3185         kvm->arch.lpid = lpid;
3186
3187         kvmppc_alloc_host_rm_ops();
3188
3189         /*
3190          * Since we don't flush the TLB when tearing down a VM,
3191          * and this lpid might have previously been used,
3192          * make sure we flush on each core before running the new VM.
3193          */
3194         cpumask_setall(&kvm->arch.need_tlb_flush);
3195
3196         /* Start out with the default set of hcalls enabled */
3197         memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
3198                sizeof(kvm->arch.enabled_hcalls));
3199
3200         kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
3201
3202         /* Init LPCR for virtual RMA mode */
3203         kvm->arch.host_lpid = mfspr(SPRN_LPID);
3204         kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
3205         lpcr &= LPCR_PECE | LPCR_LPES;
3206         lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
3207                 LPCR_VPM0 | LPCR_VPM1;
3208         kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
3209                 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3210         /* On POWER8, turn on the online bit to enable PURR/SPURR */
3211         if (cpu_has_feature(CPU_FTR_ARCH_207S))
3212                 lpcr |= LPCR_ONL;
3213         kvm->arch.lpcr = lpcr;
3214
3215         /*
3216          * Track that we now have an HV mode VM active. This blocks secondary
3217          * CPU threads from coming online.
3218          */
3219         kvm_hv_vm_activated();
3220
3221         /*
3222          * Create a debugfs directory for the VM
3223          */
3224         snprintf(buf, sizeof(buf), "vm%d", current->pid);
3225         kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
3226         if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
3227                 kvmppc_mmu_debugfs_init(kvm);
3228
3229         return 0;
3230 }
3231
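/* Free all virtual core structures belonging to a VM. */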
3232 static void kvmppc_free_vcores(struct kvm *kvm)
3233 {
3234         long int i;
3235
3236         for (i = 0; i < KVM_MAX_VCORES; ++i)
3237                 kfree(kvm->arch.vcores[i]);
3238         kvm->arch.online_vcores = 0;
3239 }
3240
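/* VM teardown: remove debugfs, drop the HV-active count, free vcores and the HPT. */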
3241 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
3242 {
3243         debugfs_remove_recursive(kvm->arch.debugfs_dir);
3244
3245         kvm_hv_vm_deactivated();
3246
3247         kvmppc_free_vcores(kvm);
3248
3249         kvmppc_free_hpt(kvm);
3250 }
3251
3252 /* We don't need to emulate any privileged instructions or dcbz */
3253 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3254                                      unsigned int inst, int *advance)
3255 {
3256         return EMULATE_FAIL;
3257 }
3258
3259 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
3260                                         ulong spr_val)
3261 {
3262         return EMULATE_FAIL;
3263 }
3264
3265 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
3266                                         ulong *spr_val)
3267 {
3268         return EMULATE_FAIL;
3269 }
3270
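/* HV KVM needs hypervisor mode on a POWER7 (ISA v2.06) or later CPU. */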
3271 static int kvmppc_core_check_processor_compat_hv(void)
3272 {
3273         if (!cpu_has_feature(CPU_FTR_HVMODE) ||
3274             !cpu_has_feature(CPU_FTR_ARCH_206))
3275                 return -EIO;
3276         /*
3277          * Disable KVM for POWER9 until the required bits are merged.
3278          */
3279         if (cpu_has_feature(CPU_FTR_ARCH_300))
3280                 return -EIO;
3281
3282         return 0;
3283 }
3284
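/*
 * HV-specific VM ioctls: HPT allocation/reset (KVM_PPC_ALLOCATE_HTAB)
 * and the HPT read/write fd used for migration (KVM_PPC_GET_HTAB_FD).
 */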
3285 static long kvm_arch_vm_ioctl_hv(struct file *filp,
3286                                  unsigned int ioctl, unsigned long arg)
3287 {
3288         struct kvm *kvm __maybe_unused = filp->private_data;
3289         void __user *argp = (void __user *)arg;
3290         long r;
3291
3292         switch (ioctl) {
3293
3294         case KVM_PPC_ALLOCATE_HTAB: {
3295                 u32 htab_order;
3296
3297                 r = -EFAULT;
3298                 if (get_user(htab_order, (u32 __user *)argp))
3299                         break;
3300                 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
3301                 if (r)
3302                         break;
3303                 r = -EFAULT;
3304                 if (put_user(htab_order, (u32 __user *)argp))
3305                         break;
3306                 r = 0;
3307                 break;
3308         }
3309
3310         case KVM_PPC_GET_HTAB_FD: {
3311                 struct kvm_get_htab_fd ghf;
3312
3313                 r = -EFAULT;
3314                 if (copy_from_user(&ghf, argp, sizeof(ghf)))
3315                         break;
3316                 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
3317                 break;
3318         }
3319
3320         default:
3321                 r = -ENOTTY;
3322         }
3323
3324         return r;
3325 }
3326
3327 /*
3328  * List of hcall numbers to enable by default.
3329  * For compatibility with old userspace, we enable by default
3330  * all hcalls that were implemented before the hcall-enabling
3331  * facility was added.  Note this list should not include H_RTAS.
3332  */
3333 static unsigned int default_hcall_list[] = {
3334         H_REMOVE,
3335         H_ENTER,
3336         H_READ,
3337         H_PROTECT,
3338         H_BULK_REMOVE,
3339         H_GET_TCE,
3340         H_PUT_TCE,
3341         H_SET_DABR,
3342         H_SET_XDABR,
3343         H_CEDE,
3344         H_PROD,
3345         H_CONFER,
3346         H_REGISTER_VPA,
3347 #ifdef CONFIG_KVM_XICS
3348         H_EOI,
3349         H_CPPR,
3350         H_IPI,
3351         H_IPOLL,
3352         H_XIRR,
3353         H_XIRR_X,
3354 #endif
3355         0
3356 };
3357
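/* Set the bit for each hcall in the list above in the default-enabled bitmap. */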
3358 static void init_default_hcalls(void)
3359 {
3360         int i;
3361         unsigned int hcall;
3362
3363         for (i = 0; default_hcall_list[i]; ++i) {
3364                 hcall = default_hcall_list[i];
3365                 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
3366                 __set_bit(hcall / 4, default_enabled_hcalls);
3367         }
3368 }
3369
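/* HV implementations of the generic PPC KVM operations. */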
3370 static struct kvmppc_ops kvm_ops_hv = {
3371         .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
3372         .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
3373         .get_one_reg = kvmppc_get_one_reg_hv,
3374         .set_one_reg = kvmppc_set_one_reg_hv,
3375         .vcpu_load   = kvmppc_core_vcpu_load_hv,
3376         .vcpu_put    = kvmppc_core_vcpu_put_hv,
3377         .set_msr     = kvmppc_set_msr_hv,
3378         .vcpu_run    = kvmppc_vcpu_run_hv,
3379         .vcpu_create = kvmppc_core_vcpu_create_hv,
3380         .vcpu_free   = kvmppc_core_vcpu_free_hv,
3381         .check_requests = kvmppc_core_check_requests_hv,
3382         .get_dirty_log  = kvm_vm_ioctl_get_dirty_log_hv,
3383         .flush_memslot  = kvmppc_core_flush_memslot_hv,
3384         .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
3385         .commit_memory_region  = kvmppc_core_commit_memory_region_hv,
3386         .unmap_hva = kvm_unmap_hva_hv,
3387         .unmap_hva_range = kvm_unmap_hva_range_hv,
3388         .age_hva  = kvm_age_hva_hv,
3389         .test_age_hva = kvm_test_age_hva_hv,
3390         .set_spte_hva = kvm_set_spte_hva_hv,
3391         .mmu_destroy  = kvmppc_mmu_destroy_hv,
3392         .free_memslot = kvmppc_core_free_memslot_hv,
3393         .create_memslot = kvmppc_core_create_memslot_hv,
3394         .init_vm =  kvmppc_core_init_vm_hv,
3395         .destroy_vm = kvmppc_core_destroy_vm_hv,
3396         .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
3397         .emulate_op = kvmppc_core_emulate_op_hv,
3398         .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
3399         .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
3400         .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
3401         .arch_vm_ioctl  = kvm_arch_vm_ioctl_hv,
3402         .hcall_implemented = kvmppc_hcall_impl_hv,
3403 };
3404
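/*
 * Allocate one sibling_subcore_state per core and point every thread's
 * paca at it, so that sibling threads of a core can share state.
 */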
3405 static int kvm_init_subcore_bitmap(void)
3406 {
3407         int i, j;
3408         int nr_cores = cpu_nr_cores();
3409         struct sibling_subcore_state *sibling_subcore_state;
3410
3411         for (i = 0; i < nr_cores; i++) {
3412                 int first_cpu = i * threads_per_core;
3413                 int node = cpu_to_node(first_cpu);
3414
3415                 /* Ignore if it is already allocated. */
3416                 if (paca[first_cpu].sibling_subcore_state)
3417                         continue;
3418
3419                 sibling_subcore_state =
3420                         kmalloc_node(sizeof(struct sibling_subcore_state),
3421                                                         GFP_KERNEL, node);
3422                 if (!sibling_subcore_state)
3423                         return -ENOMEM;
3424
3425                 memset(sibling_subcore_state, 0,
3426                                 sizeof(struct sibling_subcore_state));
3427
3428                 for (j = 0; j < threads_per_core; j++) {
3429                         int cpu = first_cpu + j;
3430
3431                         paca[cpu].sibling_subcore_state = sibling_subcore_state;
3432                 }
3433         }
3434         return 0;
3435 }
3436
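/* Module init: check CPU compatibility, hook up the HV ops and init the MMU. */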
3437 static int kvmppc_book3s_init_hv(void)
3438 {
3439         int r;
3440         /*
3441          * FIXME: Do we need to check on all CPUs?
3442          */
3443         r = kvmppc_core_check_processor_compat_hv();
3444         if (r < 0)
3445                 return -ENODEV;
3446
3447         r = kvm_init_subcore_bitmap();
3448         if (r)
3449                 return r;
3450
3451         kvm_ops_hv.owner = THIS_MODULE;
3452         kvmppc_hv_ops = &kvm_ops_hv;
3453
3454         init_default_hcalls();
3455
3456         init_vcore_lists();
3457
3458         r = kvmppc_mmu_hv_init();
3459         return r;
3460 }
3461
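/* Module exit: release the real-mode ops and unhook the HV ops. */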
3462 static void kvmppc_book3s_exit_hv(void)
3463 {
3464         kvmppc_free_host_rm_ops();
3465         kvmppc_hv_ops = NULL;
3466 }
3467
3468 module_init(kvmppc_book3s_init_hv);
3469 module_exit(kvmppc_book3s_exit_hv);
3470 MODULE_LICENSE("GPL");
3471 MODULE_ALIAS_MISCDEV(KVM_MINOR);
3472 MODULE_ALIAS("devname:kvm");