/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

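/*
 * VCPU_STAT() expands to the (offset, kind) pair that struct
 * kvm_stats_debugfs_item expects, so every entry in the table below
 * becomes one per-vcpu counter file in the kvm debugfs directory.
 */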
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

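/*
 * Walk all pages of a memslot in the guest mapping and propagate their
 * dirty state into the memslot's dirty bitmap; called under slots_lock
 * from kvm_vm_ioctl_get_dirty_log() below.
 */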
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages; last_gfn is one past the end of the slot */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_mem_control(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_vfacility(76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
                                  CRYCB_FORMAT1;

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
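        /*
         * Stagger the start of each SCA within its page in 16-byte steps,
         * wrapping at 2k; presumably this spreads the hot SCA fields of
         * different VMs across cache lines.
         */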
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_crypto;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_nogmap:
        kfree(kvm->arch.crypto.crycb);
out_crypto:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
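        /* CR0 and CR14 get their architected initial-reset values */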
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_vfacility(76))
                return;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

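        /*
         * ecb2 bit 0x80 enables CMMA interpretation in SIE; 0x08 appears
         * to be PFMF interpretation, cleared here so that PFMF intercepts
         * while CMMA is active.
         */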
        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xD1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
                                      ICTL_TPROT;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
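        /*
         * The clock-comparator timer runs on absolute (wall clock) time
         * and wakes the vcpu from idle once the guest's ckc is due.
         */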
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
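        /*
         * Wire the vcpu into the SCA: sda points at the SIE block,
         * scaoh/scaol give SIE the high and low halves of the SCA
         * address, and the mcn bitmap marks which CPU slots are in use.
         */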
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!is_vcpu_stopped(vcpu)) {
                rc = -EBUSY;
        } else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        /* CHECK_STOP and LOAD are not supported yet */
        return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
                                       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int rc = 0;

        /* user space knows about this interface - let it control the state */
        vcpu->kvm->arch.user_cpu_state_ctrl = 1;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_STOPPED:
                kvm_s390_vcpu_stop(vcpu);
                break;
        case KVM_MP_STATE_OPERATING:
                kvm_s390_vcpu_start(vcpu);
                break;
        case KVM_MP_STATE_LOAD:
        case KVM_MP_STATE_CHECK_STOP:
                /* fall through - CHECK_STOP and LOAD are not supported yet */
        default:
                rc = -ENXIO;
        }

        return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        if (!kvm->arch.use_cmma)
                return false;
        return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        s390_vcpu_unblock(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;

                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                vcpu->arch.sie_block->ihcpu = 0xffff;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_set_mask(CPUSTAT_IBS,
                                        &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_clear_mask(CPUSTAT_IBS,
                                          &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        return gmap_fault(vcpu->arch.gmap, gpa,
                          writable ? FAULT_FLAG_WRITE : 0);
}

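/*
 * A pfault "init" notification is injected into the faulting vcpu
 * directly, while "done" is a floating interrupt so it reaches the
 * guest even if that vcpu is not running at completion time.
 */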
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        struct kvm_s390_irq irq;

        if (start_token) {
                irq.u.ext.ext_params2 = token;
                irq.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to clean up
         */
        return true;
}

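/*
 * Decide whether a host fault may be handled asynchronously via the
 * pfault mechanism: the guest must have armed a pfault token and have
 * the matching external-interrupt subclass enabled (the CR0 bit tested
 * below); otherwise the caller falls back to synchronous fault-in.
 */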
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the housekeeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

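        /*
         * A negative exit_reason means the host faulted while in SIE;
         * otherwise SIE stopped with an interception code that is
         * handled further down.
         */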
        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * must be no uaccess between guest_enter and guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                /* some control register changes require a tlb flush */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
                vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
        }
        kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
        kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                return 0;
        }

        /* check this before the signal mask is replaced, so that an early
         * return does not leave the altered mask behind */
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
        } else if (is_vcpu_stopped(vcpu)) {
                pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
                                   vcpu->vcpu_id);
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        sync_regs(vcpu, kvm_run);

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler
                 */
                rc = 0;
        }

        store_regs(vcpu, kvm_run);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        unsigned int px;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        px = kvm_s390_get_prefix(vcpu);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &px, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
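        /* the save area apparently holds the clock comparator shifted
         * right by 8 bits, hence the conversion below */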
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                __disable_ibs_on_vcpu(vcpu);
        }
}

1503 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1504 {
1505         kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1506         kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1507         exit_sie_sync(vcpu);
1508 }
1509
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	vcpu->arch.local_int.action_bits &=
				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
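
/*
 * Illustrative sketch (hypothetical vcpu_fd, not part of this file):
 * enabling the channel subsystem support handled above; flags must be
 * zero or the ioctl fails with -EINVAL:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */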

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
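
/*
 * Illustrative sketch (hypothetical vcpu_fd, not part of this file):
 * injecting an interrupt through the KVM_S390_INTERRUPT case above. The
 * legacy kvm_s390_interrupt layout is converted to a kvm_s390_irq by
 * s390int_to_s390irq() before injection:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_PROGRAM_INT,
 *		.parm = 0x0001,		// operation exception
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */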

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
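
/*
 * Illustrative sketch (user-controlled VMs only, hypothetical vcpu_fd,
 * assumes a 4K page size): the fault handler above backs an mmap() of
 * the sie_block at KVM_S390_SIE_PAGE_OFFSET:
 *
 *	void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 *	if (sie == MAP_FAILED)
 *		perror("mmap sie_block");
 */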

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   in this slot at any time after this call. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
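
/*
 * Illustrative sketch (hypothetical vm_fd and 1MB-aligned backing
 * buffer, not part of this file): a memory slot that passes the
 * alignment checks above; note that only userspace_addr and
 * memory_size are checked here:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,		// multiple of 1MB
 *		.userspace_addr = (__u64) backing,	// 1MB aligned
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */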

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
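
/*
 * Illustrative sketch, not part of this file: the facility list uses
 * MSB-0 bit numbering, so checking whether facility nr survived the
 * masks above could look like this on big-endian s390 (compare the
 * test_vfacility() helper in kvm-s390.h):
 *
 *	static inline int example_test_vfacility(unsigned long nr)
 *	{
 *		return (vfacilities[nr >> 6] >> (63 - (nr & 63))) & 1;
 *	}
 */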

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");