arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

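/*
 * Debugfs statistics: each entry below pairs a file name with the offset
 * of a counter in struct kvm_vcpu's stat area. VCPU_STAT expands to that
 * offset plus the KVM_STAT_VCPU type, which lets common KVM code expose
 * the per-vcpu counters under debugfs.
 */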
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

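/*
 * Transfer the dirty state that the gmap has recorded for every page of
 * the memslot into the memslot's dirty bitmap, from where
 * kvm_get_dirty_log() reports it to userspace.
 */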
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages; last_gfn is one past the final gfn */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

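/*
 * VM memory attributes: CMMA may only be enabled as long as no VCPU has
 * been created (checked under kvm->lock), and clearing CMMA resets the
 * page usage state of the whole guest address space.
 */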
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;
        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_mem_control(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

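/*
 * The CRYCB is only allocated when the crypto facility (STFLE bit 76)
 * is available. crycbd combines the block's address with its format
 * indication, ready to be copied into each VCPU's SIE control block.
 */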
static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_vfacility(76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
                                  CRYCB_FORMAT1;

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_crypto;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_nogmap:
        kfree(kvm->arch.crypto.crycb);
out_crypto:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        return 0;
}

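/*
 * Host and guest register contents are switched lazily: load/put save
 * the host floating point and access registers and install the guest's
 * copies, so nothing needs to be saved or restored on every SIE entry
 * and exit in between.
 */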
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in POP, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_vfacility(76))
                return;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

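/*
 * CMMA needs a collection block origin (cbrlo) page per VCPU. Setting
 * 0x80 in ecb2 enables CMMA interpretation; clearing 0x08 disables PFMF
 * interpretation so that PFMF is intercepted while CMMA is in use.
 */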
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xD1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
                                      ICTL_TPROT;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!is_vcpu_stopped(vcpu))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        /* CHECK_STOP and LOAD are not supported yet */
        return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
                                       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int rc = 0;

        /* user space knows about this interface - let it control the state */
        vcpu->kvm->arch.user_cpu_state_ctrl = 1;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_STOPPED:
                kvm_s390_vcpu_stop(vcpu);
                break;
        case KVM_MP_STATE_OPERATING:
                kvm_s390_vcpu_start(vcpu);
                break;
        case KVM_MP_STATE_LOAD:
        case KVM_MP_STATE_CHECK_STOP:
                /* fall through - CHECK_STOP and LOAD are not supported yet */
        default:
                rc = -ENXIO;
        }

        return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        if (!kvm->arch.use_cmma)
                return false;
        return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

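/*
 * Handle pending VCPU requests before (re)entering SIE: re-arm the ipte
 * notifier for the prefix pages (MMU_RELOAD), force a guest TLB flush
 * on the next entry (ihcpu = 0xffff) and set or clear the IBS cpuflag.
 */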
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        s390_vcpu_unblock(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                vcpu->arch.sie_block->ihcpu = 0xffff;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_set_mask(CPUSTAT_IBS,
                                        &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_clear_mask(CPUSTAT_IBS,
                                          &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        return gmap_fault(vcpu->arch.gmap, gpa,
                          writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        inti.parm64 = token;

        if (start_token) {
                inti.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to clean up
         */
        return true;
}

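/*
 * An async pfault token is only handed to the guest when the guest has
 * armed the mechanism: a valid token is set, the PSW matches the
 * compare/select masks, external interrupts and the required subclass
 * in CR0 (bit 0x200) are enabled, no interrupt is already pending and
 * pfault handling is enabled for the gmap.
 */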
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390, notifications for arriving pages will be delivered
         * directly to the guest, but the housekeeping for completed
         * pfaults is handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected.
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * should be no uaccess between guest_enter and guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

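/*
 * sync_regs()/store_regs() shuttle the register state shared with
 * userspace through kvm_run: registers marked dirty by userspace are
 * copied into the SIE block before the run loop, and the current state
 * is copied back out afterwards.
 */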
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                /* some control register changes require a tlb flush */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
                vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
        }
        kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
        kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                return 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
        } else if (is_vcpu_stopped(vcpu)) {
                pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
                                   vcpu->vcpu_id);
                return -EINVAL;
        }

        sync_regs(vcpu, kvm_run);

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * The intercept was handled, but userspace support is
                 * needed; kvm_run has been prepared by the handler.
                 */
                rc = 0;
        }

        store_regs(vcpu, kvm_run);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        unsigned int px;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        px = kvm_s390_get_prefix(vcpu);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &px, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                __disable_ibs_on_vcpu(vcpu);
        }
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
        kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
        exit_sie_sync(vcpu);
}

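/*
 * IBS is only worthwhile while exactly one VCPU is running: starting a
 * VCPU enables it for a lone runner and revokes it everywhere once a
 * second VCPU comes online; stopping re-enables it when only one
 * started VCPU remains.
 */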
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	vcpu->arch.local_int.action_bits &=
				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
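
/*
 * For illustration only: how userspace could enable the CSS-support
 * capability on a VCPU, assuming "vcpu_fd" was obtained via
 * KVM_CREATE_VCPU:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */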

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
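
/*
 * For illustration only: fetching a single register through
 * KVM_GET_ONE_REG. The register id KVM_REG_S390_CPU_TIMER is assumed
 * here; see asm/kvm.h for the ids actually provided.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */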

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET &&
	    kvm_is_ucontrol(vcpu->kvm)) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
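
/*
 * For illustration only: a user-controlled VM monitor could map the SIE
 * control block of a VCPU through the fault handler above, assuming a
 * 4k page size and a "vcpu_fd" from KVM_CREATE_VCPU:
 *
 *	void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 *	if (sie == MAP_FAILED)
 *		perror("mmap sie block");
 */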

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end at a
	 * segment boundary (1 MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
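
/*
 * For illustration only: a memslot that passes the checks above has a
 * 1 MB aligned size and userspace address, e.g. (values made up, with
 * "backing" a 1 MB aligned mapping and "vm_fd" the VM file descriptor):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,
 *		.userspace_addr  = (__u64)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */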

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * Guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum amount of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
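
/*
 * Purely illustrative sketch, not used anywhere in this file: how a
 * facility number maps into the doubleword list kept in vfacilities.
 * Facility bits are numbered from the leftmost bit of the first
 * doubleword, so facility 2 corresponds to bit 61 (counting from the
 * least-significant bit) of vfacilities[0].
 */
static inline int test_vfacility_sketch(unsigned long nr)
{
	return (vfacilities[nr >> 6] >> (63UL - (nr & 63))) & 1;
}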

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");