arch/s390/kvm/interrupt.c
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

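/*
 * Type values with the top 15 bits all set (0xfffe.... and 0xffff....)
 * are the special non-I/O interrupt types; everything below that range
 * is an I/O interrupt whose type word carries the AI/CSSID/SSID/SCHID
 * fields masked above.
 */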
static int is_ioint(u64 type)
{
        return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

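/*
 * Bits 2-4 of the I/O interruption word (s390 counts bits from the
 * MSB) hold the interruption subclass (ISC).  Convert the ISC to the
 * matching subclass-mask bit as it appears in the low word of CR6.
 */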
static u64 int_word_to_isc_bits(u32 int_word)
{
        u8 isc = (int_word & 0x38000000) >> 27;

        return (0x80 >> isc) << 24;
}

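/*
 * The CR0 external-interrupt subclass masks tested below:
 *   0x4000 - emergency signal      0x2000 - external call
 *   0x0800 - clock comparator      0x0400 - CPU timer
 *   0x0200 - service signal (also gates pfault and virtio)
 */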
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_CLOCK_COMP:
                return ckc_interrupts_enabled(vcpu);
        case KVM_S390_INT_CPU_TIMER:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
                        return 1;
                return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[6] &
                    int_word_to_isc_bits(inti->io.io_int_word))
                        return 1;
                return 0;
        default:
                printk(KERN_WARNING "illegal interrupt type %llx\n",
                       inti->type);
                BUG();
        }
        return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.local_int.pending_irqs;
}

static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask = pending_local_irqs(vcpu);

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                          &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
                atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
}

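/*
 * Request an intercept for a floating interrupt that is currently not
 * deliverable: flag the pending class in the cpuflags while the PSW
 * mask is closed, or intercept loads of the gating control register,
 * so delivery can be retried once the guest opens the mask.
 */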
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR14;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR6;
                break;
        default:
                BUG();
        }
}

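/*
 * Derive the instruction-length code of the last instruction: on s390
 * the two leftmost opcode bits encode the length (00 -> 2, 01/10 -> 4,
 * 11 -> 6 bytes), so ipa >> 14 indexes the table below.
 */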
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
        const unsigned short table[] = { 2, 4, 4, 6 };

        switch (vcpu->arch.sie_block->icptcode) {
        case ICPT_INST:
        case ICPT_INSTPROGI:
        case ICPT_OPEREXC:
        case ICPT_PARTEXEC:
        case ICPT_IOINST:
                /* last instruction only stored for these icptcodes */
                return table[vcpu->arch.sie_block->ipa >> 14];
        case ICPT_PROGI:
                return vcpu->arch.sie_block->pgmilc;
        default:
                return 0;
        }
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
                   0, ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk;
        int rc;

        spin_lock(&li->lock);
        mchk = li->irq.mchk;
        /*
         * If there was an exigent machine check pending, then any repressible
         * machine checks that might have been pending are indicated along
         * with it, so always clear both bits
         */
        clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        memset(&li->irq.mchk, 0, sizeof(mchk));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk.mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk.cr14, mchk.mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk.mcic,
                           (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk.fixed_logout, sizeof(mchk.fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct _lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
        vcpu->stat.deliver_stop_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
                                         0, 0);

        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
        return 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0;
        u16 ilc = get_ilc(vcpu);

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   pgm_info.code, ilc);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
                                          struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_DONE, 0,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
                                         struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
        vcpu->stat.deliver_io_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         ((__u32)inti->io.subchannel_id << 16) |
                                                inti->io.subchannel_nr,
                                         ((__u64)inti->io.io_int_parm << 32) |
                                                inti->io.io_int_word);

        rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                           (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                           (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                           (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                           (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_mchk_info *mchk = &inti->mchk;
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk->mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk->cr14, mchk->mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk->mcic,
                        (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                        (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk->fixed_logout, sizeof(mchk->fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

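/* Indexed by IRQ_PEND_* bit numbers; the bit order encodes delivery priority. */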
static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SIGP_STOP]      = __deliver_stop,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
                rc = __deliver_service(vcpu, inti);
                break;
        case KVM_S390_INT_PFAULT_DONE:
                rc = __deliver_pfault_done(vcpu, inti);
                break;
        case KVM_S390_INT_VIRTIO:
                rc = __deliver_virtio(vcpu, inti);
                break;
        case KVM_S390_MCHK:
                rc = __deliver_mchk_floating(vcpu, inti);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                rc = __deliver_io(vcpu, inti);
                break;
        default:
                BUG();
        }

        return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
        atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

        if (!psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
            (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
            (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
                return 1;

        return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc;

        rc = !!deliverable_local_irqs(vcpu);

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if (!rc && kvm_cpu_has_pending_timer(vcpu))
                rc = 1;

        if (!rc && kvm_s390_si_ext_call_pending(vcpu))
                rc = 1;

        return rc;
}

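/*
 * A clock comparator interrupt is pending once the programmed ckc
 * value lies in the past of the guest TOD clock (host TOD plus the
 * guest's epoch offset) and such interrupts are enabled.
 */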
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.sie_block->ckc <
              get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                return 0;
        if (!ckc_interrupts_enabled(vcpu))
                return 0;
        return 1;
}

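/*
 * Enabled wait: block the vcpu until something wakes it up.  If clock
 * comparator interrupts are enabled, arm an hrtimer for the remaining
 * time so the block ends when the guest timer would fire.
 */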
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        __set_cpu_idle(vcpu);
        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        kvm_s390_vcpu_wakeup(vcpu);

        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
        atomic_clear_mask(SIGP_CTRL_C,
                          &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        deliver_irq_t func;
        int deliver;
        int rc = 0;
        unsigned long irq_type;
        unsigned long deliverable_irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (kvm_cpu_has_pending_timer(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        do {
                deliverable_irqs = deliverable_local_irqs(vcpu);
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
                if (irq_type == IRQ_PEND_COUNT)
                        break;
                func = deliver_irq_funcs[irq_type];
                if (!func) {
                        WARN_ON_ONCE(func == NULL);
                        clear_bit(irq_type, &li->pending_irqs);
                        continue;
                }
                rc = func(vcpu);
        } while (!rc && irq_type != IRQ_PEND_COUNT);

        set_intercept_indicators_local(vcpu);

        if (!rc && atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        fi->irq_count--;
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                rc = __deliver_floating_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (!rc && deliver);
        }

        return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        li->irq.pgm = irq->u.pgm;
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
                                   0, 1);
        spin_lock(&li->lock);
        irq.u.pgm.code = code;
        __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                             struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;
        int rc;

        VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
                   pgm_info->code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   pgm_info->code, 0, 1);
        spin_lock(&li->lock);
        irq.u.pgm = *pgm_info;
        rc = __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
                   irq->u.ext.ext_params, irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2, 2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;

        VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
                   irq->u.extcall.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   irq->u.extcall.code, 0, 2);

        *extcall = irq->u.extcall;
        set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                   prefix->address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   prefix->address, 0, 2);

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

        li->action_bits |= ACTION_STOP_ON_STOP;
        set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_emerg_info *emerg = &li->irq.emerg;

        VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   emerg->code, 0, 2);

        set_bit(emerg->code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                   mchk->mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   mchk->mcic, 2);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti) {
                list_del_init(&inti->list);
                fi->irq_count--;
        }
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return inti;
}

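/*
 * Queue a floating interrupt and kick a vcpu: I/O interrupts are kept
 * sorted by interruption subclass, and the target vcpu is an idle one
 * if available, otherwise chosen round-robin.
 */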
1156 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1157 {
1158         struct kvm_s390_local_interrupt *li;
1159         struct kvm_s390_float_interrupt *fi;
1160         struct kvm_s390_interrupt_info *iter;
1161         struct kvm_vcpu *dst_vcpu = NULL;
1162         int sigcpu;
1163         int rc = 0;
1164
1165         mutex_lock(&kvm->lock);
1166         fi = &kvm->arch.float_int;
1167         spin_lock(&fi->lock);
1168         if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
1169                 rc = -EINVAL;
1170                 goto unlock_fi;
1171         }
1172         fi->irq_count++;
1173         if (!is_ioint(inti->type)) {
1174                 list_add_tail(&inti->list, &fi->list);
1175         } else {
1176                 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
1177
1178                 /* Keep I/O interrupts sorted in isc order. */
1179                 list_for_each_entry(iter, &fi->list, list) {
1180                         if (!is_ioint(iter->type))
1181                                 continue;
1182                         if (int_word_to_isc_bits(iter->io.io_int_word)
1183                             <= isc_bits)
1184                                 continue;
1185                         break;
1186                 }
1187                 list_add_tail(&inti->list, &iter->list);
1188         }
1189         atomic_set(&fi->active, 1);
1190         sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
1191         if (sigcpu == KVM_MAX_VCPUS) {
1192                 do {
1193                         sigcpu = fi->next_rr_cpu++;
1194                         if (sigcpu == KVM_MAX_VCPUS)
1195                                 sigcpu = fi->next_rr_cpu = 0;
1196                 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
1197         }
1198         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1199         li = &dst_vcpu->arch.local_int;
1200         spin_lock(&li->lock);
1201         switch (inti->type) {
1202         case KVM_S390_MCHK:
1203                 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
1204                 break;
1205         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1206                 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
1207                 break;
1208         default:
1209                 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1210                 break;
1211         }
1212         spin_unlock(&li->lock);
1213         kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
1214 unlock_fi:
1215         spin_unlock(&fi->lock);
1216         mutex_unlock(&kvm->lock);
1217         return rc;
1218 }
1219
1220 int kvm_s390_inject_vm(struct kvm *kvm,
1221                        struct kvm_s390_interrupt *s390int)
1222 {
1223         struct kvm_s390_interrupt_info *inti;
1224
1225         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1226         if (!inti)
1227                 return -ENOMEM;
1228
1229         inti->type = s390int->type;
1230         switch (inti->type) {
1231         case KVM_S390_INT_VIRTIO:
1232                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1233                          s390int->parm, s390int->parm64);
1234                 inti->ext.ext_params = s390int->parm;
1235                 inti->ext.ext_params2 = s390int->parm64;
1236                 break;
1237         case KVM_S390_INT_SERVICE:
1238                 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
1239                 inti->ext.ext_params = s390int->parm;
1240                 break;
1241         case KVM_S390_INT_PFAULT_DONE:
1242                 inti->type = s390int->type;
1243                 inti->ext.ext_params2 = s390int->parm64;
1244                 break;
1245         case KVM_S390_MCHK:
1246                 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
1247                          s390int->parm64);
1248                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1249                 inti->mchk.mcic = s390int->parm64;
1250                 break;
1251         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1252                 if (inti->type & IOINT_AI_MASK)
1253                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1254                 else
1255                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1256                                  s390int->type & IOINT_CSSID_MASK,
1257                                  s390int->type & IOINT_SSID_MASK,
1258                                  s390int->type & IOINT_SCHID_MASK);
1259                 inti->io.subchannel_id = s390int->parm >> 16;
1260                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1261                 inti->io.io_int_parm = s390int->parm64 >> 32;
1262                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1263                 break;
1264         default:
1265                 kfree(inti);
1266                 return -EINVAL;
1267         }
1268         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1269                                  2);
1270
1271         return __inject_vm(kvm, inti);
1272 }
1273
1274 void kvm_s390_reinject_io_int(struct kvm *kvm,
1275                               struct kvm_s390_interrupt_info *inti)
1276 {
1277         __inject_vm(kvm, inti);
1278 }
1279
1280 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1281                        struct kvm_s390_irq *irq)
1282 {
1283         irq->type = s390int->type;
1284         switch (irq->type) {
1285         case KVM_S390_PROGRAM_INT:
1286                 if (s390int->parm & 0xffff0000)
1287                         return -EINVAL;
1288                 irq->u.pgm.code = s390int->parm;
1289                 break;
1290         case KVM_S390_SIGP_SET_PREFIX:
1291                 irq->u.prefix.address = s390int->parm;
1292                 break;
1293         case KVM_S390_INT_EXTERNAL_CALL:
1294                 if (irq->u.extcall.code & 0xffff0000)
1295                         return -EINVAL;
1296                 irq->u.extcall.code = s390int->parm;
1297                 break;
1298         case KVM_S390_INT_EMERGENCY:
1299                 if (irq->u.emerg.code & 0xffff0000)
1300                         return -EINVAL;
1301                 irq->u.emerg.code = s390int->parm;
1302                 break;
1303         case KVM_S390_MCHK:
1304                 irq->u.mchk.mcic = s390int->parm64;
1305                 break;
1306         }
1307         return 0;
1308 }
1309
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        spin_lock(&li->lock);
        switch (irq->type) {
        case KVM_S390_PROGRAM_INT:
                VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
                           irq->u.pgm.code);
                rc = __inject_prog(vcpu, irq);
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                rc = __inject_set_prefix(vcpu, irq);
                break;
        case KVM_S390_SIGP_STOP:
                rc = __inject_sigp_stop(vcpu, irq);
                break;
        case KVM_S390_RESTART:
                rc = __inject_sigp_restart(vcpu, irq);
                break;
        case KVM_S390_INT_CLOCK_COMP:
                rc = __inject_ckc(vcpu);
                break;
        case KVM_S390_INT_CPU_TIMER:
                rc = __inject_cpu_timer(vcpu);
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                rc = __inject_extcall(vcpu, irq);
                break;
        case KVM_S390_INT_EMERGENCY:
                rc = __inject_sigp_emergency(vcpu, irq);
                break;
        case KVM_S390_MCHK:
                rc = __inject_mchk(vcpu, irq);
                break;
        case KVM_S390_INT_PFAULT_INIT:
                rc = __inject_pfault_init(vcpu, irq);
                break;
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        default:
                rc = -EINVAL;
        }
        spin_unlock(&li->lock);
        if (!rc)
                kvm_s390_vcpu_wakeup(vcpu);
        return rc;
}

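/* Drop all pending floating interrupts (KVM_DEV_FLIC_CLEAR_IRQS). */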
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *n, *inti = NULL;

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_for_each_entry_safe(inti, n, &fi->list, list) {
                list_del(&inti->list);
                kfree(inti);
        }
        fi->irq_count = 0;
        atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
}

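/* Translate one in-kernel interrupt_info into the exported kvm_s390_irq. */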
static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
                        struct kvm_s390_irq *irq)
{
        memset(irq, 0, sizeof(*irq));
        irq->type = inti->type;
        switch (inti->type) {
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
                irq->u.ext = inti->ext;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                irq->u.io = inti->io;
                break;
        case KVM_S390_MCHK:
                irq->u.mchk = inti->mchk;
                break;
        }
}

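/*
 * Copy all pending floating interrupts into the buffer provided by
 * userspace.  The interrupts are first gathered into a kernel buffer
 * under fi->lock and copied out only after the lock is dropped, since
 * copy_to_user() may sleep.  Returns the number of interrupts copied,
 * or -ENOMEM if the buffer is too small for all pending interrupts
 * (userspace is expected to retry with a bigger buffer).
 */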
static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
        struct kvm_s390_interrupt_info *inti;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_irq *irqs;
        int max_irqs;
        int ret = 0;
        int n = 0;

        if (len > KVM_S390_FLIC_MAX_BUFFER)
                return -EINVAL;

        irqs = kmalloc(len, GFP_KERNEL);
        if (!irqs)
                return -ENOMEM;
        max_irqs = len / sizeof(struct kvm_s390_irq);

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);

        list_for_each_entry(inti, &fi->list, list) {
                if (n == max_irqs) {
                        /* signal userspace to try again */
                        ret = -ENOMEM;
                        break;
                }
                inti_to_irq(inti, &irqs[n]);
                n++;
        }

        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);

        if (!ret && n > 0 &&
            copy_to_user((void __user *) buf, irqs,
                         sizeof(struct kvm_s390_irq) * n))
                ret = -EFAULT;
        kfree(irqs);

        return ret < 0 ? ret : n;
}

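/*
 * Handler for read-type FLIC attributes: attr->addr points to the
 * userspace buffer and attr->attr holds its length in bytes.
 */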
static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_FLIC_GET_ALL_IRQS:
                r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
                                          attr->attr);
                break;
        default:
                r = -EINVAL;
        }

        return r;
}

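/*
 * Read one kvm_s390_irq from userspace and fill the matching payload
 * union member of the in-kernel interrupt_info.
 */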
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
                                     u64 addr)
{
        struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
        void *target = NULL;
        void __user *source;
        u64 size;

        if (get_user(inti->type, (u64 __user *)addr))
                return -EFAULT;

        switch (inti->type) {
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
                target = (void *) &inti->ext;
                source = &uptr->u.ext;
                size = sizeof(inti->ext);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                target = (void *) &inti->io;
                source = &uptr->u.io;
                size = sizeof(inti->io);
                break;
        case KVM_S390_MCHK:
                target = (void *) &inti->mchk;
                source = &uptr->u.mchk;
                size = sizeof(inti->mchk);
                break;
        default:
                return -EINVAL;
        }

        if (copy_from_user(target, source, size))
                return -EFAULT;

        return 0;
}

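/*
 * Inject an array of kvm_s390_irq structures from userspace as
 * floating interrupts.  attr->attr holds the byte length of the array
 * at attr->addr and must be a multiple of sizeof(struct kvm_s390_irq).
 */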
static int enqueue_floating_irq(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        int r = 0;
        int len = attr->attr;

        if (len % sizeof(struct kvm_s390_irq) != 0)
                return -EINVAL;
        if (len > KVM_S390_FLIC_MAX_BUFFER)
                return -EINVAL;

        while (len >= sizeof(struct kvm_s390_irq)) {
                inti = kzalloc(sizeof(*inti), GFP_KERNEL);
                if (!inti)
                        return -ENOMEM;

                r = copy_irq_from_user(inti, attr->addr);
                if (r) {
                        kfree(inti);
                        return r;
                }
                r = __inject_vm(dev->kvm, inti);
                if (r) {
                        kfree(inti);
                        return r;
                }
                len -= sizeof(struct kvm_s390_irq);
                attr->addr += sizeof(struct kvm_s390_irq);
        }

        return r;
}

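/* Look up a registered adapter by id; returns NULL if none exists. */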
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
        if (id >= MAX_S390_IO_ADAPTERS)
                return NULL;
        return kvm->arch.adapters[id];
}

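/*
 * Register an I/O adapter for adapter interrupt routing.  The
 * description is copied from userspace; the requested id must be below
 * MAX_S390_IO_ADAPTERS and not yet in use.
 */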
static int register_io_adapter(struct kvm_device *dev,
                               struct kvm_device_attr *attr)
{
        struct s390_io_adapter *adapter;
        struct kvm_s390_io_adapter adapter_info;

        if (copy_from_user(&adapter_info,
                           (void __user *)attr->addr, sizeof(adapter_info)))
                return -EFAULT;

        if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
            (dev->kvm->arch.adapters[adapter_info.id] != NULL))
                return -EINVAL;

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter)
                return -ENOMEM;

        INIT_LIST_HEAD(&adapter->maps);
        init_rwsem(&adapter->maps_lock);
        atomic_set(&adapter->nr_maps, 0);
        adapter->id = adapter_info.id;
        adapter->isc = adapter_info.isc;
        adapter->maskable = adapter_info.maskable;
        adapter->masked = false;
        adapter->swap = adapter_info.swap;
        dev->kvm->arch.adapters[adapter->id] = adapter;

        return 0;
}

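/*
 * Mask or unmask an adapter.  Returns the previous masked state (0 or
 * 1), or -EINVAL if the adapter does not exist or is not maskable.
 */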
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
        int ret;
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

        if (!adapter || !adapter->maskable)
                return -EINVAL;
        ret = adapter->masked;
        adapter->masked = masked;
        return ret;
}

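/*
 * Pin the guest page backing the indicator area at @addr so that
 * adapter interrupts can set indicator bits without faulting, and
 * remember the mapping on the adapter's list.  The number of pinned
 * pages per adapter is bounded by MAX_S390_ADAPTER_MAPS.
 */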
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
        struct s390_map_info *map;
        int ret;

        if (!adapter || !addr)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                ret = -ENOMEM;
                goto out;
        }
        INIT_LIST_HEAD(&map->list);
        map->guest_addr = addr;
        map->addr = gmap_translate(kvm->arch.gmap, addr);
        if (map->addr == -EFAULT) {
                ret = -EFAULT;
                goto out;
        }
        ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
        if (ret < 0)
                goto out;
        BUG_ON(ret != 1);
        down_write(&adapter->maps_lock);
        if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
                list_add_tail(&map->list, &adapter->maps);
                ret = 0;
        } else {
                /* undo the increment so later maps/unmaps stay balanced */
                atomic_dec(&adapter->nr_maps);
                put_page(map->page);
                ret = -EINVAL;
        }
        up_write(&adapter->maps_lock);
out:
        if (ret)
                kfree(map);
        return ret;
}

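/* Undo kvm_s390_adapter_map(): unpin the page and drop the mapping. */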
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
        struct s390_map_info *map, *tmp;
        int found = 0;

        if (!adapter || !addr)
                return -EINVAL;

        down_write(&adapter->maps_lock);
        list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
                if (map->guest_addr == addr) {
                        found = 1;
                        atomic_dec(&adapter->nr_maps);
                        list_del(&map->list);
                        put_page(map->page);
                        kfree(map);
                        break;
                }
        }
        up_write(&adapter->maps_lock);

        return found ? 0 : -EINVAL;
}

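/* Free all adapters and unpin their indicator pages on VM destruction. */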
void kvm_s390_destroy_adapters(struct kvm *kvm)
{
        int i;
        struct s390_map_info *map, *tmp;

        for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
                if (!kvm->arch.adapters[i])
                        continue;
                list_for_each_entry_safe(map, tmp,
                                         &kvm->arch.adapters[i]->maps, list) {
                        list_del(&map->list);
                        put_page(map->page);
                        kfree(map);
                }
                kfree(kvm->arch.adapters[i]);
        }
}

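/* Dispatch KVM_DEV_FLIC_ADAPTER_MODIFY requests: mask, map or unmap. */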
static int modify_io_adapter(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        struct kvm_s390_io_adapter_req req;
        struct s390_io_adapter *adapter;
        int ret;

        if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
                return -EFAULT;

        adapter = get_io_adapter(dev->kvm, req.id);
        if (!adapter)
                return -EINVAL;
        switch (req.type) {
        case KVM_S390_IO_ADAPTER_MASK:
                ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
                if (ret > 0)
                        ret = 0;
                break;
        case KVM_S390_IO_ADAPTER_MAP:
                ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
                break;
        case KVM_S390_IO_ADAPTER_UNMAP:
                ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

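/*
 * Handler for write-type FLIC device attributes.  As an illustrative
 * sketch (not part of this file), userspace enqueues floating
 * interrupts roughly like this:
 *
 *      struct kvm_s390_irq irq = { .type = KVM_S390_INT_SERVICE };
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_FLIC_ENQUEUE,
 *              .attr  = sizeof(irq),   // byte length of the array
 *              .addr  = (__u64) &irq,
 *      };
 *      ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */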
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r = 0;
        unsigned int i;
        struct kvm_vcpu *vcpu;

        switch (attr->group) {
        case KVM_DEV_FLIC_ENQUEUE:
                r = enqueue_floating_irq(dev, attr);
                break;
        case KVM_DEV_FLIC_CLEAR_IRQS:
                kvm_s390_clear_float_irqs(dev->kvm);
                break;
        case KVM_DEV_FLIC_APF_ENABLE:
                dev->kvm->arch.gmap->pfault_enabled = 1;
                break;
        case KVM_DEV_FLIC_APF_DISABLE_WAIT:
                dev->kvm->arch.gmap->pfault_enabled = 0;
                /*
                 * Make sure no async faults are in transition when
                 * clearing the queues, so that late-arriving workers
                 * cannot re-add entries behind our back.
                 */
                synchronize_srcu(&dev->kvm->srcu);
                kvm_for_each_vcpu(i, vcpu, dev->kvm)
                        kvm_clear_async_pf_completion_queue(vcpu);
                break;
        case KVM_DEV_FLIC_ADAPTER_REGISTER:
                r = register_io_adapter(dev, attr);
                break;
        case KVM_DEV_FLIC_ADAPTER_MODIFY:
                r = modify_io_adapter(dev, attr);
                break;
        default:
                r = -EINVAL;
        }

        return r;
}

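/* A VM has at most one FLIC; a second creation attempt is rejected. */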
static int flic_create(struct kvm_device *dev, u32 type)
{
        if (!dev)
                return -EINVAL;
        if (dev->kvm->arch.flic)
                return -EINVAL;
        dev->kvm->arch.flic = dev;
        return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
        dev->kvm->arch.flic = NULL;
        kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
        .name = "kvm-flic",
        .get_attr = flic_get_attr,
        .set_attr = flic_set_attr,
        .create = flic_create,
        .destroy = flic_destroy,
};

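/*
 * Compute the bit number of an indicator bit within its page.  For
 * "swapped" adapters the number is mirrored within a machine word
 * (bit ^ (BITS_PER_LONG - 1)) to translate between the two possible
 * bit numbering conventions.
 */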
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
        unsigned long bit;

        bit = bit_nr + (addr % PAGE_SIZE) * 8;

        return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
                                          u64 addr)
{
        struct s390_map_info *map;

        if (!adapter)
                return NULL;

        list_for_each_entry(map, &adapter->maps, list) {
                if (map->guest_addr == addr)
                        return map;
        }
        return NULL;
}

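/*
 * Set the adapter-local indicator bit and then the summary indicator
 * bit, marking both backing pages dirty.  Returns 1 if the summary bit
 * was newly set (an interrupt needs to be injected), 0 if it was
 * already set (the interrupt is coalesced), and -1 if an indicator
 * page is not mapped.
 */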
static int adapter_indicators_set(struct kvm *kvm,
                                  struct s390_io_adapter *adapter,
                                  struct kvm_s390_adapter_int *adapter_int)
{
        unsigned long bit;
        int summary_set, idx;
        struct s390_map_info *info;
        void *map;

        info = get_map_info(adapter, adapter_int->ind_addr);
        if (!info)
                return -1;
        map = page_address(info->page);
        bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
        set_bit(bit, map);
        idx = srcu_read_lock(&kvm->srcu);
        mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
        set_page_dirty_lock(info->page);
        info = get_map_info(adapter, adapter_int->summary_addr);
        if (!info) {
                srcu_read_unlock(&kvm->srcu, idx);
                return -1;
        }
        map = page_address(info->page);
        bit = get_ind_bit(info->addr, adapter_int->summary_offset,
                          adapter->swap);
        summary_set = test_and_set_bit(bit, map);
        mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
        set_page_dirty_lock(info->page);
        srcu_read_unlock(&kvm->srcu, idx);
        return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        int ret;
        struct s390_io_adapter *adapter;

        /* We're only interested in the 0->1 transition. */
        if (!level)
                return 0;
        adapter = get_io_adapter(kvm, e->adapter.adapter_id);
        if (!adapter)
                return -1;
        down_read(&adapter->maps_lock);
        ret = adapter_indicators_set(kvm, adapter, &e->adapter);
        up_read(&adapter->maps_lock);
        if ((ret > 0) && !adapter->masked) {
                struct kvm_s390_interrupt s390int = {
                        .type = KVM_S390_INT_IO(1, 0, 0, 0),
                        .parm = 0,
                        .parm64 = (adapter->isc << 27) | 0x80000000,
                };
                ret = kvm_s390_inject_vm(kvm, &s390int);
                if (ret == 0)
                        ret = 1;
        }
        return ret;
}

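/* Translate a userspace irq routing entry into its in-kernel form. */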
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        int ret;

        switch (ue->type) {
        case KVM_IRQ_ROUTING_S390_ADAPTER:
                e->set = set_adapter_int;
                e->adapter.summary_addr = ue->u.adapter.summary_addr;
                e->adapter.ind_addr = ue->u.adapter.ind_addr;
                e->adapter.summary_offset = ue->u.adapter.summary_offset;
                e->adapter.ind_offset = ue->u.adapter.ind_offset;
                e->adapter.adapter_id = ue->u.adapter.adapter_id;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

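/* MSI injection is not supported on s390; adapter routing is used instead. */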
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
                int irq_source_id, int level, bool line_status)
{
        return -EINVAL;
}