arch/s390/kvm/interrupt.c
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

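/*
 * All non-I/O interrupt types defined in kvm.h share the 0xfffeXXXX
 * encoding, while I/O interrupts encode cssid/ssid/schid in the low
 * bits; anything outside the 0xfffe0000 range is therefore an I/O
 * interrupt.
 */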
static int is_ioint(u64 type)
{
        return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

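/*
 * The interruption subclass (ISC) lives in bits 2-4 of the I/O
 * interruption word.  Convert it to the matching mask bit of the
 * I/O interruption subclass mask, which occupies bits 32-39 of CR6.
 */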
static u64 int_word_to_isc_bits(u32 int_word)
{
        u8 isc = (int_word & 0x38000000) >> 27;

        return (0x80 >> isc) << 24;
}

static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_CLOCK_COMP:
                return ckc_interrupts_enabled(vcpu);
        case KVM_S390_INT_CPU_TIMER:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
                        return 1;
                return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[6] &
                    int_word_to_isc_bits(inti->io.io_int_word))
                        return 1;
                return 0;
        default:
                printk(KERN_WARNING "illegal interrupt type %llx\n",
                       inti->type);
                BUG();
        }
        return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.local_int.pending_irqs;
}

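/*
 * Start from all pending local interrupts and successively mask out
 * the classes that are currently disabled, either by the PSW system
 * mask or by the subclass mask bits in CR0.
 */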
static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask = pending_local_irqs(vcpu);

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                          &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
                atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR14;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR6;
                break;
        default:
                BUG();
        }
}

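/*
 * The instruction-length code for the last intercepted instruction is
 * encoded in bits 0-1 of the opcode (ipa); the table below maps those
 * two bits to an instruction length of 2, 4 or 6 bytes.
 */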
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
        const unsigned short table[] = { 2, 4, 4, 6 };

        switch (vcpu->arch.sie_block->icptcode) {
        case ICPT_INST:
        case ICPT_INSTPROGI:
        case ICPT_OPEREXC:
        case ICPT_PARTEXEC:
        case ICPT_IOINST:
                /* last instruction only stored for these icptcodes */
                return table[vcpu->arch.sie_block->ipa >> 14];
        case ICPT_PROGI:
                return vcpu->arch.sie_block->pgmilc;
        default:
                return 0;
        }
}

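/*
 * The __deliver_* helpers below mimic the architected interruption
 * action: store the interruption parameters and the old PSW into the
 * guest lowcore, then load the new PSW of that interruption class.
 */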
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
                   0, ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk;
        int rc;

        spin_lock(&li->lock);
        mchk = li->irq.mchk;
        /*
         * If there was an exigent machine check pending, then any repressible
         * machine checks that might have been pending are indicated along
         * with it, so always clear both bits
         */
        clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        memset(&li->irq.mchk, 0, sizeof(mchk));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk.mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk.cr14, mchk.mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk.mcic,
                           (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk.fixed_logout, sizeof(mchk.fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct _lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc;
}

static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
        vcpu->stat.deliver_stop_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
                                         0, 0);

        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
        return 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0;
        u16 ilc = get_ilc(vcpu);

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   pgm_info.code, ilc);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
                                          struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        return rc;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_DONE, 0,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
                                         struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
        vcpu->stat.deliver_io_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         ((__u32)inti->io.subchannel_id << 16) |
                                                inti->io.subchannel_nr,
                                         ((__u64)inti->io.io_int_parm << 32) |
                                                inti->io.io_int_word);

        rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                           (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                           (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                           (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                           (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_mchk_info *mchk = &inti->mchk;
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk->mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk->cr14, mchk->mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk->mcic,
                        (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                        (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk->fixed_logout, sizeof(mchk->fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

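/*
 * Delivery functions, indexed by IRQ_PEND bit number.  The bit
 * positions also define the priority: lower bit numbers are delivered
 * first (see the find_first_bit() loop in
 * kvm_s390_deliver_pending_interrupts()).
 */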
static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SIGP_STOP]      = __deliver_stop,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
                rc = __deliver_service(vcpu, inti);
                break;
        case KVM_S390_INT_PFAULT_DONE:
                rc = __deliver_pfault_done(vcpu, inti);
                break;
        case KVM_S390_INT_VIRTIO:
                rc = __deliver_virtio(vcpu, inti);
                break;
        case KVM_S390_MCHK:
                rc = __deliver_mchk_floating(vcpu, inti);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                rc = __deliver_io(vcpu, inti);
                break;
        default:
                BUG();
        }

        return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
        atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

        if (!psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
            (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
            (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
                return 1;

        return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc;

        rc = !!deliverable_local_irqs(vcpu);

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if (!rc && kvm_cpu_has_pending_timer(vcpu))
                rc = 1;

        if (!rc && kvm_s390_si_ext_call_pending(vcpu))
                rc = 1;

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.sie_block->ckc <
              get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                return 0;
        if (!ckc_interrupts_enabled(vcpu))
                return 0;
        return 1;
}

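/*
 * Called when the guest performs an enabled wait: if a clock
 * comparator wakeup is possible, program an hrtimer for the remaining
 * time before blocking the VCPU.
 */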
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        __set_cpu_idle(vcpu);
        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime),
                      HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        kvm_s390_vcpu_wakeup(vcpu);

        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
        atomic_clear_mask(SIGP_CTRL_C,
                          &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

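/*
 * Deliver all deliverable local interrupts in priority order, then
 * walk the floating interrupt list and deliver whatever the current
 * PSW and control registers allow.  Interrupts that remain pending
 * only get an interception request so that we regain control once the
 * guest enables the corresponding class.
 */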
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        deliver_irq_t func;
        int deliver;
        int rc = 0;
        unsigned long irq_type;
        unsigned long deliverable_irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (kvm_cpu_has_pending_timer(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        do {
                deliverable_irqs = deliverable_local_irqs(vcpu);
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
                if (irq_type == IRQ_PEND_COUNT)
                        break;
                func = deliver_irq_funcs[irq_type];
                if (!func) {
                        WARN_ON_ONCE(func == NULL);
                        clear_bit(irq_type, &li->pending_irqs);
                        continue;
                }
                rc = func(vcpu);
        } while (!rc && irq_type != IRQ_PEND_COUNT);

        set_intercept_indicators_local(vcpu);

        if (!rc && atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        fi->irq_count--;
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                rc = __deliver_floating_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (!rc && deliver);
        }

        return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        li->irq.pgm = irq->u.pgm;
        __set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
                                   0, 1);
        spin_lock(&li->lock);
        irq.u.pgm.code = code;
        __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                             struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;
        int rc;

        VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
                   pgm_info->code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   pgm_info->code, 0, 1);
        spin_lock(&li->lock);
        irq.u.pgm = *pgm_info;
        rc = __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
                   irq->u.ext.ext_params, irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2, 2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;

        VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
                   irq->u.extcall.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   irq->u.extcall.code, 0, 2);

        *extcall = irq->u.extcall;
        __set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0, 2);

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

        li->action_bits |= ACTION_STOP_ON_STOP;
        set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: emergency %u",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0, 2);

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic, 2);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP,  &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

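/*
 * Dequeue and return the first pending floating I/O interrupt that
 * matches either the ISC mask in cr6 or the given subchannel id/nr.
 * Exactly one of cr6 and schid must be specified.
 */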
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti) {
                list_del_init(&inti->list);
                fi->irq_count--;
        }
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return inti;
}

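/*
 * Queue a floating interrupt and kick a VCPU that can handle it:
 * preferably an idle one, otherwise the VCPUs are chosen round-robin.
 */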
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *iter;
        struct kvm_vcpu *dst_vcpu = NULL;
        int sigcpu;
        int rc = 0;

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
                rc = -EINVAL;
                goto unlock_fi;
        }
        fi->irq_count++;
        if (!is_ioint(inti->type)) {
                list_add_tail(&inti->list, &fi->list);
        } else {
                u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

                /* Keep I/O interrupts sorted in isc order. */
                list_for_each_entry(iter, &fi->list, list) {
                        if (!is_ioint(iter->type))
                                continue;
                        if (int_word_to_isc_bits(iter->io.io_int_word)
                            <= isc_bits)
                                continue;
                        break;
                }
                list_add_tail(&inti->list, &iter->list);
        }
        atomic_set(&fi->active, 1);
        sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
        if (sigcpu == KVM_MAX_VCPUS) {
                do {
                        sigcpu = fi->next_rr_cpu++;
                        if (sigcpu == KVM_MAX_VCPUS)
                                sigcpu = fi->next_rr_cpu = 0;
                } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
        }
        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
        li = &dst_vcpu->arch.local_int;
        spin_lock(&li->lock);
        switch (inti->type) {
        case KVM_S390_MCHK:
                atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
                break;
        default:
                atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
                break;
        }
        spin_unlock(&li->lock);
        kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
        return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
                       struct kvm_s390_interrupt *s390int)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = s390int->type;
        switch (inti->type) {
        case KVM_S390_INT_VIRTIO:
                VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
                         s390int->parm, s390int->parm64);
                inti->ext.ext_params = s390int->parm;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_INT_SERVICE:
                VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
                inti->ext.ext_params = s390int->parm;
                break;
        case KVM_S390_INT_PFAULT_DONE:
                inti->type = s390int->type;
                inti->ext.ext_params2 = s390int->parm64;
                break;
        case KVM_S390_MCHK:
                VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
                         s390int->parm64);
                inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
                inti->mchk.mcic = s390int->parm64;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (inti->type & IOINT_AI_MASK)
                        VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
                else
                        VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
                                 s390int->type & IOINT_CSSID_MASK,
                                 s390int->type & IOINT_SSID_MASK,
                                 s390int->type & IOINT_SCHID_MASK);
                inti->io.subchannel_id = s390int->parm >> 16;
                inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
                inti->io.io_int_parm = s390int->parm64 >> 32;
                inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
                break;
        default:
                kfree(inti);
                return -EINVAL;
        }
        trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
                                 2);

        return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
                              struct kvm_s390_interrupt_info *inti)
{
        __inject_vm(kvm, inti);
}

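/*
 * Translate the parameters of the legacy KVM_S390_INTERRUPT format
 * into the kvm_s390_irq format used by the local injection code.
 */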
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
                       struct kvm_s390_irq *irq)
{
        irq->type = s390int->type;
        switch (irq->type) {
        case KVM_S390_PROGRAM_INT:
                if (s390int->parm & 0xffff0000)
                        return -EINVAL;
                irq->u.pgm.code = s390int->parm;
                break;
        case KVM_S390_SIGP_SET_PREFIX:
                irq->u.prefix.address = s390int->parm;
                break;
        case KVM_S390_INT_EXTERNAL_CALL:
                if (s390int->parm & 0xffff0000)
                        return -EINVAL;
                irq->u.extcall.code = s390int->parm;
                break;
        case KVM_S390_INT_EMERGENCY:
                if (s390int->parm & 0xffff0000)
                        return -EINVAL;
                irq->u.emerg.code = s390int->parm;
                break;
        case KVM_S390_MCHK:
                irq->u.mchk.mcic = s390int->parm64;
                break;
        }
        return 0;
}
1306
1307 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1308 {
1309         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1310         int rc;
1311
1312         spin_lock(&li->lock);
1313         switch (irq->type) {
1314         case KVM_S390_PROGRAM_INT:
1315                 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
1316                            irq->u.pgm.code);
1317                 rc = __inject_prog(vcpu, irq);
1318                 break;
1319         case KVM_S390_SIGP_SET_PREFIX:
1320                 rc = __inject_set_prefix(vcpu, irq);
1321                 break;
1322         case KVM_S390_SIGP_STOP:
1323                 rc = __inject_sigp_stop(vcpu, irq);
1324                 break;
1325         case KVM_S390_RESTART:
1326                 rc = __inject_sigp_restart(vcpu, irq);
1327                 break;
1328         case KVM_S390_INT_CLOCK_COMP:
1329                 rc = __inject_ckc(vcpu);
1330                 break;
1331         case KVM_S390_INT_CPU_TIMER:
1332                 rc = __inject_cpu_timer(vcpu);
1333                 break;
1334         case KVM_S390_INT_EXTERNAL_CALL:
1335                 rc = __inject_extcall(vcpu, irq);
1336                 break;
1337         case KVM_S390_INT_EMERGENCY:
1338                 rc = __inject_sigp_emergency(vcpu, irq);
1339                 break;
1340         case KVM_S390_MCHK:
1341                 rc = __inject_mchk(vcpu, irq);
1342                 break;
1343         case KVM_S390_INT_PFAULT_INIT:
1344                 rc = __inject_pfault_init(vcpu, irq);
1345                 break;
1346         case KVM_S390_INT_VIRTIO:
1347         case KVM_S390_INT_SERVICE:
1348         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1349         default:
1350                 rc = -EINVAL;
1351         }
1352         spin_unlock(&li->lock);
1353         if (!rc)
1354                 kvm_s390_vcpu_wakeup(vcpu);
1355         return rc;
1356 }

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        list_for_each_entry_safe(inti, n, &fi->list, list) {
                list_del(&inti->list);
                kfree(inti);
        }
        fi->irq_count = 0;
        atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
}

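/*
 * Copy one pending interrupt to userspace in struct kvm_s390_irq format.
 * The on-stack irq is zero-initialized so that no uninitialized kernel
 * stack data can leak to userspace through the padding of the union.
 */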
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
                                   u8 *addr)
{
        struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
        struct kvm_s390_irq irq = {0};

        irq.type = inti->type;
        switch (inti->type) {
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
                irq.u.ext = inti->ext;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                irq.u.io = inti->io;
                break;
        case KVM_S390_MCHK:
                irq.u.mchk = inti->mchk;
                break;
        default:
                return -EINVAL;
        }

        if (copy_to_user(uptr, &irq, sizeof(irq)))
                return -EFAULT;

        return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
        struct kvm_s390_interrupt_info *inti;
        struct kvm_s390_float_interrupt *fi;
        int ret = 0;
        int n = 0;

        mutex_lock(&kvm->lock);
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);

        list_for_each_entry(inti, &fi->list, list) {
                if (len < sizeof(struct kvm_s390_irq)) {
                        /* signal userspace to try again with a bigger buffer */
                        ret = -ENOMEM;
                        break;
                }
                ret = copy_irq_to_user(inti, buf);
                if (ret)
                        break;
                buf += sizeof(struct kvm_s390_irq);
                len -= sizeof(struct kvm_s390_irq);
                n++;
        }

        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);

        return ret < 0 ? ret : n;
}
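
/*
 * Illustrative sketch only (assumption: flic_fd is the fd of a created
 * flic device; not code that is built here). Userspace reads the list
 * above via KVM_GET_DEVICE_ATTR, with attr.attr holding the buffer size
 * in bytes and attr.addr the buffer; a non-negative return value is the
 * number of interrupts copied:
 *
 *        struct kvm_s390_irq buf[16];
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *                .attr = sizeof(buf),
 *                .addr = (__u64)(unsigned long)buf,
 *        };
 *        int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */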

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_FLIC_GET_ALL_IRQS:
                r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
                                          attr->attr);
                break;
        default:
                r = -EINVAL;
        }

        return r;
}

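/*
 * Read one struct kvm_s390_irq from userspace: fetch the type word first,
 * then copy only the union member that is valid for that type.
 */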
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
                                     u64 addr)
{
        struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
        void *target = NULL;
        void __user *source;
        u64 size;

        if (get_user(inti->type, (u64 __user *)addr))
                return -EFAULT;

        switch (inti->type) {
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
        case KVM_S390_INT_SERVICE:
                target = (void *) &inti->ext;
                source = &uptr->u.ext;
                size = sizeof(inti->ext);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                target = (void *) &inti->io;
                source = &uptr->u.io;
                size = sizeof(inti->io);
                break;
        case KVM_S390_MCHK:
                target = (void *) &inti->mchk;
                source = &uptr->u.mchk;
                size = sizeof(inti->mchk);
                break;
        default:
                return -EINVAL;
        }

        if (copy_from_user(target, source, size))
                return -EFAULT;

        return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
                                struct kvm_device_attr *attr)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        int r = 0;
        int len = attr->attr;

        if (len % sizeof(struct kvm_s390_irq) != 0)
                return -EINVAL;
        else if (len > KVM_S390_FLIC_MAX_BUFFER)
                return -EINVAL;

        while (len >= sizeof(struct kvm_s390_irq)) {
                inti = kzalloc(sizeof(*inti), GFP_KERNEL);
                if (!inti)
                        return -ENOMEM;

                r = copy_irq_from_user(inti, attr->addr);
                if (r) {
                        kfree(inti);
                        return r;
                }
                r = __inject_vm(dev->kvm, inti);
                if (r) {
                        kfree(inti);
                        return r;
                }
                len -= sizeof(struct kvm_s390_irq);
                attr->addr += sizeof(struct kvm_s390_irq);
        }

        return r;
}
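
/*
 * Illustrative sketch only (not code that is built here; sccb_addr
 * stands in for a guest SCCB address): userspace enqueues floating
 * interrupts through KVM_SET_DEVICE_ATTR, with attr.attr holding the
 * byte length of the kvm_s390_irq array at attr.addr:
 *
 *        struct kvm_s390_irq irq = {
 *                .type = KVM_S390_INT_SERVICE,
 *                .u.ext.ext_params = sccb_addr,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ENQUEUE,
 *                .attr = sizeof(irq),
 *                .addr = (__u64)(unsigned long)&irq,
 *        };
 *        int rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */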

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
        if (id >= MAX_S390_IO_ADAPTERS)
                return NULL;
        return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
                               struct kvm_device_attr *attr)
{
        struct s390_io_adapter *adapter;
        struct kvm_s390_io_adapter adapter_info;

        if (copy_from_user(&adapter_info,
                           (void __user *)attr->addr, sizeof(adapter_info)))
                return -EFAULT;

        if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
            (dev->kvm->arch.adapters[adapter_info.id] != NULL))
                return -EINVAL;

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter)
                return -ENOMEM;

        INIT_LIST_HEAD(&adapter->maps);
        init_rwsem(&adapter->maps_lock);
        atomic_set(&adapter->nr_maps, 0);
        adapter->id = adapter_info.id;
        adapter->isc = adapter_info.isc;
        adapter->maskable = adapter_info.maskable;
        adapter->masked = false;
        adapter->swap = adapter_info.swap;
        dev->kvm->arch.adapters[adapter->id] = adapter;

        return 0;
}
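
/*
 * Illustrative sketch only (not code that is built here): the
 * registration payload mirrors the fields copied above, e.g. a
 * maskable adapter on ISC 3:
 *
 *        struct kvm_s390_io_adapter adapter_info = {
 *                .id = 0,
 *                .isc = 3,
 *                .maskable = 1,
 *                .swap = 0,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *                .addr = (__u64)(unsigned long)&adapter_info,
 *        };
 *        int rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */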

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
        int ret;
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

        if (!adapter || !adapter->maskable)
                return -EINVAL;
        ret = adapter->masked;
        adapter->masked = masked;
        return ret;
}

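/*
 * Pin the guest page backing an indicator area so that
 * adapter_indicators_set() can flip bits in it without faulting: the
 * guest address is translated once, the page reference is taken with
 * get_user_pages_fast() and kept on the adapter's map list until the
 * area is unmapped or the adapter is destroyed.
 */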
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
        struct s390_map_info *map;
        int ret;

        if (!adapter || !addr)
                return -EINVAL;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                ret = -ENOMEM;
                goto out;
        }
        INIT_LIST_HEAD(&map->list);
        map->guest_addr = addr;
        map->addr = gmap_translate(kvm->arch.gmap, addr);
        if (map->addr == -EFAULT) {
                ret = -EFAULT;
                goto out;
        }
        /* pin the backing page; it is released on unmap or adapter destruction */
        ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
        if (ret < 0)
                goto out;
        BUG_ON(ret != 1);
        down_write(&adapter->maps_lock);
        if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
                list_add_tail(&map->list, &adapter->maps);
                ret = 0;
        } else {
                put_page(map->page);
                ret = -EINVAL;
        }
        up_write(&adapter->maps_lock);
out:
        if (ret)
                kfree(map);
        return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
        struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
        struct s390_map_info *map, *tmp;
        int found = 0;

        if (!adapter || !addr)
                return -EINVAL;

        down_write(&adapter->maps_lock);
        list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
                if (map->guest_addr == addr) {
                        found = 1;
                        atomic_dec(&adapter->nr_maps);
                        list_del(&map->list);
                        put_page(map->page);
                        kfree(map);
                        break;
                }
        }
        up_write(&adapter->maps_lock);

        return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
        int i;
        struct s390_map_info *map, *tmp;

        for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
                if (!kvm->arch.adapters[i])
                        continue;
                list_for_each_entry_safe(map, tmp,
                                         &kvm->arch.adapters[i]->maps, list) {
                        list_del(&map->list);
                        put_page(map->page);
                        kfree(map);
                }
                kfree(kvm->arch.adapters[i]);
        }
}

static int modify_io_adapter(struct kvm_device *dev,
                             struct kvm_device_attr *attr)
{
        struct kvm_s390_io_adapter_req req;
        struct s390_io_adapter *adapter;
        int ret;

        if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
                return -EFAULT;

        adapter = get_io_adapter(dev->kvm, req.id);
        if (!adapter)
                return -EINVAL;
        switch (req.type) {
        case KVM_S390_IO_ADAPTER_MASK:
                ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
                if (ret > 0)
                        ret = 0;
                break;
        case KVM_S390_IO_ADAPTER_MAP:
                ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
                break;
        case KVM_S390_IO_ADAPTER_UNMAP:
                ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
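
/*
 * Illustrative sketch only (not code that is built here; ind_gpa stands
 * in for the guest address of an indicator page): mapping an indicator
 * page for a registered adapter uses the same KVM_SET_DEVICE_ATTR path
 * with a struct kvm_s390_io_adapter_req:
 *
 *        struct kvm_s390_io_adapter_req req = {
 *                .id = 0,
 *                .type = KVM_S390_IO_ADAPTER_MAP,
 *                .addr = ind_gpa,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *                .addr = (__u64)(unsigned long)&req,
 *        };
 *        int rc = ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */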

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r = 0;
        unsigned int i;
        struct kvm_vcpu *vcpu;

        switch (attr->group) {
        case KVM_DEV_FLIC_ENQUEUE:
                r = enqueue_floating_irq(dev, attr);
                break;
        case KVM_DEV_FLIC_CLEAR_IRQS:
                kvm_s390_clear_float_irqs(dev->kvm);
                break;
        case KVM_DEV_FLIC_APF_ENABLE:
                dev->kvm->arch.gmap->pfault_enabled = 1;
                break;
        case KVM_DEV_FLIC_APF_DISABLE_WAIT:
                dev->kvm->arch.gmap->pfault_enabled = 0;
                /*
                 * Make sure no async faults are in transition when
                 * clearing the queues, so that we do not have to
                 * worry about late-arriving workers.
                 */
                synchronize_srcu(&dev->kvm->srcu);
                kvm_for_each_vcpu(i, vcpu, dev->kvm)
                        kvm_clear_async_pf_completion_queue(vcpu);
                break;
        case KVM_DEV_FLIC_ADAPTER_REGISTER:
                r = register_io_adapter(dev, attr);
                break;
        case KVM_DEV_FLIC_ADAPTER_MODIFY:
                r = modify_io_adapter(dev, attr);
                break;
        default:
                r = -EINVAL;
        }

        return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
        if (!dev)
                return -EINVAL;
        if (dev->kvm->arch.flic)
                return -EINVAL;
        dev->kvm->arch.flic = dev;
        return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
        dev->kvm->arch.flic = NULL;
        kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
        .name = "kvm-flic",
        .get_attr = flic_get_attr,
        .set_attr = flic_set_attr,
        .create = flic_create,
        .destroy = flic_destroy,
};

static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
        unsigned long bit;

        bit = bit_nr + (addr % PAGE_SIZE) * 8;

        return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}
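
/*
 * Worked example: for a page offset of 2 and bit_nr = 3 the linear bit
 * number is 3 + 2 * 8 = 19.  With swap set, 19 ^ 63 = 44 on a 64-bit
 * kernel, i.e. the bit is counted from the most significant end of each
 * long word instead of the least significant end.
 */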

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
                                          u64 addr)
{
        struct s390_map_info *map;

        if (!adapter)
                return NULL;

        list_for_each_entry(map, &adapter->maps, list) {
                if (map->guest_addr == addr)
                        return map;
        }
        return NULL;
}

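/*
 * Set the local indicator bit and then the summary indicator bit for an
 * adapter interrupt.  Returns 1 if the summary bit was newly set (an
 * interrupt must be injected), 0 if it was already set (the interrupt
 * is coalesced) and -1 if an indicator page is not mapped.
 */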
static int adapter_indicators_set(struct kvm *kvm,
                                  struct s390_io_adapter *adapter,
                                  struct kvm_s390_adapter_int *adapter_int)
{
        unsigned long bit;
        int summary_set, idx;
        struct s390_map_info *info;
        void *map;

        info = get_map_info(adapter, adapter_int->ind_addr);
        if (!info)
                return -1;
        map = page_address(info->page);
        bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
        set_bit(bit, map);
        idx = srcu_read_lock(&kvm->srcu);
        mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
        set_page_dirty_lock(info->page);
        info = get_map_info(adapter, adapter_int->summary_addr);
        if (!info) {
                srcu_read_unlock(&kvm->srcu, idx);
                return -1;
        }
        map = page_address(info->page);
        bit = get_ind_bit(info->addr, adapter_int->summary_offset,
                          adapter->swap);
        summary_set = test_and_set_bit(bit, map);
        mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
        set_page_dirty_lock(info->page);
        srcu_read_unlock(&kvm->srcu, idx);
        return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
        int ret;
        struct s390_io_adapter *adapter;

        /* We're only interested in the 0->1 transition. */
        if (!level)
                return 0;
        adapter = get_io_adapter(kvm, e->adapter.adapter_id);
        if (!adapter)
                return -1;
        down_read(&adapter->maps_lock);
        ret = adapter_indicators_set(kvm, adapter, &e->adapter);
        up_read(&adapter->maps_lock);
        if ((ret > 0) && !adapter->masked) {
                struct kvm_s390_interrupt s390int = {
                        .type = KVM_S390_INT_IO(1, 0, 0, 0),
                        .parm = 0,
                        .parm64 = (adapter->isc << 27) | 0x80000000,
                };
                ret = kvm_s390_inject_vm(kvm, &s390int);
                if (ret == 0)
                        ret = 1;
        }
        return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue)
{
        int ret;

        switch (ue->type) {
        case KVM_IRQ_ROUTING_S390_ADAPTER:
                e->set = set_adapter_int;
                e->adapter.summary_addr = ue->u.adapter.summary_addr;
                e->adapter.ind_addr = ue->u.adapter.ind_addr;
                e->adapter.summary_offset = ue->u.adapter.summary_offset;
                e->adapter.ind_offset = ue->u.adapter.ind_offset;
                e->adapter.adapter_id = ue->u.adapter.adapter_id;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
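
/*
 * Illustrative sketch only (not code that is built here; gsi, ind_gpa,
 * summary_gpa, ind_bit and summary_bit are stand-ins): an adapter
 * routing entry as userspace would describe it in the table handed to
 * KVM_SET_GSI_ROUTING; the fields mirror those copied above:
 *
 *        struct kvm_irq_routing_entry ue = {
 *                .gsi = gsi,
 *                .type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *                .u.adapter = {
 *                        .summary_addr = summary_gpa,
 *                        .ind_addr = ind_gpa,
 *                        .summary_offset = summary_bit,
 *                        .ind_offset = ind_bit,
 *                        .adapter_id = 0,
 *                },
 *        };
 */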

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
                int irq_source_id, int level, bool line_status)
{
        return -EINVAL;
}