KVM: s390: refactor interrupt delivery code
[cascardo/linux.git] / arch / s390 / kvm / interrupt.c
1 /*
2  * handling kvm guest interrupts
3  *
4  * Copyright IBM Corp. 2008,2014
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  */
12
13 #include <linux/interrupt.h>
14 #include <linux/kvm_host.h>
15 #include <linux/hrtimer.h>
16 #include <linux/mmu_context.h>
17 #include <linux/signal.h>
18 #include <linux/slab.h>
19 #include <asm/asm-offsets.h>
20 #include <asm/uaccess.h>
21 #include "kvm-s390.h"
22 #include "gaccess.h"
23 #include "trace-s390.h"
24
/* Masks for picking apart an I/O interrupt identifier word */
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
/*
 * External interrupt cpu-address values stored at __LC_EXT_CPU_ADDR to
 * distinguish pfault init/done and virtio notifications (see the
 * __deliver_* functions below).
 */
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
32
33 static int is_ioint(u64 type)
34 {
35         return ((type & 0xfffe0000u) != 0xfffe0000u);
36 }
37
38 int psw_extint_disabled(struct kvm_vcpu *vcpu)
39 {
40         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
41 }
42
43 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
44 {
45         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
46 }
47
48 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
49 {
50         return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
51 }
52
53 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
54 {
55         if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
56             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
57             (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
58                 return 0;
59         return 1;
60 }
61
62 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
63 {
64         if (psw_extint_disabled(vcpu) ||
65             !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
66                 return 0;
67         if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
68                 /* No timer interrupts when single stepping */
69                 return 0;
70         return 1;
71 }
72
73 static u64 int_word_to_isc_bits(u32 int_word)
74 {
75         u8 isc = (int_word & 0x38000000) >> 27;
76
77         return (0x80 >> isc) << 24;
78 }
79
/*
 * Check whether a pending interrupt can be delivered to the vcpu in its
 * current state: the interruption class must be enabled in the guest
 * PSW mask and, where applicable, the matching subclass bit must be set
 * in the relevant control register.
 * Returns 1 if deliverable now, 0 if the interrupt must stay pending.
 */
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		/* CR0 0x2000: same bit checked in kvm_s390_si_ext_call_pending() */
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	/* sclp, pfault and virtio all share the CR0 0x200 subclass bit */
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	/* these types are always deliverable */
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		/* machine check subclasses are gated by CR14 */
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		/* I/O interrupts are gated per ISC via CR6 */
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		/* unknown type indicates a bug elsewhere in the module */
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}
138
/*
 * Mark the vcpu as waiting: set the WAIT flag in the SIE control block
 * and record the vcpu in the floating interrupt idle bitmap.
 */
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
144
/* Counterpart of __set_cpu_idle(): clear the wait state again. */
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
150
/*
 * Clear all interrupt-related intercept requests (I/O, external, stop)
 * and control-register/instruction intercepts before re-evaluating
 * which ones are still needed. Guest debugging re-arms its CR and
 * instruction intercepts; a pending stop action keeps the stop
 * intercept raised.
 */
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		/* debugging needs to see CR0/9/10/11 changes and LPSW/STCTL */
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}
167
/* Atomically set a CPUSTAT_* flag in the vcpu's SIE control block. */
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
172
/*
 * Arrange for an interrupt that is pending but currently not
 * deliverable to be retried: if the blocking condition is the PSW mask,
 * request an intercept when the guest becomes enabled; otherwise watch
 * the control register whose subclass bit currently masks it.
 */
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			/* blocked by a CR0 subclass bit: watch CR0 changes */
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			/* intercept PSW loads to catch re-enablement */
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
209
/*
 * Determine the instruction length code for the interrupted
 * instruction. For instruction-related intercepts it is derived from
 * the top two bits of the ipa field via the lookup table; for program
 * interruption intercepts the hardware-provided pgmilc is used.
 * Returns 0 for intercepts that do not store the last instruction.
 */
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	/* length (2/4/6) indexed by the two leftmost opcode bits */
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
228
/*
 * Deliver a CPU timer external interrupt: store the interruption code
 * in the guest lowcore, save the current PSW as the external old PSW
 * and continue the guest with the external new PSW.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
244
245 static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
246 {
247         int rc;
248
249         trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
250                                          0, 0);
251
252         rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
253                            (u16 __user *)__LC_EXT_INT_CODE);
254         rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
255                              &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
256         rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
257                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
258         return rc;
259 }
260
/*
 * Deliver a pfault init notification as a service-signal-class
 * external interrupt: PFAULT_INIT in the cpu-address field identifies
 * the event and the pfault token goes into ext_params2.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_ext_info *ext = &inti->ext;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext->ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext->ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext->ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc;
}
282
/*
 * Deliver a machine check: store the vcpu status to the prefixed save
 * areas first, then fill in the machine check interruption code,
 * failing storage address and fixed logout area before performing the
 * machine check old/new PSW swap.
 * Returns 0 on success, non-zero if any store or lowcore access failed.
 *
 * NOTE(review): the __user annotations on the lowcore casts differ
 * from the plain casts used by the other delivery functions here --
 * presumably a leftover; confirm against put_guest_lc()'s prototype.
 */
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
307
/*
 * Deliver a cpu restart: save the current PSW to the restart old PSW
 * area and load the restart new PSW from the guest lowcore.
 * Returns 0 on success, non-zero if a guest lowcore access failed.
 */
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
323
/*
 * Deliver a SIGP stop: no guest lowcore update is involved, only the
 * stop interrupt request flag in the SIE control block is raised.
 * Always returns 0.
 */
static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
	vcpu->stat.deliver_stop_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
					 0, 0);

	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
334
/*
 * Deliver a SIGP set-prefix order by installing the requested prefix
 * address on the vcpu. Always returns 0.
 */
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_prefix_info *prefix = &inti->prefix;

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix->address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix->address, 0);

	kvm_s390_set_prefix(vcpu, prefix->address);
	return 0;
}
349
350 static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu,
351                                            struct kvm_s390_interrupt_info *inti)
352 {
353         struct kvm_s390_emerg_info *emerg = &inti->emerg;
354         int rc;
355
356         VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
357         vcpu->stat.deliver_emergency_signal++;
358         trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
359                                          inti->emerg.code, 0);
360
361         rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
362                            (u16 *)__LC_EXT_INT_CODE);
363         rc |= put_guest_lc(vcpu, emerg->code, (u16 *)__LC_EXT_CPU_ADDR);
364         rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
365                              &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
366         rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
367                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
368         return rc;
369 }
370
/*
 * Deliver a SIGP external call interrupt: interruption code and calling
 * cpu address are stored in the guest lowcore, then the external
 * old/new PSW swap is performed.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_extcall_info *extcall = &inti->extcall;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall->code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall->code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc;
}
392
/*
 * Deliver a program interrupt. Depending on the program interruption
 * code, additional interruption information (translation exception
 * code, access ids, monitor data, ...) is stored in the corresponding
 * guest lowcore fields; PER information is added on top when the PER
 * bit is set in the code. Finally the ILC and interruption code are
 * stored and the program old/new PSW swap is performed.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_pgm_info *pgm_info = &inti->pgm;
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info->code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info->code, 0);

	/* mask out PER: only the base code selects the extra info to store */
	switch (pgm_info->code & ~PGM_PER) {
	/* codes that store only the translation exception code */
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	/* access-register related codes: store the access id only */
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	/* DAT exceptions: exception code plus both access ids */
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	/* PER information is delivered in addition to the base exception */
	if (pgm_info->code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info->per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info->per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info->code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
480
/*
 * Deliver a service signal (sclp) external interrupt: interruption
 * code, PSW swap and the 32-bit external interruption parameter.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc;
}
501
/*
 * Deliver a pfault done notification, tagged with PFAULT_DONE in the
 * cpu-address field; the pfault token is stored in ext_params2.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc;
}
521
/*
 * Deliver a virtio notification as a service-signal-class external
 * interrupt, tagged with VIRTIO_PARAM in the cpu-address field; both
 * the 32-bit and 64-bit external parameters are stored.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc;
}
546
/*
 * Deliver an I/O interrupt: store subchannel id/nr, interruption
 * parameter and interruption word in the guest lowcore, then perform
 * the I/O old/new PSW swap.
 * Returns 0 on success, non-zero if any guest lowcore access failed.
 */
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
574
/*
 * Dispatch the delivery of one pending interrupt to the matching
 * type-specific __deliver_* helper. The caller has already checked
 * deliverability via __interrupt_is_deliverable().
 * Returns the helper's result (0 on success, non-zero on failure);
 * an unknown type is a bug.
 */
static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		rc = __deliver_emergency_signal(vcpu, inti);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __deliver_external_call(vcpu, inti);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __deliver_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __deliver_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __deliver_pfault_init(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __deliver_stop(vcpu);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __deliver_set_prefix(vcpu, inti);
		break;
	case KVM_S390_RESTART:
		rc = __deliver_restart(vcpu);
		break;
	case KVM_S390_PROGRAM_INT:
		rc = __deliver_prog(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_machine_check(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}
629
630 /* Check whether SIGP interpretation facility has an external call pending */
631 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
632 {
633         atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
634
635         if (!psw_extint_disabled(vcpu) &&
636             (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
637             (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
638             (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
639                 return 1;
640
641         return 0;
642 }
643
/*
 * Check whether the vcpu has any interrupt it could take right now:
 * a deliverable local interrupt, a deliverable floating interrupt, a
 * pending clock comparator, or a SIGP-interpreted external call. Both
 * interrupt lists are scanned under their respective locks.
 * Returns 1 if something is deliverable, 0 otherwise.
 */
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&li->lock);
	}

	/* only scan the floating list if nothing local was found */
	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}
679
680 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
681 {
682         if (!(vcpu->arch.sie_block->ckc <
683               get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
684                 return 0;
685         if (!ckc_interrupts_enabled(vcpu))
686                 return 0;
687         return 1;
688 }
689
/*
 * Handle a guest wait state: block the vcpu until an interrupt becomes
 * pending, arming an hrtimer for the clock comparator when ckc
 * interrupts are enabled. The srcu read lock is dropped around the
 * actual block and re-taken afterwards.
 * Returns 0 on a handled wait, -EOPNOTSUPP for a disabled wait.
 */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	/* wake up at the clock comparator value, guest time base */
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	/* release srcu while blocked so others can synchronize */
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
724
/* Wake a vcpu that is blocked in kvm_s390_handle_wait(), if any. */
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}
737
738 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
739 {
740         struct kvm_vcpu *vcpu;
741
742         vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
743         kvm_s390_vcpu_wakeup(vcpu);
744
745         return HRTIMER_NORESTART;
746 }
747
/*
 * Drop all pending local interrupts of a vcpu: free every entry on the
 * local list under the lock, mark the list inactive, and clear any
 * external call pending state set by the SIGP interpretation facility.
 */
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;

	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
766
/*
 * Deliver all currently deliverable pending interrupts to the guest.
 *
 * Order: per-vcpu local interrupts first, then a pending clock
 * comparator, then machine-wide floating interrupts.  For each queue,
 * one deliverable entry at a time is unlinked under the queue lock and
 * delivered with the lock dropped; entries that are not deliverable
 * right now only get an intercept indicator set via
 * __set_intercept_indicator().
 *
 * Returns 0 on success or a negative error code propagated from the
 * delivery helpers.
 */
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info  *n, *inti = NULL;
	int deliver;
	int rc = 0;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					/* unlink; deliver after unlocking */
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock(&li->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	/* deliver a pending clock comparator interrupt, if any */
	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = __deliver_ckc(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					/* keep the float queue count in sync */
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}
826
827 static int __inject_prog_irq(struct kvm_vcpu *vcpu,
828                              struct kvm_s390_interrupt_info *inti)
829 {
830         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
831
832         list_add(&inti->list, &li->list);
833         atomic_set(&li->active, 1);
834         return 0;
835 }
836
837 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
838 {
839         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
840         struct kvm_s390_interrupt_info *inti;
841
842         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
843         if (!inti)
844                 return -ENOMEM;
845
846         inti->type = KVM_S390_PROGRAM_INT;
847         inti->pgm.code = code;
848
849         VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
850         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
851         spin_lock(&li->lock);
852         list_add(&inti->list, &li->list);
853         atomic_set(&li->active, 1);
854         BUG_ON(waitqueue_active(li->wq));
855         spin_unlock(&li->lock);
856         return 0;
857 }
858
859 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
860                              struct kvm_s390_pgm_info *pgm_info)
861 {
862         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
863         struct kvm_s390_interrupt_info *inti;
864         int rc;
865
866         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
867         if (!inti)
868                 return -ENOMEM;
869
870         VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
871                    pgm_info->code);
872         trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
873                                    pgm_info->code, 0, 1);
874
875         inti->type = KVM_S390_PROGRAM_INT;
876         memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
877         spin_lock(&li->lock);
878         rc = __inject_prog_irq(vcpu, inti);
879         BUG_ON(waitqueue_active(li->wq));
880         spin_unlock(&li->lock);
881         return rc;
882 }
883
884 static int __inject_pfault_init(struct kvm_vcpu *vcpu,
885                                 struct kvm_s390_interrupt *s390int,
886                                 struct kvm_s390_interrupt_info *inti)
887 {
888         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
889
890         inti->ext.ext_params2 = s390int->parm64;
891         list_add_tail(&inti->list, &li->list);
892         atomic_set(&li->active, 1);
893         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
894         return 0;
895 }
896
897 static int __inject_extcall(struct kvm_vcpu *vcpu,
898                             struct kvm_s390_interrupt *s390int,
899                             struct kvm_s390_interrupt_info *inti)
900 {
901         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
902
903         VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
904                    s390int->parm);
905         if (s390int->parm & 0xffff0000)
906                 return -EINVAL;
907         inti->extcall.code = s390int->parm;
908         list_add_tail(&inti->list, &li->list);
909         atomic_set(&li->active, 1);
910         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
911         return 0;
912 }
913
914 static int __inject_set_prefix(struct kvm_vcpu *vcpu,
915                                struct kvm_s390_interrupt *s390int,
916                                struct kvm_s390_interrupt_info *inti)
917 {
918         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
919
920         VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
921                    s390int->parm);
922         inti->prefix.address = s390int->parm;
923         list_add_tail(&inti->list, &li->list);
924         atomic_set(&li->active, 1);
925         return 0;
926 }
927
928 static int __inject_sigp_stop(struct kvm_vcpu *vcpu,
929                               struct kvm_s390_interrupt *s390int,
930                               struct kvm_s390_interrupt_info *inti)
931 {
932         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
933
934         list_add_tail(&inti->list, &li->list);
935         atomic_set(&li->active, 1);
936         li->action_bits |= ACTION_STOP_ON_STOP;
937         return 0;
938 }
939
940 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
941                                  struct kvm_s390_interrupt *s390int,
942                                  struct kvm_s390_interrupt_info *inti)
943 {
944         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
945
946         VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
947         list_add_tail(&inti->list, &li->list);
948         atomic_set(&li->active, 1);
949         return 0;
950 }
951
952 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
953                                    struct kvm_s390_interrupt *s390int,
954                                    struct kvm_s390_interrupt_info *inti)
955 {
956         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
957
958         VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
959         if (s390int->parm & 0xffff0000)
960                 return -EINVAL;
961         inti->emerg.code = s390int->parm;
962         list_add_tail(&inti->list, &li->list);
963         atomic_set(&li->active, 1);
964         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
965         return 0;
966 }
967
968 static int __inject_mchk(struct kvm_vcpu *vcpu,
969                          struct kvm_s390_interrupt *s390int,
970                          struct kvm_s390_interrupt_info *inti)
971 {
972         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
973
974         VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
975                    s390int->parm64);
976         inti->mchk.mcic = s390int->parm64;
977         list_add_tail(&inti->list, &li->list);
978         atomic_set(&li->active, 1);
979         return 0;
980 }
981
982 static int __inject_ckc(struct kvm_vcpu *vcpu,
983                         struct kvm_s390_interrupt *s390int,
984                         struct kvm_s390_interrupt_info *inti)
985 {
986         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
987
988         VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
989         list_add_tail(&inti->list, &li->list);
990         atomic_set(&li->active, 1);
991         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
992         return 0;
993 }
994
995 static int __inject_cpu_timer(struct kvm_vcpu *vcpu,
996                               struct kvm_s390_interrupt *s390int,
997                               struct kvm_s390_interrupt_info *inti)
998 {
999         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1000
1001         VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
1002         list_add_tail(&inti->list, &li->list);
1003         atomic_set(&li->active, 1);
1004         atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1005         return 0;
1006 }
1007
/*
 * Dequeue and return a pending floating I/O interrupt.
 *
 * Exactly one of the selectors must be non-zero: @cr6 selects by
 * interruption-subclass bits, @schid by subchannel id/number.
 * Returns NULL when nothing matches (or when both/neither selector is
 * given).  On success the entry is removed from the floating list and
 * ownership passes to the caller (free it or hand it back via
 * kvm_s390_reinject_io_int()).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		/* match by interruption-subclass bits of the int word */
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			/* upper halfword: subchannel id, lower: number */
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
1047
/*
 * Queue a floating interrupt and kick a vcpu to pick it up.
 *
 * Fails with -EINVAL once KVM_S390_MAX_FLOAT_IRQS entries are pending
 * (in that case the caller still owns @inti and must free it).  I/O
 * interrupts are kept sorted by interruption subclass; all other types
 * are appended.  The target vcpu is the first idle one if any exists,
 * otherwise vcpus are chosen round-robin via fi->next_rr_cpu.
 */
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	/* prefer an idle vcpu; otherwise pick one round-robin */
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	/* set the CPUSTAT flag matching the interrupt class */
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}
1111
1112 int kvm_s390_inject_vm(struct kvm *kvm,
1113                        struct kvm_s390_interrupt *s390int)
1114 {
1115         struct kvm_s390_interrupt_info *inti;
1116
1117         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1118         if (!inti)
1119                 return -ENOMEM;
1120
1121         inti->type = s390int->type;
1122         switch (inti->type) {
1123         case KVM_S390_INT_VIRTIO:
1124                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1125                          s390int->parm, s390int->parm64);
1126                 inti->ext.ext_params = s390int->parm;
1127                 inti->ext.ext_params2 = s390int->parm64;
1128                 break;
1129         case KVM_S390_INT_SERVICE:
1130                 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
1131                 inti->ext.ext_params = s390int->parm;
1132                 break;
1133         case KVM_S390_INT_PFAULT_DONE:
1134                 inti->type = s390int->type;
1135                 inti->ext.ext_params2 = s390int->parm64;
1136                 break;
1137         case KVM_S390_MCHK:
1138                 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
1139                          s390int->parm64);
1140                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1141                 inti->mchk.mcic = s390int->parm64;
1142                 break;
1143         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1144                 if (inti->type & IOINT_AI_MASK)
1145                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1146                 else
1147                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1148                                  s390int->type & IOINT_CSSID_MASK,
1149                                  s390int->type & IOINT_SSID_MASK,
1150                                  s390int->type & IOINT_SCHID_MASK);
1151                 inti->io.subchannel_id = s390int->parm >> 16;
1152                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1153                 inti->io.io_int_parm = s390int->parm64 >> 32;
1154                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1155                 break;
1156         default:
1157                 kfree(inti);
1158                 return -EINVAL;
1159         }
1160         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1161                                  2);
1162
1163         return __inject_vm(kvm, inti);
1164 }
1165
/*
 * Hand an already-dequeued I/O interrupt back to the floating list
 * (presumably one obtained via kvm_s390_get_io_int() -- confirm with
 * callers).  The return value of __inject_vm() is deliberately ignored.
 */
void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}
1171
/*
 * Inject a vcpu-local interrupt described by the legacy struct
 * kvm_s390_interrupt.  Dispatches to the type-specific __inject_*
 * helper with the local-interrupt lock held; on success the vcpu is
 * woken up, on failure the allocated interrupt info is freed here.
 * Floating-only types (virtio, service, I/O) are rejected.
 */
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type,
				   s390int->parm, 0, 2);
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		inti->pgm.code = s390int->parm;
		/* program interruption codes are only 16 bits wide */
		if (s390int->parm & 0xffff0000)
			rc = -EINVAL;
		else
			rc = __inject_prog_irq(vcpu, inti);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, s390int, inti);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, s390int, inti);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, s390int, inti);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	else
		kfree(inti);
	return rc;
}
1238
1239 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1240 {
1241         struct kvm_s390_float_interrupt *fi;
1242         struct kvm_s390_interrupt_info  *n, *inti = NULL;
1243
1244         mutex_lock(&kvm->lock);
1245         fi = &kvm->arch.float_int;
1246         spin_lock(&fi->lock);
1247         list_for_each_entry_safe(inti, n, &fi->list, list) {
1248                 list_del(&inti->list);
1249                 kfree(inti);
1250         }
1251         fi->irq_count = 0;
1252         atomic_set(&fi->active, 0);
1253         spin_unlock(&fi->lock);
1254         mutex_unlock(&kvm->lock);
1255 }
1256
1257 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
1258                                    u8 *addr)
1259 {
1260         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1261         struct kvm_s390_irq irq = {0};
1262
1263         irq.type = inti->type;
1264         switch (inti->type) {
1265         case KVM_S390_INT_PFAULT_INIT:
1266         case KVM_S390_INT_PFAULT_DONE:
1267         case KVM_S390_INT_VIRTIO:
1268         case KVM_S390_INT_SERVICE:
1269                 irq.u.ext = inti->ext;
1270                 break;
1271         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1272                 irq.u.io = inti->io;
1273                 break;
1274         case KVM_S390_MCHK:
1275                 irq.u.mchk = inti->mchk;
1276                 break;
1277         default:
1278                 return -EINVAL;
1279         }
1280
1281         if (copy_to_user(uptr, &irq, sizeof(irq)))
1282                 return -EFAULT;
1283
1284         return 0;
1285 }
1286
1287 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
1288 {
1289         struct kvm_s390_interrupt_info *inti;
1290         struct kvm_s390_float_interrupt *fi;
1291         int ret = 0;
1292         int n = 0;
1293
1294         mutex_lock(&kvm->lock);
1295         fi = &kvm->arch.float_int;
1296         spin_lock(&fi->lock);
1297
1298         list_for_each_entry(inti, &fi->list, list) {
1299                 if (len < sizeof(struct kvm_s390_irq)) {
1300                         /* signal userspace to try again */
1301                         ret = -ENOMEM;
1302                         break;
1303                 }
1304                 ret = copy_irq_to_user(inti, buf);
1305                 if (ret)
1306                         break;
1307                 buf += sizeof(struct kvm_s390_irq);
1308                 len -= sizeof(struct kvm_s390_irq);
1309                 n++;
1310         }
1311
1312         spin_unlock(&fi->lock);
1313         mutex_unlock(&kvm->lock);
1314
1315         return ret < 0 ? ret : n;
1316 }
1317
1318 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1319 {
1320         int r;
1321
1322         switch (attr->group) {
1323         case KVM_DEV_FLIC_GET_ALL_IRQS:
1324                 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
1325                                           attr->attr);
1326                 break;
1327         default:
1328                 r = -EINVAL;
1329         }
1330
1331         return r;
1332 }
1333
/*
 * Read one struct kvm_s390_irq from userspace at @addr and translate it
 * into the in-kernel representation @inti.  Only the union member
 * matching the interrupt type is copied in.  Returns 0 on success,
 * -EFAULT on a failed user access, -EINVAL for unknown types.
 */
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	/* assumes the 64-bit type field sits at offset 0 of the user
	 * structure -- confirm against the kvm_s390_irq ABI definition */
	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}
1373
1374 static int enqueue_floating_irq(struct kvm_device *dev,
1375                                 struct kvm_device_attr *attr)
1376 {
1377         struct kvm_s390_interrupt_info *inti = NULL;
1378         int r = 0;
1379         int len = attr->attr;
1380
1381         if (len % sizeof(struct kvm_s390_irq) != 0)
1382                 return -EINVAL;
1383         else if (len > KVM_S390_FLIC_MAX_BUFFER)
1384                 return -EINVAL;
1385
1386         while (len >= sizeof(struct kvm_s390_irq)) {
1387                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1388                 if (!inti)
1389                         return -ENOMEM;
1390
1391                 r = copy_irq_from_user(inti, attr->addr);
1392                 if (r) {
1393                         kfree(inti);
1394                         return r;
1395                 }
1396                 r = __inject_vm(dev->kvm, inti);
1397                 if (r) {
1398                         kfree(inti);
1399                         return r;
1400                 }
1401                 len -= sizeof(struct kvm_s390_irq);
1402                 attr->addr += sizeof(struct kvm_s390_irq);
1403         }
1404
1405         return r;
1406 }
1407
1408 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1409 {
1410         if (id >= MAX_S390_IO_ADAPTERS)
1411                 return NULL;
1412         return kvm->arch.adapters[id];
1413 }
1414
/*
 * KVM_DEV_FLIC_ADAPTER_REGISTER: allocate and register an I/O adapter
 * described by a struct kvm_s390_io_adapter copied from userspace.
 * Rejects duplicate or out-of-range adapter ids.
 *
 * NOTE(review): the id check and the store to arch.adapters[] are not
 * serialized here -- confirm callers provide the necessary locking
 * against concurrent registrations.
 */
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}
1445
1446 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1447 {
1448         int ret;
1449         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1450
1451         if (!adapter || !adapter->maskable)
1452                 return -EINVAL;
1453         ret = adapter->masked;
1454         adapter->masked = masked;
1455         return ret;
1456 }
1457
/*
 * Map a guest indicator page for adapter @id: translate the guest
 * address via the gmap, pin the backing user page and track it on the
 * adapter's map list (bounded by MAX_S390_ADAPTER_MAPS).
 * Returns 0 on success or a negative error code; the map structure is
 * freed on every failure path.
 */
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	/* NOTE(review): a return of 0 (no page pinned) would trip this
	 * BUG_ON -- confirm get_user_pages_fast cannot return 0 here */
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		/* map limit reached: unpin and bail out */
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
1497
1498 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1499 {
1500         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1501         struct s390_map_info *map, *tmp;
1502         int found = 0;
1503
1504         if (!adapter || !addr)
1505                 return -EINVAL;
1506
1507         down_write(&adapter->maps_lock);
1508         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1509                 if (map->guest_addr == addr) {
1510                         found = 1;
1511                         atomic_dec(&adapter->nr_maps);
1512                         list_del(&map->list);
1513                         put_page(map->page);
1514                         kfree(map);
1515                         break;
1516                 }
1517         }
1518         up_write(&adapter->maps_lock);
1519
1520         return found ? 0 : -EINVAL;
1521 }
1522
1523 void kvm_s390_destroy_adapters(struct kvm *kvm)
1524 {
1525         int i;
1526         struct s390_map_info *map, *tmp;
1527
1528         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1529                 if (!kvm->arch.adapters[i])
1530                         continue;
1531                 list_for_each_entry_safe(map, tmp,
1532                                          &kvm->arch.adapters[i]->maps, list) {
1533                         list_del(&map->list);
1534                         put_page(map->page);
1535                         kfree(map);
1536                 }
1537                 kfree(kvm->arch.adapters[i]);
1538         }
1539 }
1540
1541 static int modify_io_adapter(struct kvm_device *dev,
1542                              struct kvm_device_attr *attr)
1543 {
1544         struct kvm_s390_io_adapter_req req;
1545         struct s390_io_adapter *adapter;
1546         int ret;
1547
1548         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1549                 return -EFAULT;
1550
1551         adapter = get_io_adapter(dev->kvm, req.id);
1552         if (!adapter)
1553                 return -EINVAL;
1554         switch (req.type) {
1555         case KVM_S390_IO_ADAPTER_MASK:
1556                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1557                 if (ret > 0)
1558                         ret = 0;
1559                 break;
1560         case KVM_S390_IO_ADAPTER_MAP:
1561                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1562                 break;
1563         case KVM_S390_IO_ADAPTER_UNMAP:
1564                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1565                 break;
1566         default:
1567                 ret = -EINVAL;
1568         }
1569
1570         return ret;
1571 }
1572
/*
 * Dispatcher for writable FLIC device attributes: interrupt enqueue
 * and clearing, async page fault enable/disable, and adapter
 * registration/modification.
 */
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
1612
1613 static int flic_create(struct kvm_device *dev, u32 type)
1614 {
1615         if (!dev)
1616                 return -EINVAL;
1617         if (dev->kvm->arch.flic)
1618                 return -EINVAL;
1619         dev->kvm->arch.flic = dev;
1620         return 0;
1621 }
1622
/* Tear down the flic: drop the per-VM reference, then free the device. */
static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}
1628
/*
 * s390 floating irq controller (flic) — device ops registered with the
 * generic KVM device framework; one flic instance exists per VM.
 */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
1637
1638 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1639 {
1640         unsigned long bit;
1641
1642         bit = bit_nr + (addr % PAGE_SIZE) * 8;
1643
1644         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1645 }
1646
1647 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1648                                           u64 addr)
1649 {
1650         struct s390_map_info *map;
1651
1652         if (!adapter)
1653                 return NULL;
1654
1655         list_for_each_entry(map, &adapter->maps, list) {
1656                 if (map->guest_addr == addr)
1657                         return map;
1658         }
1659         return NULL;
1660 }
1661
/*
 * Set the adapter-local indicator bit and then the summary indicator bit
 * for an adapter interrupt, marking the backing pages dirty so migration
 * picks up the change.
 *
 * Returns -1 if either indicator page is not mapped, 1 if the summary
 * bit was newly set, and 0 if it was already set (interrupt coalesced).
 */
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	/* First the adapter-local indicator bit ... */
	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	/* ... then the summary indicator bit. */
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	/* Remember whether the summary bit was already pending. */
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
1694
1695 /*
1696  * < 0 - not injected due to error
1697  * = 0 - coalesced, summary indicator already active
1698  * > 0 - injected interrupt
1699  */
1700 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1701                            struct kvm *kvm, int irq_source_id, int level,
1702                            bool line_status)
1703 {
1704         int ret;
1705         struct s390_io_adapter *adapter;
1706
1707         /* We're only interested in the 0->1 transition. */
1708         if (!level)
1709                 return 0;
1710         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1711         if (!adapter)
1712                 return -1;
1713         down_read(&adapter->maps_lock);
1714         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1715         up_read(&adapter->maps_lock);
1716         if ((ret > 0) && !adapter->masked) {
1717                 struct kvm_s390_interrupt s390int = {
1718                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
1719                         .parm = 0,
1720                         .parm64 = (adapter->isc << 27) | 0x80000000,
1721                 };
1722                 ret = kvm_s390_inject_vm(kvm, &s390int);
1723                 if (ret == 0)
1724                         ret = 1;
1725         }
1726         return ret;
1727 }
1728
1729 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1730                           const struct kvm_irq_routing_entry *ue)
1731 {
1732         int ret;
1733
1734         switch (ue->type) {
1735         case KVM_IRQ_ROUTING_S390_ADAPTER:
1736                 e->set = set_adapter_int;
1737                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1738                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1739                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1740                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1741                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1742                 ret = 0;
1743                 break;
1744         default:
1745                 ret = -EINVAL;
1746         }
1747
1748         return ret;
1749 }
1750
/* MSI routing is not supported on s390; reject any injection attempt. */
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}