/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
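
/*
 * All floating interrupts that are not I/O interrupts use type values in
 * the 0xfffe0000-0xffffffff range, so everything outside that range is an
 * I/O interrupt.
 */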
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
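
/*
 * Check whether an interrupt is currently deliverable to the vcpu: the
 * corresponding PSW mask bit must be on and, for most classes, the matching
 * subclass mask bit in control register 0, 6 or 14 must be set.
 */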
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		break;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		break;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		break;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_CLOCK_COMP:
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
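
/*
 * Return the instruction length (in bytes) of the last intercepted
 * instruction: for intercepts that store the instruction it is derived
 * from bits 0-1 of the IPA, otherwise the program interruption ILC is used.
 */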
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
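
/*
 * The __deliver_* helpers below copy the interrupt parameters into the
 * guest lowcore, save the current PSW as the old PSW and load the new PSW
 * of the respective interrupt class. Any failed lowcore access makes the
 * helper return -EFAULT.
 */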
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_ext_info *ext = &inti->ext;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext->ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext->ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext->ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu,
						struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
	vcpu->stat.deliver_stop_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
					 0, 0);

	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}
static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu,
					     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_prefix_info *prefix = &inti->prefix;

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix->address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix->address, 0);

	kvm_s390_set_prefix(vcpu, prefix->address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_emerg_info *emerg = &inti->emerg;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->emerg.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, emerg->code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu,
						struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_extcall_info *extcall = &inti->extcall;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall->code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall->code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu,
				       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_pgm_info *pgm_info = &inti->pgm;
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info->code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info->code, 0);

	switch (pgm_info->code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info->mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info->code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info->per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info->per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info->code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
					 inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
					 inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
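
/* Dispatch one dequeued interrupt to the delivery routine for its type. */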
static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
					       struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		rc = __deliver_emergency_signal(vcpu, inti);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __deliver_external_call(vcpu, inti);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __deliver_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __deliver_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __deliver_pfault_init(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __deliver_stop(vcpu);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __deliver_set_prefix(vcpu, inti);
		break;
	case KVM_S390_RESTART:
		rc = __deliver_restart(vcpu);
		break;
	case KVM_S390_PROGRAM_INT:
		rc = __deliver_prog(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_machine_check(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}
	return rc;
}
/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}
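
/*
 * Handle an enabled wait: if no interrupt can ever arrive, report the
 * disabled wait to userspace; otherwise arm the clock comparator timer
 * (when enabled) and block the vcpu until it is woken up.
 */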
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	spin_lock(&li->lock);
	list_for_each_entry_safe(inti, n, &li->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	atomic_set(&li->active, 0);
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
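
/*
 * Deliver everything that is currently deliverable from the local and
 * floating interrupt lists and set intercept indicators (cpuflags, lctl,
 * ictl) for interrupts that have to stay pending.
 */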
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;
	int rc = 0;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock(&li->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = __deliver_ckc(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}
static int __inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
	spin_lock(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);

	inti->type = KVM_S390_PROGRAM_INT;
	memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
	spin_lock(&li->lock);
	rc = __inject_prog_irq(vcpu, inti);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}
static int __inject_pfault_init(struct kvm_vcpu *vcpu,
				struct kvm_s390_interrupt *s390int,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	inti->ext.ext_params2 = s390int->parm64;
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu,
			    struct kvm_s390_interrupt *s390int,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   s390int->parm);
	if (s390int->parm & 0xffff0000)
		return -EINVAL;
	inti->extcall.code = s390int->parm;
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
static int __inject_set_prefix(struct kvm_vcpu *vcpu,
			       struct kvm_s390_interrupt *s390int,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   s390int->parm);
	inti->prefix.address = s390int->parm;
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	return 0;
}

static int __inject_sigp_stop(struct kvm_vcpu *vcpu,
			      struct kvm_s390_interrupt *s390int,
			      struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	li->action_bits |= ACTION_STOP_ON_STOP;
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_interrupt *s390int,
				 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	return 0;
}
static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt *s390int,
				   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
	if (s390int->parm & 0xffff0000)
		return -EINVAL;
	inti->emerg.code = s390int->parm;
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int,
			 struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   s390int->parm64);
	inti->mchk.mcic = s390int->parm64;
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu,
			struct kvm_s390_interrupt *s390int,
			struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu,
			      struct kvm_s390_interrupt *s390int,
			      struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
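
/*
 * Dequeue a pending floating I/O interrupt, selected either by the ISC
 * mask in cr6 or by a subchannel id/nr in schid; exactly one of the two
 * selectors must be non-zero.
 */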
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
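
/*
 * Queue a floating interrupt (I/O interrupts are kept sorted by ISC) and
 * kick an idle vcpu, or the next vcpu in round-robin order if none is idle.
 */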
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}
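
/*
 * Inject a local interrupt described by a struct kvm_s390_interrupt from
 * userspace; purely floating types (service, virtio, I/O) are rejected.
 */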
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type,
				   s390int->parm, 0, 2);
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		inti->pgm.code = s390int->parm;
		if (s390int->parm & 0xffff0000)
			rc = -EINVAL;
		else
			rc = __inject_prog_irq(vcpu, inti);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, s390int, inti);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, s390int, inti);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, s390int, inti);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, s390int, inti);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	else
		kfree(inti);
	return rc;
}
void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   __u8 *addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}
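
/*
 * Copy all currently pending floating interrupts to the userspace buffer
 * and return how many were copied, or -ENOMEM if the buffer is too small
 * (userspace is expected to retry with a bigger buffer).
 */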
static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}
static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}
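
/*
 * Register a new I/O adapter for adapter interrupts; the adapter id, ISC,
 * maskability and bit-swap setting come from userspace via the FLIC device.
 */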
static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}
static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
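
/*
 * Translate an indicator bit number relative to a guest address into the
 * bit number within the mapped page, honoring the adapter's bit ordering.
 */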
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}
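
/*
 * Set the adapter's indicator bit and its summary indicator bit in the
 * mapped guest pages. Returns 1 if the summary bit was newly set, 0 if it
 * was already set (the interrupt can be coalesced), negative on error.
 */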
static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}
/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}