/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
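
/*
 * Non-I/O interrupt types are encoded as 0xfffe....; everything else
 * carries an I/O interruption word with the cssid/ssid/schid encoded in
 * the low bits (see the IOINT_* masks above).
 */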
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}
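
/*
 * The interruption subclass (ISC) sits in bits 2-4 of the I/O
 * interruption word (mask 0x38000000); the result is the matching mask
 * bit in CR6 format, e.g. ISC 0 maps to 0x80000000 and ISC 3 to
 * 0x10000000.
 */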
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
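
/*
 * Decide whether an interrupt of the given type may be delivered right
 * now: the PSW mask must have the matching interruption class enabled
 * and the relevant subclass mask bit must be set in CR0 (external),
 * CR6 (I/O) or CR14 (machine check).
 */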
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}
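
/*
 * Mask the pending bits down to those that are actually deliverable
 * given the current PSW mask and control register settings.
 */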
static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask = pending_local_irqs(vcpu);

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}
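
/*
 * Derive the instruction length code for a program interrupt: for
 * instruction interceptions the length is encoded in the first two bits
 * of the intercepted opcode (ipa >> 14), for program interruption
 * interceptions the hardware already provides it in pgmilc.
 */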
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
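
/*
 * All __deliver_* helpers below follow the architected interruption
 * sequence: store the interruption code and parameters plus the old PSW
 * into the guest lowcore, then load the guest PSW from the matching
 * new-PSW slot (the read_guest_lc into gpsw performs that PSW swap).
 */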
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk;
	int rc;

	spin_lock(&li->lock);
	mchk = li->irq.mchk;
	/*
	 * If there was an exigent machine check pending, then any repressible
	 * machine checks that might have been pending are indicated along
	 * with it, so always clear both bits
	 */
	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	memset(&li->irq.mchk, 0, sizeof(mchk));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk.mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk.cr14, mchk.mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk.mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
	vcpu->stat.deliver_stop_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
					 0, 0);

	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
	return 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}
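
/*
 * The IRQ_PEND_* bits are declared in priority order, so find_first_bit
 * on the pending mask yields the highest-priority deliverable interrupt;
 * this table maps each bit to its delivery function.
 */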
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX] = __deliver_machine_check,
	[IRQ_PEND_PROG] = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY] = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL] = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER] = __deliver_cpu_timer,
	[IRQ_PEND_RESTART] = __deliver_restart,
	[IRQ_PEND_SIGP_STOP] = __deliver_stop,
	[IRQ_PEND_SET_PREFIX] = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT] = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_mchk_floating(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	rc = !!deliverable_local_irqs(vcpu);

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	__set_cpu_idle(vcpu);
	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	deliver_irq_t func;
	int deliver;
	int rc = 0;
	unsigned long irq_type;
	unsigned long deliverable_irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (kvm_cpu_has_pending_timer(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	do {
		deliverable_irqs = deliverable_local_irqs(vcpu);
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
		if (irq_type == IRQ_PEND_COUNT)
			break;
		func = deliver_irq_funcs[irq_type];
		if (!func) {
			WARN_ON_ONCE(func == NULL);
			clear_bit(irq_type, &li->pending_irqs);
			continue;
		}
		rc = func(vcpu);
	} while (!rc && irq_type != IRQ_PEND_COUNT);

	set_intercept_indicators_local(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __deliver_floating_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	li->irq.pgm = irq->u.pgm;
	__set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
				   0, 1);
	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);

	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);
	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);

	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2, 2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   irq->u.extcall.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   irq->u.extcall.code, 0, 2);

	*extcall = irq->u.extcall;
	__set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   prefix->address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   prefix->address, 0, 2);

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

	li->action_bits |= ACTION_STOP_ON_STOP;
	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_emerg_info *emerg = &li->irq.emerg;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0, 2);

	emerg->code = irq->u.emerg.code;
	set_bit(emerg->code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic, 2);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
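
/*
 * Dequeue and return a pending I/O interrupt, selected either by the
 * CR6 ISC mask or by a specific subchannel id/nr - exactly one of the
 * two selectors must be non-zero.
 */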
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
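
/*
 * Queue a floating interrupt (I/O interrupts are kept sorted by ISC) and
 * kick an idle vcpu if one exists, otherwise pick a destination vcpu
 * round-robin.
 */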
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   irq->u.pgm.code);
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, (u64) buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}
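
/*
 * Pin the guest page backing an adapter indicator area: translate the
 * guest address via the gmap, take a page reference with
 * get_user_pages_fast and remember the mapping, so that
 * adapter_indicators_set can flip indicator bits through page_address
 * without any further guest-address translation.
 */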
static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}
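
/*
 * Userspace usage sketch (illustrative only, not part of this file): the
 * flic is created with KVM_CREATE_DEVICE on the VM fd and driven through
 * KVM_SET_DEVICE_ATTR on the device fd, e.g. to queue one service signal
 * (the ext_params value below is just a sample):
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.u.ext.ext_params = 0x20200,
 *	};
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr = sizeof(irq),		(buffer length in bytes)
 *		.addr = (__u64)&irq,
 *	};
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
 */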
static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
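
/*
 * Translate a guest indicator address/bit into a bit number relative to
 * the start of the backing page; with swap set the number is mirrored
 * (bit ^ (BITS_PER_LONG - 1)) to convert the guest's MSB-first bit
 * numbering into the host bitops numbering.
 */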
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}