/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
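/*
 * Masks for picking the subchannel number, subchannel-set id, channel
 * subsystem id and the adapter-interrupt flag out of an I/O interrupt
 * type value as built by KVM_S390_INT_IO().
 */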
25 #define IOINT_SCHID_MASK 0x0000ffff
26 #define IOINT_SSID_MASK 0x00030000
27 #define IOINT_CSSID_MASK 0x03fc0000
28 #define IOINT_AI_MASK 0x04000000
29 #define PFAULT_INIT 0x0600
31 static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
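/*
 * Interrupt types built by KVM_S390_INT_IO() never have all upper type
 * bits set, while the fixed interrupt types (0xfffexxxx/0xffffxxxx) do;
 * use that to tell classic I/O interrupts apart from the rest.
 */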
33 static int is_ioint(u64 type)
35 return ((type & 0xfffe0000u) != 0xfffe0000u);
38 int psw_extint_disabled(struct kvm_vcpu *vcpu)
40 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
43 static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
45 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
48 static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
50 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
53 static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
55 if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
56 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
57 (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
62 static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
64 if (psw_extint_disabled(vcpu) ||
65 !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
67 if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
68 /* No timer interrupts when single stepping */
73 static u64 int_word_to_isc_bits(u32 int_word)
75 u8 isc = (int_word & 0x38000000) >> 27;
77 return (0x80 >> isc) << 24;
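/*
 * Check whether the given pending interrupt may currently be delivered,
 * i.e. whether the corresponding subclass is enabled in the guest PSW
 * mask and control registers.
 */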
80 static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
81 struct kvm_s390_interrupt_info *inti)
84 case KVM_S390_INT_EXTERNAL_CALL:
85 if (psw_extint_disabled(vcpu))
87 if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
90 case KVM_S390_INT_EMERGENCY:
91 if (psw_extint_disabled(vcpu))
93 if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
96 case KVM_S390_INT_CLOCK_COMP:
97 return ckc_interrupts_enabled(vcpu);
98 case KVM_S390_INT_CPU_TIMER:
99 if (psw_extint_disabled(vcpu))
101 if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
104 case KVM_S390_INT_SERVICE:
105 case KVM_S390_INT_PFAULT_INIT:
106 case KVM_S390_INT_PFAULT_DONE:
107 case KVM_S390_INT_VIRTIO:
108 if (psw_extint_disabled(vcpu))
110 if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
113 case KVM_S390_PROGRAM_INT:
114 case KVM_S390_SIGP_STOP:
115 case KVM_S390_SIGP_SET_PREFIX:
116 case KVM_S390_RESTART:
119 if (psw_mchk_disabled(vcpu))
121 if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
124 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
125 if (psw_ioint_disabled(vcpu))
127 if (vcpu->arch.sie_block->gcr[6] &
128 int_word_to_isc_bits(inti->io.io_int_word))
printk(KERN_WARNING "illegal interrupt type %llx\n", inti->type);
139 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
141 atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
142 set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
145 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
147 atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
148 clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
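/*
 * Drop all previously requested interception indicators (I/O, external
 * and stop interrupts, control-register and PSW intercepts); guest
 * debugging and a pending stop action re-add what they still need.
 */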
151 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
153 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
154 &vcpu->arch.sie_block->cpuflags);
155 vcpu->arch.sie_block->lctl = 0x0000;
156 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
158 if (guestdbg_enabled(vcpu)) {
159 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
160 LCTL_CR10 | LCTL_CR11);
161 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
164 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
165 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
168 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
170 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
173 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
174 struct kvm_s390_interrupt_info *inti)
176 switch (inti->type) {
177 case KVM_S390_INT_EXTERNAL_CALL:
178 case KVM_S390_INT_EMERGENCY:
179 case KVM_S390_INT_SERVICE:
180 case KVM_S390_INT_PFAULT_INIT:
181 case KVM_S390_INT_PFAULT_DONE:
182 case KVM_S390_INT_VIRTIO:
183 case KVM_S390_INT_CLOCK_COMP:
184 case KVM_S390_INT_CPU_TIMER:
185 if (psw_extint_disabled(vcpu))
186 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
188 vcpu->arch.sie_block->lctl |= LCTL_CR0;
190 case KVM_S390_SIGP_STOP:
191 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
194 if (psw_mchk_disabled(vcpu))
195 vcpu->arch.sie_block->ictl |= ICTL_LPSW;
197 vcpu->arch.sie_block->lctl |= LCTL_CR14;
199 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
200 if (psw_ioint_disabled(vcpu))
201 __set_cpuflag(vcpu, CPUSTAT_IO_INT);
203 vcpu->arch.sie_block->lctl |= LCTL_CR6;
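/*
 * Derive the instruction length code for a program interrupt: for
 * instruction-related intercepts the length follows from the first two
 * opcode bits (ipa), otherwise the hardware-provided pgmilc is used.
 */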
210 static u16 get_ilc(struct kvm_vcpu *vcpu)
212 const unsigned short table[] = { 2, 4, 4, 6 };
214 switch (vcpu->arch.sie_block->icptcode) {
220 /* last instruction only stored for these icptcodes */
221 return table[vcpu->arch.sie_block->ipa >> 14];
223 return vcpu->arch.sie_block->pgmilc;
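/*
 * Deliver a program interrupt: store the exception-specific parameters,
 * PER information, ILC and interruption code into the guest lowcore,
 * save the old PSW and load the program-check new PSW.
 */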
229 static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
230 struct kvm_s390_pgm_info *pgm_info)
233 u16 ilc = get_ilc(vcpu);
235 switch (pgm_info->code & ~PGM_PER) {
236 case PGM_AFX_TRANSLATION:
237 case PGM_ASX_TRANSLATION:
238 case PGM_EX_TRANSLATION:
239 case PGM_LFX_TRANSLATION:
240 case PGM_LSTE_SEQUENCE:
241 case PGM_LSX_TRANSLATION:
242 case PGM_LX_TRANSLATION:
243 case PGM_PRIMARY_AUTHORITY:
244 case PGM_SECONDARY_AUTHORITY:
245 case PGM_SPACE_SWITCH:
246 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
247 (u64 *)__LC_TRANS_EXC_CODE);
249 case PGM_ALEN_TRANSLATION:
250 case PGM_ALE_SEQUENCE:
251 case PGM_ASTE_INSTANCE:
252 case PGM_ASTE_SEQUENCE:
253 case PGM_ASTE_VALIDITY:
254 case PGM_EXTENDED_AUTHORITY:
255 rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
256 (u8 *)__LC_EXC_ACCESS_ID);
259 case PGM_PAGE_TRANSLATION:
260 case PGM_REGION_FIRST_TRANS:
261 case PGM_REGION_SECOND_TRANS:
262 case PGM_REGION_THIRD_TRANS:
263 case PGM_SEGMENT_TRANSLATION:
264 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
265 (u64 *)__LC_TRANS_EXC_CODE);
266 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
267 (u8 *)__LC_EXC_ACCESS_ID);
268 rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
269 (u8 *)__LC_OP_ACCESS_ID);
272 rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
273 (u16 *)__LC_MON_CLASS_NR);
274 rc |= put_guest_lc(vcpu, pgm_info->mon_code,
275 (u64 *)__LC_MON_CODE);
278 rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
279 (u32 *)__LC_DATA_EXC_CODE);
282 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
283 (u64 *)__LC_TRANS_EXC_CODE);
284 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
285 (u8 *)__LC_EXC_ACCESS_ID);
289 if (pgm_info->code & PGM_PER) {
290 rc |= put_guest_lc(vcpu, pgm_info->per_code,
291 (u8 *) __LC_PER_CODE);
292 rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
293 (u8 *)__LC_PER_ATMID);
294 rc |= put_guest_lc(vcpu, pgm_info->per_address,
295 (u64 *) __LC_PER_ADDRESS);
296 rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
297 (u8 *) __LC_PER_ACCESS_ID);
300 rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
301 rc |= put_guest_lc(vcpu, pgm_info->code,
302 (u16 *)__LC_PGM_INT_CODE);
303 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
304 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
305 rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
306 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
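/*
 * Deliver a single interrupt to the guest by writing its parameters to
 * the lowcore and exchanging the old/new PSW pair for its class.
 */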
311 static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
312 struct kvm_s390_interrupt_info *inti)
314 const unsigned short table[] = { 2, 4, 4, 6 };
317 switch (inti->type) {
318 case KVM_S390_INT_EMERGENCY:
319 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
320 vcpu->stat.deliver_emergency_signal++;
321 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
322 inti->emerg.code, 0);
323 rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
324 rc |= put_guest_lc(vcpu, inti->emerg.code,
325 (u16 *)__LC_EXT_CPU_ADDR);
326 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
327 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
328 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
329 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
331 case KVM_S390_INT_EXTERNAL_CALL:
332 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
333 vcpu->stat.deliver_external_call++;
334 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
335 inti->extcall.code, 0);
336 rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
337 rc |= put_guest_lc(vcpu, inti->extcall.code,
338 (u16 *)__LC_EXT_CPU_ADDR);
339 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
340 &vcpu->arch.sie_block->gpsw,
342 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
343 &vcpu->arch.sie_block->gpsw,
346 case KVM_S390_INT_CLOCK_COMP:
347 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
348 inti->ext.ext_params, 0);
349 rc = deliver_ckc_interrupt(vcpu);
351 case KVM_S390_INT_CPU_TIMER:
352 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
353 inti->ext.ext_params, 0);
354 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
355 (u16 *)__LC_EXT_INT_CODE);
356 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
357 &vcpu->arch.sie_block->gpsw,
359 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
360 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
361 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
362 (u32 *)__LC_EXT_PARAMS);
364 case KVM_S390_INT_SERVICE:
365 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
366 inti->ext.ext_params);
367 vcpu->stat.deliver_service_signal++;
368 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
369 inti->ext.ext_params, 0);
370 rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
371 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
372 &vcpu->arch.sie_block->gpsw,
374 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
375 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
376 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
377 (u32 *)__LC_EXT_PARAMS);
379 case KVM_S390_INT_PFAULT_INIT:
380 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
381 inti->ext.ext_params2);
382 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
383 (u16 *) __LC_EXT_INT_CODE);
384 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
385 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
386 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
387 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
388 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
389 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
390 (u64 *) __LC_EXT_PARAMS2);
392 case KVM_S390_INT_PFAULT_DONE:
393 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
394 inti->ext.ext_params2);
395 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
396 rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
397 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
398 &vcpu->arch.sie_block->gpsw,
400 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
401 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
402 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
403 (u64 *)__LC_EXT_PARAMS2);
405 case KVM_S390_INT_VIRTIO:
406 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
407 inti->ext.ext_params, inti->ext.ext_params2);
408 vcpu->stat.deliver_virtio_interrupt++;
409 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
410 inti->ext.ext_params,
411 inti->ext.ext_params2);
412 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
413 rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
414 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
415 &vcpu->arch.sie_block->gpsw,
417 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
418 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
419 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
420 (u32 *)__LC_EXT_PARAMS);
421 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
422 (u64 *)__LC_EXT_PARAMS2);
424 case KVM_S390_SIGP_STOP:
425 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
426 vcpu->stat.deliver_stop_signal++;
427 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
429 __set_intercept_indicator(vcpu, inti);
432 case KVM_S390_SIGP_SET_PREFIX:
433 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
434 inti->prefix.address);
435 vcpu->stat.deliver_prefix_signal++;
436 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
437 inti->prefix.address, 0);
438 kvm_s390_set_prefix(vcpu, inti->prefix.address);
441 case KVM_S390_RESTART:
442 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
443 vcpu->stat.deliver_restart_signal++;
444 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
446 rc = write_guest_lc(vcpu,
447 offsetof(struct _lowcore, restart_old_psw),
448 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
449 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
450 &vcpu->arch.sie_block->gpsw,
453 case KVM_S390_PROGRAM_INT:
454 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
inti->pgm.code, table[vcpu->arch.sie_block->ipa >> 14]);
457 vcpu->stat.deliver_program_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->pgm.code, 0);
460 rc = __deliver_prog_irq(vcpu, &inti->pgm);
VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx", inti->mchk.mcic);
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, inti->mchk.cr14, inti->mchk.mcic);
469 rc = kvm_s390_vcpu_store_status(vcpu,
470 KVM_S390_STORE_STATUS_PREFIXED);
471 rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
472 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
473 &vcpu->arch.sie_block->gpsw,
475 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
476 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
479 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
481 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
482 inti->io.subchannel_nr;
483 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
484 inti->io.io_int_word;
485 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
486 vcpu->stat.deliver_io_int++;
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, param0, param1);
489 rc = put_guest_lc(vcpu, inti->io.subchannel_id,
490 (u16 *)__LC_SUBCHANNEL_ID);
491 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
492 (u16 *)__LC_SUBCHANNEL_NR);
493 rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
494 (u32 *)__LC_IO_INT_PARM);
495 rc |= put_guest_lc(vcpu, inti->io.io_int_word,
496 (u32 *)__LC_IO_INT_WORD);
497 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
498 &vcpu->arch.sie_block->gpsw,
500 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
501 &vcpu->arch.sie_block->gpsw,
512 static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
rc = put_guest_lc(vcpu, 0x1004, (u16 *)__LC_EXT_INT_CODE);
517 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
518 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
519 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
520 &vcpu->arch.sie_block->gpsw,
525 /* Check whether SIGP interpretation facility has an external call pending */
526 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
528 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
530 if (!psw_extint_disabled(vcpu) &&
531 (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
532 (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
533 (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
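/*
 * Returns non-zero if any local or floating interrupt is currently
 * deliverable, or if a clock-comparator or SIGP external call is
 * pending.
 */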
539 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
541 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
542 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
543 struct kvm_s390_interrupt_info *inti;
546 if (atomic_read(&li->active)) {
547 spin_lock(&li->lock);
548 list_for_each_entry(inti, &li->list, list)
549 if (__interrupt_is_deliverable(vcpu, inti)) {
553 spin_unlock(&li->lock);
556 if ((!rc) && atomic_read(&fi->active)) {
557 spin_lock(&fi->lock);
558 list_for_each_entry(inti, &fi->list, list)
559 if (__interrupt_is_deliverable(vcpu, inti)) {
563 spin_unlock(&fi->lock);
566 if (!rc && kvm_cpu_has_pending_timer(vcpu))
569 if (!rc && kvm_s390_si_ext_call_pending(vcpu))
575 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
577 if (!(vcpu->arch.sie_block->ckc <
578 get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
580 if (!ckc_interrupts_enabled(vcpu))
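/*
 * Handle an enabled wait: return immediately if work is pending, refuse
 * a fully disabled wait, otherwise arm the clock-comparator hrtimer (if
 * enabled) and block the vcpu until it is woken up again.
 */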
585 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
589 vcpu->stat.exit_wait_state++;
592 if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
595 if (psw_interrupts_disabled(vcpu)) {
596 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
597 return -EOPNOTSUPP; /* disabled wait */
600 __set_cpu_idle(vcpu);
601 if (!ckc_interrupts_enabled(vcpu)) {
602 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
606 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
607 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
609 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
611 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
612 kvm_vcpu_block(vcpu);
613 __unset_cpu_idle(vcpu);
614 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
616 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
620 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
622 if (waitqueue_active(&vcpu->wq)) {
/* The vcpu gave up the cpu voluntarily, mark it as a good yield-candidate. */
627 vcpu->preempted = true;
628 wake_up_interruptible(&vcpu->wq);
629 vcpu->stat.halt_wakeup++;
633 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
635 struct kvm_vcpu *vcpu;
637 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
638 kvm_s390_vcpu_wakeup(vcpu);
640 return HRTIMER_NORESTART;
643 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
645 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
646 struct kvm_s390_interrupt_info *n, *inti = NULL;
648 spin_lock(&li->lock);
649 list_for_each_entry_safe(inti, n, &li->list, list) {
650 list_del(&inti->list);
653 atomic_set(&li->active, 0);
654 spin_unlock(&li->lock);
656 /* clear pending external calls set by sigp interpretation facility */
657 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
658 atomic_clear_mask(SIGP_CTRL_C,
659 &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
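/*
 * Deliver everything from the local and floating interrupt lists that
 * the guest currently accepts; anything not yet deliverable only sets
 * the matching interception indicators.
 */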
662 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
664 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
665 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
666 struct kvm_s390_interrupt_info *n, *inti = NULL;
670 __reset_intercept_indicators(vcpu);
671 if (atomic_read(&li->active)) {
674 spin_lock(&li->lock);
675 list_for_each_entry_safe(inti, n, &li->list, list) {
676 if (__interrupt_is_deliverable(vcpu, inti)) {
677 list_del(&inti->list);
681 __set_intercept_indicator(vcpu, inti);
683 if (list_empty(&li->list))
684 atomic_set(&li->active, 0);
685 spin_unlock(&li->lock);
687 rc = __do_deliver_interrupt(vcpu, inti);
690 } while (!rc && deliver);
693 if (!rc && kvm_cpu_has_pending_timer(vcpu))
694 rc = deliver_ckc_interrupt(vcpu);
696 if (!rc && atomic_read(&fi->active)) {
699 spin_lock(&fi->lock);
700 list_for_each_entry_safe(inti, n, &fi->list, list) {
701 if (__interrupt_is_deliverable(vcpu, inti)) {
702 list_del(&inti->list);
707 __set_intercept_indicator(vcpu, inti);
709 if (list_empty(&fi->list))
710 atomic_set(&fi->active, 0);
711 spin_unlock(&fi->lock);
713 rc = __do_deliver_interrupt(vcpu, inti);
716 } while (!rc && deliver);
722 static int __inject_prog_irq(struct kvm_vcpu *vcpu,
723 struct kvm_s390_interrupt_info *inti)
725 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
727 list_add(&inti->list, &li->list);
728 atomic_set(&li->active, 1);
732 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
734 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
735 struct kvm_s390_interrupt_info *inti;
737 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
741 inti->type = KVM_S390_PROGRAM_INT;
742 inti->pgm.code = code;
744 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
745 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
746 spin_lock(&li->lock);
747 list_add(&inti->list, &li->list);
748 atomic_set(&li->active, 1);
749 BUG_ON(waitqueue_active(li->wq));
750 spin_unlock(&li->lock);
754 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
755 struct kvm_s390_pgm_info *pgm_info)
757 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
758 struct kvm_s390_interrupt_info *inti;
761 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)", pgm_info->code);
767 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
768 pgm_info->code, 0, 1);
770 inti->type = KVM_S390_PROGRAM_INT;
771 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
772 spin_lock(&li->lock);
773 rc = __inject_prog_irq(vcpu, inti);
774 BUG_ON(waitqueue_active(li->wq));
775 spin_unlock(&li->lock);
779 static int __inject_pfault_init(struct kvm_vcpu *vcpu,
780 struct kvm_s390_interrupt *s390int,
781 struct kvm_s390_interrupt_info *inti)
783 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
785 inti->ext.ext_params2 = s390int->parm64;
786 list_add_tail(&inti->list, &li->list);
787 atomic_set(&li->active, 1);
788 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
792 static int __inject_extcall(struct kvm_vcpu *vcpu,
793 struct kvm_s390_interrupt *s390int,
794 struct kvm_s390_interrupt_info *inti)
796 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u", s390int->parm);
800 if (s390int->parm & 0xffff0000)
802 inti->extcall.code = s390int->parm;
803 list_add_tail(&inti->list, &li->list);
804 atomic_set(&li->active, 1);
805 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
809 static int __inject_set_prefix(struct kvm_vcpu *vcpu,
810 struct kvm_s390_interrupt *s390int,
811 struct kvm_s390_interrupt_info *inti)
813 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)", s390int->parm);
817 inti->prefix.address = s390int->parm;
818 list_add_tail(&inti->list, &li->list);
819 atomic_set(&li->active, 1);
823 static int __inject_sigp_stop(struct kvm_vcpu *vcpu,
824 struct kvm_s390_interrupt *s390int,
825 struct kvm_s390_interrupt_info *inti)
827 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
829 list_add_tail(&inti->list, &li->list);
830 atomic_set(&li->active, 1);
831 li->action_bits |= ACTION_STOP_ON_STOP;
835 static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
836 struct kvm_s390_interrupt *s390int,
837 struct kvm_s390_interrupt_info *inti)
839 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
841 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
842 list_add_tail(&inti->list, &li->list);
843 atomic_set(&li->active, 1);
847 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
848 struct kvm_s390_interrupt *s390int,
849 struct kvm_s390_interrupt_info *inti)
851 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
VCPU_EVENT(vcpu, 3, "inject: emergency %u", s390int->parm);
854 if (s390int->parm & 0xffff0000)
856 inti->emerg.code = s390int->parm;
857 list_add_tail(&inti->list, &li->list);
858 atomic_set(&li->active, 1);
859 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
863 static int __inject_mchk(struct kvm_vcpu *vcpu,
864 struct kvm_s390_interrupt *s390int,
865 struct kvm_s390_interrupt_info *inti)
867 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx", s390int->parm64);
871 inti->mchk.mcic = s390int->parm64;
872 list_add_tail(&inti->list, &li->list);
873 atomic_set(&li->active, 1);
877 static int __inject_ckc(struct kvm_vcpu *vcpu,
878 struct kvm_s390_interrupt *s390int,
879 struct kvm_s390_interrupt_info *inti)
881 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
883 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
884 list_add_tail(&inti->list, &li->list);
885 atomic_set(&li->active, 1);
886 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
890 static int __inject_cpu_timer(struct kvm_vcpu *vcpu,
891 struct kvm_s390_interrupt *s390int,
892 struct kvm_s390_interrupt_info *inti)
894 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
896 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
897 list_add_tail(&inti->list, &li->list);
898 atomic_set(&li->active, 1);
899 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
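/*
 * Dequeue one pending floating I/O interrupt, selected either by ISC
 * mask (cr6) or by subchannel id (schid); exactly one of the two
 * selectors must be supplied.
 */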
903 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
906 struct kvm_s390_float_interrupt *fi;
907 struct kvm_s390_interrupt_info *inti, *iter;
909 if ((!schid && !cr6) || (schid && cr6))
911 mutex_lock(&kvm->lock);
912 fi = &kvm->arch.float_int;
913 spin_lock(&fi->lock);
915 list_for_each_entry(iter, &fi->list, list) {
916 if (!is_ioint(iter->type))
919 ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
922 if (((schid & 0x00000000ffff0000) >> 16) !=
923 iter->io.subchannel_id)
925 if ((schid & 0x000000000000ffff) !=
926 iter->io.subchannel_nr)
933 list_del_init(&inti->list);
936 if (list_empty(&fi->list))
937 atomic_set(&fi->active, 0);
938 spin_unlock(&fi->lock);
939 mutex_unlock(&kvm->lock);
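/*
 * Queue a floating interrupt (I/O interrupts are kept sorted by ISC)
 * and wake an idle vcpu (or pick one round-robin) so that the interrupt
 * gets delivered.
 */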
943 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
945 struct kvm_s390_local_interrupt *li;
946 struct kvm_s390_float_interrupt *fi;
947 struct kvm_s390_interrupt_info *iter;
948 struct kvm_vcpu *dst_vcpu = NULL;
952 mutex_lock(&kvm->lock);
953 fi = &kvm->arch.float_int;
954 spin_lock(&fi->lock);
955 if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
960 if (!is_ioint(inti->type)) {
961 list_add_tail(&inti->list, &fi->list);
963 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
965 /* Keep I/O interrupts sorted in isc order. */
966 list_for_each_entry(iter, &fi->list, list) {
967 if (!is_ioint(iter->type))
969 if (int_word_to_isc_bits(iter->io.io_int_word)
974 list_add_tail(&inti->list, &iter->list);
976 atomic_set(&fi->active, 1);
977 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
978 if (sigcpu == KVM_MAX_VCPUS) {
980 sigcpu = fi->next_rr_cpu++;
981 if (sigcpu == KVM_MAX_VCPUS)
982 sigcpu = fi->next_rr_cpu = 0;
983 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
985 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
986 li = &dst_vcpu->arch.local_int;
987 spin_lock(&li->lock);
988 switch (inti->type) {
990 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
992 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
993 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
996 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
999 spin_unlock(&li->lock);
1000 kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
1002 spin_unlock(&fi->lock);
1003 mutex_unlock(&kvm->lock);
1007 int kvm_s390_inject_vm(struct kvm *kvm,
1008 struct kvm_s390_interrupt *s390int)
1010 struct kvm_s390_interrupt_info *inti;
1012 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1016 inti->type = s390int->type;
1017 switch (inti->type) {
1018 case KVM_S390_INT_VIRTIO:
1019 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1020 s390int->parm, s390int->parm64);
1021 inti->ext.ext_params = s390int->parm;
1022 inti->ext.ext_params2 = s390int->parm64;
1024 case KVM_S390_INT_SERVICE:
1025 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
1026 inti->ext.ext_params = s390int->parm;
1028 case KVM_S390_INT_PFAULT_DONE:
1029 inti->type = s390int->type;
1030 inti->ext.ext_params2 = s390int->parm64;
VM_EVENT(kvm, 5, "inject: machine check parm64:%llx", s390int->parm64);
1035 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1036 inti->mchk.mcic = s390int->parm64;
1038 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1039 if (inti->type & IOINT_AI_MASK)
1040 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1042 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1043 s390int->type & IOINT_CSSID_MASK,
1044 s390int->type & IOINT_SSID_MASK,
1045 s390int->type & IOINT_SCHID_MASK);
1046 inti->io.subchannel_id = s390int->parm >> 16;
1047 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1048 inti->io.io_int_parm = s390int->parm64 >> 32;
1049 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1055 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1058 return __inject_vm(kvm, inti);
1061 void kvm_s390_reinject_io_int(struct kvm *kvm,
1062 struct kvm_s390_interrupt_info *inti)
1064 __inject_vm(kvm, inti);
1067 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
1068 struct kvm_s390_interrupt *s390int)
1070 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1071 struct kvm_s390_interrupt_info *inti;
1074 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1078 inti->type = s390int->type;
1080 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type,
1081 s390int->parm, 0, 2);
1082 spin_lock(&li->lock);
1083 switch (inti->type) {
1084 case KVM_S390_PROGRAM_INT:
VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)", s390int->parm);
1087 inti->pgm.code = s390int->parm;
1088 if (s390int->parm & 0xffff0000)
1091 rc = __inject_prog_irq(vcpu, inti);
1093 case KVM_S390_SIGP_SET_PREFIX:
1094 rc = __inject_set_prefix(vcpu, s390int, inti);
1096 case KVM_S390_SIGP_STOP:
1097 rc = __inject_sigp_stop(vcpu, s390int, inti);
1099 case KVM_S390_RESTART:
1100 rc = __inject_sigp_restart(vcpu, s390int, inti);
1102 case KVM_S390_INT_CLOCK_COMP:
1103 rc = __inject_ckc(vcpu, s390int, inti);
1105 case KVM_S390_INT_CPU_TIMER:
1106 rc = __inject_cpu_timer(vcpu, s390int, inti);
1108 case KVM_S390_INT_EXTERNAL_CALL:
1109 rc = __inject_extcall(vcpu, s390int, inti);
1111 case KVM_S390_INT_EMERGENCY:
1112 rc = __inject_sigp_emergency(vcpu, s390int, inti);
1115 rc = __inject_mchk(vcpu, s390int, inti);
1117 case KVM_S390_INT_PFAULT_INIT:
1118 rc = __inject_pfault_init(vcpu, s390int, inti);
1120 case KVM_S390_INT_VIRTIO:
1121 case KVM_S390_INT_SERVICE:
1122 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1126 spin_unlock(&li->lock);
1128 kvm_s390_vcpu_wakeup(vcpu);
1134 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1136 struct kvm_s390_float_interrupt *fi;
1137 struct kvm_s390_interrupt_info *n, *inti = NULL;
1139 mutex_lock(&kvm->lock);
1140 fi = &kvm->arch.float_int;
1141 spin_lock(&fi->lock);
1142 list_for_each_entry_safe(inti, n, &fi->list, list) {
1143 list_del(&inti->list);
1147 atomic_set(&fi->active, 0);
1148 spin_unlock(&fi->lock);
1149 mutex_unlock(&kvm->lock);
1152 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
1155 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1156 struct kvm_s390_irq irq = {0};
1158 irq.type = inti->type;
1159 switch (inti->type) {
1160 case KVM_S390_INT_PFAULT_INIT:
1161 case KVM_S390_INT_PFAULT_DONE:
1162 case KVM_S390_INT_VIRTIO:
1163 case KVM_S390_INT_SERVICE:
1164 irq.u.ext = inti->ext;
1166 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1167 irq.u.io = inti->io;
1170 irq.u.mchk = inti->mchk;
1176 if (copy_to_user(uptr, &irq, sizeof(irq)))
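/*
 * Copy all pending floating interrupts into the userspace buffer.
 * Returns the number of interrupts copied, or a negative error code,
 * e.g. when the supplied buffer is too small.
 */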
1182 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
1184 struct kvm_s390_interrupt_info *inti;
1185 struct kvm_s390_float_interrupt *fi;
1189 mutex_lock(&kvm->lock);
1190 fi = &kvm->arch.float_int;
1191 spin_lock(&fi->lock);
1193 list_for_each_entry(inti, &fi->list, list) {
1194 if (len < sizeof(struct kvm_s390_irq)) {
1195 /* signal userspace to try again */
1199 ret = copy_irq_to_user(inti, buf);
1202 buf += sizeof(struct kvm_s390_irq);
1203 len -= sizeof(struct kvm_s390_irq);
1207 spin_unlock(&fi->lock);
1208 mutex_unlock(&kvm->lock);
1210 return ret < 0 ? ret : n;
1213 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1217 switch (attr->group) {
1218 case KVM_DEV_FLIC_GET_ALL_IRQS:
1219 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
1229 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1232 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1233 void *target = NULL;
1234 void __user *source;
1237 if (get_user(inti->type, (u64 __user *)addr))
1240 switch (inti->type) {
1241 case KVM_S390_INT_PFAULT_INIT:
1242 case KVM_S390_INT_PFAULT_DONE:
1243 case KVM_S390_INT_VIRTIO:
1244 case KVM_S390_INT_SERVICE:
1245 target = (void *) &inti->ext;
1246 source = &uptr->u.ext;
1247 size = sizeof(inti->ext);
1249 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1250 target = (void *) &inti->io;
1251 source = &uptr->u.io;
1252 size = sizeof(inti->io);
1255 target = (void *) &inti->mchk;
1256 source = &uptr->u.mchk;
1257 size = sizeof(inti->mchk);
1263 if (copy_from_user(target, source, size))
1269 static int enqueue_floating_irq(struct kvm_device *dev,
1270 struct kvm_device_attr *attr)
1272 struct kvm_s390_interrupt_info *inti = NULL;
1274 int len = attr->attr;
1276 if (len % sizeof(struct kvm_s390_irq) != 0)
1278 else if (len > KVM_S390_FLIC_MAX_BUFFER)
1281 while (len >= sizeof(struct kvm_s390_irq)) {
1282 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1286 r = copy_irq_from_user(inti, attr->addr);
1291 r = __inject_vm(dev->kvm, inti);
1296 len -= sizeof(struct kvm_s390_irq);
1297 attr->addr += sizeof(struct kvm_s390_irq);
1303 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1305 if (id >= MAX_S390_IO_ADAPTERS)
1307 return kvm->arch.adapters[id];
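/*
 * Register a new I/O adapter with the floating interrupt controller;
 * id, ISC, maskability and byte-swap setting are taken from userspace.
 */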
1310 static int register_io_adapter(struct kvm_device *dev,
1311 struct kvm_device_attr *attr)
1313 struct s390_io_adapter *adapter;
1314 struct kvm_s390_io_adapter adapter_info;
1316 if (copy_from_user(&adapter_info,
1317 (void __user *)attr->addr, sizeof(adapter_info)))
1320 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1321 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1324 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1328 INIT_LIST_HEAD(&adapter->maps);
1329 init_rwsem(&adapter->maps_lock);
1330 atomic_set(&adapter->nr_maps, 0);
1331 adapter->id = adapter_info.id;
1332 adapter->isc = adapter_info.isc;
1333 adapter->maskable = adapter_info.maskable;
1334 adapter->masked = false;
1335 adapter->swap = adapter_info.swap;
1336 dev->kvm->arch.adapters[adapter->id] = adapter;
1341 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1344 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1346 if (!adapter || !adapter->maskable)
1348 ret = adapter->masked;
1349 adapter->masked = masked;
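/*
 * Pin the guest page backing an adapter indicator area so that adapter
 * interrupts can set bits in it later without faulting.
 */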
1353 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1355 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1356 struct s390_map_info *map;
1359 if (!adapter || !addr)
1362 map = kzalloc(sizeof(*map), GFP_KERNEL);
1367 INIT_LIST_HEAD(&map->list);
1368 map->guest_addr = addr;
1369 map->addr = gmap_translate(kvm->arch.gmap, addr);
1370 if (map->addr == -EFAULT) {
1374 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1378 down_write(&adapter->maps_lock);
1379 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1380 list_add_tail(&map->list, &adapter->maps);
1383 put_page(map->page);
1386 up_write(&adapter->maps_lock);
1393 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1395 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1396 struct s390_map_info *map, *tmp;
1399 if (!adapter || !addr)
1402 down_write(&adapter->maps_lock);
1403 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1404 if (map->guest_addr == addr) {
1406 atomic_dec(&adapter->nr_maps);
1407 list_del(&map->list);
1408 put_page(map->page);
1413 up_write(&adapter->maps_lock);
1415 return found ? 0 : -EINVAL;
1418 void kvm_s390_destroy_adapters(struct kvm *kvm)
1421 struct s390_map_info *map, *tmp;
1423 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1424 if (!kvm->arch.adapters[i])
1426 list_for_each_entry_safe(map, tmp,
1427 &kvm->arch.adapters[i]->maps, list) {
1428 list_del(&map->list);
1429 put_page(map->page);
1432 kfree(kvm->arch.adapters[i]);
1436 static int modify_io_adapter(struct kvm_device *dev,
1437 struct kvm_device_attr *attr)
1439 struct kvm_s390_io_adapter_req req;
1440 struct s390_io_adapter *adapter;
1443 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1446 adapter = get_io_adapter(dev->kvm, req.id);
1450 case KVM_S390_IO_ADAPTER_MASK:
1451 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1455 case KVM_S390_IO_ADAPTER_MAP:
1456 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1458 case KVM_S390_IO_ADAPTER_UNMAP:
1459 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1468 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1472 struct kvm_vcpu *vcpu;
1474 switch (attr->group) {
1475 case KVM_DEV_FLIC_ENQUEUE:
1476 r = enqueue_floating_irq(dev, attr);
1478 case KVM_DEV_FLIC_CLEAR_IRQS:
1479 kvm_s390_clear_float_irqs(dev->kvm);
1481 case KVM_DEV_FLIC_APF_ENABLE:
1482 dev->kvm->arch.gmap->pfault_enabled = 1;
1484 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1485 dev->kvm->arch.gmap->pfault_enabled = 0;
1487 * Make sure no async faults are in transition when
1488 * clearing the queues. So we don't need to worry
1489 * about late coming workers.
1491 synchronize_srcu(&dev->kvm->srcu);
1492 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1493 kvm_clear_async_pf_completion_queue(vcpu);
1495 case KVM_DEV_FLIC_ADAPTER_REGISTER:
1496 r = register_io_adapter(dev, attr);
1498 case KVM_DEV_FLIC_ADAPTER_MODIFY:
1499 r = modify_io_adapter(dev, attr);
1508 static int flic_create(struct kvm_device *dev, u32 type)
1512 if (dev->kvm->arch.flic)
1514 dev->kvm->arch.flic = dev;
1518 static void flic_destroy(struct kvm_device *dev)
1520 dev->kvm->arch.flic = NULL;
1524 /* s390 floating irq controller (flic) */
1525 struct kvm_device_ops kvm_flic_ops = {
1527 .get_attr = flic_get_attr,
1528 .set_attr = flic_set_attr,
1529 .create = flic_create,
1530 .destroy = flic_destroy,
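/*
 * Translate an indicator bit number into the bit index within the
 * mapped page, taking the adapter's bit-ordering (swap) into account.
 */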
1533 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1537 bit = bit_nr + (addr % PAGE_SIZE) * 8;
1539 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1542 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1545 struct s390_map_info *map;
1550 list_for_each_entry(map, &adapter->maps, list) {
1551 if (map->guest_addr == addr)
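/*
 * Set the adapter-local and summary indicator bits in guest memory.
 * Returns 1 if the summary bit was newly set (an interrupt needs to be
 * injected), 0 if it was already pending.
 */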
1557 static int adapter_indicators_set(struct kvm *kvm,
1558 struct s390_io_adapter *adapter,
1559 struct kvm_s390_adapter_int *adapter_int)
1562 int summary_set, idx;
1563 struct s390_map_info *info;
1566 info = get_map_info(adapter, adapter_int->ind_addr);
1569 map = page_address(info->page);
1570 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
1572 idx = srcu_read_lock(&kvm->srcu);
1573 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1574 set_page_dirty_lock(info->page);
1575 info = get_map_info(adapter, adapter_int->summary_addr);
1577 srcu_read_unlock(&kvm->srcu, idx);
1580 map = page_address(info->page);
1581 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
1583 summary_set = test_and_set_bit(bit, map);
1584 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1585 set_page_dirty_lock(info->page);
1586 srcu_read_unlock(&kvm->srcu, idx);
1587 return summary_set ? 0 : 1;
1591 * < 0 - not injected due to error
1592 * = 0 - coalesced, summary indicator already active
1593 * > 0 - injected interrupt
1595 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1596 struct kvm *kvm, int irq_source_id, int level,
1600 struct s390_io_adapter *adapter;
1602 /* We're only interested in the 0->1 transition. */
1605 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1608 down_read(&adapter->maps_lock);
1609 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1610 up_read(&adapter->maps_lock);
1611 if ((ret > 0) && !adapter->masked) {
1612 struct kvm_s390_interrupt s390int = {
1613 .type = KVM_S390_INT_IO(1, 0, 0, 0),
1615 .parm64 = (adapter->isc << 27) | 0x80000000,
1617 ret = kvm_s390_inject_vm(kvm, &s390int);
1624 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1625 const struct kvm_irq_routing_entry *ue)
1630 case KVM_IRQ_ROUTING_S390_ADAPTER:
1631 e->set = set_adapter_int;
1632 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1633 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1634 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1635 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1636 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1646 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1647 int irq_source_id, int level, bool line_status)