/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"
25 #define IOINT_SCHID_MASK 0x0000ffff
26 #define IOINT_SSID_MASK 0x00030000
27 #define IOINT_CSSID_MASK 0x03fc0000
28 #define IOINT_AI_MASK 0x04000000
29 #define PFAULT_INIT 0x0600
31 static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu);
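/*
 * Classic I/O interrupts encode cssid/ssid/schid in the type value itself
 * (see the IOINT_* masks above), while the special KVM-defined types all
 * have the 0xfffe0000 bits set; that is the distinction is_ioint() below
 * relies on.
 */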
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}
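/*
 * The psw_*_disabled() helpers test whether the corresponding interruption
 * class (external, I/O, machine check) is currently masked off in the
 * guest PSW.
 */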
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}
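/*
 * The interruption subclass (ISC) is extracted from the I/O interruption
 * word with the 0x38000000 mask; shifting 0x80 right by the ISC and moving
 * the result up by 24 bits yields the bit CR6 uses for that subclass.
 * For example, an interruption word of 0x18000000 carries ISC 3 and maps
 * to the CR6 bit 0x10000000.
 */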
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
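/*
 * An interrupt is deliverable when the guest PSW has the matching class
 * enabled and the relevant control register subclass bit is set: CR0 for
 * the external interrupt subclasses, CR6 (by ISC) for I/O, CR14 for
 * machine checks.  Program, stop, set-prefix and restart requests are
 * always deliverable.
 */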
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}
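/*
 * Idle bookkeeping: CPUSTAT_WAIT marks the vcpu as being in enabled wait,
 * and the bit in the floating-interrupt idle_mask lets __inject_vm() pick
 * an idle vcpu when a floating interrupt needs to be delivered.
 */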
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
151 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
153 atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
154 &vcpu->arch.sie_block->cpuflags);
155 vcpu->arch.sie_block->lctl = 0x0000;
156 vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
158 if (guestdbg_enabled(vcpu)) {
159 vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
160 LCTL_CR10 | LCTL_CR11);
161 vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}
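/*
 * When a pending interrupt cannot be delivered right away,
 * __set_intercept_indicator() arranges for an exit as soon as the guest
 * enables it again: either by raising a CPUSTAT_* request in the SIE
 * flags or by intercepting changes to the relevant control registers.
 */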
168 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
170 atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
173 static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
174 struct kvm_s390_interrupt_info *inti)
176 switch (inti->type) {
177 case KVM_S390_INT_EXTERNAL_CALL:
178 case KVM_S390_INT_EMERGENCY:
179 case KVM_S390_INT_SERVICE:
180 case KVM_S390_INT_PFAULT_INIT:
181 case KVM_S390_INT_PFAULT_DONE:
182 case KVM_S390_INT_VIRTIO:
183 case KVM_S390_INT_CLOCK_COMP:
184 case KVM_S390_INT_CPU_TIMER:
185 if (psw_extint_disabled(vcpu))
186 __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
188 vcpu->arch.sie_block->lctl |= LCTL_CR0;
190 case KVM_S390_SIGP_STOP:
191 __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
194 if (psw_mchk_disabled(vcpu))
195 vcpu->arch.sie_block->ictl |= ICTL_LPSW;
197 vcpu->arch.sie_block->lctl |= LCTL_CR14;
199 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
200 if (psw_ioint_disabled(vcpu))
201 __set_cpuflag(vcpu, CPUSTAT_IO_INT);
203 vcpu->arch.sie_block->lctl |= LCTL_CR6;
210 static u16 get_ilc(struct kvm_vcpu *vcpu)
212 const unsigned short table[] = { 2, 4, 4, 6 };
214 switch (vcpu->arch.sie_block->icptcode) {
220 /* last instruction only stored for these icptcodes */
221 return table[vcpu->arch.sie_block->ipa >> 14];
223 return vcpu->arch.sie_block->pgmilc;
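/*
 * Program interrupt delivery writes the exception-specific lowcore fields
 * for the given code, stores the instruction length code and interruption
 * code, saves the old PSW and loads the program-new PSW.
 */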
229 static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu,
230 struct kvm_s390_pgm_info *pgm_info)
233 u16 ilc = get_ilc(vcpu);
235 switch (pgm_info->code & ~PGM_PER) {
236 case PGM_AFX_TRANSLATION:
237 case PGM_ASX_TRANSLATION:
238 case PGM_EX_TRANSLATION:
239 case PGM_LFX_TRANSLATION:
240 case PGM_LSTE_SEQUENCE:
241 case PGM_LSX_TRANSLATION:
242 case PGM_LX_TRANSLATION:
243 case PGM_PRIMARY_AUTHORITY:
244 case PGM_SECONDARY_AUTHORITY:
245 case PGM_SPACE_SWITCH:
246 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
247 (u64 *)__LC_TRANS_EXC_CODE);
249 case PGM_ALEN_TRANSLATION:
250 case PGM_ALE_SEQUENCE:
251 case PGM_ASTE_INSTANCE:
252 case PGM_ASTE_SEQUENCE:
253 case PGM_ASTE_VALIDITY:
254 case PGM_EXTENDED_AUTHORITY:
255 rc = put_guest_lc(vcpu, pgm_info->exc_access_id,
256 (u8 *)__LC_EXC_ACCESS_ID);
259 case PGM_PAGE_TRANSLATION:
260 case PGM_REGION_FIRST_TRANS:
261 case PGM_REGION_SECOND_TRANS:
262 case PGM_REGION_THIRD_TRANS:
263 case PGM_SEGMENT_TRANSLATION:
264 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
265 (u64 *)__LC_TRANS_EXC_CODE);
266 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
267 (u8 *)__LC_EXC_ACCESS_ID);
268 rc |= put_guest_lc(vcpu, pgm_info->op_access_id,
269 (u8 *)__LC_OP_ACCESS_ID);
272 rc = put_guest_lc(vcpu, pgm_info->mon_class_nr,
273 (u16 *)__LC_MON_CLASS_NR);
274 rc |= put_guest_lc(vcpu, pgm_info->mon_code,
275 (u64 *)__LC_MON_CODE);
278 rc = put_guest_lc(vcpu, pgm_info->data_exc_code,
279 (u32 *)__LC_DATA_EXC_CODE);
282 rc = put_guest_lc(vcpu, pgm_info->trans_exc_code,
283 (u64 *)__LC_TRANS_EXC_CODE);
284 rc |= put_guest_lc(vcpu, pgm_info->exc_access_id,
285 (u8 *)__LC_EXC_ACCESS_ID);
289 if (pgm_info->code & PGM_PER) {
290 rc |= put_guest_lc(vcpu, pgm_info->per_code,
291 (u8 *) __LC_PER_CODE);
292 rc |= put_guest_lc(vcpu, pgm_info->per_atmid,
293 (u8 *)__LC_PER_ATMID);
294 rc |= put_guest_lc(vcpu, pgm_info->per_address,
295 (u64 *) __LC_PER_ADDRESS);
296 rc |= put_guest_lc(vcpu, pgm_info->per_access_id,
297 (u8 *) __LC_PER_ACCESS_ID);
300 rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
301 rc |= put_guest_lc(vcpu, pgm_info->code,
302 (u16 *)__LC_PGM_INT_CODE);
303 rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
304 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc;
}
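/*
 * Generic delivery: copy the interruption parameters for the given type
 * into the guest lowcore, save the current PSW as the old PSW and load the
 * matching new PSW.  Failures while accessing the lowcore are accumulated
 * in rc and reported to the caller.
 */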
311 static int __must_check __do_deliver_interrupt(struct kvm_vcpu *vcpu,
312 struct kvm_s390_interrupt_info *inti)
314 const unsigned short table[] = { 2, 4, 4, 6 };
317 switch (inti->type) {
318 case KVM_S390_INT_EMERGENCY:
319 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
320 vcpu->stat.deliver_emergency_signal++;
321 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
322 inti->emerg.code, 0);
323 rc = put_guest_lc(vcpu, 0x1201, (u16 *)__LC_EXT_INT_CODE);
324 rc |= put_guest_lc(vcpu, inti->emerg.code,
325 (u16 *)__LC_EXT_CPU_ADDR);
326 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
327 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
328 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
329 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
331 case KVM_S390_INT_EXTERNAL_CALL:
332 VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
333 vcpu->stat.deliver_external_call++;
334 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
335 inti->extcall.code, 0);
336 rc = put_guest_lc(vcpu, 0x1202, (u16 *)__LC_EXT_INT_CODE);
337 rc |= put_guest_lc(vcpu, inti->extcall.code,
338 (u16 *)__LC_EXT_CPU_ADDR);
339 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
340 &vcpu->arch.sie_block->gpsw,
342 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
343 &vcpu->arch.sie_block->gpsw,
346 case KVM_S390_INT_CLOCK_COMP:
347 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
348 inti->ext.ext_params, 0);
349 rc = deliver_ckc_interrupt(vcpu);
351 case KVM_S390_INT_CPU_TIMER:
352 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
353 inti->ext.ext_params, 0);
354 rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
355 (u16 *)__LC_EXT_INT_CODE);
356 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
357 &vcpu->arch.sie_block->gpsw,
359 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
360 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
361 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
362 (u32 *)__LC_EXT_PARAMS);
364 case KVM_S390_INT_SERVICE:
365 VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
366 inti->ext.ext_params);
367 vcpu->stat.deliver_service_signal++;
368 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
369 inti->ext.ext_params, 0);
370 rc = put_guest_lc(vcpu, 0x2401, (u16 *)__LC_EXT_INT_CODE);
371 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
372 &vcpu->arch.sie_block->gpsw,
374 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
375 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
376 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
377 (u32 *)__LC_EXT_PARAMS);
379 case KVM_S390_INT_PFAULT_INIT:
380 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
381 inti->ext.ext_params2);
382 rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
383 (u16 *) __LC_EXT_INT_CODE);
384 rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
385 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
386 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
387 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
388 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
389 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
390 (u64 *) __LC_EXT_PARAMS2);
392 case KVM_S390_INT_PFAULT_DONE:
393 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type, 0,
394 inti->ext.ext_params2);
395 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
396 rc |= put_guest_lc(vcpu, 0x0680, (u16 *)__LC_EXT_CPU_ADDR);
397 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
398 &vcpu->arch.sie_block->gpsw,
400 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
401 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
402 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
403 (u64 *)__LC_EXT_PARAMS2);
405 case KVM_S390_INT_VIRTIO:
406 VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
407 inti->ext.ext_params, inti->ext.ext_params2);
408 vcpu->stat.deliver_virtio_interrupt++;
409 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
410 inti->ext.ext_params,
411 inti->ext.ext_params2);
412 rc = put_guest_lc(vcpu, 0x2603, (u16 *)__LC_EXT_INT_CODE);
413 rc |= put_guest_lc(vcpu, 0x0d00, (u16 *)__LC_EXT_CPU_ADDR);
414 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
415 &vcpu->arch.sie_block->gpsw,
417 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
418 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
419 rc |= put_guest_lc(vcpu, inti->ext.ext_params,
420 (u32 *)__LC_EXT_PARAMS);
421 rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
422 (u64 *)__LC_EXT_PARAMS2);
424 case KVM_S390_SIGP_STOP:
425 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
426 vcpu->stat.deliver_stop_signal++;
427 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
429 __set_intercept_indicator(vcpu, inti);
432 case KVM_S390_SIGP_SET_PREFIX:
433 VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
434 inti->prefix.address);
435 vcpu->stat.deliver_prefix_signal++;
436 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
437 inti->prefix.address, 0);
438 kvm_s390_set_prefix(vcpu, inti->prefix.address);
441 case KVM_S390_RESTART:
442 VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
443 vcpu->stat.deliver_restart_signal++;
444 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
446 rc = write_guest_lc(vcpu,
447 offsetof(struct _lowcore, restart_old_psw),
448 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
449 rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
450 &vcpu->arch.sie_block->gpsw,
453 case KVM_S390_PROGRAM_INT:
454 VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
456 table[vcpu->arch.sie_block->ipa >> 14]);
457 vcpu->stat.deliver_program_int++;
458 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
460 rc = __deliver_prog_irq(vcpu, &inti->pgm);
464 VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
466 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
469 rc = kvm_s390_vcpu_store_status(vcpu,
470 KVM_S390_STORE_STATUS_PREFIXED);
471 rc |= put_guest_lc(vcpu, inti->mchk.mcic, (u64 *)__LC_MCCK_CODE);
472 rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
473 &vcpu->arch.sie_block->gpsw,
475 rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
476 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
479 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
481 __u32 param0 = ((__u32)inti->io.subchannel_id << 16) |
482 inti->io.subchannel_nr;
483 __u64 param1 = ((__u64)inti->io.io_int_parm << 32) |
484 inti->io.io_int_word;
485 VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
486 vcpu->stat.deliver_io_int++;
487 trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
489 rc = put_guest_lc(vcpu, inti->io.subchannel_id,
490 (u16 *)__LC_SUBCHANNEL_ID);
491 rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
492 (u16 *)__LC_SUBCHANNEL_NR);
493 rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
494 (u32 *)__LC_IO_INT_PARM);
495 rc |= put_guest_lc(vcpu, inti->io.io_int_word,
496 (u32 *)__LC_IO_INT_WORD);
497 rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
498 &vcpu->arch.sie_block->gpsw,
500 rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
501 &vcpu->arch.sie_block->gpsw,
512 static int __must_check deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
516 rc = put_guest_lc(vcpu, 0x1004, (u16 __user *)__LC_EXT_INT_CODE);
517 rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
518 &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
519 rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
520 &vcpu->arch.sie_block->gpsw,
525 /* Check whether SIGP interpretation facility has an external call pending */
526 int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
528 atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;
	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}
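/*
 * kvm_cpu_has_interrupt() reports whether anything is currently
 * deliverable: it scans the local and floating interrupt lists, then also
 * considers a pending clock-comparator timer and an external call left
 * pending by the SIGP interpretation facility.
 */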
539 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
541 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
542 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
543 struct kvm_s390_interrupt_info *inti;
546 if (atomic_read(&li->active)) {
547 spin_lock(&li->lock);
548 list_for_each_entry(inti, &li->list, list)
549 if (__interrupt_is_deliverable(vcpu, inti)) {
553 spin_unlock(&li->lock);
556 if ((!rc) && atomic_read(&fi->active)) {
557 spin_lock(&fi->lock);
558 list_for_each_entry(inti, &fi->list, list)
559 if (__interrupt_is_deliverable(vcpu, inti)) {
563 spin_unlock(&fi->lock);
566 if (!rc && kvm_cpu_has_pending_timer(vcpu))
569 if (!rc && kvm_s390_si_ext_call_pending(vcpu))
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}
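/*
 * Enabled wait: if nothing is deliverable, optionally arm an hrtimer for
 * the clock comparator and block the vcpu until it is woken up again.
 * A wait with all interruption classes disabled cannot make progress and
 * is reported back to userspace as -EOPNOTSUPP.
 */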
585 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
589 vcpu->stat.exit_wait_state++;
592 if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
595 if (psw_interrupts_disabled(vcpu)) {
596 VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
597 return -EOPNOTSUPP; /* disabled wait */
600 __set_cpu_idle(vcpu);
601 if (!ckc_interrupts_enabled(vcpu)) {
602 VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
606 now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
607 sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
609 VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
611 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
612 kvm_vcpu_block(vcpu);
613 __unset_cpu_idle(vcpu);
614 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
616 hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
620 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}
633 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
635 struct kvm_vcpu *vcpu;
637 vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
638 kvm_s390_vcpu_wakeup(vcpu);
640 return HRTIMER_NORESTART;
643 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
645 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
646 struct kvm_s390_interrupt_info *n, *inti = NULL;
648 spin_lock(&li->lock);
649 list_for_each_entry_safe(inti, n, &li->list, list) {
650 list_del(&inti->list);
653 atomic_set(&li->active, 0);
654 spin_unlock(&li->lock);
656 /* clear pending external calls set by sigp interpretation facility */
657 atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}
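/*
 * Called before (re)entering SIE: deliver every currently deliverable
 * local interrupt, then a pending clock comparator, then deliverable
 * floating interrupts; anything left pending only gets its intercept
 * indicator set so we exit again once the guest enables it.
 */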
662 int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
664 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
665 struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
666 struct kvm_s390_interrupt_info *n, *inti = NULL;
670 __reset_intercept_indicators(vcpu);
671 if (atomic_read(&li->active)) {
674 spin_lock(&li->lock);
675 list_for_each_entry_safe(inti, n, &li->list, list) {
676 if (__interrupt_is_deliverable(vcpu, inti)) {
677 list_del(&inti->list);
681 __set_intercept_indicator(vcpu, inti);
683 if (list_empty(&li->list))
684 atomic_set(&li->active, 0);
685 spin_unlock(&li->lock);
687 rc = __do_deliver_interrupt(vcpu, inti);
690 } while (!rc && deliver);
693 if (!rc && kvm_cpu_has_pending_timer(vcpu))
694 rc = deliver_ckc_interrupt(vcpu);
696 if (!rc && atomic_read(&fi->active)) {
699 spin_lock(&fi->lock);
700 list_for_each_entry_safe(inti, n, &fi->list, list) {
701 if (__interrupt_is_deliverable(vcpu, inti)) {
702 list_del(&inti->list);
707 __set_intercept_indicator(vcpu, inti);
709 if (list_empty(&fi->list))
710 atomic_set(&fi->active, 0);
711 spin_unlock(&fi->lock);
713 rc = __do_deliver_interrupt(vcpu, inti);
716 } while (!rc && deliver);
722 int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
724 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
725 struct kvm_s390_interrupt_info *inti;
727 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
731 inti->type = KVM_S390_PROGRAM_INT;
732 inti->pgm.code = code;
734 VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
735 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
736 spin_lock(&li->lock);
737 list_add(&inti->list, &li->list);
738 atomic_set(&li->active, 1);
739 BUG_ON(waitqueue_active(li->wq));
740 spin_unlock(&li->lock);
744 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
745 struct kvm_s390_pgm_info *pgm_info)
747 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
748 struct kvm_s390_interrupt_info *inti;
750 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
754 VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
756 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
757 pgm_info->code, 0, 1);
759 inti->type = KVM_S390_PROGRAM_INT;
760 memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
761 spin_lock(&li->lock);
762 list_add(&inti->list, &li->list);
763 atomic_set(&li->active, 1);
764 BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}
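/*
 * Dequeue an I/O interrupt from the floating list.  Callers pass either a
 * specific subchannel id (schid) or a CR6 ISC mask (cr6), never both; the
 * first matching entry is unlinked and returned.
 */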
769 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
772 struct kvm_s390_float_interrupt *fi;
773 struct kvm_s390_interrupt_info *inti, *iter;
	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
777 mutex_lock(&kvm->lock);
778 fi = &kvm->arch.float_int;
779 spin_lock(&fi->lock);
781 list_for_each_entry(iter, &fi->list, list) {
782 if (!is_ioint(iter->type))
785 ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
788 if (((schid & 0x00000000ffff0000) >> 16) !=
789 iter->io.subchannel_id)
791 if ((schid & 0x000000000000ffff) !=
792 iter->io.subchannel_nr)
799 list_del_init(&inti->list);
802 if (list_empty(&fi->list))
803 atomic_set(&fi->active, 0);
804 spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}
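/*
 * __inject_vm() queues a floating interrupt, keeping I/O interrupts sorted
 * by ISC, and then kicks a vcpu: preferably one that is idle, otherwise
 * the next one in a simple round-robin over the online vcpus.
 */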
809 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
811 struct kvm_s390_local_interrupt *li;
812 struct kvm_s390_float_interrupt *fi;
813 struct kvm_s390_interrupt_info *iter;
814 struct kvm_vcpu *dst_vcpu = NULL;
818 mutex_lock(&kvm->lock);
819 fi = &kvm->arch.float_int;
820 spin_lock(&fi->lock);
821 if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
826 if (!is_ioint(inti->type)) {
827 list_add_tail(&inti->list, &fi->list);
829 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
831 /* Keep I/O interrupts sorted in isc order. */
832 list_for_each_entry(iter, &fi->list, list) {
833 if (!is_ioint(iter->type))
835 if (int_word_to_isc_bits(iter->io.io_int_word)
840 list_add_tail(&inti->list, &iter->list);
842 atomic_set(&fi->active, 1);
843 sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
847 if (sigcpu == KVM_MAX_VCPUS)
848 sigcpu = fi->next_rr_cpu = 0;
849 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
851 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
852 li = &dst_vcpu->arch.local_int;
853 spin_lock(&li->lock);
854 switch (inti->type) {
856 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
858 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
859 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
862 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
865 spin_unlock(&li->lock);
866 kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
868 spin_unlock(&fi->lock);
869 mutex_unlock(&kvm->lock);
873 int kvm_s390_inject_vm(struct kvm *kvm,
874 struct kvm_s390_interrupt *s390int)
876 struct kvm_s390_interrupt_info *inti;
878 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
882 inti->type = s390int->type;
883 switch (inti->type) {
884 case KVM_S390_INT_VIRTIO:
885 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
886 s390int->parm, s390int->parm64);
887 inti->ext.ext_params = s390int->parm;
888 inti->ext.ext_params2 = s390int->parm64;
890 case KVM_S390_INT_SERVICE:
891 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
892 inti->ext.ext_params = s390int->parm;
894 case KVM_S390_INT_PFAULT_DONE:
895 inti->type = s390int->type;
896 inti->ext.ext_params2 = s390int->parm64;
899 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
901 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
902 inti->mchk.mcic = s390int->parm64;
904 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
905 if (inti->type & IOINT_AI_MASK)
906 VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
908 VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
909 s390int->type & IOINT_CSSID_MASK,
910 s390int->type & IOINT_SSID_MASK,
911 s390int->type & IOINT_SCHID_MASK);
912 inti->io.subchannel_id = s390int->parm >> 16;
913 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
914 inti->io.io_int_parm = s390int->parm64 >> 32;
915 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
921 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
924 return __inject_vm(kvm, inti);
927 void kvm_s390_reinject_io_int(struct kvm *kvm,
928 struct kvm_s390_interrupt_info *inti)
930 __inject_vm(kvm, inti);
933 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
934 struct kvm_s390_interrupt *s390int)
936 struct kvm_s390_local_interrupt *li;
937 struct kvm_s390_interrupt_info *inti;
939 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
943 switch (s390int->type) {
944 case KVM_S390_PROGRAM_INT:
945 if (s390int->parm & 0xffff0000) {
949 inti->type = s390int->type;
950 inti->pgm.code = s390int->parm;
951 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
954 case KVM_S390_SIGP_SET_PREFIX:
955 inti->prefix.address = s390int->parm;
956 inti->type = s390int->type;
957 VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
960 case KVM_S390_SIGP_STOP:
961 case KVM_S390_RESTART:
962 case KVM_S390_INT_CLOCK_COMP:
963 case KVM_S390_INT_CPU_TIMER:
964 VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
965 inti->type = s390int->type;
967 case KVM_S390_INT_EXTERNAL_CALL:
968 if (s390int->parm & 0xffff0000) {
972 VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
974 inti->type = s390int->type;
975 inti->extcall.code = s390int->parm;
977 case KVM_S390_INT_EMERGENCY:
978 if (s390int->parm & 0xffff0000) {
982 VCPU_EVENT(vcpu, 3, "inject: emergency %u\n", s390int->parm);
983 inti->type = s390int->type;
984 inti->emerg.code = s390int->parm;
987 VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
989 inti->type = s390int->type;
990 inti->mchk.mcic = s390int->parm64;
992 case KVM_S390_INT_PFAULT_INIT:
993 inti->type = s390int->type;
994 inti->ext.ext_params2 = s390int->parm64;
996 case KVM_S390_INT_VIRTIO:
997 case KVM_S390_INT_SERVICE:
998 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1003 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, s390int->type, s390int->parm,
1004 s390int->parm64, 2);
1006 li = &vcpu->arch.local_int;
1007 spin_lock(&li->lock);
1008 if (inti->type == KVM_S390_PROGRAM_INT)
1009 list_add(&inti->list, &li->list);
1011 list_add_tail(&inti->list, &li->list);
1012 atomic_set(&li->active, 1);
1013 if (inti->type == KVM_S390_SIGP_STOP)
1014 li->action_bits |= ACTION_STOP_ON_STOP;
1015 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1016 spin_unlock(&li->lock);
1017 kvm_s390_vcpu_wakeup(vcpu);
1021 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1023 struct kvm_s390_float_interrupt *fi;
1024 struct kvm_s390_interrupt_info *n, *inti = NULL;
1026 mutex_lock(&kvm->lock);
1027 fi = &kvm->arch.float_int;
1028 spin_lock(&fi->lock);
1029 list_for_each_entry_safe(inti, n, &fi->list, list) {
1030 list_del(&inti->list);
1034 atomic_set(&fi->active, 0);
1035 spin_unlock(&fi->lock);
1036 mutex_unlock(&kvm->lock);
1039 static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
1042 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1043 struct kvm_s390_irq irq = {0};
1045 irq.type = inti->type;
1046 switch (inti->type) {
1047 case KVM_S390_INT_PFAULT_INIT:
1048 case KVM_S390_INT_PFAULT_DONE:
1049 case KVM_S390_INT_VIRTIO:
1050 case KVM_S390_INT_SERVICE:
1051 irq.u.ext = inti->ext;
1053 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1054 irq.u.io = inti->io;
1057 irq.u.mchk = inti->mchk;
1063 if (copy_to_user(uptr, &irq, sizeof(irq)))
1069 static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
1071 struct kvm_s390_interrupt_info *inti;
1072 struct kvm_s390_float_interrupt *fi;
1076 mutex_lock(&kvm->lock);
1077 fi = &kvm->arch.float_int;
1078 spin_lock(&fi->lock);
1080 list_for_each_entry(inti, &fi->list, list) {
1081 if (len < sizeof(struct kvm_s390_irq)) {
1082 /* signal userspace to try again */
1086 ret = copy_irq_to_user(inti, buf);
1089 buf += sizeof(struct kvm_s390_irq);
1090 len -= sizeof(struct kvm_s390_irq);
1094 spin_unlock(&fi->lock);
1095 mutex_unlock(&kvm->lock);
1097 return ret < 0 ? ret : n;
1100 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1104 switch (attr->group) {
1105 case KVM_DEV_FLIC_GET_ALL_IRQS:
1106 r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
1116 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1119 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1120 void *target = NULL;
1121 void __user *source;
1124 if (get_user(inti->type, (u64 __user *)addr))
1127 switch (inti->type) {
1128 case KVM_S390_INT_PFAULT_INIT:
1129 case KVM_S390_INT_PFAULT_DONE:
1130 case KVM_S390_INT_VIRTIO:
1131 case KVM_S390_INT_SERVICE:
1132 target = (void *) &inti->ext;
1133 source = &uptr->u.ext;
1134 size = sizeof(inti->ext);
1136 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1137 target = (void *) &inti->io;
1138 source = &uptr->u.io;
1139 size = sizeof(inti->io);
1142 target = (void *) &inti->mchk;
1143 source = &uptr->u.mchk;
1144 size = sizeof(inti->mchk);
1150 if (copy_from_user(target, source, size))
1156 static int enqueue_floating_irq(struct kvm_device *dev,
1157 struct kvm_device_attr *attr)
1159 struct kvm_s390_interrupt_info *inti = NULL;
1161 int len = attr->attr;
1163 if (len % sizeof(struct kvm_s390_irq) != 0)
1165 else if (len > KVM_S390_FLIC_MAX_BUFFER)
1168 while (len >= sizeof(struct kvm_s390_irq)) {
1169 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1173 r = copy_irq_from_user(inti, attr->addr);
1178 r = __inject_vm(dev->kvm, inti);
1183 len -= sizeof(struct kvm_s390_irq);
1184 attr->addr += sizeof(struct kvm_s390_irq);
1190 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1192 if (id >= MAX_S390_IO_ADAPTERS)
1194 return kvm->arch.adapters[id];
1197 static int register_io_adapter(struct kvm_device *dev,
1198 struct kvm_device_attr *attr)
1200 struct s390_io_adapter *adapter;
1201 struct kvm_s390_io_adapter adapter_info;
1203 if (copy_from_user(&adapter_info,
1204 (void __user *)attr->addr, sizeof(adapter_info)))
1207 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1208 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1211 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1215 INIT_LIST_HEAD(&adapter->maps);
1216 init_rwsem(&adapter->maps_lock);
1217 atomic_set(&adapter->nr_maps, 0);
1218 adapter->id = adapter_info.id;
1219 adapter->isc = adapter_info.isc;
1220 adapter->maskable = adapter_info.maskable;
1221 adapter->masked = false;
1222 adapter->swap = adapter_info.swap;
1223 dev->kvm->arch.adapters[adapter->id] = adapter;
1228 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1231 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1233 if (!adapter || !adapter->maskable)
1235 ret = adapter->masked;
1236 adapter->masked = masked;
1240 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1242 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1243 struct s390_map_info *map;
1246 if (!adapter || !addr)
1249 map = kzalloc(sizeof(*map), GFP_KERNEL);
1254 INIT_LIST_HEAD(&map->list);
1255 map->guest_addr = addr;
1256 map->addr = gmap_translate(kvm->arch.gmap, addr);
1257 if (map->addr == -EFAULT) {
1261 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1265 down_write(&adapter->maps_lock);
1266 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1267 list_add_tail(&map->list, &adapter->maps);
1270 put_page(map->page);
1273 up_write(&adapter->maps_lock);
1280 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1282 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1283 struct s390_map_info *map, *tmp;
1286 if (!adapter || !addr)
1289 down_write(&adapter->maps_lock);
1290 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1291 if (map->guest_addr == addr) {
1293 atomic_dec(&adapter->nr_maps);
1294 list_del(&map->list);
1295 put_page(map->page);
1300 up_write(&adapter->maps_lock);
1302 return found ? 0 : -EINVAL;
1305 void kvm_s390_destroy_adapters(struct kvm *kvm)
1308 struct s390_map_info *map, *tmp;
1310 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1311 if (!kvm->arch.adapters[i])
1313 list_for_each_entry_safe(map, tmp,
1314 &kvm->arch.adapters[i]->maps, list) {
1315 list_del(&map->list);
1316 put_page(map->page);
1319 kfree(kvm->arch.adapters[i]);
1323 static int modify_io_adapter(struct kvm_device *dev,
1324 struct kvm_device_attr *attr)
1326 struct kvm_s390_io_adapter_req req;
1327 struct s390_io_adapter *adapter;
1330 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1333 adapter = get_io_adapter(dev->kvm, req.id);
1337 case KVM_S390_IO_ADAPTER_MASK:
1338 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1342 case KVM_S390_IO_ADAPTER_MAP:
1343 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1345 case KVM_S390_IO_ADAPTER_UNMAP:
1346 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1355 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1359 struct kvm_vcpu *vcpu;
1361 switch (attr->group) {
1362 case KVM_DEV_FLIC_ENQUEUE:
1363 r = enqueue_floating_irq(dev, attr);
1365 case KVM_DEV_FLIC_CLEAR_IRQS:
1366 kvm_s390_clear_float_irqs(dev->kvm);
1368 case KVM_DEV_FLIC_APF_ENABLE:
1369 dev->kvm->arch.gmap->pfault_enabled = 1;
1371 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1372 dev->kvm->arch.gmap->pfault_enabled = 0;
1374 * Make sure no async faults are in transition when
1375 * clearing the queues. So we don't need to worry
1376 * about late coming workers.
1378 synchronize_srcu(&dev->kvm->srcu);
1379 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1380 kvm_clear_async_pf_completion_queue(vcpu);
1382 case KVM_DEV_FLIC_ADAPTER_REGISTER:
1383 r = register_io_adapter(dev, attr);
1385 case KVM_DEV_FLIC_ADAPTER_MODIFY:
1386 r = modify_io_adapter(dev, attr);
1395 static int flic_create(struct kvm_device *dev, u32 type)
1399 if (dev->kvm->arch.flic)
1401 dev->kvm->arch.flic = dev;
1405 static void flic_destroy(struct kvm_device *dev)
1407 dev->kvm->arch.flic = NULL;
1411 /* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
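/*
 * Illustrative userspace sketch (not compiled here; the field usage is an
 * assumption derived from the attr groups handled above): create the flic
 * via KVM_CREATE_DEVICE with KVM_DEV_TYPE_FLIC, then feed it interrupts
 * through KVM_SET_DEVICE_ATTR on the returned device fd:
 *
 *	struct kvm_s390_irq irq = { .type = KVM_S390_INT_SERVICE };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = sizeof(irq),              // buffer length in bytes
 *		.addr  = (u64)(unsigned long)&irq,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * KVM_DEV_FLIC_CLEAR_IRQS, KVM_DEV_FLIC_APF_ENABLE and the adapter groups
 * are driven the same way through flic_set_attr() above.
 */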
1420 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1424 bit = bit_nr + (addr % PAGE_SIZE) * 8;
1426 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1429 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1432 struct s390_map_info *map;
1437 list_for_each_entry(map, &adapter->maps, list) {
1438 if (map->guest_addr == addr)
1444 static int adapter_indicators_set(struct kvm *kvm,
1445 struct s390_io_adapter *adapter,
1446 struct kvm_s390_adapter_int *adapter_int)
1449 int summary_set, idx;
1450 struct s390_map_info *info;
1453 info = get_map_info(adapter, adapter_int->ind_addr);
1456 map = page_address(info->page);
1457 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
1459 idx = srcu_read_lock(&kvm->srcu);
1460 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1461 set_page_dirty_lock(info->page);
1462 info = get_map_info(adapter, adapter_int->summary_addr);
1464 srcu_read_unlock(&kvm->srcu, idx);
1467 map = page_address(info->page);
1468 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
1470 summary_set = test_and_set_bit(bit, map);
1471 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1472 set_page_dirty_lock(info->page);
1473 srcu_read_unlock(&kvm->srcu, idx);
1474 return summary_set ? 0 : 1;
/*
 * Returns:
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
1482 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1483 struct kvm *kvm, int irq_source_id, int level,
1487 struct s390_io_adapter *adapter;
1489 /* We're only interested in the 0->1 transition. */
1492 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1495 down_read(&adapter->maps_lock);
1496 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1497 up_read(&adapter->maps_lock);
1498 if ((ret > 0) && !adapter->masked) {
1499 struct kvm_s390_interrupt s390int = {
1500 .type = KVM_S390_INT_IO(1, 0, 0, 0),
1502 .parm64 = (adapter->isc << 27) | 0x80000000,
1504 ret = kvm_s390_inject_vm(kvm, &s390int);
1511 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1512 const struct kvm_irq_routing_entry *ue)
1517 case KVM_IRQ_ROUTING_S390_ADAPTER:
1518 e->set = set_adapter_int;
1519 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1520 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1521 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1522 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1523 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1533 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1534 int irq_source_id, int level, bool line_status)