/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	u64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	/* Compute the guest TOD epoch as the offset to the host TOD clock */
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

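/* Handle SPX (SET PREFIX) interception */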
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);
	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;
	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

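/* Handle STPX (STORE PREFIX) interception */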
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);
	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

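/* Handle STAP (STORE CPU ADDRESS) interception */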
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);
	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;
	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

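/* Handle ISKE, SSKE and RRBE: enable storage key handling and retry under SIE */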
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_rewind_psw(vcpu, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

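/* Handle TB (TEST BLOCK) interception */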
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

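/* Handle TPI (TEST PENDING INTERRUPTION) interception */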
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc, cc;
	u64 addr;

	rc = 0;
	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/* Store the two-word I/O interruption code into the provided area */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
			rc = -EFAULT;
	}
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (!rc)
		kfree(inti);
	else
		kvm_s390_reinject_io_int(vcpu->kvm, inti);
no_interrupt:
	/* Set condition code and we're done. */
	if (!rc)
		kvm_s390_set_psw_cc(vcpu, cc);
	return rc ? -EFAULT : 0;
}

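/* Handle TSCH (TEST SUBCHANNEL) interception */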
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
		kfree(inti);
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	return -EREMOTE;
}

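/*
 * Handle the channel I/O instructions: with CSS support enabled, TPI and
 * TSCH get special treatment here and everything else goes to userspace;
 * without CSS support, condition code 3 is set for the guest.
 */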
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

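/* Handle STFL (STORE FACILITY LIST) interception */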
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

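/* Handle LPSW (LOAD PSW) interception */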
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

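/* Handle LPSWE (LOAD PSW EXTENDED) interception */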
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

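/* Handle STIDP (STORE CPU ID) interception */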
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

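/*
 * Add KVM's own entry to the STSI 3.2.2 block, shifting entries reported
 * by other level 3 hypervisors up by one.
 */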
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}
	operand2 = kvm_s390_get_base_disp_s(vcpu);
	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

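/* Handle EPSW (EXTRACT PSW) interception */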
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

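/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */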
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;
	*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

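/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception for CMMA */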
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	kvm_s390_rewind_psw(vcpu, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			break;	/* already processed or invalid entry */
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_lctl++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_stctl++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_lctlg++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);
	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;

	vcpu->stat.instruction_stctg++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);
	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

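/* Handle TPROT (TEST PROTECTION) interception */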
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

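/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */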
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}