/*
 * in-kernel handling for sie intercepts
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
14 #include <linux/kvm_host.h>
15 #include <linux/errno.h>
16 #include <linux/pagemap.h>
18 #include <asm/kvm_host.h>
24 static int handle_lctlg(struct kvm_vcpu *vcpu)
26 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
27 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
28 int base2 = vcpu->arch.sie_block->ipb >> 28;
29 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
30 ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
34 vcpu->stat.instruction_lctlg++;
35 if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
40 useraddr += vcpu->run->s.regs.gprs[base2];
43 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
47 VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
49 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
52 rc = get_guest_u64(vcpu, useraddr,
53 &vcpu->arch.sie_block->gcr[reg]);
55 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
66 static int handle_lctl(struct kvm_vcpu *vcpu)
68 int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
69 int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
70 int base2 = vcpu->arch.sie_block->ipb >> 28;
71 int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
76 vcpu->stat.instruction_lctl++;
80 useraddr += vcpu->run->s.regs.gprs[base2];
83 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
85 VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
87 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
91 rc = get_guest_u32(vcpu, useraddr, &val);
93 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
96 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
97 vcpu->arch.sie_block->gcr[reg] |= val;
101 reg = (reg + 1) % 16;
106 static intercept_handler_t instruction_handlers[256] = {
107 [0x01] = kvm_s390_handle_01,
108 [0x83] = kvm_s390_handle_diag,
109 [0xae] = kvm_s390_handle_sigp,
110 [0xb2] = kvm_s390_handle_b2,
111 [0xb7] = handle_lctl,
112 [0xe5] = kvm_s390_handle_e5,
113 [0xeb] = handle_lctlg,
116 static int handle_noop(struct kvm_vcpu *vcpu)
118 switch (vcpu->arch.sie_block->icptcode) {
120 vcpu->stat.exit_null++;
123 vcpu->stat.exit_external_request++;
126 vcpu->stat.exit_external_interrupt++;
134 static int handle_stop(struct kvm_vcpu *vcpu)
138 vcpu->stat.exit_stop_request++;
139 spin_lock_bh(&vcpu->arch.local_int.lock);
141 if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
142 vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
143 rc = SIE_INTERCEPT_RERUNVCPU;
144 vcpu->run->exit_reason = KVM_EXIT_INTR;
147 if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
148 atomic_set_mask(CPUSTAT_STOPPED,
149 &vcpu->arch.sie_block->cpuflags);
150 vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
151 VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
155 if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
156 vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
157 /* store status must be called unlocked. Since local_int.lock
158 * only protects local_int.* and not guest memory we can give
159 * up the lock here */
160 spin_unlock_bh(&vcpu->arch.local_int.lock);
161 rc = kvm_s390_vcpu_store_status(vcpu,
162 KVM_S390_STORE_STATUS_NOADDR);
166 spin_unlock_bh(&vcpu->arch.local_int.lock);
170 static int handle_validity(struct kvm_vcpu *vcpu)
172 unsigned long vmaddr;
173 int viwhy = vcpu->arch.sie_block->ipb >> 16;
176 vcpu->stat.exit_validity++;
177 trace_kvm_s390_intercept_validity(vcpu, viwhy);
179 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
181 if (IS_ERR_VALUE(vmaddr)) {
185 rc = fault_in_pages_writeable((char __user *) vmaddr,
188 /* user will receive sigsegv, exit to user */
192 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
194 if (IS_ERR_VALUE(vmaddr)) {
198 rc = fault_in_pages_writeable((char __user *) vmaddr,
201 /* user will receive sigsegv, exit to user */
210 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
215 static int handle_instruction(struct kvm_vcpu *vcpu)
217 intercept_handler_t handler;
219 vcpu->stat.exit_instruction++;
220 trace_kvm_s390_intercept_instruction(vcpu,
221 vcpu->arch.sie_block->ipa,
222 vcpu->arch.sie_block->ipb);
223 handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
225 return handler(vcpu);
229 static int handle_prog(struct kvm_vcpu *vcpu)
231 vcpu->stat.exit_program_interruption++;
232 trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
233 return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
236 static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
240 vcpu->stat.exit_instr_and_program++;
241 rc = handle_instruction(vcpu);
242 rc2 = handle_prog(vcpu);
244 if (rc == -EOPNOTSUPP)
245 vcpu->arch.sie_block->icptcode = 0x04;
251 static const intercept_handler_t intercept_funcs[] = {
252 [0x00 >> 2] = handle_noop,
253 [0x04 >> 2] = handle_instruction,
254 [0x08 >> 2] = handle_prog,
255 [0x0C >> 2] = handle_instruction_and_prog,
256 [0x10 >> 2] = handle_noop,
257 [0x14 >> 2] = handle_noop,
258 [0x1C >> 2] = kvm_s390_handle_wait,
259 [0x20 >> 2] = handle_validity,
260 [0x28 >> 2] = handle_stop,
263 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
265 intercept_handler_t func;
266 u8 code = vcpu->arch.sie_block->icptcode;
268 if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
270 func = intercept_funcs[code >> 2];