2 * handling interprocessor communication
4 * Copyright IBM Corp. 2008, 2013
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Christian Ehrhardt <ehrhardt@de.ibm.com>
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
/*
 * Handle the SIGP SENSE order: report the destination VCPU's status.
 *
 * NOTE(review): the embedded original line numbers jump (23, 26, 30, ...),
 * so declarations, braces and the else-branch are missing from this
 * excerpt — compare against the upstream file before relying on it.
 *
 * If the destination has neither an external call pending nor is stopped,
 * the order is accepted (SIGP_CC_ORDER_CODE_ACCEPTED).  Otherwise the low
 * 32 bits of *reg are replaced with status bits and SIGP_CC_STATUS_STORED
 * is returned (presumably; the branch structure is partly cut here).
 */
23 static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
26 	struct kvm_s390_local_interrupt *li;
30 	li = &dst_vcpu->arch.local_int;
32 	cpuflags = atomic_read(li->cpuflags);
33 	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
34 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	/* status-stored path: keep the high word, set status bits below */
36 		*reg &= 0xffffffff00000000UL;
37 		if (cpuflags & CPUSTAT_ECALL_PEND)
38 			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
39 		if (cpuflags & CPUSTAT_STOPPED)
40 			*reg |= SIGP_STATUS_STOPPED;
41 		rc = SIGP_CC_STATUS_STORED;
44 	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
/*
 * Inject an EMERGENCY SIGNAL external interrupt into dst_vcpu, tagged
 * with the sending CPU's id.  Returns SIGP_CC_ORDER_CODE_ACCEPTED on
 * success, otherwise the (negative) error from kvm_s390_inject_vcpu().
 *
 * NOTE(review): original line numbers jump here as well — the rc
 * declaration and some braces are missing from this excerpt.
 */
49 static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
50 				   struct kvm_vcpu *dst_vcpu)
52 	struct kvm_s390_irq irq = {
53 		.type = KVM_S390_INT_EMERGENCY,
54 		.u.emerg.code = vcpu->vcpu_id,
58 	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
60 		VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
63 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
/*
 * SIGP EMERGENCY SIGNAL order: unconditional variant — simply forwards
 * to __inject_sigp_emergency() (contrast with the conditional variant
 * below, which checks the destination's state first).
 */
66 static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
68 	return __inject_sigp_emergency(vcpu, dst_vcpu);
/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: inject the emergency signal only
 * when the destination VCPU is in a state that warrants it; otherwise
 * store SIGP_STATUS_INCORRECT_STATE in the low word of *reg and return
 * SIGP_CC_STATUS_STORED.
 *
 * The condition reads the destination's PSW, cpuflags and the primary/
 * secondary ASNs from control registers 4 and 3.  NOTE(review): the
 * parameter list (asn, reg) and several declarations fall in the gaps of
 * this excerpt — confirm against upstream.
 */
71 static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
72 					struct kvm_vcpu *dst_vcpu,
75 	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
80 	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
81 	psw = &dst_vcpu->arch.sie_block->gpsw;
82 	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff; /* Primary ASN */
83 	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff; /* Secondary ASN */
85 	/* Inject the emergency signal? */
86 	if (!(flags & CPUSTAT_STOPPED)
87 	    || (psw->mask & psw_int_mask) != psw_int_mask
88 	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
89 	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
90 		return __inject_sigp_emergency(vcpu, dst_vcpu);
	/* otherwise: report "incorrect state" to the caller's status register */
92 		*reg &= 0xffffffff00000000UL;
93 		*reg |= SIGP_STATUS_INCORRECT_STATE;
94 		return SIGP_CC_STATUS_STORED;
/*
 * SIGP EXTERNAL CALL order: inject an external-call interrupt into
 * dst_vcpu carrying the sender's CPU id.  Returns
 * SIGP_CC_ORDER_CODE_ACCEPTED on success or the error from
 * kvm_s390_inject_vcpu().  Mirrors __inject_sigp_emergency() above.
 *
 * NOTE(review): rc declaration and braces are missing in this excerpt.
 */
98 static int __sigp_external_call(struct kvm_vcpu *vcpu,
99 				struct kvm_vcpu *dst_vcpu)
101 	struct kvm_s390_irq irq = {
102 		.type = KVM_S390_INT_EXTERNAL_CALL,
103 		.u.extcall.code = vcpu->vcpu_id,
107 	rc = kvm_s390_inject_vcpu(dst_vcpu, &irq);
109 		VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
112 	return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
/*
 * Queue a SIGP STOP (optionally with store-status) against dst_vcpu.
 *
 * Under the destination's local-interrupt lock:
 *  - if ACTION_STOP_ON_STOP is already pending, another STOP is in
 *    flight (the early-out code falls in a gap of this excerpt);
 *  - if the CPU is already stopped, the visible branch only checks the
 *    ACTION_STORE_ON_STOP bit — its body is also in a gap;
 *  - otherwise the stop IRQ is marked pending, the requested action bits
 *    are recorded, the stop-interrupt cpuflag is raised and the
 *    destination is woken.
 *
 * Returns SIGP_CC_ORDER_CODE_ACCEPTED unless an earlier (hidden) branch
 * overwrote rc — TODO confirm against upstream.
 */
115 static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
117 	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
118 	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
120 	spin_lock(&li->lock);
121 	if (li->action_bits & ACTION_STOP_ON_STOP) {
122 		/* another SIGP STOP is pending */
126 	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
127 		if ((action & ACTION_STORE_ON_STOP) != 0)
131 	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
132 	li->action_bits |= action;
133 	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
134 	kvm_s390_vcpu_wakeup(dst_vcpu);
136 	spin_unlock(&li->lock);
/*
 * SIGP STOP order: request a plain stop (no status store) on the
 * destination VCPU and trace the event.
 */
141 static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
145 	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP);
146 	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);
/*
 * SIGP STOP AND STORE STATUS: request stop + status store on the
 * destination VCPU.  -ESHUTDOWN from __inject_sigp_stop() signals the
 * CPU was already stopped; in that case the status is stored here,
 * outside any spinlock (see comment below).
 *
 * NOTE(review): the reg parameter appears unused in the visible lines,
 * and the original line numbers jump — verify against upstream.
 */
151 static int __sigp_stop_and_store_status(struct kvm_vcpu *vcpu,
152 					struct kvm_vcpu *dst_vcpu, u64 *reg)
156 	rc = __inject_sigp_stop(dst_vcpu, ACTION_STOP_ON_STOP |
157 					      ACTION_STORE_ON_STOP);
158 	VCPU_EVENT(vcpu, 4, "sent sigp stop and store status to cpu %x",
161 	if (rc == -ESHUTDOWN) {
162 		/* If the CPU has already been stopped, we still have
163 		 * to save the status when doing stop-and-store. This
164 		 * has to be done after unlocking all spinlocks. */
165 		rc = kvm_s390_store_status_unloaded(dst_vcpu,
166 						    KVM_S390_STORE_STATUS_NOADDR);
/*
 * SIGP SET ARCHITECTURE: switch architecture mode.  The low byte of
 * the parameter selects the mode; one (hidden) case yields
 * SIGP_CC_NOT_OPERATIONAL, another invalidates every VCPU's pfault
 * token and clears its async-pf completion queue before accepting the
 * order.  NOTE(review): the case labels themselves fall in the gaps of
 * this excerpt — confirm which parameter values map to which branch.
 */
172 static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
178 	switch (parameter & 0xff) {
180 		rc = SIGP_CC_NOT_OPERATIONAL;
184 		kvm_for_each_vcpu(i, v, vcpu->kvm) {
185 			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
186 			kvm_clear_async_pf_completion_queue(v);
189 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
/*
 * SIGP SET PREFIX: install a new prefix address on the destination VCPU.
 *
 * Steps visible here:
 *  1. mask the address to the architected 8k-aligned prefix field;
 *  2. reject (SIGP_STATUS_INVALID_PARAMETER / CC status-stored) if the
 *     page is not backed by guest memory;
 *  3. under the local-interrupt lock, require the destination to be
 *     stopped, else report SIGP_STATUS_INCORRECT_STATE;
 *  4. queue an IRQ_PEND_SET_PREFIX interrupt with the new address and
 *     wake the destination.
 *
 * NOTE(review): the early-out/unlock flow between steps 3 and 4 sits in
 * a gap of this excerpt — verify lock/return paths against upstream.
 */
197 static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
198 			     u32 address, u64 *reg)
200 	struct kvm_s390_local_interrupt *li;
203 	li = &dst_vcpu->arch.local_int;
206 	 * Make sure the new value is valid memory. We only need to check the
207 	 * first page, since address is 8k aligned and memory pieces are always
208 	 * at least 1MB aligned and have at least a size of 1MB.
210 	address &= 0x7fffe000u;
211 	if (kvm_is_error_gpa(vcpu->kvm, address)) {
212 		*reg &= 0xffffffff00000000UL;
213 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
214 		return SIGP_CC_STATUS_STORED;
217 	spin_lock(&li->lock);
218 	/* cpu must be in stopped state */
219 	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
220 		*reg &= 0xffffffff00000000UL;
221 		*reg |= SIGP_STATUS_INCORRECT_STATE;
222 		rc = SIGP_CC_STATUS_STORED;
226 	li->irq.prefix.address = address;
227 	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
228 	kvm_s390_vcpu_wakeup(dst_vcpu);
229 	rc = SIGP_CC_ORDER_CODE_ACCEPTED;
231 	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
234 	spin_unlock(&li->lock);
/*
 * SIGP STORE STATUS AT ADDRESS: store the destination VCPU's status at
 * a guest-supplied address.
 *
 * The destination must be stopped (its cpuflags are sampled under the
 * local-interrupt lock); otherwise SIGP_STATUS_INCORRECT_STATE is
 * stored in *reg and CC status-stored returned.  A failing
 * kvm_s390_store_status_unloaded() is reported as
 * SIGP_STATUS_INVALID_PARAMETER.  NOTE(review): the addr parameter
 * declaration and the rc-check line are in the gaps of this excerpt.
 */
238 static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
239 				       struct kvm_vcpu *dst_vcpu,
245 	spin_lock(&dst_vcpu->arch.local_int.lock);
246 	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
247 	spin_unlock(&dst_vcpu->arch.local_int.lock);
248 	if (!(flags & CPUSTAT_STOPPED)) {
249 		*reg &= 0xffffffff00000000UL;
250 		*reg |= SIGP_STATUS_INCORRECT_STATE;
251 		return SIGP_CC_STATUS_STORED;
255 	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
257 		*reg &= 0xffffffff00000000UL;
258 		*reg |= SIGP_STATUS_INVALID_PARAMETER;
259 		rc = SIGP_CC_STATUS_STORED;
/*
 * SIGP SENSE RUNNING STATUS: report whether the destination VCPU is
 * currently running.  Running -> order accepted; not running ->
 * SIGP_STATUS_NOT_RUNNING is stored in the low word of *reg with CC
 * status-stored.  NOTE(review): the else keyword and braces fall in
 * the gaps of this excerpt.
 */
264 static int __sigp_sense_running(struct kvm_vcpu *vcpu,
265 				struct kvm_vcpu *dst_vcpu, u64 *reg)
267 	struct kvm_s390_local_interrupt *li;
270 	li = &dst_vcpu->arch.local_int;
271 	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
273 		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	/* not-running path: report status to the caller's register */
276 		*reg &= 0xffffffff00000000UL;
277 		*reg |= SIGP_STATUS_NOT_RUNNING;
278 		rc = SIGP_CC_STATUS_STORED;
281 	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
282 		   dst_vcpu->vcpu_id, rc);
/*
 * Prepare SIGP (RE)START: these orders are handled in user space, so
 * the default return is -EOPNOTSUPP (which the caller translates into
 * a user-space exit).  Under the local-interrupt lock it checks whether
 * a STOP is still pending; the resulting busy-handling line falls in a
 * gap of this excerpt — verify against upstream.
 */
287 static int __prepare_sigp_re_start(struct kvm_vcpu *vcpu,
288 				   struct kvm_vcpu *dst_vcpu, u8 order_code)
290 	struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
291 	/* handle (RE)START in user space */
292 	int rc = -EOPNOTSUPP;
294 	spin_lock(&li->lock);
295 	if (li->action_bits & ACTION_STOP_ON_STOP)
297 	spin_unlock(&li->lock);
/*
 * Prepare SIGP (INITIAL) CPU RESET: delegated to user space; the return
 * statement (presumably -EOPNOTSUPP, matching the other __prepare_*
 * helpers) falls in a gap of this excerpt.
 */
302 static int __prepare_sigp_cpu_reset(struct kvm_vcpu *vcpu,
303 				    struct kvm_vcpu *dst_vcpu, u8 order_code)
305 	/* handle (INITIAL) CPU RESET in user space */
/*
 * Prepare an unknown SIGP order: delegated to user space; the return
 * statement (presumably -EOPNOTSUPP, matching the other __prepare_*
 * helpers) falls in a gap of this excerpt.
 */
309 static int __prepare_sigp_unknown(struct kvm_vcpu *vcpu,
310 				  struct kvm_vcpu *dst_vcpu)
312 	/* handle unknown orders in user space */
/*
 * Dispatch a SIGP order aimed at a specific destination CPU.
 *
 * Validates cpu_addr, looks up the destination VCPU (NOT OPERATIONAL if
 * out of range or absent), bumps the per-order statistics counter and
 * calls the matching __sigp_* / __prepare_sigp_* helper.  -EOPNOTSUPP
 * from a helper means the order must be completed in user space, which
 * is logged before returning.
 *
 * NOTE(review): several case labels (SIGP_SENSE, SIGP_STOP, SIGP_START,
 * SIGP_RESTART, SIGP_CPU_RESET, default) and the break statements fall
 * in the gaps of this excerpt — the counters incremented make the
 * intended labels evident, but verify against upstream.
 */
316 static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
317 			   u16 cpu_addr, u32 parameter, u64 *status_reg)
320 	struct kvm_vcpu *dst_vcpu;
322 	if (cpu_addr >= KVM_MAX_VCPUS)
323 		return SIGP_CC_NOT_OPERATIONAL;
325 	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
327 		return SIGP_CC_NOT_OPERATIONAL;
329 	switch (order_code) {
331 		vcpu->stat.instruction_sigp_sense++;
332 		rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
334 	case SIGP_EXTERNAL_CALL:
335 		vcpu->stat.instruction_sigp_external_call++;
336 		rc = __sigp_external_call(vcpu, dst_vcpu);
338 	case SIGP_EMERGENCY_SIGNAL:
339 		vcpu->stat.instruction_sigp_emergency++;
340 		rc = __sigp_emergency(vcpu, dst_vcpu);
343 		vcpu->stat.instruction_sigp_stop++;
344 		rc = __sigp_stop(vcpu, dst_vcpu);
346 	case SIGP_STOP_AND_STORE_STATUS:
347 		vcpu->stat.instruction_sigp_stop_store_status++;
348 		rc = __sigp_stop_and_store_status(vcpu, dst_vcpu, status_reg);
350 	case SIGP_STORE_STATUS_AT_ADDRESS:
351 		vcpu->stat.instruction_sigp_store_status++;
352 		rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
355 	case SIGP_SET_PREFIX:
356 		vcpu->stat.instruction_sigp_prefix++;
357 		rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
359 	case SIGP_COND_EMERGENCY_SIGNAL:
360 		vcpu->stat.instruction_sigp_cond_emergency++;
361 		rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
364 	case SIGP_SENSE_RUNNING:
365 		vcpu->stat.instruction_sigp_sense_running++;
366 		rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
369 		vcpu->stat.instruction_sigp_start++;
370 		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
373 		vcpu->stat.instruction_sigp_restart++;
374 		rc = __prepare_sigp_re_start(vcpu, dst_vcpu, order_code);
376 	case SIGP_INITIAL_CPU_RESET:
377 		vcpu->stat.instruction_sigp_init_cpu_reset++;
378 		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
381 		vcpu->stat.instruction_sigp_cpu_reset++;
382 		rc = __prepare_sigp_cpu_reset(vcpu, dst_vcpu, order_code);
385 		vcpu->stat.instruction_sigp_unknown++;
386 		rc = __prepare_sigp_unknown(vcpu, dst_vcpu);
389 	if (rc == -EOPNOTSUPP)
391 			   "sigp order %u -> cpu %x: handled in user space",
392 			   order_code, dst_vcpu->vcpu_id);
/*
 * Top-level SIGP instruction intercept handler.
 *
 * Decodes r1/r3 from the instruction (ipa), rejects problem-state
 * guests with a privileged-operation program interrupt, extracts the
 * order code via base+displacement, and reads the parameter from r1 or
 * r1+1 (the selecting condition — presumably r1 odd/even per the
 * architecture — falls in a gap of this excerpt).  SET ARCHITECTURE is
 * handled locally; every other order goes through handle_sigp_dst()
 * with &gprs[r1] as the status register.  The resulting condition code
 * is written back into the guest PSW.
 *
 * NOTE(review): the rc<0 early-return before setting the CC is missing
 * from this excerpt — verify against upstream.
 */
397 int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
399 	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
400 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
402 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
406 	/* sigp in userspace can exit */
407 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
408 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
410 	order_code = kvm_s390_get_base_disp_rs(vcpu);
413 		parameter = vcpu->run->s.regs.gprs[r1];
415 		parameter = vcpu->run->s.regs.gprs[r1 + 1];
417 	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
418 	switch (order_code) {
419 	case SIGP_SET_ARCHITECTURE:
420 		vcpu->stat.instruction_sigp_arch++;
421 		rc = __sigp_set_arch(vcpu, parameter);
424 		rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
426 				     &vcpu->run->s.regs.gprs[r1]);
432 	kvm_s390_set_psw_cc(vcpu, rc);
437 * Handle SIGP partial execution interception.
439 * This interception will occur at the source cpu when a source cpu sends an
440 * external call to a target cpu and the target cpu has the WAIT bit set in
441  * its cpuflags. Interception will occur after the interrupt indicator bits at
442 * the target cpu have been set. All error cases will lead to instruction
443 * interception, therefore nothing is to be checked or prepared.
/*
 * Handle SIGP partial-execution interception (see the block comment
 * above this function): for EXTERNAL CALL, the target's interrupt bits
 * are already set by hardware, so we only need to wake the destination
 * VCPU and report CC "order accepted".
 *
 * NOTE(review): this excerpt is truncated — the final return and the
 * non-external-call path (if any) are not visible.  The BUG_ON relies
 * on cpu_addr having been validated by hardware; verify upstream.
 */
445 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
447 	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
448 	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
449 	struct kvm_vcpu *dest_vcpu;
450 	u8 order_code = kvm_s390_get_base_disp_rs(vcpu);
452 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
454 	if (order_code == SIGP_EXTERNAL_CALL) {
455 		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
456 		BUG_ON(dest_vcpu == NULL);
458 		kvm_s390_vcpu_wakeup(dest_vcpu);
459 		kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);