arch/s390/kvm/sigp.c
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

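/*
 * SIGP SENSE: report the status of the destination CPU. If the target
 * is neither stopped nor has an external call pending, the order is
 * simply accepted; otherwise the matching status bits are placed in the
 * low word of the caller's status register and "status stored" is
 * returned as condition code.
 */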
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
                        u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        int cpuflags;
        int rc;

        li = &dst_vcpu->arch.local_int;

        cpuflags = atomic_read(li->cpuflags);
        if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        else {
                *reg &= 0xffffffff00000000UL;
                if (cpuflags & CPUSTAT_ECALL_PEND)
                        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
                if (cpuflags & CPUSTAT_STOPPED)
                        *reg |= SIGP_STATUS_STOPPED;
                rc = SIGP_CC_STATUS_STORED;
        }

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", dst_vcpu->vcpu_id,
                   rc);
        return rc;
}

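/*
 * SIGP EMERGENCY SIGNAL: inject an emergency-signal external interrupt
 * into the destination vcpu, tagged with the sending CPU's address.
 */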
static int __sigp_emergency(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_EMERGENCY,
                .parm = vcpu->vcpu_id,
        };
        int rc = 0;

        rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
        if (!rc)
                VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x",
                           dst_vcpu->vcpu_id);

        return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

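/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: forward the order to
 * __sigp_emergency() only if one of the conditions checked below holds,
 * i.e. the target is not stopped, is not enabled for both I/O and
 * external interrupts, is in a wait state with a non-zero PSW address,
 * or is not waiting and its primary or secondary ASN matches the given
 * asn. Otherwise "incorrect state" is reported in the status register.
 */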
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu,
                                        struct kvm_vcpu *dst_vcpu,
                                        u16 asn, u64 *reg)
{
        const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
        u16 p_asn, s_asn;
        psw_t *psw;
        u32 flags;

        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
        psw = &dst_vcpu->arch.sie_block->gpsw;
        p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
        s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

        /* Deliver the emergency signal? */
        if (!(flags & CPUSTAT_STOPPED)
            || (psw->mask & psw_int_mask) != psw_int_mask
            || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
            || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
                return __sigp_emergency(vcpu, dst_vcpu);
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }
}

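/*
 * SIGP EXTERNAL CALL: inject an external-call interrupt into the
 * destination vcpu, carrying the sending CPU's address as parameter.
 */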
static int __sigp_external_call(struct kvm_vcpu *vcpu,
                                struct kvm_vcpu *dst_vcpu)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_EXTERNAL_CALL,
                .parm = vcpu->vcpu_id,
        };
        int rc;

        rc = kvm_s390_inject_vcpu(dst_vcpu, &s390int);
        if (!rc)
                VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x",
                           dst_vcpu->vcpu_id);

        return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
}

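/*
 * Queue a SIGP STOP interrupt on the destination vcpu's local interrupt
 * list, set CPUSTAT_STOP_INT and wake up the target. Returns
 * SIGP_CC_BUSY if another stop is already pending, and -ESHUTDOWN if
 * the vcpu is already stopped while a store-on-stop was requested, so
 * that the caller can still perform the status store.
 */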
static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
{
        struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP) {
                /* another SIGP STOP is pending */
                kfree(inti);
                rc = SIGP_CC_BUSY;
                goto out;
        }
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
                        rc = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        li->action_bits |= action;
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        kvm_s390_vcpu_wakeup(dst_vcpu);
out:
        spin_unlock(&li->lock);

        return rc;
}

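/*
 * SIGP STOP / STOP AND STORE STATUS: inject the stop request and, when
 * the target was already stopped and ACTION_STORE_ON_STOP was asked
 * for, perform the status store here, after all spinlocks have been
 * dropped.
 */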
static int __sigp_stop(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
                       int action)
{
        int rc;

        rc = __inject_sigp_stop(dst_vcpu, action);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", dst_vcpu->vcpu_id);

        if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                /* If the CPU has already been stopped, we still have
                 * to save the status when doing stop-and-store. This
                 * has to be done after unlocking all spinlocks. */
                rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
        }

        return rc;
}

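/*
 * SIGP SET ARCHITECTURE: parameter 0 is rejected as not operational;
 * parameters 1 and 2 invalidate all pfault tokens and drain each vcpu's
 * async page fault completion queue before accepting the order; any
 * other parameter results in -EOPNOTSUPP.
 */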
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;
        unsigned int i;
        struct kvm_vcpu *v;

        switch (parameter & 0xff) {
        case 0:
                rc = SIGP_CC_NOT_OPERATIONAL;
                break;
        case 1:
        case 2:
                kvm_for_each_vcpu(i, v, vcpu->kvm) {
                        v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
                        kvm_clear_async_pf_completion_queue(v);
                }

                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}

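/*
 * SIGP SET PREFIX: validate the new prefix area and queue a set-prefix
 * interrupt for the destination vcpu. The target must be in the stopped
 * state and the new prefix must name accessible guest memory, otherwise
 * "incorrect state" or "invalid parameter" is stored as status.
 */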
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
                             u32 address, u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        li = &dst_vcpu->arch.local_int;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        address &= 0x7fffe000u;
        if (kvm_is_error_gpa(vcpu->kvm, address)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return SIGP_CC_BUSY;

        spin_lock(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        kvm_s390_vcpu_wakeup(dst_vcpu);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", dst_vcpu->vcpu_id,
                   address);
out_li:
        spin_unlock(&li->lock);
        return rc;
}

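/*
 * SIGP STORE STATUS AT ADDRESS: the destination must be stopped; the
 * 512-byte aligned address is taken from the order parameter and the
 * status is written via kvm_s390_store_status_unloaded(). An
 * inaccessible address is reported back as "invalid parameter".
 */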
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
                                       struct kvm_vcpu *dst_vcpu,
                                       u32 addr, u64 *reg)
{
        int flags;
        int rc;

        spin_lock(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
        spin_unlock(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }

        addr &= 0x7ffffe00;
        rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
        if (rc == -EFAULT) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                rc = SIGP_CC_STATUS_STORED;
        }
        return rc;
}

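/*
 * SIGP SENSE RUNNING STATUS: accepted if the destination vcpu is
 * currently running, otherwise "not running" is reported in the status
 * register.
 */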
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
                                struct kvm_vcpu *dst_vcpu, u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        int rc;

        li = &dst_vcpu->arch.local_int;
        if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
                /* running */
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        } else {
                /* not running */
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_NOT_RUNNING;
                rc = SIGP_CC_STATUS_STORED;
        }

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x",
                   dst_vcpu->vcpu_id, rc);

        return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu)
{
        struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        spin_lock(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
        spin_unlock(&li->lock);

        return rc;
}

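/*
 * Dispatch all orders that address a specific destination CPU. The CPU
 * address is translated into a vcpu (not operational if there is none)
 * and the order code selects the handler. A non-negative return value
 * is a SIGP condition code; a negative value is an error the caller
 * passes on, e.g. -EOPNOTSUPP for orders that must be completed in
 * user space.
 */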
static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
                           u16 cpu_addr, u32 parameter, u64 *status_reg)
{
        int rc;
        struct kvm_vcpu *dst_vcpu;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, dst_vcpu, status_reg);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, dst_vcpu);
                break;
        case SIGP_EMERGENCY_SIGNAL:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, dst_vcpu);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_AND_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, dst_vcpu, ACTION_STORE_ON_STOP |
                                                 ACTION_STOP_ON_STOP);
                break;
        case SIGP_STORE_STATUS_AT_ADDRESS:
                rc = __sigp_store_status_at_addr(vcpu, dst_vcpu, parameter,
                                                 status_reg);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, dst_vcpu, parameter, status_reg);
                break;
        case SIGP_COND_EMERGENCY_SIGNAL:
                rc = __sigp_conditional_emergency(vcpu, dst_vcpu, parameter,
                                                  status_reg);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, dst_vcpu, status_reg);
                break;
        case SIGP_START:
                rc = sigp_check_callable(vcpu, dst_vcpu);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
                        rc = -EOPNOTSUPP;    /* Handle START in user space */
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                rc = sigp_check_callable(vcpu, dst_vcpu);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
                        VCPU_EVENT(vcpu, 4,
                                   "sigp restart %x to handle userspace",
                                   cpu_addr);
                        /* user space must know about restart */
                        rc = -EOPNOTSUPP;
                }
                break;
        default:
                rc = -EOPNOTSUPP;
        }

        return rc;
}

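/*
 * Intercept handler for the SIGP instruction. SIGP is an RS-format
 * instruction, so the R1 and R3 fields are taken from the IPA: the CPU
 * address of the target comes from gpr R3, the parameter from the odd
 * register of the even/odd pair designated by R1, the order code from
 * the second-operand address, and any stored status goes back into
 * gpr R1. SET ARCHITECTURE is handled without a destination vcpu, all
 * other orders go through handle_sigp_dst(). A negative return code
 * from the handlers is passed on unchanged (e.g. to let user space
 * complete the order); otherwise the SIGP condition code is set in the
 * guest PSW.
 */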
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u32 parameter;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        u8 order_code;
        int rc;

        /* SIGP is privileged; a guest in problem state gets a privileged-operation exception */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        order_code = kvm_s390_get_base_disp_rs(vcpu);

        if (r1 % 2)
                parameter = vcpu->run->s.regs.gprs[r1];
        else
                parameter = vcpu->run->s.regs.gprs[r1 + 1];

        trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
        switch (order_code) {
        case SIGP_SET_ARCHITECTURE:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        default:
                rc = handle_sigp_dst(vcpu, order_code, cpu_addr,
                                     parameter,
                                     &vcpu->run->s.regs.gprs[r1]);
        }

        if (rc < 0)
                return rc;

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}

/*
 * Handle SIGP partial execution interception.
 *
 * This interception occurs on the source cpu when it sends an external
 * call to a target cpu that has the WAIT bit set in its cpuflags.
 * Interception occurs after the interrupt indicator bits at the target
 * cpu have been set. All error cases lead to instruction interception,
 * therefore nothing is to be checked or prepared.
 */
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
{
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        struct kvm_vcpu *dest_vcpu;
        u8 order_code = kvm_s390_get_base_disp_rs(vcpu);

        trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);

        if (order_code == SIGP_EXTERNAL_CALL) {
                dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                BUG_ON(dest_vcpu == NULL);

                kvm_s390_vcpu_wakeup(dest_vcpu);
                kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
                return 0;
        }

        return -EOPNOTSUPP;
}