KVM: s390: Add architectural trace events
[cascardo/linux.git] / arch / s390 / kvm / intercept.c
1 /*
2  * in-kernel handling for sie intercepts
3  *
4  * Copyright IBM Corp. 2008, 2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  */
13
14 #include <linux/kvm_host.h>
15 #include <linux/errno.h>
16 #include <linux/pagemap.h>
17
18 #include <asm/kvm_host.h>
19
20 #include "kvm-s390.h"
21 #include "gaccess.h"
22 #include "trace.h"
23
24 static int handle_lctlg(struct kvm_vcpu *vcpu)
25 {
26         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
27         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
28         int base2 = vcpu->arch.sie_block->ipb >> 28;
29         int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
30                         ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
31         u64 useraddr;
32         int reg, rc;
33
34         vcpu->stat.instruction_lctlg++;
35         if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
36                 return -EOPNOTSUPP;
37
38         useraddr = disp2;
39         if (base2)
40                 useraddr += vcpu->run->s.regs.gprs[base2];
41
42         if (useraddr & 7)
43                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
44
45         reg = reg1;
46
47         VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
48                    disp2);
49         trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);
50
51         do {
52                 rc = get_guest_u64(vcpu, useraddr,
53                                    &vcpu->arch.sie_block->gcr[reg]);
54                 if (rc == -EFAULT) {
55                         kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
56                         break;
57                 }
58                 useraddr += 8;
59                 if (reg == reg3)
60                         break;
61                 reg = (reg + 1) % 16;
62         } while (1);
63         return 0;
64 }
65
66 static int handle_lctl(struct kvm_vcpu *vcpu)
67 {
68         int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
69         int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
70         int base2 = vcpu->arch.sie_block->ipb >> 28;
71         int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
72         u64 useraddr;
73         u32 val = 0;
74         int reg, rc;
75
76         vcpu->stat.instruction_lctl++;
77
78         useraddr = disp2;
79         if (base2)
80                 useraddr += vcpu->run->s.regs.gprs[base2];
81
82         if (useraddr & 3)
83                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
84
85         VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
86                    disp2);
87         trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);
88
89         reg = reg1;
90         do {
91                 rc = get_guest_u32(vcpu, useraddr, &val);
92                 if (rc == -EFAULT) {
93                         kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
94                         break;
95                 }
96                 vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
97                 vcpu->arch.sie_block->gcr[reg] |= val;
98                 useraddr += 4;
99                 if (reg == reg3)
100                         break;
101                 reg = (reg + 1) % 16;
102         } while (1);
103         return 0;
104 }
105
/*
 * Dispatch table for instruction intercepts, indexed by the first
 * opcode byte (high byte of ipa).  Unlisted opcodes stay NULL and make
 * handle_instruction() return -EOPNOTSUPP.
 */
static intercept_handler_t instruction_handlers[256] = {
        [0x01] = kvm_s390_handle_01,
        [0x83] = kvm_s390_handle_diag,
        [0xae] = kvm_s390_handle_sigp,
        [0xb2] = kvm_s390_handle_b2,
        [0xb7] = handle_lctl,
        [0xe5] = kvm_s390_handle_e5,
        [0xeb] = handle_lctlg,
};
115
116 static int handle_noop(struct kvm_vcpu *vcpu)
117 {
118         switch (vcpu->arch.sie_block->icptcode) {
119         case 0x0:
120                 vcpu->stat.exit_null++;
121                 break;
122         case 0x10:
123                 vcpu->stat.exit_external_request++;
124                 break;
125         case 0x14:
126                 vcpu->stat.exit_external_interrupt++;
127                 break;
128         default:
129                 break; /* nothing */
130         }
131         return 0;
132 }
133
/*
 * Handle a stop intercept by processing the action bits that were set
 * for this vcpu under local_int.lock.
 *
 * Returns 0 to keep running the vcpu, SIE_INTERCEPT_RERUNVCPU to
 * restart the vcpu run loop (reload requested), or -EOPNOTSUPP to
 * exit to userspace after the cpu has been stopped (or when storing
 * status succeeded).
 */
static int handle_stop(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        vcpu->stat.exit_stop_request++;
        spin_lock_bh(&vcpu->arch.local_int.lock);

        /* a reload request turns into KVM_EXIT_INTR and a vcpu rerun */
        if (vcpu->arch.local_int.action_bits & ACTION_RELOADVCPU_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_RELOADVCPU_ON_STOP;
                rc = SIE_INTERCEPT_RERUNVCPU;
                vcpu->run->exit_reason = KVM_EXIT_INTR;
        }

        /* mark the cpu stopped; overrides a pending rerun request */
        if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
                atomic_set_mask(CPUSTAT_STOPPED,
                                &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
                VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
                rc = -EOPNOTSUPP;
        }

        if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
                vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
                /* store status must be called unlocked. Since local_int.lock
                 * only protects local_int.* and not guest memory we can give
                 * up the lock here */
                spin_unlock_bh(&vcpu->arch.local_int.lock);
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                /* a successful store still exits to userspace */
                if (rc >= 0)
                        rc = -EOPNOTSUPP;
        } else
                spin_unlock_bh(&vcpu->arch.local_int.lock);
        return rc;
}
169
170 static int handle_validity(struct kvm_vcpu *vcpu)
171 {
172         unsigned long vmaddr;
173         int viwhy = vcpu->arch.sie_block->ipb >> 16;
174         int rc;
175
176         vcpu->stat.exit_validity++;
177         trace_kvm_s390_intercept_validity(vcpu, viwhy);
178         if (viwhy == 0x37) {
179                 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
180                                     vcpu->arch.gmap);
181                 if (IS_ERR_VALUE(vmaddr)) {
182                         rc = -EOPNOTSUPP;
183                         goto out;
184                 }
185                 rc = fault_in_pages_writeable((char __user *) vmaddr,
186                          PAGE_SIZE);
187                 if (rc) {
188                         /* user will receive sigsegv, exit to user */
189                         rc = -EOPNOTSUPP;
190                         goto out;
191                 }
192                 vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
193                                     vcpu->arch.gmap);
194                 if (IS_ERR_VALUE(vmaddr)) {
195                         rc = -EOPNOTSUPP;
196                         goto out;
197                 }
198                 rc = fault_in_pages_writeable((char __user *) vmaddr,
199                          PAGE_SIZE);
200                 if (rc) {
201                         /* user will receive sigsegv, exit to user */
202                         rc = -EOPNOTSUPP;
203                         goto out;
204                 }
205         } else
206                 rc = -EOPNOTSUPP;
207
208 out:
209         if (rc)
210                 VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
211                            viwhy);
212         return rc;
213 }
214
215 static int handle_instruction(struct kvm_vcpu *vcpu)
216 {
217         intercept_handler_t handler;
218
219         vcpu->stat.exit_instruction++;
220         trace_kvm_s390_intercept_instruction(vcpu,
221                                              vcpu->arch.sie_block->ipa,
222                                              vcpu->arch.sie_block->ipb);
223         handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
224         if (handler)
225                 return handler(vcpu);
226         return -EOPNOTSUPP;
227 }
228
/*
 * Program interrupt intercept: account it and re-inject the program
 * interruption code from the SIE block back into the guest.
 */
static int handle_prog(struct kvm_vcpu *vcpu)
{
        vcpu->stat.exit_program_interruption++;
        trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc);
        return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
}
235
236 static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
237 {
238         int rc, rc2;
239
240         vcpu->stat.exit_instr_and_program++;
241         rc = handle_instruction(vcpu);
242         rc2 = handle_prog(vcpu);
243
244         if (rc == -EOPNOTSUPP)
245                 vcpu->arch.sie_block->icptcode = 0x04;
246         if (rc)
247                 return rc;
248         return rc2;
249 }
250
/*
 * Top-level dispatch table for SIE intercepts, indexed by the
 * intercept code shifted right by two (all codes are multiples of 4).
 */
static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
        [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_noop,
        [0x1C >> 2] = kvm_s390_handle_wait,
        [0x20 >> 2] = handle_validity,
        [0x28 >> 2] = handle_stop,
};
262
263 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
264 {
265         intercept_handler_t func;
266         u8 code = vcpu->arch.sie_block->icptcode;
267
268         if (code & 3 || (code >> 2) >= ARRAY_SIZE(intercept_funcs))
269                 return -EOPNOTSUPP;
270         func = intercept_funcs[code >> 2];
271         if (func)
272                 return func(vcpu);
273         return -EOPNOTSUPP;
274 }