/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

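/*
 * Translate a guest virtual address to a guest physical address.  Only
 * unmapped segments (CKSEG0/CKSEG1) can be translated without consulting
 * the guest TLB: the physical address is the virtual address with the
 * segment bits stripped (CPHYSADDR).
 */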
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}

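/*
 * Coprocessor Unusable exception.  The CE field of Cause identifies the
 * coprocessor: CE=1 means the FPU, which is either handed to the guest
 * (kvm_own_fpu) or re-delivered as a guest COP1 Unusable exception; any
 * other coprocessor (notably CP0 accesses by the guest kernel, which runs
 * unprivileged under trap & emulate) is handled by instruction emulation.
 */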
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

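/*
 * TLB Modified exception: the guest wrote to a page whose TLB entry is
 * valid but not dirty (i.e. write protected).
 */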
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
                er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * XXXKYMA: The guest kernel does not expect to get this fault
                 * when we are not using HIGHMEM. Need to address this in a
                 * HIGHMEM kernel
                 */
                kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        } else {
                kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

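/*
 * Common handler for TLB Load and TLB Store miss exceptions; @store
 * selects which flavour is being handled.
 */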
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault; this could happen if:
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */

                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault
                    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                kvm_debug("Emulate %s MMIO space\n",
                          store ? "Store to" : "Load from");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate %s MMIO space failed\n",
                                store ? "Store to" : "Load from");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

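/*
 * Address Error (store).  A store by the guest kernel to KSEG0/KSEG1 is
 * treated as MMIO and handed to userspace via KVM_EXIT_MMIO; anything
 * else is reported as an internal error.
 */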
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                kvm_debug("Emulate Store to MMIO space\n");
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Store to MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
                if (er == EMULATE_FAIL) {
                        kvm_err("Emulate Load from MMIO space failed\n");
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                } else {
                        run->exit_reason = KVM_EXIT_MMIO;
                        ret = RESUME_HOST;
                }
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                er = EMULATE_FAIL;
        }
        return ret;
}

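/*
 * The handlers below re-deliver the corresponding exception to the guest
 * kernel (reserved instructions may additionally be emulated directly,
 * e.g. RDHWR of the UserLocal register).
 */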
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:       Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
        return 0;
}

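/*
 * 0xfc is a bitmask of the KScratch registers (CP0 Register 31,
 * selects 2..7) that the guest is allowed to use; here all six are
 * made available.
 */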
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.kscratch_enabled = 0xfc;

        return 0;
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        return 0;
}

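/*
 * Trap & emulate exposes no extra registers via KVM_GET_REG_LIST beyond
 * those reported by the generic MIPS code, so there are no indices to
 * count or copy here.
 */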
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        return 0;
}

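/*
 * Backend-specific one_reg accessors.  The generic MIPS KVM code handles
 * most registers itself and falls back to these callbacks for the timer
 * registers and, on writes, Cause and the Config registers.
 */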
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

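/*
 * Flush any live FPU/MSA context back to the vcpu structure
 * (kvm_lose_fpu) so that the register state subsequently read out is
 * up to date.
 */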
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
        kvm_lose_fpu(vcpu);

        return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
        return 0;
}

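/*
 * Callbacks implementing the trap & emulate style of MIPS virtualization
 * (no VZ hardware assistance).
 */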
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

        .vm_init = kvm_trap_emul_vm_init,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
        .vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}