2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/kvm_host.h>
15 #include <linux/module.h>
16 #include <linux/vmalloc.h>
18 #include <linux/bootmem.h>
19 #include <asm/cacheflush.h>
21 #include "kvm_mips_comm.h"
/* Encoding templates for the instructions patched over trapping guest ops. */
#define SYNCI_TEMPLATE  0x041f0000
#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)	/* base register field */
/* BUGFIX: was "#define SYNCI_OFFSET ((x) & 0xffff)" with no (x) parameter,
 * so any use expanded to a reference to an undefined identifier 'x'. */
#define SYNCI_OFFSET(x) ((x) & 0xffff)		/* 16-bit offset field */

#define LW_TEMPLATE     0x8c000000
#define CLEAR_TEMPLATE  0x00000020
#define SW_TEMPLATE     0xac000000
/*
 * Patch a guest index-type CACHE instruction with a NOP.  Index cache
 * ops from the guest would operate on host cache lines, so they are
 * simply elided.
 *
 * BUGFIX: the CKSEG0ADDR(...) result was discarded (no "kseg0_opc ="),
 * leaving kseg0_opc uninitialized when dereferenced by memcpy (UB);
 * the function also fell off the end without returning a value.
 *
 * Returns 0 on success.
 */
int
kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
			   struct kvm_vcpu *vcpu)
{
	int result = 0;
	unsigned long kseg0_opc;
	uint32_t synci_inst = 0x0;	/* 0x00000000 encodes NOP */

	/* Replace the CACHE instruction, with a NOP */
	kseg0_opc =
	    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
		       (vcpu, (unsigned long) opc));
	memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
	local_flush_icache_range(kseg0_opc, kseg0_opc + 32);

	return result;
}
50 * Address based CACHE instructions are transformed into synci(s). A little heavy
51 * for just D-cache invalidates, but avoids an expensive trap
54 kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
55 struct kvm_vcpu *vcpu)
58 unsigned long kseg0_opc;
59 uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
61 base = (inst >> 21) & 0x1f;
62 offset = inst & 0xffff;
63 synci_inst |= (base << 21);
67 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
68 (vcpu, (unsigned long) opc));
69 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
70 local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
76 kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
80 unsigned long kseg0_opc, flags;
82 rt = (inst >> 16) & 0x1f;
83 rd = (inst >> 11) & 0x1f;
86 if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
87 mfc0_inst = CLEAR_TEMPLATE;
88 mfc0_inst |= ((rt & 0x1f) << 16);
90 mfc0_inst = LW_TEMPLATE;
91 mfc0_inst |= ((rt & 0x1f) << 16);
93 offsetof(struct mips_coproc,
94 reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
98 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
100 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
101 (vcpu, (unsigned long) opc));
102 memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
103 local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
104 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
105 local_irq_save(flags);
106 memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
107 local_flush_icache_range((unsigned long)opc,
108 (unsigned long)opc + 32);
109 local_irq_restore(flags);
111 kvm_err("%s: Invalid address: %p\n", __func__, opc);
119 kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
122 uint32_t mtc0_inst = SW_TEMPLATE;
123 unsigned long kseg0_opc, flags;
125 rt = (inst >> 16) & 0x1f;
126 rd = (inst >> 11) & 0x1f;
129 mtc0_inst |= ((rt & 0x1f) << 16);
131 offsetof(struct mips_coproc,
132 reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
134 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
136 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
137 (vcpu, (unsigned long) opc));
138 memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
139 local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
140 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
141 local_irq_save(flags);
142 memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
143 local_flush_icache_range((unsigned long)opc,
144 (unsigned long)opc + 32);
145 local_irq_restore(flags);
147 kvm_err("%s: Invalid address: %p\n", __func__, opc);