/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>

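/*
 * The host maps the paravirt "magic page" at guest effective address
 * -4096, i.e. the highest page of the address space.  magic_var()
 * yields the effective address of one field of the shared
 * struct kvm_vcpu_arch_shared that lives in that page, e.g.
 * magic_var(sprg0) expands to -4096 + offsetof(..., sprg0).
 */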
#define KVM_MAGIC_PAGE          (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ            0x80000000
#define KVM_INST_STW            0x90000000
#define KVM_INST_LD             0xe8000000
#define KVM_INST_STD            0xf8000000
#define KVM_INST_NOP            0x60000000
#define KVM_INST_B              0x48000000
#define KVM_INST_B_MASK         0x03ffffff
#define KVM_INST_B_MAX          0x01ffffff
#define KVM_INST_LI             0x38000000

#define KVM_MASK_RT             0x03e00000
#define KVM_RT_30               0x03c00000
#define KVM_MASK_RB             0x0000f800
#define KVM_INST_MFMSR          0x7c0000a6
#define KVM_INST_MFSPR_SPRG0    0x7c1042a6
#define KVM_INST_MFSPR_SPRG1    0x7c1142a6
#define KVM_INST_MFSPR_SPRG2    0x7c1242a6
#define KVM_INST_MFSPR_SPRG3    0x7c1342a6
#define KVM_INST_MFSPR_SRR0     0x7c1a02a6
#define KVM_INST_MFSPR_SRR1     0x7c1b02a6
#define KVM_INST_MFSPR_DAR      0x7c1302a6
#define KVM_INST_MFSPR_DSISR    0x7c1202a6

#define KVM_INST_MTSPR_SPRG0    0x7c1043a6
#define KVM_INST_MTSPR_SPRG1    0x7c1143a6
#define KVM_INST_MTSPR_SPRG2    0x7c1243a6
#define KVM_INST_MTSPR_SPRG3    0x7c1343a6
#define KVM_INST_MTSPR_SRR0     0x7c1a03a6
#define KVM_INST_MTSPR_SRR1     0x7c1b03a6
#define KVM_INST_MTSPR_DAR      0x7c1303a6
#define KVM_INST_MTSPR_DSISR    0x7c1203a6

#define KVM_INST_TLBSYNC        0x7c00046c
#define KVM_INST_MTMSRD_L0      0x7c000164
#define KVM_INST_MTMSRD_L1      0x7c010164
#define KVM_INST_MTMSR          0x7c000124

#define KVM_INST_WRTEE          0x7c000106
#define KVM_INST_WRTEEI_0       0x7c000146
#define KVM_INST_WRTEEI_1       0x7c008146

#define KVM_INST_MTSRIN         0x7c0001e4

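/*
 * kvm_tmp provides 1 MiB of scratch space that the trampolines built
 * below are copied into.  kvm_tmp_index tracks how much of it has been
 * handed out, and kvm_patching_worked records whether every patch so
 * far could be applied.
 */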
static bool kvm_patching_worked = true;
static char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

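/*
 * Replace one guest instruction in place and flush the icache so the
 * new instruction is visible to subsequent execution.
 */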
static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
        *inst = new_inst;
        flush_icache_range((ulong)inst, (ulong)inst + 4);
}

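/*
 * The helpers below rewrite a privileged instruction into a plain
 * load or store against the magic page.  On 32-bit (big-endian)
 * kernels only the low word of a 64-bit shared field can be accessed,
 * hence the "+ 4" offset in the ld/std variants.
 */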
static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
        kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
        kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
        kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
        kvm_patch_ins(inst, KVM_INST_NOP);
}

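/*
 * Patch in a relative branch, typically into one of the trampolines
 * in kvm_tmp.  The offset must fit in the 26-bit LI field of the b
 * instruction; callers check against KVM_INST_B_MAX.
 */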
static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
        /* On relocatable kernels interrupt handlers and our code
           can be in different regions, so we don't patch them */

        if ((ulong)inst < (ulong)&__end_interrupts)
                return;
#endif

        kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

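/*
 * Hand out len bytes from the kvm_tmp scratch buffer, or give up on
 * patching entirely if the buffer is exhausted.
 */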
static u32 *kvm_alloc(int len)
{
        u32 *p;

        if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
                printk(KERN_ERR "KVM: No more space (%d + %d)\n",
                                kvm_tmp_index, len);
                kvm_patching_worked = false;
                return NULL;
        }

        p = (void*)&kvm_tmp[kvm_tmp_index];
        kvm_tmp_index += len;

        return p;
}

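/*
 * The kvm_emulate_* arrays and their *_offs/*_len companions are
 * assembler templates (defined alongside this file, in kvm_emul.S):
 * each one is copied into kvm_tmp and fixed up per call site below.
 */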
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

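/*
 * Rewrite "mtmsrd rX, 1" into a branch to a patched copy of the
 * kvm_emulate_mtmsrd template.  The trampoline clobbers r30 and r31
 * (saving them in the magic page's scratch2/scratch1 slots), so when
 * one of those is the source register its value is reloaded from the
 * corresponding scratch slot instead.
 */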
static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
        p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsrd_reg_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

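/*
 * Same idea for plain "mtmsr" and "mtmsrd rX, 0": the template reads
 * the source register in two places, so both slots get fixed up.
 */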
static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
        p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

        /* Make clobbered registers work too */
        switch (get_rt(rt)) {
        case 30:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch2), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch2), KVM_RT_30);
                break;
        case 31:
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
                                 magic_var(scratch1), KVM_RT_30);
                kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
                                 magic_var(scratch1), KVM_RT_30);
                break;
        default:
                p[kvm_emulate_mtmsr_reg1_offs] |= rt;
                p[kvm_emulate_mtmsr_reg2_offs] |= rt;
                break;
        }

        p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

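/*
 * Rewrite BookE "wrtee rX" (or "wrteei 1" when imm_one is set) into a
 * branch to a patched copy of the kvm_emulate_wrtee template.  For
 * "wrteei 1" the register slot is replaced by "li r30, MSR_EE" since
 * the value to write is a constant.
 */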
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrtee_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
        p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

        if (imm_one) {
                p[kvm_emulate_wrtee_reg_offs] =
                        KVM_INST_LI | __PPC_RT(30) | MSR_EE;
        } else {
                /* Make clobbered registers work too */
                switch (get_rt(rt)) {
                case 30:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch2), KVM_RT_30);
                        break;
                case 31:
                        kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
                                         magic_var(scratch1), KVM_RT_30);
                        break;
                default:
                        p[kvm_emulate_wrtee_reg_offs] |= rt;
                        break;
                }
        }

        p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

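/*
 * "wrteei 0" only ever clears MSR_EE, so a fixed template with no
 * register fixup is enough.
 */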
static void kvm_patch_ins_wrteei_0(u32 *inst)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
        p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

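/*
 * Rewrite 32-bit Book3S "mtsrin rX, rY" into a branch to a patched
 * copy of the kvm_emulate_mtsrin template; both source registers are
 * folded into the copied instructions.
 */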
static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
        u32 *p;
        int distance_start;
        int distance_end;
        ulong next_inst;

        p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
        if (!p)
                return;

        /* Find out where we are and put everything there */
        distance_start = (ulong)p - (ulong)inst;
        next_inst = ((ulong)inst + 4);
        distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

        /* Make sure we only write valid b instructions */
        if (distance_start > KVM_INST_B_MAX) {
                kvm_patching_worked = false;
                return;
        }

        /* Modify the chunk to fit the invocation */
        memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
        p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
        p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
        p[kvm_emulate_mtsrin_reg2_offs] |= rt;
        p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
        flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

        /* Patch the invocation */
        kvm_patch_ins_b(inst, distance_start);
}

#endif

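/*
 * Ask the host to map the magic page at effective address -4096 on
 * this CPU and collect the feature bits the host supports for it.
 */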
static void kvm_map_magic_page(void *data)
{
        u32 *features = data;

        ulong in[8];
        ulong out[8];

        in[0] = KVM_MAGIC_PAGE;
        in[1] = KVM_MAGIC_PAGE;

        kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);

        *features = out[0];
}

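/*
 * Inspect one kernel instruction and, if it is one of the privileged
 * instructions listed above, replace it with its paravirtualized
 * equivalent: a direct magic-page access, a nop, or a branch into a
 * trampoline.
 */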
static void kvm_check_ins(u32 *inst, u32 features)
{
        u32 _inst = *inst;
        u32 inst_no_rt = _inst & ~KVM_MASK_RT;
        u32 inst_rt = _inst & KVM_MASK_RT;

        switch (inst_no_rt) {
        /* Loads */
        case KVM_INST_MFMSR:
                kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG0:
                kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG1:
                kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG2:
                kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MFSPR_SPRG3:
                kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR0:
                kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MFSPR_SRR1:
                kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MFSPR_DAR:
                kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MFSPR_DSISR:
                kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
                break;

        /* Stores */
        case KVM_INST_MTSPR_SPRG0:
                kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG1:
                kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG2:
                kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
                break;
        case KVM_INST_MTSPR_SPRG3:
                kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR0:
                kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
                break;
        case KVM_INST_MTSPR_SRR1:
                kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
                break;
        case KVM_INST_MTSPR_DAR:
                kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
                break;
        case KVM_INST_MTSPR_DSISR:
                kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
                break;

        /* Nops */
        case KVM_INST_TLBSYNC:
                kvm_patch_ins_nop(inst);
                break;

        /* Rewrites */
        case KVM_INST_MTMSRD_L1:
                kvm_patch_ins_mtmsrd(inst, inst_rt);
                break;
        case KVM_INST_MTMSR:
        case KVM_INST_MTMSRD_L0:
                kvm_patch_ins_mtmsr(inst, inst_rt);
                break;
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEE:
                kvm_patch_ins_wrtee(inst, inst_rt, 0);
                break;
#endif
        }

        switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
        case KVM_INST_MTSRIN:
                if (features & KVM_MAGIC_FEAT_SR) {
                        u32 inst_rb = _inst & KVM_MASK_RB;
                        kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
                }
                break;
#endif
        }

        switch (_inst) {
#ifdef CONFIG_BOOKE
        case KVM_INST_WRTEEI_0:
                kvm_patch_ins_wrteei_0(inst);
                break;

        case KVM_INST_WRTEEI_1:
                kvm_patch_ins_wrtee(inst, 0, 1);
                break;
#endif
        }
}

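/*
 * Map the magic page on every CPU, verify the mapping is readable,
 * and then scan the whole kernel text for instructions worth patching.
 */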
static void kvm_use_magic_page(void)
{
        u32 *p;
        u32 *start, *end;
        u32 tmp;
        u32 features;

        /* Tell the host to map the magic page to -4096 on all CPUs */
        on_each_cpu(kvm_map_magic_page, &features, 1);

        /* Quick self-test to see if the mapping works */
        if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
                kvm_patching_worked = false;
                return;
        }

        /* Now loop through all code and find instructions */
        start = (void*)_stext;
        end = (void*)_etext;

        for (p = start; p < end; p++)
                kvm_check_ins(p, features);

        printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
                         kvm_patching_worked ? "worked" : "failed");
}

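/*
 * Issue a hypercall through kvm_hypercall_start: the eight inputs go
 * in r3-r10 and the hypercall number in r11; the return code comes
 * back in r3 with the eight outputs in r4-r11.
 *
 * A minimal caller sketch (hypercall number for illustration only):
 *
 *	ulong in[8] = { 0 }, out[8];
 *	ulong ret = kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_FEATURES);
 */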
unsigned long kvm_hypercall(unsigned long *in,
                            unsigned long *out,
                            unsigned long nr)
{
        unsigned long register r0 asm("r0");
        unsigned long register r3 asm("r3") = in[0];
        unsigned long register r4 asm("r4") = in[1];
        unsigned long register r5 asm("r5") = in[2];
        unsigned long register r6 asm("r6") = in[3];
        unsigned long register r7 asm("r7") = in[4];
        unsigned long register r8 asm("r8") = in[5];
        unsigned long register r9 asm("r9") = in[6];
        unsigned long register r10 asm("r10") = in[7];
        unsigned long register r11 asm("r11") = nr;
        unsigned long register r12 asm("r12");

        asm volatile("bl        kvm_hypercall_start"
                     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
                       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
                       "=r"(r12)
                     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
                       "r"(r9), "r"(r10), "r"(r11)
                     : "memory", "cc", "xer", "ctr", "lr");

        out[0] = r4;
        out[1] = r5;
        out[2] = r6;
        out[3] = r7;
        out[4] = r8;
        out[5] = r9;
        out[6] = r10;
        out[7] = r11;

        return r3;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

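/*
 * Fetch the hypercall instruction sequence the host advertises in the
 * device tree (/hypervisor, "hcall-instructions") and patch it into
 * kvm_hypercall_start.
 */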
static int kvm_para_setup(void)
{
        extern u32 kvm_hypercall_start;
        struct device_node *hyper_node;
        u32 *insts;
        int len, i;

        hyper_node = of_find_node_by_path("/hypervisor");
        if (!hyper_node)
                return -1;

        insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
        if (!insts)
                return -1;
        if (len % 4)
                return -1;
        if (len > (4 * 4))
                return -1;

        for (i = 0; i < (len / 4); i++)
                kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);

        return 0;
}

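/*
 * Return the unused tail of kvm_tmp to the page allocator once
 * patching is done.
 */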
static __init void kvm_free_tmp(void)
{
        unsigned long start, end;

        start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
        end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;

        /* Free the tmp space we don't need */
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}

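/*
 * Guest-side entry point: if we are running under a KVM host that
 * speaks this paravirt protocol, set up hypercalls and patch
 * ourselves, then free whatever scratch space is left over.
 */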
static int __init kvm_guest_init(void)
{
        if (!kvm_para_available())
                goto free_tmp;

        if (kvm_para_setup())
                goto free_tmp;

        if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
                kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
        /* Enable napping */
        powersave_nap = 1;
#endif

free_tmp:
        kvm_free_tmp();

        return 0;
}

postcore_initcall(kvm_guest_init);