#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
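
/*
 * Illustrative sketch (not part of this header): like native_cpuid(),
 * __cpuid() expects the leaf preloaded in *eax (and the subleaf in
 * *ecx where applicable).  E.g. to query leaf 1 for feature flags:
 *
 *	unsigned int eax = 1, ebx = 0, ecx = 0, edx = 0;
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 *	(feature bits are now in ecx/edx)
 */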

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long __read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}

static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}
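
/*
 * Illustrative sketch (not part of this header): a read-modify-write
 * of an MSR through the wrappers above; the particular MSR and bit are
 * just examples:
 *
 *	u32 lo, hi;
 *
 *	rdmsr(MSR_EFER, lo, hi);
 *	lo |= EFER_NX;
 *	wrmsr(MSR_EFER, lo, hi);
 *
 * On a paravirt kernel these expand to pv_cpu_ops.read_msr/write_msr
 * calls rather than raw rdmsr/wrmsr instructions.
 */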

#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
static inline void pmd_update(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update, mm, addr, pmdp);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline void pmd_update_defer(struct mm_struct *mm, unsigned long addr,
				    pmd_t *pmdp)
{
	PVOP_VCALL3(pv_mmu_ops.pmd_update_defer, mm, addr, pmdp);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}
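
/*
 * Note on the sizeof(pteval_t) > sizeof(long) pattern used above and
 * below: on 32-bit PAE kernels page table entries are 64 bits wide
 * while long is 32 bits, so the value has to be passed as two 32-bit
 * call arguments; on 64-bit (and non-PAE 32-bit) it fits in one.  The
 * comparison is a compile-time constant, so the dead branch is
 * discarded by the compiler.
 */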

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}
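
/*
 * The "5 arg words" cases above and below: with a 64-bit pte on a
 * 32-bit kernel the call would need five 32-bit argument words, which
 * is more than the PVOP_*CALL4 calling convention can carry, so the
 * op is invoked as a plain indirect call instead and that site simply
 * isn't patchable.
 */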

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}
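
/*
 * Rough semantics of the two hooks above (as the hypervisor backends
 * implement them): pv_wait() asks the hypervisor to put the vCPU to
 * sleep if *ptr still equals val, and pv_kick() wakes the vCPU that
 * went to sleep in pv_wait(), so a contended spinner yields the
 * physical CPU instead of burning it.
 */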

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"
/* We could save all registers here, but that's too much.  Instead we
 * save only %rdi (the argument register) and mark the remaining
 * caller-saved registers as clobbered. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl __raw_callee_save_" #func " ; "			\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
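
/*
 * Typical usage (sketch, modeled on how a backend such as Xen wires up
 * its save_fl implementation; the names are illustrative):
 *
 *	static unsigned long xen_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 *
 * Callers then reach the C function through the generated
 * __raw_callee_save_* thunk, which preserves the scratch registers
 * around the call.
 */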

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
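
/*
 * These back the generic local_irq_save()/local_irq_restore() helpers;
 * a critical section built on them looks like (sketch):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... interrupts off ...
 *	arch_local_irq_restore(flags);
 *
 * On a paravirt kernel each of these is a patchable callee-save pvop
 * rather than raw pushf/cli/popf instructions.
 */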

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
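
/*
 * Each _PVSITE expansion drops a record into .parainstructions
 * matching struct paravirt_patch_site: the address of the original
 * instruction sequence (771b), the pv_ops entry it corresponds to
 * (ptype), the site's length in bytes (772b-771b), and its clobber
 * mask.  apply_paravirt() walks these records at boot and patches
 * each site in place.
 */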

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special; in practice it is either implemented
 * as a single "swapgs" instruction or as something equally special.
 * Either way, we don't need to save any registers for it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */