powerpc/64s: Consolidate Program 0x700 interrupt
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index bffec73..1df0283 100644
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
 #include <asm/cpuidle.h>
+#include <asm/head-64.h>
 
 /*
+ * There are a few constraints to be concerned with.
+ * - Real mode exceptions code/data must be located at their physical location.
+ * - Virtual mode exceptions must be mapped at their 0xc000... location.
+ * - Fixed location code must not call directly beyond the __end_interrupts
+ *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
+ *   must be used.
+ * - LOAD_HANDLER targets must be within first 64K of physical 0 /
+ *   virtual 0xc00...
+ * - Conditional branch targets must be within +/-32K of caller.
+ *
+ * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
+ * therefore don't have to run in physically located code or rfid to
+ * virtual mode kernel code. However on relocatable kernels they do have
+ * to branch to KERNELBASE offset because the rest of the kernel (outside
+ * the exception vectors) may be located elsewhere.
+ *
+ * Virtual exceptions correspond with physical, except their entry points
+ * are offset by 0xc000000000000000 and also tend to get an added 0x4000
+ * offset applied. Virtual exceptions are enabled with the Alternate
+ * Interrupt Location (AIL) bit set in the LPCR. However this does not
+ * guarantee they will be delivered virtually. Some conditions (see the ISA)
+ * cause exceptions to be delivered in real mode.
+ *
+ * It's impossible to receive interrupts below 0x300 via AIL.
+ *
+ * KVM: None of the virtual exceptions are from the guest. Anything that
+ * escalated to HV=1 from HV=0 is delivered via real mode handlers.
+ *
+ *
  * We layout physical memory as follows:
  * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x17ff : pSeries Interrupt prologs
- * 0x1800 - 0x4000 : interrupt support common interrupt prologs
- * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
- * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
+ * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
+ * 0x1900 - 0x3fff : Real mode trampolines
+ * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
+ * 0x5900 - 0x6fff : Relon mode trampolines
  * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - 0x8fff : Initial (CPU0) segment table
- * 0x9000 -        : Early init and support code
+ * 0x8000 -   .... : Common interrupt handlers, remaining early
+ *                   setup code, rest of kernel.
+ */
+OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
+OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x4000)
+OPEN_FIXED_SECTION(virt_vectors,        0x4000, 0x5900)
+OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ * pseries and powernv need to keep the whole page from
+ * 0x7000 to 0x8000 free for use by the firmware
  */
+ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
+OPEN_TEXT_SECTION(0x8000)
+#else
+OPEN_TEXT_SECTION(0x7000)
+#endif
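
/*
 * A minimal sketch of the LOAD_HANDLER / bctr sequence that the
 * constraints above require when fixed-location code must reach a
 * handler beyond __end_interrupts on a CONFIG_RELOCATABLE kernel
 * (the same pattern appears in the SLB miss vectors below;
 * some_common_handler is only a placeholder name):
 *
 *	LOAD_HANDLER(r10, some_common_handler)
 *	mtctr	r10
 *	bctr
 *
 * The indirect branch avoids a relative branch whose target distance
 * would depend on where the relocatable kernel is finally placed.
 */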
+
+USE_FIXED_SECTION(real_vectors)
+
+#define LOAD_SYSCALL_HANDLER(reg)                              \
+       ld      reg,PACAKBASE(r13);                             \
+       ori     reg,reg,(ABS_ADDR(system_call_common))@l;
+
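/*
 * A sketch of what LOAD_SYSCALL_HANDLER above computes, assuming
 * ABS_ADDR() from the newly included head-64.h yields the handler's
 * absolute address:
 *
 *	reg = paca->kernelbase | (ABS_ADDR(system_call_common) & 0xffff)
 *
 * Because ori merges in only the low 16 bits, the target must lie in
 * the first 64K, matching the LOAD_HANDLER constraint noted in the
 * header comment.
 */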
        /* Syscall routine is used twice, in reloc-off and reloc-on paths */
 #define SYSCALL_PSERIES_1                                      \
 BEGIN_FTR_SECTION                                              \
@@ -41,8 +94,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                                \
 
 #define SYSCALL_PSERIES_2_RFID                                         \
        mfspr   r12,SPRN_SRR1 ;                                 \
-       ld      r10,PACAKBASE(r13) ;                            \
-       LOAD_HANDLER(r10, system_call_entry) ;                  \
+       LOAD_SYSCALL_HANDLER(r10) ;                             \
        mtspr   SPRN_SRR0,r10 ;                                 \
        ld      r10,PACAKMSR(r13) ;                             \
        mtspr   SPRN_SRR1,r10 ;                                 \
@@ -63,15 +115,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                             \
         * is volatile across system calls.
         */
 #define SYSCALL_PSERIES_2_DIRECT                               \
-       mflr    r10 ;                                           \
-       ld      r12,PACAKBASE(r13) ;                            \
-       LOAD_HANDLER(r12, system_call_entry) ;                  \
+       LOAD_SYSCALL_HANDLER(r12) ;                             \
        mtctr   r12 ;                                           \
        mfspr   r12,SPRN_SRR1 ;                                 \
-       /* Re-use of r13... No spare regs to do this */ \
-       li      r13,MSR_RI ;                                    \
-       mtmsrd  r13,1 ;                                         \
-       GET_PACA(r13) ; /* get r13 back */                      \
+       li      r10,MSR_RI ;                                    \
+       mtmsrd  r10,1 ;                                         \
        bctr ;
 #else
        /* We can branch directly */
@@ -90,12 +138,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                             \
  * Therefore any relative branches in this section must only
  * branch to labels in this section.
  */
-       . = 0x100
        .globl __start_interrupts
 __start_interrupts:
 
-       .globl system_reset_pSeries;
-system_reset_pSeries:
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
        SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -136,9 +182,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 #endif /* CONFIG_PPC_P7_NAP */
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
                                 NOTEST, 0x100)
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
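
/*
 * Reading the new macro scheme on this first vector: EXC_REAL_BEGIN /
 * EXC_REAL_END bound the real-mode 0x100 vector inside the fixed
 * real_vectors section, EXC_VIRT_NONE records that there is no 0x4100
 * relocation-on vector (interrupts below 0x300 cannot be delivered
 * via AIL, per the header comment), and EXC_COMMON emits the common
 * handler that calls system_reset_exception(), replacing the
 * STD_EXCEPTION_COMMON(0x100, ...) line removed further down.  The
 * macros are presumably supplied by the newly included asm/head-64.h.
 */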
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option.  Share common code.
+ */
+TRAMP_REAL_BEGIN(system_reset_fwnmi)
+       SET_SCRATCH0(r13)               /* save r13 */
+       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+                                NOTEST, 0x100)
+#endif /* CONFIG_PPC_PSERIES */
 
-       . = 0x200
-machine_check_pSeries_1:
+
+EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
        /* This is moved out of line as it can be patched by FW, but
         * some code path might still want to branch into the original
         * vector
@@ -158,253 +217,9 @@ BEGIN_FTR_SECTION
 FTR_SECTION_ELSE
        b       machine_check_pSeries_0
 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-
-       . = 0x300
-       .globl data_access_pSeries
-data_access_pSeries:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
-                                KVMTEST, 0x300)
-
-       . = 0x380
-       .globl data_access_slb_pSeries
-data_access_slb_pSeries:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_DAR
-       mfspr   r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
-       b       slb_miss_realmode
-#else
-       /*
-        * We can't just use a direct branch to slb_miss_realmode
-        * because the distance from here to there depends on where
-        * the kernel ends up being put.
-        */
-       mfctr   r11
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10, slb_miss_realmode)
-       mtctr   r10
-       bctr
-#endif
-
-       STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
-       . = 0x480
-       .globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
-       mfspr   r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
-       b       slb_miss_realmode
-#else
-       mfctr   r11
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10, slb_miss_realmode)
-       mtctr   r10
-       bctr
-#endif
-
-       /* We open code these as we can't have a ". = x" (even with
-        * x = "." within a feature section
-        */
-       . = 0x500;
-       .globl hardware_interrupt_pSeries;
-       .globl hardware_interrupt_hv;
-hardware_interrupt_pSeries:
-hardware_interrupt_hv:
-       BEGIN_FTR_SECTION
-               _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
-                                           EXC_HV, SOFTEN_TEST_HV)
-               KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
-       FTR_SECTION_ELSE
-               _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-                                           EXC_STD, SOFTEN_TEST_PR)
-               KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
-       ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-
-       STD_EXCEPTION_PSERIES(0x600, alignment)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
-
-       STD_EXCEPTION_PSERIES(0x700, program_check)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
-
-       STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
-
-       . = 0x900
-       .globl decrementer_pSeries
-decrementer_pSeries:
-       _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
-
-       STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
-
-       MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
-
-       STD_EXCEPTION_PSERIES(0xb00, trap_0b)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
-
-       . = 0xc00
-       .globl  system_call_pSeries
-system_call_pSeries:
-        /*
-         * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
-         * that support it) before changing to HMT_MEDIUM. That allows the KVM
-         * code to save that value into the guest state (it is the guest's PPR
-         * value). Otherwise just change to HMT_MEDIUM as userspace has
-         * already saved the PPR.
-         */
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-       SET_SCRATCH0(r13)
-       GET_PACA(r13)
-       std     r9,PACA_EXGEN+EX_R9(r13)
-       OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
-       HMT_MEDIUM;
-       std     r10,PACA_EXGEN+EX_R10(r13)
-       OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
-       mfcr    r9
-       KVMTEST(0xc00)
-       GET_SCRATCH0(r13)
-#else
-       HMT_MEDIUM;
-#endif
-       SYSCALL_PSERIES_1
-       SYSCALL_PSERIES_2_RFID
-       SYSCALL_PSERIES_3
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
-
-       STD_EXCEPTION_PSERIES(0xd00, single_step)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
-
-       /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
-        * out of line to handle them
-        */
-       . = 0xe00
-hv_data_storage_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_data_storage_hv
-
-       . = 0xe20
-hv_instr_storage_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_instr_storage_hv
-
-       . = 0xe40
-emulation_assist_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       emulation_assist_hv
-
-       . = 0xe60
-hv_exception_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       hmi_exception_early
-
-       . = 0xe80
-hv_doorbell_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_doorbell_hv
-
-       . = 0xea0
-hv_virt_irq_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_virt_irq_hv
-
-       /* We need to deal with the Altivec unavailable exception
-        * here which is at 0xf20, thus in the middle of the
-        * prolog code of the PerformanceMonitor one. A little
-        * trickery is thus necessary
-        */
-       . = 0xf00
-performance_monitor_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       performance_monitor_pSeries
-
-       . = 0xf20
-altivec_unavailable_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       altivec_unavailable_pSeries
-
-       . = 0xf40
-vsx_unavailable_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       vsx_unavailable_pSeries
-
-       . = 0xf60
-facility_unavailable_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       facility_unavailable_pSeries
-
-       . = 0xf80
-hv_facility_unavailable_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       facility_unavailable_hv
-
-#ifdef CONFIG_CBE_RAS
-       STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
-#endif /* CONFIG_CBE_RAS */
-
-       STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
-
-       . = 0x1500
-       .global denorm_exception_hv
-denorm_exception_hv:
-       mtspr   SPRN_SPRG_HSCRATCH0,r13
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
-
-#ifdef CONFIG_PPC_DENORMALISATION
-       mfspr   r10,SPRN_HSRR1
-       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
-       andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-       addi    r11,r11,-4              /* HSRR0 is next instruction */
-       bne+    denorm_assist
-#endif
-
-       KVMTEST(0x1500)
-       EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
-
-#ifdef CONFIG_CBE_RAS
-       STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
-#endif /* CONFIG_CBE_RAS */
-
-       STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
-
-#ifdef CONFIG_CBE_RAS
-       STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
-#else
-       . = 0x1800
-#endif /* CONFIG_CBE_RAS */
-
-
-/*** Out of line interrupts support ***/
-
-       .align  7
-       /* moved from 0x200 */
-machine_check_powernv_early:
+EXC_REAL_END(machine_check, 0x200, 0x300)
+EXC_VIRT_NONE(0x4200, 0x4300)
+TRAMP_REAL_BEGIN(machine_check_powernv_early)
 BEGIN_FTR_SECTION
        EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
        /*
@@ -457,7 +272,6 @@ BEGIN_FTR_SECTION
        mfmsr   r11                     /* get MSR value */
        ori     r11,r11,MSR_ME          /* turn on ME bit */
        ori     r11,r11,MSR_RI          /* turn on RI bit */
-       ld      r12,PACAKBASE(r13)      /* get high part of &label */
        LOAD_HANDLER(r12, machine_check_handle_early)
 1:     mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r11
@@ -470,7 +284,6 @@ BEGIN_FTR_SECTION
         */
        addi    r1,r1,INT_FRAME_SIZE    /* go back to previous stack frame */
        ld      r11,PACAKMSR(r13)
-       ld      r12,PACAKBASE(r13)
        LOAD_HANDLER(r12, unrecover_mce)
        li      r10,MSR_ME
        andc    r11,r11,r10             /* Turn off MSR_ME */
@@ -478,20 +291,19 @@ BEGIN_FTR_SECTION
        b       .       /* prevent speculative execution */
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 
-machine_check_pSeries:
+TRAMP_REAL_BEGIN(machine_check_pSeries)
        .globl machine_check_fwnmi
 machine_check_fwnmi:
        SET_SCRATCH0(r13)               /* save r13 */
        EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
-       EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
+       EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
        /*
         * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
         * difference that MSR_RI is not enabled, because PACA_EXMC is being
         * used, so nested machine check corrupts it. machine_check_common
         * enables MSR_RI.
         */
-       ld      r12,PACAKBASE(r13)
        ld      r10,PACAKMSR(r13)
        xori    r10,r10,MSR_RI
        mfspr   r11,SPRN_SRR0
@@ -502,46 +314,667 @@ machine_check_pSeries_0:
        rfid
        b       .       /* prevent speculative execution */
 
-       KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
-       KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
-       KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
-
-#ifdef CONFIG_PPC_DENORMALISATION
-denorm_assist:
-BEGIN_FTR_SECTION
-/*
- * To denormalise we need to move a copy of the register to itself.
- * For POWER6 do that here for all FP regs.
- */
-       mfmsr   r10
-       ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
-       xori    r10,r10,(MSR_FE0|MSR_FE1)
-       mtmsrd  r10
-       sync
+TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
 
-#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
-#define FMR4(n)  FMR2(n) ; FMR2(n+2)
-#define FMR8(n)  FMR4(n) ; FMR4(n+4)
-#define FMR16(n) FMR8(n) ; FMR8(n+8)
-#define FMR32(n) FMR16(n) ; FMR16(n+16)
-       FMR32(0)
+EXC_COMMON_BEGIN(machine_check_common)
+       /*
+        * Machine check is different because we use a different
+        * save area: PACA_EXMC instead of PACA_EXGEN.
+        */
+       mfspr   r10,SPRN_DAR
+       std     r10,PACA_EXMC+EX_DAR(r13)
+       mfspr   r10,SPRN_DSISR
+       stw     r10,PACA_EXMC+EX_DSISR(r13)
+       EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+       FINISH_NAP
+       RECONCILE_IRQ_STATE(r10, r11)
+       ld      r3,PACA_EXMC+EX_DAR(r13)
+       lwz     r4,PACA_EXMC+EX_DSISR(r13)
+       /* Enable MSR_RI when finished with PACA_EXMC */
+       li      r10,MSR_RI
+       mtmsrd  r10,1
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+       bl      save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      machine_check_exception
+       b       ret_from_except
 
-FTR_SECTION_ELSE
-/*
- * To denormalise we need to move a copy of the register to itself.
- * For POWER7 do that here for the first 32 VSX registers only.
- */
-       mfmsr   r10
-       oris    r10,r10,MSR_VSX@h
-       mtmsrd  r10
-       sync
+#define MACHINE_CHECK_HANDLER_WINDUP                   \
+       /* Clear MSR_RI before setting SRR0 and SRR1. */\
+       li      r0,MSR_RI;                              \
+       mfmsr   r9;             /* get MSR value */     \
+       andc    r9,r9,r0;                               \
+       mtmsrd  r9,1;           /* Clear MSR_RI */      \
+       /* Move original SRR0 and SRR1 into the respective regs */      \
+       ld      r9,_MSR(r1);                            \
+       mtspr   SPRN_SRR1,r9;                           \
+       ld      r3,_NIP(r1);                            \
+       mtspr   SPRN_SRR0,r3;                           \
+       ld      r9,_CTR(r1);                            \
+       mtctr   r9;                                     \
+       ld      r9,_XER(r1);                            \
+       mtxer   r9;                                     \
+       ld      r9,_LINK(r1);                           \
+       mtlr    r9;                                     \
+       REST_GPR(0, r1);                                \
+       REST_8GPRS(2, r1);                              \
+       REST_GPR(10, r1);                               \
+       ld      r11,_CCR(r1);                           \
+       mtcr    r11;                                    \
+       /* Decrement paca->in_mce. */                   \
+       lhz     r12,PACA_IN_MCE(r13);                   \
+       subi    r12,r12,1;                              \
+       sth     r12,PACA_IN_MCE(r13);                   \
+       REST_GPR(11, r1);                               \
+       REST_2GPRS(12, r1);                             \
+       /* restore original r1. */                      \
+       ld      r1,GPR1(r1)
 
-#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
-#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
+       /*
+        * Handle machine check early in real mode. We come here with
+        * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
+        */
+EXC_COMMON_BEGIN(machine_check_handle_early)
+       std     r0,GPR0(r1)     /* Save r0 */
+       EXCEPTION_PROLOG_COMMON_3(0x200)
+       bl      save_nvgprs
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      machine_check_early
+       std     r3,RESULT(r1)   /* Save result */
+       ld      r12,_MSR(r1)
+#ifdef CONFIG_PPC_P7_NAP
+       /*
+        * Check if thread was in power saving mode. We come here when any
+        * of the following is true:
+        * a. thread wasn't in power saving mode
+        * b. thread was in power saving mode with no state loss,
+        *    supervisor state loss or hypervisor state loss.
+        *
+        * Go back to nap/sleep/winkle mode again if (b) is true.
+        */
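/*
 * The rlwinm below extracts SRR1[46:47] (the power-save wakeup state
 * reported by the hardware) into bits 30:31 of r11: zero means the
 * thread was not in a power-saving mode, and values of two or more
 * indicate supervisor/hypervisor state loss, as tested just below.
 */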
+       rlwinm. r11,r12,47-31,30,31     /* Was it in power saving mode? */
+       beq     4f                      /* No, it wasn't */
+       /* Thread was in power saving mode. Go back to nap again. */
+       cmpwi   r11,2
+       blt     3f
+       /* Supervisor/Hypervisor state loss */
+       li      r0,1
+       stb     r0,PACA_NAPSTATELOST(r13)
+3:     bl      machine_check_queue_event
+       MACHINE_CHECK_HANDLER_WINDUP
+       GET_PACA(r13)
+       ld      r1,PACAR1(r13)
+       /*
+        * Check what idle state this CPU was in and go back to same mode
+        * again.
+        */
+       lbz     r3,PACA_THREAD_IDLE_STATE(r13)
+       cmpwi   r3,PNV_THREAD_NAP
+       bgt     10f
+       IDLE_STATE_ENTER_SEQ(PPC_NAP)
+       /* No return */
+10:
+       cmpwi   r3,PNV_THREAD_SLEEP
+       bgt     2f
+       IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+       /* No return */
+
+2:
+       /*
+        * Go back to winkle. Please note that this thread was woken up in
+        * machine check from winkle and has not restored the per-subcore
+        * state. Hence before going back to winkle, set the last bit of
+        * HSPRG0 to 1. This will make sure that if this thread gets woken
+        * up again at reset vector 0x100 then it will get a chance to
+        * restore the subcore state.
+        */
+       ori     r13,r13,1
+       SET_PACA(r13)
+       IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
+       /* No return */
+4:
+#endif
+       /*
+        * Check if we are coming from hypervisor userspace. If yes then we
+        * continue in host kernel in V mode to deliver the MC event.
+        */
+       rldicl. r11,r12,4,63            /* See if MC hit while in HV mode. */
+       beq     5f
+       andi.   r11,r12,MSR_PR          /* See if coming from user. */
+       bne     9f                      /* continue in V mode if we are. */
+
+5:
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+       /*
+        * We are coming from kernel context. Check if we are coming from
+        * guest. If yes, then we can continue. We will fall through
+        * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
+        */
+       lbz     r11,HSTATE_IN_GUEST(r13)
+       cmpwi   r11,0                   /* Check if coming from guest */
+       bne     9f                      /* continue if we are. */
+#endif
+       /*
+        * At this point we are not sure what context we came from.
+        * Queue up the MCE event and return from the interrupt.
+        * But before that, check if this is an unrecoverable exception.
+        * If yes, then stay on the emergency stack and panic.
+        */
+       andi.   r11,r12,MSR_RI
+       bne     2f
+1:     mfspr   r11,SPRN_SRR0
+       LOAD_HANDLER(r10,unrecover_mce)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       /*
+        * We are going down. But there is a chance that we might get hit
+        * by another MCE during the panic path and run into an unstable
+        * state with no way out. Hence, turn the ME bit off while going
+        * down, so that when another MCE is hit during the panic path the
+        * system will checkstop and the hypervisor will get restarted
+        * cleanly by the SP.
+        */
+       li      r3,MSR_ME
+       andc    r10,r10,r3              /* Turn off MSR_ME */
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .
+2:
+       /*
+        * Check if we have successfully handled/recovered from error, if not
+        * then stay on emergency stack and panic.
+        */
+       ld      r3,RESULT(r1)   /* Load result */
+       cmpdi   r3,0            /* see if we handled MCE successfully */
+
+       beq     1b              /* if !handled then panic */
+       /*
+        * Return from MC interrupt.
+        * Queue up the MCE event so that we can log it later, while
+        * returning from kernel or opal call.
+        */
+       bl      machine_check_queue_event
+       MACHINE_CHECK_HANDLER_WINDUP
+       rfid
+9:
+       /* Deliver the machine check to host kernel in V mode. */
+       MACHINE_CHECK_HANDLER_WINDUP
+       b       machine_check_pSeries
+
+EXC_COMMON_BEGIN(unrecover_mce)
+       /* Invoke machine_check_exception to print MCE event and panic. */
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      machine_check_exception
+       /*
+        * We will not reach here. Even if we did, there is no way out. Call
+        * unrecoverable_exception and die.
+        */
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      unrecoverable_exception
+       b       1b
+
+
+EXC_REAL(data_access, 0x300, 0x380)
+EXC_VIRT(data_access, 0x4300, 0x4380, 0x300)
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
+
+EXC_COMMON_BEGIN(data_access_common)
+       /*
+        * Here r13 points to the paca, r9 contains the saved CR,
+        * SRR0 and SRR1 are saved in r11 and r12,
+        * r9 - r13 are saved in paca->exgen.
+        */
+       mfspr   r10,SPRN_DAR
+       std     r10,PACA_EXGEN+EX_DAR(r13)
+       mfspr   r10,SPRN_DSISR
+       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+       RECONCILE_IRQ_STATE(r10, r11)
+       ld      r12,_MSR(r1)
+       ld      r3,PACA_EXGEN+EX_DAR(r13)
+       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       li      r5,0x300
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
+       b       do_hash_page            /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+       b       handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+
+
+EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
+       SET_SCRATCH0(r13)
+       EXCEPTION_PROLOG_0(PACA_EXSLB)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r3,SPRN_DAR
+       mfspr   r12,SPRN_SRR1
+       crset   4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+       b       slb_miss_realmode
+#else
+       /*
+        * We can't just use a direct branch to slb_miss_realmode
+        * because the distance from here to there depends on where
+        * the kernel ends up being put.
+        */
+       mfctr   r11
+       LOAD_HANDLER(r10, slb_miss_realmode)
+       mtctr   r10
+       bctr
+#endif
+EXC_REAL_END(data_access_slb, 0x380, 0x400)
+
+EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400)
+       SET_SCRATCH0(r13)
+       EXCEPTION_PROLOG_0(PACA_EXSLB)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r3,SPRN_DAR
+       mfspr   r12,SPRN_SRR1
+       crset   4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+       b       slb_miss_realmode
+#else
+       /*
+        * We can't just use a direct branch to slb_miss_realmode
+        * because the distance from here to there depends on where
+        * the kernel ends up being put.
+        */
+       mfctr   r11
+       LOAD_HANDLER(r10, slb_miss_realmode)
+       mtctr   r10
+       bctr
+#endif
+EXC_VIRT_END(data_access_slb, 0x4380, 0x4400)
+TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
+
+
+EXC_REAL(instruction_access, 0x400, 0x480)
+EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400)
+TRAMP_KVM(PACA_EXGEN, 0x400)
+
+EXC_COMMON_BEGIN(instruction_access_common)
+       EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+       RECONCILE_IRQ_STATE(r10, r11)
+       ld      r12,_MSR(r1)
+       ld      r3,_NIP(r1)
+       andis.  r4,r12,0x5820
+       li      r5,0x400
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+BEGIN_MMU_FTR_SECTION
+       b       do_hash_page            /* Try to handle as hpte fault */
+MMU_FTR_SECTION_ELSE
+       b       handle_page_fault
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+
+
+EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
+       SET_SCRATCH0(r13)
+       EXCEPTION_PROLOG_0(PACA_EXSLB)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
+       mfspr   r12,SPRN_SRR1
+       crclr   4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+       b       slb_miss_realmode
+#else
+       mfctr   r11
+       LOAD_HANDLER(r10, slb_miss_realmode)
+       mtctr   r10
+       bctr
+#endif
+EXC_REAL_END(instruction_access_slb, 0x480, 0x500)
+
+EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500)
+       SET_SCRATCH0(r13)
+       EXCEPTION_PROLOG_0(PACA_EXSLB)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
+       std     r3,PACA_EXSLB+EX_R3(r13)
+       mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
+       mfspr   r12,SPRN_SRR1
+       crclr   4*cr6+eq
+#ifndef CONFIG_RELOCATABLE
+       b       slb_miss_realmode
+#else
+       mfctr   r11
+       LOAD_HANDLER(r10, slb_miss_realmode)
+       mtctr   r10
+       bctr
+#endif
+EXC_VIRT_END(instruction_access_slb, 0x4480, 0x4500)
+TRAMP_KVM(PACA_EXSLB, 0x480)
+
+
+/* This handler is used by both 0x380 and 0x480 slb miss interrupts */
+EXC_COMMON_BEGIN(slb_miss_realmode)
+       /*
+        * r13 points to the PACA, r9 contains the saved CR,
+        * r12 contains the saved SRR1, SRR0 is still ready for return
+        * r3 has the faulting address
+        * r9 - r13 are saved in paca->exslb.
+        * r3 is saved in paca->slb_r3
+        * cr6.eq is set for a D-SLB miss, clear for an I-SLB miss
+        * We assume we aren't going to take any exceptions during this
+        * procedure.
+        */
+       mflr    r10
+#ifdef CONFIG_RELOCATABLE
+       mtctr   r11
+#endif
+
+       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
+       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
+       std     r3,PACA_EXSLB+EX_DAR(r13)
+
+       crset   4*cr0+eq
+#ifdef CONFIG_PPC_STD_MMU_64
+BEGIN_MMU_FTR_SECTION
+       bl      slb_allocate_realmode
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+#endif
+
+       ld      r10,PACA_EXSLB+EX_LR(r13)
+       ld      r3,PACA_EXSLB+EX_R3(r13)
+       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
+       mtlr    r10
+
+       beq     8f              /* if bad address, make full stack frame */
+
+       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+       beq-    2f
+
+       /* All done -- return from exception. */
+
+.machine       push
+.machine       "power4"
+       mtcrf   0x80,r9
+       mtcrf   0x02,r9         /* I/D indication is in cr6 */
+       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
+.machine       pop
+
+       RESTORE_PPR_PACA(PACA_EXSLB, r9)
+       ld      r9,PACA_EXSLB+EX_R9(r13)
+       ld      r10,PACA_EXSLB+EX_R10(r13)
+       ld      r11,PACA_EXSLB+EX_R11(r13)
+       ld      r12,PACA_EXSLB+EX_R12(r13)
+       ld      r13,PACA_EXSLB+EX_R13(r13)
+       rfid
+       b       .       /* prevent speculative execution */
+
+2:     mfspr   r11,SPRN_SRR0
+       LOAD_HANDLER(r10,unrecov_slb)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .
+
+8:     mfspr   r11,SPRN_SRR0
+       LOAD_HANDLER(r10,bad_addr_slb)
+       mtspr   SPRN_SRR0,r10
+       ld      r10,PACAKMSR(r13)
+       mtspr   SPRN_SRR1,r10
+       rfid
+       b       .
+
+EXC_COMMON_BEGIN(unrecov_slb)
+       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+       RECONCILE_IRQ_STATE(r10, r11)
+       bl      save_nvgprs
+1:     addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      unrecoverable_exception
+       b       1b
+
+EXC_COMMON_BEGIN(bad_addr_slb)
+       EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
+       RECONCILE_IRQ_STATE(r10, r11)
+       ld      r3, PACA_EXSLB+EX_DAR(r13)
+       std     r3, _DAR(r1)
+       beq     cr6, 2f
+       li      r10, 0x480              /* fix trap number for I-SLB miss */
+       std     r10, _TRAP(r1)
+2:     bl      save_nvgprs
+       addi    r3, r1, STACK_FRAME_OVERHEAD
+       bl      slb_miss_bad_addr
+       b       ret_from_except
+
+EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600)
+       .globl hardware_interrupt_hv;
+hardware_interrupt_hv:
+       BEGIN_FTR_SECTION
+               _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
+                                           EXC_HV, SOFTEN_TEST_HV)
+do_kvm_H0x500:
+               KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
+       FTR_SECTION_ELSE
+               _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
+                                           EXC_STD, SOFTEN_TEST_PR)
+do_kvm_0x500:
+               KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+       ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+EXC_REAL_END(hardware_interrupt, 0x500, 0x600)
+
+EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x4600)
+       .globl hardware_interrupt_relon_hv;
+hardware_interrupt_relon_hv:
+       BEGIN_FTR_SECTION
+               _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_HV, SOFTEN_TEST_HV)
+       FTR_SECTION_ELSE
+               _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR)
+       ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+EXC_VIRT_END(hardware_interrupt, 0x4500, 0x4600)
+
+EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
+
+
+EXC_REAL(alignment, 0x600, 0x700)
+EXC_VIRT(alignment, 0x4600, 0x4700, 0x600)
+TRAMP_KVM(PACA_EXGEN, 0x600)
+EXC_COMMON_BEGIN(alignment_common)
+       mfspr   r10,SPRN_DAR
+       std     r10,PACA_EXGEN+EX_DAR(r13)
+       mfspr   r10,SPRN_DSISR
+       stw     r10,PACA_EXGEN+EX_DSISR(r13)
+       EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+       ld      r3,PACA_EXGEN+EX_DAR(r13)
+       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
+       std     r3,_DAR(r1)
+       std     r4,_DSISR(r1)
+       bl      save_nvgprs
+       RECONCILE_IRQ_STATE(r10, r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      alignment_exception
+       b       ret_from_except
+
+
+EXC_REAL(program_check, 0x700, 0x800)
+EXC_VIRT(program_check, 0x4700, 0x4800, 0x700)
+TRAMP_KVM(PACA_EXGEN, 0x700)
+EXC_COMMON_BEGIN(program_check_common)
+       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+       bl      save_nvgprs
+       RECONCILE_IRQ_STATE(r10, r11)
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      program_check_exception
+       b       ret_from_except
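
/*
 * This 0x700 block is the consolidation the patch title refers to:
 * the real vector (EXC_REAL), the relocation-on vector (EXC_VIRT),
 * the KVM trampoline (TRAMP_KVM) and the common handler now sit
 * together, replacing the formerly scattered
 * STD_EXCEPTION_PSERIES(0x700, program_check), KVM_HANDLER(..., 0x700),
 * STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check) and
 * program_check_common pieces removed elsewhere in this diff.
 */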
+
+
+EXC_REAL(fp_unavailable, 0x800, 0x900)
+
+TRAMP_KVM(PACA_EXGEN, 0x800)
+
+EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
+
+EXC_REAL_HV(hdecrementer, 0x980, 0xa00)
+
+EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0xb00)
+
+TRAMP_KVM(PACA_EXGEN, 0xa00)
+
+EXC_REAL(trap_0b, 0xb00, 0xc00)
+
+TRAMP_KVM(PACA_EXGEN, 0xb00)
+
+EXC_REAL_BEGIN(system_call, 0xc00, 0xd00)
+        /*
+         * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
+         * that support it) before changing to HMT_MEDIUM. That allows the KVM
+         * code to save that value into the guest state (it is the guest's PPR
+         * value). Otherwise just change to HMT_MEDIUM as userspace has
+         * already saved the PPR.
+         */
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+       SET_SCRATCH0(r13)
+       GET_PACA(r13)
+       std     r9,PACA_EXGEN+EX_R9(r13)
+       OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
+       HMT_MEDIUM;
+       std     r10,PACA_EXGEN+EX_R10(r13)
+       OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
+       mfcr    r9
+       KVMTEST_PR(0xc00)
+       GET_SCRATCH0(r13)
+#else
+       HMT_MEDIUM;
+#endif
+       SYSCALL_PSERIES_1
+       SYSCALL_PSERIES_2_RFID
+       SYSCALL_PSERIES_3
+EXC_REAL_END(system_call, 0xc00, 0xd00)
+
+TRAMP_KVM(PACA_EXGEN, 0xc00)
+
+EXC_REAL(single_step, 0xd00, 0xe00)
+
+TRAMP_KVM(PACA_EXGEN, 0xd00)
+
+
+       /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
+        * out of line to handle them
+        */
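/*
 * Per the open-coded trampolines this replaces (removed above), each
 * of these OOL entries reduces to roughly:
 *
 *	SET_SCRATCH0(r13)
 *	EXCEPTION_PROLOG_0(PACA_EXGEN)
 *	b	<out-of-line continuation>
 *
 * i.e. the 0x20-byte real vector only saves r13 and branches out of
 * line, where the full prolog runs.
 */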
+__EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0xe20)
+
+__EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0xe40)
+
+__EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0xe60)
+
+__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0xe80, hmi_exception_early)
+
+__EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0xea0)
+
+__EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0xec0)
+
+EXC_REAL_NONE(0xec0, 0xf00)
+
+__EXC_REAL_OOL(performance_monitor, 0xf00, 0xf20)
+
+__EXC_REAL_OOL(altivec_unavailable, 0xf20, 0xf40)
+
+__EXC_REAL_OOL(vsx_unavailable, 0xf40, 0xf60)
+
+__EXC_REAL_OOL(facility_unavailable, 0xf60, 0xf80)
+
+__EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0)
+
+EXC_REAL_NONE(0xfa0, 0x1200)
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300)
+
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
+
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1200, 0x1300)
+#endif
+
+EXC_REAL(instruction_breakpoint, 0x1300, 0x1400)
+
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
+
+EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600)
+       mtspr   SPRN_SPRG_HSCRATCH0,r13
+       EXCEPTION_PROLOG_0(PACA_EXGEN)
+       EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
+
+#ifdef CONFIG_PPC_DENORMALISATION
+       mfspr   r10,SPRN_HSRR1
+       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
+       andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
+       addi    r11,r11,-4              /* HSRR0 is next instruction */
+       bne+    denorm_assist
+#endif
+
+       KVMTEST_PR(0x1500)
+       EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
+EXC_REAL_END(denorm_exception_hv, 0x1500, 0x1600)
+
+TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500)
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_maintenance, 0x1600, 0x1700)
+
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
+
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1600, 0x1700)
+#endif
+
+EXC_REAL(altivec_assist, 0x1700, 0x1800)
+
+TRAMP_KVM(PACA_EXGEN, 0x1700)
+
+#ifdef CONFIG_CBE_RAS
+EXC_REAL_HV(cbe_thermal, 0x1800, 0x1900)
+
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
+
+#else /* CONFIG_CBE_RAS */
+EXC_REAL_NONE(0x1800, 0x1900)
+#endif
+
+
+/*** Out of line interrupts support ***/
+
+       /* moved from 0x200 */
+TRAMP_KVM(PACA_EXGEN, 0x900)
+TRAMP_KVM_HV(PACA_EXGEN, 0x980)
+
+#ifdef CONFIG_PPC_DENORMALISATION
+TRAMP_REAL_BEGIN(denorm_assist)
+BEGIN_FTR_SECTION
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER6 do that here for all FP regs.
+ */
+       mfmsr   r10
+       ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
+       xori    r10,r10,(MSR_FE0|MSR_FE1)
+       mtmsrd  r10
+       sync
+
+#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
+#define FMR4(n)  FMR2(n) ; FMR2(n+2)
+#define FMR8(n)  FMR4(n) ; FMR4(n+4)
+#define FMR16(n) FMR8(n) ; FMR8(n+8)
+#define FMR32(n) FMR16(n) ; FMR16(n+16)
+       FMR32(0)
+
+FTR_SECTION_ELSE
+/*
+ * To denormalise we need to move a copy of the register to itself.
+ * For POWER7 do that here for the first 32 VSX registers only.
+ */
+       mfmsr   r10
+       oris    r10,r10,MSR_VSX@h
+       mtmsrd  r10
+       sync
+
+#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
+#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
 #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
 #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
 #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
@@ -574,34 +1007,40 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        b       .
 #endif
 
-       .align  7
        /* moved from 0xe00 */
-       STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
-       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
-       STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
-       STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
-       MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
+__TRAMP_REAL_REAL_OOL_HV(h_data_storage, 0xe00)
+TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
 
-       MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
+__TRAMP_REAL_REAL_OOL_HV(h_instr_storage, 0xe20)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
 
-       MASKABLE_EXCEPTION_HV_OOL(0xea2, h_virt_irq)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xea2)
+__TRAMP_REAL_REAL_OOL_HV(emulation_assist, 0xe40)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
+
+__TRAMP_REAL_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
+
+__TRAMP_REAL_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80)
+TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
+
+__TRAMP_REAL_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0)
+TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
 
        /* moved from 0xf00 */
-       STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
-       STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
-       STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
-       STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
-       STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
-       KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
+__TRAMP_REAL_REAL_OOL(performance_monitor, 0xf00)
+TRAMP_KVM(PACA_EXGEN, 0xf00)
+
+__TRAMP_REAL_REAL_OOL(altivec_unavailable, 0xf20)
+TRAMP_KVM(PACA_EXGEN, 0xf20)
+
+__TRAMP_REAL_REAL_OOL(vsx_unavailable, 0xf40)
+TRAMP_KVM(PACA_EXGEN, 0xf40)
+
+__TRAMP_REAL_REAL_OOL(facility_unavailable, 0xf60)
+TRAMP_KVM(PACA_EXGEN, 0xf60)
+
+__TRAMP_REAL_REAL_OOL_HV(h_facility_unavailable, 0xf80)
+TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
 
 /*
  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -640,7 +1079,13 @@ masked_##_H##interrupt:                                   \
        GET_SCRATCH0(r13);                              \
        ##_H##rfid;                                     \
        b       .
-       
+
+/*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+ * cannot reach these if they are put there.
+ */
+USE_FIXED_SECTION(virt_trampolines)
        MASKED_INTERRUPT()
        MASKED_INTERRUPT(H)
 
@@ -654,6 +1099,7 @@ masked_##_H##interrupt:                                    \
  * in the generated frame has EE set to 1 or the exception
  * handler will not properly re-enable them.
  */
+USE_TEXT_SECTION()
 _GLOBAL(__replay_interrupt)
        /* We are going to jump to the exception common code which
         * will retrieve various register values from the PACA which
@@ -680,21 +1126,8 @@ FTR_SECTION_ELSE
 ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        blr
 
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option.  Share common code.
- */
-       .globl system_reset_fwnmi
-      .align 7
-system_reset_fwnmi:
-       SET_SCRATCH0(r13)               /* save r13 */
-       EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
-                                NOTEST, 0x100)
-
-#endif /* CONFIG_PPC_PSERIES */
-
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-kvmppc_skip_interrupt:
+TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
@@ -706,7 +1139,7 @@ kvmppc_skip_interrupt:
        rfid
        b       .
 
-kvmppc_skip_Hinterrupt:
+TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
@@ -728,34 +1161,32 @@ kvmppc_skip_Hinterrupt:
 
 /*** Common interrupt handlers ***/
 
-       STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
+EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
 
-       STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
-       STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
-       STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
 #ifdef CONFIG_PPC_DOORBELL
-       STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
+EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
 #else
-       STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
+EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
 #endif
-       STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
-       STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
-       STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
-       STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
-       STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
+EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
+EXC_COMMON(single_step_common, 0xd00, single_step_exception)
+EXC_COMMON(trap_0e_common, 0xe00, unknown_exception)
+EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
+EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception)
 #ifdef CONFIG_PPC_DOORBELL
-       STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
+EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
 #else
-       STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
+EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
 #endif
-       STD_EXCEPTION_COMMON_ASYNC(0xea0, h_virt_irq, do_IRQ)
-       STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
-       STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
-       STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
+EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
+EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
+EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
+EXC_COMMON_HV(denorm_common, 0x1500, unknown_exception)
 #ifdef CONFIG_ALTIVEC
-       STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
+EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
 #else
-       STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
+EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
 #endif
 
        /*
@@ -773,179 +1204,76 @@ kvmppc_skip_Hinterrupt:
         * only has extra guff for STAB-based processors -- which never
         * come here.
         */
-       STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
-       . = 0x4380
-       .globl data_access_slb_relon_pSeries
-data_access_slb_relon_pSeries:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_DAR
-       mfspr   r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
-       b       slb_miss_realmode
-#else
-       /*
-        * We can't just use a direct branch to slb_miss_realmode
-        * because the distance from here to there depends on where
-        * the kernel ends up being put.
-        */
-       mfctr   r11
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10, slb_miss_realmode)
-       mtctr   r10
-       bctr
-#endif
 
-       STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
-       . = 0x4480
-       .globl instruction_access_slb_relon_pSeries
-instruction_access_slb_relon_pSeries:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
-       std     r3,PACA_EXSLB+EX_R3(r13)
-       mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
-       mfspr   r12,SPRN_SRR1
-#ifndef CONFIG_RELOCATABLE
-       b       slb_miss_realmode
-#else
-       mfctr   r11
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10, slb_miss_realmode)
-       mtctr   r10
-       bctr
-#endif
+EXC_VIRT(fp_unavailable, 0x4800, 0x4900, 0x800)
+EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
+EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
+EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x4b00, 0xa00)
+EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
 
-       . = 0x4500
-       .globl hardware_interrupt_relon_pSeries;
-       .globl hardware_interrupt_relon_hv;
-hardware_interrupt_relon_pSeries:
-hardware_interrupt_relon_hv:
-       BEGIN_FTR_SECTION
-               _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
-       FTR_SECTION_ELSE
-               _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
-       ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-       STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
-       STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
-       STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
-       MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
-       STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
-       MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
-       STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
-
-       . = 0x4c00
-       .globl system_call_relon_pSeries
-system_call_relon_pSeries:
+EXC_VIRT_BEGIN(system_call, 0x4c00, 0x4d00)
        HMT_MEDIUM
        SYSCALL_PSERIES_1
        SYSCALL_PSERIES_2_DIRECT
        SYSCALL_PSERIES_3
+EXC_VIRT_END(system_call, 0x4c00, 0x4d00)
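
/*
 * Note the asymmetry with the real-mode 0xc00 entry: that path uses
 * SYSCALL_PSERIES_2_RFID to set SRR0/SRR1 for an rfid into the
 * handler, while this relocation-on entry can use
 * SYSCALL_PSERIES_2_DIRECT (mtctr/bctr), since it already runs with
 * relocation enabled as described in the header comment.
 */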
 
-       STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
+EXC_VIRT(single_step, 0x4d00, 0x4e00, 0xd00)
 
-       . = 0x4e00
-       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_BEGIN(unused, 0x4e00, 0x4e20)
+       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_END(unused, 0x4e00, 0x4e20)
 
-       . = 0x4e20
-       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_BEGIN(unused, 0x4e20, 0x4e40)
+       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_END(unused, 0x4e20, 0x4e40)
 
-       . = 0x4e40
-emulation_assist_relon_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       emulation_assist_relon_hv
+__EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x4e60)
 
-       . = 0x4e60
-       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_BEGIN(unused, 0x4e60, 0x4e80)
+       b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */
+EXC_VIRT_END(unused, 0x4e60, 0x4e80)
 
-       . = 0x4e80
-h_doorbell_relon_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_doorbell_relon_hv
+__EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x4ea0)
 
-       . = 0x4ea0
-h_virt_irq_relon_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       h_virt_irq_relon_hv
+__EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x4ec0)
 
-       . = 0x4f00
-performance_monitor_relon_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       performance_monitor_relon_pSeries
+EXC_VIRT_NONE(0x4ec0, 0x4f00)
 
-       . = 0x4f20
-altivec_unavailable_relon_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       altivec_unavailable_relon_pSeries
+__EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x4f20)
 
-       . = 0x4f40
-vsx_unavailable_relon_pseries_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       vsx_unavailable_relon_pSeries
+__EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x4f40)
 
-       . = 0x4f60
-facility_unavailable_relon_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       facility_unavailable_relon_pSeries
+__EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x4f60)
 
-       . = 0x4f80
-hv_facility_unavailable_relon_trampoline:
-       SET_SCRATCH0(r13)
-       EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       hv_facility_unavailable_relon_hv
+__EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x4f80)
+
+__EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x4fa0)
+
+EXC_VIRT_NONE(0x4fa0, 0x5200)
+
+EXC_VIRT_NONE(0x5200, 0x5300)
+
+EXC_VIRT(instruction_breakpoint, 0x5300, 0x5400, 0x1300)
 
-       STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
-       . = 0x5500
-       b       denorm_exception_hv
+EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x5600)
+       b       exc_real_0x1500_denorm_exception_hv
+EXC_VIRT_END(denorm_exception, 0x5500, 0x5600)
+#else
+EXC_VIRT_NONE(0x5500, 0x5600)
 #endif
-       STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
 
-       .align  7
-system_call_entry:
-       b       system_call_common
+EXC_VIRT_NONE(0x5600, 0x5700)
 
-ppc64_runlatch_on_trampoline:
-       b       __ppc64_runlatch_on
+EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700)
 
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
-       .align  7
-       .globl data_access_common
-data_access_common:
-       mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
-       EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
-       RECONCILE_IRQ_STATE(r10, r11)
-       ld      r12,_MSR(r1)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
-       li      r5,0x300
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-BEGIN_MMU_FTR_SECTION
-       b       do_hash_page            /* Try to handle as hpte fault */
-MMU_FTR_SECTION_ELSE
-       b       handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+EXC_VIRT_NONE(0x5800, 0x5900)
+
+EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
+       b       __ppc64_runlatch_on
 
-       .align  7
-       .globl  h_data_storage_common
-h_data_storage_common:
+EXC_COMMON_BEGIN(h_data_storage_common)
        mfspr   r10,SPRN_HDAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_HDSISR
@@ -953,87 +1281,14 @@ h_data_storage_common:
        EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
        bl      save_nvgprs
        RECONCILE_IRQ_STATE(r10, r11)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      unknown_exception
-       b       ret_from_except
-
-       .align  7
-       .globl instruction_access_common
-instruction_access_common:
-       EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
-       RECONCILE_IRQ_STATE(r10, r11)
-       ld      r12,_MSR(r1)
-       ld      r3,_NIP(r1)
-       andis.  r4,r12,0x5820
-       li      r5,0x400
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-BEGIN_MMU_FTR_SECTION
-       b       do_hash_page            /* Try to handle as hpte fault */
-MMU_FTR_SECTION_ELSE
-       b       handle_page_fault
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
-
-       STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
-
-       /*
-        * Machine check is different because we use a different
-        * save area: PACA_EXMC instead of PACA_EXGEN.
-        */
-       .align  7
-       .globl machine_check_common
-machine_check_common:
-
-       mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXMC+EX_DAR(r13)
-       mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXMC+EX_DSISR(r13)
-       EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
-       FINISH_NAP
-       RECONCILE_IRQ_STATE(r10, r11)
-       ld      r3,PACA_EXMC+EX_DAR(r13)
-       lwz     r4,PACA_EXMC+EX_DSISR(r13)
-       /* Enable MSR_RI when finished with PACA_EXMC */
-       li      r10,MSR_RI
-       mtmsrd  r10,1
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-       bl      save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_exception
-       b       ret_from_except
-
-       .align  7
-       .globl alignment_common
-alignment_common:
-       mfspr   r10,SPRN_DAR
-       std     r10,PACA_EXGEN+EX_DAR(r13)
-       mfspr   r10,SPRN_DSISR
-       stw     r10,PACA_EXGEN+EX_DSISR(r13)
-       EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
-       ld      r3,PACA_EXGEN+EX_DAR(r13)
-       lwz     r4,PACA_EXGEN+EX_DSISR(r13)
-       std     r3,_DAR(r1)
-       std     r4,_DSISR(r1)
-       bl      save_nvgprs
-       RECONCILE_IRQ_STATE(r10, r11)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      alignment_exception
-       b       ret_from_except
-
-       .align  7
-       .globl program_check_common
-program_check_common:
-       EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
-       bl      save_nvgprs
-       RECONCILE_IRQ_STATE(r10, r11)
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      program_check_exception
-       b       ret_from_except
+       addi    r3,r1,STACK_FRAME_OVERHEAD
+       bl      unknown_exception
+       b       ret_from_except
 
-       .align  7
-       .globl fp_unavailable_common
-fp_unavailable_common:
+
+EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
+
+EXC_COMMON_BEGIN(fp_unavailable_common)
        EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
        bne     1f                      /* if from user, just load it up */
        bl      save_nvgprs
@@ -1061,9 +1316,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
        bl      fp_unavailable_tm
        b       ret_from_except
 #endif
-       .align  7
-       .globl altivec_unavailable_common
-altivec_unavailable_common:
+
+EXC_COMMON_BEGIN(altivec_unavailable_common)
        EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
 #ifdef CONFIG_ALTIVEC
 BEGIN_FTR_SECTION
@@ -1096,9 +1350,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
        bl      altivec_unavailable_exception
        b       ret_from_except
 
-       .align  7
-       .globl vsx_unavailable_common
-vsx_unavailable_common:
+EXC_COMMON_BEGIN(vsx_unavailable_common)
        EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
 #ifdef CONFIG_VSX
 BEGIN_FTR_SECTION
@@ -1131,16 +1383,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        b       ret_from_except
 
        /* Equivalents to the above handlers for relocation-on interrupt vectors */
-       STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
-       MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
-       MASKABLE_RELON_EXCEPTION_HV_OOL(0xea0, h_virt_irq)
-
-       STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-       STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-       STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-       STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-       STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
-
+__TRAMP_REAL_VIRT_OOL_HV(emulation_assist, 0xe40)
+__TRAMP_REAL_VIRT_OOL_MASKABLE_HV(h_doorbell, 0xe80)
+__TRAMP_REAL_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0xea0)
+__TRAMP_REAL_VIRT_OOL(performance_monitor, 0xf00)
+__TRAMP_REAL_VIRT_OOL(altivec_unavailable, 0xf20)
+__TRAMP_REAL_VIRT_OOL(vsx_unavailable, 0xf40)
+__TRAMP_REAL_VIRT_OOL(facility_unavailable, 0xf60)
+__TRAMP_REAL_VIRT_OOL_HV(h_facility_unavailable, 0xf80)
+
+USE_FIXED_SECTION(virt_trampolines)
        /*
         * The __end_interrupts marker must be past the out-of-line (OOL)
         * handlers, so that they are copied to real address 0x100 when running
@@ -1151,34 +1403,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        .align  7
        .globl  __end_interrupts
 __end_interrupts:
+DEFINE_FIXED_SYMBOL(__end_interrupts)
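The comment above spells out why __end_interrupts must come after the out-of-line handlers: on a relocatable kernel, everything below this marker is copied down so that real-mode interrupts find it at the fixed low addresses. A rough C sketch of that constraint; the real copy is performed by early boot assembly, and every name and size below is a stand-in.

/* Illustrative only: the fixed-location vectors, including the OOL handlers
 * placed before __end_interrupts, must end up at the low real-mode
 * addresses even when the kernel itself runs elsewhere.
 */
#include <string.h>

#define END_INTERRUPTS 0x8000	/* stand-in for the __end_interrupts offset */

static char image[END_INTERRUPTS];	/* pretend: kernel text at its load address */
static char real_low[END_INTERRUPTS];	/* pretend: physical address 0 */

int main(void)
{
	/* Copy the vectors and their out-of-line continuations down to the
	 * fixed real-mode addresses starting at 0x100; anything placed after
	 * __end_interrupts would be left behind by this copy. */
	memcpy(real_low + 0x100, image + 0x100, END_INTERRUPTS - 0x100);
	return 0;
}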
 
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
-       .= 0x7000
-       .globl fwnmi_data_area
-fwnmi_data_area:
-
-       /* pseries and powernv need to keep the whole page from
-        * 0x7000 to 0x8000 free for use by the firmware
-        */
-       . = 0x8000
-#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
-
-       STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
-       STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
+EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
+EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
 
 #ifdef CONFIG_CBE_RAS
-       STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
-       STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
-       STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
+EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
+EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
+EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
 #endif /* CONFIG_CBE_RAS */
 
-       .globl hmi_exception_early
-hmi_exception_early:
-       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0xe62)
+
+TRAMP_REAL_BEGIN(hmi_exception_early)
+       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
        mr      r10,r1                  /* Save r1                      */
        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
@@ -1224,255 +1462,10 @@ hmi_exception_early:
 hmi_exception_after_realmode:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
-       b       hmi_exception_hv
-
-
-#define MACHINE_CHECK_HANDLER_WINDUP                   \
-       /* Clear MSR_RI before setting SRR0 and SRR1. */\
-       li      r0,MSR_RI;                              \
-       mfmsr   r9;             /* get MSR value */     \
-       andc    r9,r9,r0;                               \
-       mtmsrd  r9,1;           /* Clear MSR_RI */      \
-       /* Move original SRR0 and SRR1 into the respective regs */      \
-       ld      r9,_MSR(r1);                            \
-       mtspr   SPRN_SRR1,r9;                           \
-       ld      r3,_NIP(r1);                            \
-       mtspr   SPRN_SRR0,r3;                           \
-       ld      r9,_CTR(r1);                            \
-       mtctr   r9;                                     \
-       ld      r9,_XER(r1);                            \
-       mtxer   r9;                                     \
-       ld      r9,_LINK(r1);                           \
-       mtlr    r9;                                     \
-       REST_GPR(0, r1);                                \
-       REST_8GPRS(2, r1);                              \
-       REST_GPR(10, r1);                               \
-       ld      r11,_CCR(r1);                           \
-       mtcr    r11;                                    \
-       /* Decrement paca->in_mce. */                   \
-       lhz     r12,PACA_IN_MCE(r13);                   \
-       subi    r12,r12,1;                              \
-       sth     r12,PACA_IN_MCE(r13);                   \
-       REST_GPR(11, r1);                               \
-       REST_2GPRS(12, r1);                             \
-       /* restore original r1. */                      \
-       ld      r1,GPR1(r1)
-
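The first comment in the removed MACHINE_CHECK_HANDLER_WINDUP above carries a subtle ordering rule: MSR_RI is cleared before SRR0/SRR1 are repopulated, because from that point on they hold the only copy of the state rfid will return to, so a nested interrupt could not be recovered from. A hedged ordering sketch; every function here is a stand-in stub, not a kernel API.

/* Illustrative only: the restore ordering of the windup macro above. */
#include <stdio.h>

static void clear_msr_ri(void)   { puts("MSR_RI=0: SRR0/SRR1 about to hold live state"); }
static void load_srr0_srr1(void) { puts("SRR0 <- saved NIP, SRR1 <- saved MSR"); }
static void restore_regs(void)   { puts("CTR/XER/LR/CR and GPRs restored"); }
static void dec_in_mce(void)     { puts("paca->in_mce--"); }

static void machine_check_windup(void)
{
	clear_msr_ri();		/* must come before SRR0/SRR1 are repopulated */
	load_srr0_srr1();
	restore_regs();
	dec_in_mce();		/* leaving machine-check context */
	/* the caller then executes rfid (or re-enters an idle state) */
}

int main(void)
{
	machine_check_windup();
	return 0;
}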
-       /*
-        * Handle machine check early in real mode. We come here with
-        * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
-        */
-       .align  7
-       .globl machine_check_handle_early
-machine_check_handle_early:
-       std     r0,GPR0(r1)     /* Save r0 */
-       EXCEPTION_PROLOG_COMMON_3(0x200)
-       bl      save_nvgprs
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_early
-       std     r3,RESULT(r1)   /* Save result */
-       ld      r12,_MSR(r1)
-#ifdef CONFIG_PPC_P7_NAP
-       /*
-        * Check if thread was in power saving mode. We come here when any
-        * of the following is true:
-        * a. thread wasn't in power saving mode
-        * b. thread was in power saving mode with no state loss,
-        *    supervisor state loss or hypervisor state loss.
-        *
-        * Go back to nap/sleep/winkle mode again if (b) is true.
-        */
-       rlwinm. r11,r12,47-31,30,31     /* Was it in power saving mode? */
-       beq     4f                      /* No, it wasn;t */
-       /* Thread was in power saving mode. Go back to nap again. */
-       cmpwi   r11,2
-       blt     3f
-       /* Supervisor/Hypervisor state loss */
-       li      r0,1
-       stb     r0,PACA_NAPSTATELOST(r13)
-3:     bl      machine_check_queue_event
-       MACHINE_CHECK_HANDLER_WINDUP
-       GET_PACA(r13)
-       ld      r1,PACAR1(r13)
-       /*
-        * Check what idle state this CPU was in and go back to same mode
-        * again.
-        */
-       lbz     r3,PACA_THREAD_IDLE_STATE(r13)
-       cmpwi   r3,PNV_THREAD_NAP
-       bgt     10f
-       IDLE_STATE_ENTER_SEQ(PPC_NAP)
-       /* No return */
-10:
-       cmpwi   r3,PNV_THREAD_SLEEP
-       bgt     2f
-       IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
-       /* No return */
-
-2:
-       /*
-        * Go back to winkle. Please note that this thread was woken up in
-        * machine check from winkle and have not restored the per-subcore
-        * state. Hence before going back to winkle, set last bit of HSPGR0
-        * to 1. This will make sure that if this thread gets woken up
-        * again at reset vector 0x100 then it will get chance to restore
-        * the subcore state.
-        */
-       ori     r13,r13,1
-       SET_PACA(r13)
-       IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
-       /* No return */
-4:
-#endif
-       /*
-        * Check if we are coming from hypervisor userspace. If yes then we
-        * continue in host kernel in V mode to deliver the MC event.
-        */
-       rldicl. r11,r12,4,63            /* See if MC hit while in HV mode. */
-       beq     5f
-       andi.   r11,r12,MSR_PR          /* See if coming from user. */
-       bne     9f                      /* continue in V mode if we are. */
-
-5:
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-       /*
-        * We are coming from kernel context. Check if we are coming from
-        * guest. if yes, then we can continue. We will fall through
-        * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
-        */
-       lbz     r11,HSTATE_IN_GUEST(r13)
-       cmpwi   r11,0                   /* Check if coming from guest */
-       bne     9f                      /* continue if we are. */
-#endif
-       /*
-        * At this point we are not sure about what context we come from.
-        * Queue up the MCE event and return from the interrupt.
-        * But before that, check if this is an un-recoverable exception.
-        * If yes, then stay on emergency stack and panic.
-        */
-       andi.   r11,r12,MSR_RI
-       bne     2f
-1:     mfspr   r11,SPRN_SRR0
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10,unrecover_mce)
-       mtspr   SPRN_SRR0,r10
-       ld      r10,PACAKMSR(r13)
-       /*
-        * We are going down. But there are chances that we might get hit by
-        * another MCE during panic path and we may run into unstable state
-        * with no way out. Hence, turn ME bit off while going down, so that
-        * when another MCE is hit during panic path, system will checkstop
-        * and hypervisor will get restarted cleanly by SP.
-        */
-       li      r3,MSR_ME
-       andc    r10,r10,r3              /* Turn off MSR_ME */
-       mtspr   SPRN_SRR1,r10
-       rfid
-       b       .
-2:
-       /*
-        * Check if we have successfully handled/recovered from error, if not
-        * then stay on emergency stack and panic.
-        */
-       ld      r3,RESULT(r1)   /* Load result */
-       cmpdi   r3,0            /* see if we handled MCE successfully */
-
-       beq     1b              /* if !handled then panic */
-       /*
-        * Return from MC interrupt.
-        * Queue up the MCE event so that we can log it later, while
-        * returning from kernel or opal call.
-        */
-       bl      machine_check_queue_event
-       MACHINE_CHECK_HANDLER_WINDUP
-       rfid
-9:
-       /* Deliver the machine check to host kernel in V mode. */
-       MACHINE_CHECK_HANDLER_WINDUP
-       b       machine_check_pSeries
-
-unrecover_mce:
-       /* Invoke machine_check_exception to print MCE event and panic. */
-       addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      machine_check_exception
-       /*
-        * We will not reach here. Even if we did, there is no way out. Call
-        * unrecoverable_exception and die.
-        */
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      unrecoverable_exception
-       b       1b
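The removed machine_check_handle_early and unrecover_mce above pack a lot of policy into branches. Restated as hedged pseudo-C in the order their comments describe it; every name below is a stand-in stub, not a kernel symbol, and the idle-state handling is simplified.

/* Illustrative control-flow sketch of the removed real-mode machine check
 * early handler above.  Stand-in stubs only. */
#include <stdbool.h>
#include <stdio.h>

enum idle_state { NOT_IDLE, NAP, SLEEP, WINKLE };

/* Trivial stand-ins so the sketch compiles; the real checks read SRR1, the
 * paca and the result saved by machine_check_early(). */
static bool handle_early(void)                { return true; }
static enum idle_state saved_idle_state(void) { return NOT_IDLE; }
static bool interrupted_hv_userspace(void)    { return true; }   /* MSR_HV && MSR_PR */
static bool interrupted_guest(void)           { return false; }  /* HSTATE_IN_GUEST  */
static bool recoverable(void)                 { return true; }   /* MSR_RI was set   */
static void queue_event_and_windup(void)      { puts("queue MCE, windup, rfid"); }
static void reenter_idle(enum idle_state s)   { printf("re-enter idle state %d\n", s); }
static void deliver_in_virtual_mode(void)     { puts("windup, re-deliver via machine_check_pSeries"); }
static void panic_unrecoverable(void)         { puts("clear MSR_ME, jump to unrecover_mce"); }

static void machine_check_flow(void)
{
	bool handled = handle_early();	/* bl machine_check_early */

	/* Woken out of nap/sleep/winkle: log the event, wind up, and go
	 * back to the same power-saving state (simplified here). */
	if (saved_idle_state() != NOT_IDLE) {
		queue_event_and_windup();
		reenter_idle(saved_idle_state());
		return;
	}

	/* Hit in hypervisor userspace, or while running a guest: safe to
	 * deliver the event to the host kernel in virtual mode. */
	if (interrupted_hv_userspace() || interrupted_guest()) {
		deliver_in_virtual_mode();
		return;
	}

	/* Unknown kernel context: only return if the interrupt is
	 * recoverable and the early handler succeeded; otherwise stay on
	 * the emergency stack and panic with MSR_ME cleared, so a nested
	 * MCE checkstops rather than looping. */
	if (!recoverable() || !handled) {
		panic_unrecoverable();
		return;
	}

	queue_event_and_windup();
}

int main(void)
{
	machine_check_flow();
	return 0;
}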
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-slb_miss_realmode:
-       mflr    r10
-#ifdef CONFIG_RELOCATABLE
-       mtctr   r11
-#endif
-
-       stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
-       std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
-
-#ifdef CONFIG_PPC_STD_MMU_64
-BEGIN_MMU_FTR_SECTION
-       bl      slb_allocate_realmode
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
-#endif
-       /* All done -- return from exception. */
-
-       ld      r10,PACA_EXSLB+EX_LR(r13)
-       ld      r3,PACA_EXSLB+EX_R3(r13)
-       lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
-
-       mtlr    r10
-       andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
-BEGIN_MMU_FTR_SECTION
-       beq-    2f
-FTR_SECTION_ELSE
-       b       2f
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
-
-.machine       push
-.machine       "power4"
-       mtcrf   0x80,r9
-       mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
-.machine       pop
-
-       RESTORE_PPR_PACA(PACA_EXSLB, r9)
-       ld      r9,PACA_EXSLB+EX_R9(r13)
-       ld      r10,PACA_EXSLB+EX_R10(r13)
-       ld      r11,PACA_EXSLB+EX_R11(r13)
-       ld      r12,PACA_EXSLB+EX_R12(r13)
-       ld      r13,PACA_EXSLB+EX_R13(r13)
-       rfid
-       b       .       /* prevent speculative execution */
-
-2:     mfspr   r11,SPRN_SRR0
-       ld      r10,PACAKBASE(r13)
-       LOAD_HANDLER(r10,unrecov_slb)
-       mtspr   SPRN_SRR0,r10
-       ld      r10,PACAKMSR(r13)
-       mtspr   SPRN_SRR1,r10
-       rfid
-       b       .
-
-unrecov_slb:
-       EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
-       RECONCILE_IRQ_STATE(r10, r11)
-       bl      save_nvgprs
-1:     addi    r3,r1,STACK_FRAME_OVERHEAD
-       bl      unrecoverable_exception
-       b       1b
-
+       b       tramp_real_hmi_exception
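The removed slb_miss_realmode and unrecov_slb above are the last open-coded low-level path in this hunk: allocate the missing SLB entry when running the hash MMU, then either rfid straight back to retry the access, or take the unrecoverable path when the radix MMU is active or MSR_RI was clear. A hedged C restatement with stand-in names only.

/* Illustrative sketch of the removed slb_miss_realmode/unrecov_slb flow
 * above.  The real path runs with the MMU off and returns with rfid. */
#include <stdbool.h>
#include <stdio.h>

static bool mmu_is_radix;	/* stands in for MMU_FTR_TYPE_RADIX */

static void slb_allocate(unsigned long long ea)
{
	printf("install SLB entry covering 0x%llx\n", ea);
}

static void return_and_retry(void)
{
	puts("restore CR/PPR/r9-r13, rfid back to the faulting instruction");
}

static void unrecoverable_slb(void)
{
	puts("point SRR0 at unrecov_slb, rfid, panic");
}

static void slb_miss_realmode(unsigned long long ea, bool msr_ri_was_set)
{
	/* Hash MMU only: build and install the missing SLB entry. */
	if (!mmu_is_radix)
		slb_allocate(ea);

	/* Radix should never take this path, and an interrupt that arrived
	 * with MSR_RI clear cannot be returned from safely: both cases fall
	 * into the unrecoverable handler. */
	if (mmu_is_radix || !msr_ri_was_set) {
		unrecoverable_slb();
		return;
	}

	return_and_retry();
}

int main(void)
{
	slb_miss_realmode(0xc000000012340000ULL, true);
	return 0;
}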
 
 #ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
+TRAMP_REAL_BEGIN(power4_fixup_nap)
        andc    r9,r9,r10
        std     r9,TI_LOCAL_FLAGS(r11)
        ld      r10,_LINK(r1)           /* make idle task do the */
@@ -1480,6 +1473,13 @@ power4_fixup_nap:
        blr
 #endif
 
+CLOSE_FIXED_SECTION(real_vectors);
+CLOSE_FIXED_SECTION(real_trampolines);
+CLOSE_FIXED_SECTION(virt_vectors);
+CLOSE_FIXED_SECTION(virt_trampolines);
+
+USE_TEXT_SECTION()
+
 /*
  * Hash table stuff
  */