powerpc: Create mtmsrd_isync()
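
The enable_kernel_fp/altivec/vsx/spe() paths below stop relying on giveup_*(NULL) and the UP-only last_task_used_* pointers: when the current thread does not own the state, they now read the MSR and turn on the relevant facility bit through a new mtmsr_isync() helper, so a context-synchronising isync is guaranteed before the facility is used. The helper itself is not defined in this file; what follows is a minimal sketch of its intended behaviour, under the assumption that the real definition lives in arch/powerpc/include/asm/reg.h and may use feature-fixup macros to drop the isync on CPUs that do not need it.

    /*
     * Sketch only (assumption), not the in-tree definition: write the MSR
     * and follow it with an isync so the newly enabled facility
     * (FP/VEC/VSX/SPE) is usable immediately afterwards.
     */
    static inline void mtmsr_isync(unsigned long val)
    {
    #ifdef CONFIG_PPC64
    	asm volatile("mtmsrd %0; isync" : : "r" (val) : "memory");
    #else
    	asm volatile("mtmsr %0; isync" : : "r" (val) : "memory");
    #endif
    }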
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75b6676..5bf8ec2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
 
 extern unsigned long _get_SP(void);
 
-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
 {
        /*
         * If we are saving the current thread's registers, and the
@@ -89,31 +82,9 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
                tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
                set_thread_flag(TIF_RESTORE_TM);
        }
-
-       giveup_fpu(tsk);
 }
-
-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
-{
-       /*
-        * If we are saving the current thread's registers, and the
-        * thread is in a transactional state, set the TIF_RESTORE_TM
-        * bit so that we know to restore the registers before
-        * returning to userspace.
-        */
-       if (tsk == current && tsk->thread.regs &&
-           MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-           !test_thread_flag(TIF_RESTORE_TM)) {
-               tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
-               set_thread_flag(TIF_RESTORE_TM);
-       }
-
-       giveup_altivec(tsk);
-}
-
 #else
-#define giveup_fpu_maybe_transactional(tsk)    giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk)        giveup_altivec(tsk)
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 #ifdef CONFIG_PPC_FPU
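
The per-facility giveup_*_maybe_transactional() wrappers are gone; each flush/enable site now calls check_if_tm_restore_required() once and then the plain giveup_*() routine, which is exactly the shape the hunks below take for FP, Altivec and VSX. A condensed sketch of the new calling convention:

    if (tsk->thread.regs->msr & MSR_FP) {
    	check_if_tm_restore_required(tsk);	/* note that TM state must be restored on return to userspace */
    	giveup_fpu(tsk);			/* flush the live FP state into tsk->thread */
    }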
@@ -134,17 +105,16 @@ void flush_fp_to_thread(struct task_struct *tsk)
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
-                        * the FP register state on context switch on SMP,
+                        * the FP register state on context switch,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
-#endif
-                       giveup_fpu_maybe_transactional(tsk);
+                       check_if_tm_restore_required(tsk);
+                       giveup_fpu(tsk);
                }
                preempt_enable();
        }
@@ -156,14 +126,15 @@ void enable_kernel_fp(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu_maybe_transactional(current);
-       else
-               giveup_fpu(NULL);       /* just enables FP for kernel */
-#else
-       giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+       if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+               check_if_tm_restore_required(current);
+               giveup_fpu(current);
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_FP))
+                       mtmsr_isync(oldmsr | MSR_FP);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_fp);
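
With the last_task_used_math path removed, enable_kernel_fp() either flushes current's FP state or simply sets MSR_FP for the kernel via mtmsr_isync(); the WARN_ON(preemptible()) still requires callers to disable preemption around their FP section. A hedged usage sketch (fp_accelerated_op() is a made-up caller for illustration; the same pattern applies to the Altivec, VSX and SPE variants below):

    static void fp_accelerated_op(void)
    {
    	preempt_disable();		/* enable_kernel_fp() warns if preemptible */
    	enable_kernel_fp();		/* flush current's FP state or just set MSR_FP */
    	/* ... FP-using kernel code ... */
    	preempt_enable();
    }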
 
@@ -172,14 +143,15 @@ void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-               giveup_altivec_maybe_transactional(current);
-       else
-               giveup_altivec_notask();
-#else
-       giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+               check_if_tm_restore_required(current);
+               giveup_altivec(current);
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_VEC))
+                       mtmsr_isync(oldmsr | MSR_VEC);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
@@ -192,10 +164,9 @@ void flush_altivec_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
-                       giveup_altivec_maybe_transactional(tsk);
+                       check_if_tm_restore_required(tsk);
+                       giveup_altivec(tsk);
                }
                preempt_enable();
        }
@@ -208,21 +179,22 @@ void enable_kernel_vsx(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
                giveup_vsx(current);
-       else
-               giveup_vsx(NULL);       /* just enable vsx for kernel - force */
-#else
-       giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_VSX))
+                       mtmsr_isync(oldmsr | MSR_VSX);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
 void giveup_vsx(struct task_struct *tsk)
 {
-       giveup_fpu_maybe_transactional(tsk);
-       giveup_altivec_maybe_transactional(tsk);
+       check_if_tm_restore_required(tsk);
+       giveup_fpu(tsk);
+       giveup_altivec(tsk);
        __giveup_vsx(tsk);
 }
 EXPORT_SYMBOL(giveup_vsx);
@@ -232,9 +204,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
@@ -249,14 +219,14 @@ void enable_kernel_spe(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
                giveup_spe(current);
-       else
-               giveup_spe(NULL);       /* just enable SPE for kernel - force */
-#else
-       giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_SPE))
+                       mtmsr_isync(oldmsr | MSR_SPE);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
@@ -265,9 +235,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
@@ -276,32 +244,6 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
-{
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-#ifdef CONFIG_ALTIVEC
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (last_task_used_vsx == current)
-               last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-#endif
-       preempt_enable();
-}
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
@@ -742,6 +684,73 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+static inline void save_sprs(struct thread_struct *t)
+{
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               t->vrsave = mfspr(SPRN_VRSAVE);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR))
+               t->dscr = mfspr(SPRN_DSCR);
+
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               t->bescr = mfspr(SPRN_BESCR);
+               t->ebbhr = mfspr(SPRN_EBBHR);
+               t->ebbrr = mfspr(SPRN_EBBRR);
+
+               t->fscr = mfspr(SPRN_FSCR);
+
+               /*
+                * Note that the TAR is not available for use in the kernel.
+                * (To provide this, the TAR should be backed up/restored on
+                * exception entry/exit instead, and be in pt_regs.  FIXME,
+                * this should be in pt_regs anyway (for debug).)
+                */
+               t->tar = mfspr(SPRN_TAR);
+       }
+#endif
+}
+
+static inline void restore_sprs(struct thread_struct *old_thread,
+                               struct thread_struct *new_thread)
+{
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+           old_thread->vrsave != new_thread->vrsave)
+               mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR)) {
+               u64 dscr = get_paca()->dscr_default;
+               u64 fscr = old_thread->fscr & ~FSCR_DSCR;
+
+               if (new_thread->dscr_inherit) {
+                       dscr = new_thread->dscr;
+                       fscr |= FSCR_DSCR;
+               }
+
+               if (old_thread->dscr != dscr)
+                       mtspr(SPRN_DSCR, dscr);
+
+               if (old_thread->fscr != fscr)
+                       mtspr(SPRN_FSCR, fscr);
+       }
+
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               if (old_thread->bescr != new_thread->bescr)
+                       mtspr(SPRN_BESCR, new_thread->bescr);
+               if (old_thread->ebbhr != new_thread->ebbhr)
+                       mtspr(SPRN_EBBHR, new_thread->ebbhr);
+               if (old_thread->ebbrr != new_thread->ebbrr)
+                       mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+               if (old_thread->tar != new_thread->tar)
+                       mtspr(SPRN_TAR, new_thread->tar);
+       }
+#endif
+}
+
 struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
 {
@@ -751,44 +760,22 @@ struct task_struct *__switch_to(struct task_struct *prev,
        struct ppc64_tlb_batch *batch;
 #endif
 
+       new_thread = &new->thread;
+       old_thread = &current->thread;
+
        WARN_ON(!irqs_disabled());
 
-       /* Back up the TAR and DSCR across context switches.
-        * Note that the TAR is not available for use in the kernel.  (To
-        * provide this, the TAR should be backed up/restored on exception
-        * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-        * pt_regs anyway (for debug).)
-        * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-        * these will change them.
+       /*
+        * We need to save SPRs before treclaim/trecheckpoint as these will
+        * change a number of them.
         */
-       save_early_sprs(&prev->thread);
+       save_sprs(&prev->thread);
 
        __switch_to_tm(prev);
 
-#ifdef CONFIG_SMP
-       /* avoid complexity of lazy save/restore of fpu
-        * by just saving it every time we switch out if
-        * this task used the fpu during the last quantum.
-        *
-        * If it tries to use the fpu again, it'll trap and
-        * reload its fp regs.  So we don't have to do a restore
-        * every switch, just a save.
-        *  -- Cort
-        */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
 #ifdef CONFIG_ALTIVEC
-       /*
-        * If the previous thread used altivec in the last quantum
-        * (thus changing altivec regs) then save them.
-        * We used to check the VRSAVE register but not all apps
-        * set it, so we don't rely on it now (and in fact we need
-        * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-        *
-        * On SMP we always save/restore altivec regs just to avoid the
-        * complexity of changing processors.
-        *  -- Cort
-        */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
 #endif /* CONFIG_ALTIVEC */
@@ -798,39 +785,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
                __giveup_vsx(prev);
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
-       /*
-        * If the previous thread used spe in the last quantum
-        * (thus changing spe regs) then save them.
-        *
-        * On SMP we always save/restore spe regs just to avoid the
-        * complexity of changing processors.
-        */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
 #endif /* CONFIG_SPE */
 
-#else  /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_altivec -- Cort
-        */
-       if (new->thread.regs && last_task_used_altivec == new)
-               new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (new->thread.regs && last_task_used_vsx == new)
-               new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_spe
-        */
-       if (new->thread.regs && last_task_used_spe == new)
-               new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
-
-#endif /* CONFIG_SMP */
-
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread.debug);
 #else
@@ -844,10 +802,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 #endif
 
-
-       new_thread = &new->thread;
-       old_thread = &current->thread;
-
 #ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
@@ -883,6 +837,10 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
        last = _switch(old_thread, new_thread);
 
+       /* Need to recalculate these after calling _switch() */
+       old_thread = &last->thread;
+       new_thread = &current->thread;
+
 #ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
@@ -891,6 +849,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
        }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+       restore_sprs(old_thread, new_thread);
+
        return last;
 }
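
Taken together, the __switch_to() hunks above leave the switch path looking roughly like this (a simplified sketch that elides the TM reclaim, Book3E debug registers, hardware breakpoints and the PPC64 accounting/lazy-MMU details):

    new_thread = &new->thread;
    old_thread = &current->thread;

    save_sprs(&prev->thread);		/* before treclaim/trecheckpoint clobber them */
    __switch_to_tm(prev);

    if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
    	giveup_fpu(prev);		/* likewise for VEC, VSX and SPE */

    last = _switch(old_thread, new_thread);

    /* now running on the new task's stack: recompute before comparing SPRs */
    old_thread = &last->thread;
    new_thread = &current->thread;

    restore_sprs(old_thread, new_thread);	/* mtspr only where the value changed */

    return last;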
 
@@ -1043,13 +1003,10 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
-       discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
 {
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
 #else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1287,7 +1244,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                regs->msr = MSR_USER32;
        }
 #endif
-       discard_lazy_cpu_state();
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif