powerpc: Create mtmsr_isync()
author    Anton Blanchard <anton@samba.org>
Thu, 29 Oct 2015 00:43:59 +0000 (11:43 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 1 Dec 2015 02:52:25 +0000 (13:52 +1100)
mtmsr_isync() will do an mtmsrd (plain mtmsr on 32-bit) followed by an isync
on older processors. On newer processors we avoid the isync via a feature fixup.
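
For readers unfamiliar with the feature-fixup machinery, a rough C sketch of
what the new helper amounts to (illustration only: the real ASM_FTR_IFCLR
alternative is patched once at boot rather than branched on per call, and
cpu_has_feature()/isync() are the existing kernel helpers):

	static inline void mtmsr_isync_sketch(unsigned long val)
	{
		mtmsr(val);	/* mtmsrd on 64-bit builds, mtmsr on 32-bit */

		/* Older CPUs get the isync after the MSR write; on ISA 2.06
		 * and later the fixup replaces it with a nop. */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			isync();
	}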

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/reg.h
arch/powerpc/kernel/process.c

diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a908ada..987dac0 100644
 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
                                     : : "r" (v) : "memory")
 #define mtmsr(v)       __mtmsrd((v), 0)
+#define __MTMSR                "mtmsrd"
 #else
 #define mtmsr(v)       asm volatile("mtmsr %0" : \
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
+#define __MTMSR                "mtmsr"
 #endif
 
+static inline void mtmsr_isync(unsigned long val)
+{
+       asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+                       "r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
+
 #define mfspr(rn)      ({unsigned long rval; \
                        asm volatile("mfspr %0," __stringify(rn) \
                                : "=r" (rval)); rval;})
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ef64219..5bf8ec2 100644
@@ -130,7 +130,10 @@ void enable_kernel_fp(void)
                check_if_tm_restore_required(current);
                giveup_fpu(current);
        } else {
-               giveup_fpu(NULL);       /* just enables FP for kernel */
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_FP))
+                       mtmsr_isync(oldmsr | MSR_FP);
        }
 }
 EXPORT_SYMBOL(enable_kernel_fp);
@@ -144,7 +147,10 @@ void enable_kernel_altivec(void)
                check_if_tm_restore_required(current);
                giveup_altivec(current);
        } else {
-               giveup_altivec_notask();
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_VEC))
+                       mtmsr_isync(oldmsr | MSR_VEC);
        }
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
@@ -173,10 +179,14 @@ void enable_kernel_vsx(void)
 {
        WARN_ON(preemptible());
 
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
                giveup_vsx(current);
-       else
-               giveup_vsx(NULL);       /* just enable vsx for kernel - force */
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_VSX))
+                       mtmsr_isync(oldmsr | MSR_VSX);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_vsx);
 
@@ -209,10 +219,14 @@ void enable_kernel_spe(void)
 {
        WARN_ON(preemptible());
 
-       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
                giveup_spe(current);
-       else
-               giveup_spe(NULL);       /* just enable SPE for kernel - force */
+       } else {
+               u64 oldmsr = mfmsr();
+
+               if (!(oldmsr & MSR_SPE))
+                       mtmsr_isync(oldmsr | MSR_SPE);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_spe);
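
All four enable_kernel_*() helpers keep the same calling contract. A minimal
usage sketch for the FP case (do_fp_work() is a hypothetical stand-in for
kernel code that uses FP registers; the WARN_ON(preemptible()) above means
callers hold preemption off across the region):

	preempt_disable();
	enable_kernel_fp();	/* sets MSR_FP, saving user FP state first if it is live */
	do_fp_work();
	preempt_enable();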