x86/mm, sched/core: Uninline switch_mm()
author    Andy Lutomirski <luto@kernel.org>
          Tue, 26 Apr 2016 16:39:08 +0000 (09:39 -0700)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 28 Apr 2016 09:44:19 +0000 (11:44 +0200)
switch_mm() is fairly large and has quite a few callers.  This may also
help untangle some headers down the road.
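
The change itself is mechanical: the header keeps only the prototype,

    extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                          struct task_struct *tsk);

and the body moves verbatim to arch/x86/mm/tlb.c, so each call site
pays a call instead of carrying an inlined copy, and the header no
longer needs to see the tracing, CR4 and LDT helpers the body uses.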

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/54f3367803e7f80b2be62c8a21879aa74b1a5f57.1461688545.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/mmu_context.h
arch/x86/mm/tlb.c

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8428002..bb911dd 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -115,103 +115,9 @@ static inline void destroy_context(struct mm_struct *mm)
        destroy_context_ldt(mm);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       unsigned cpu = smp_processor_id();
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                     struct task_struct *tsk);
 
-       if (likely(prev != next)) {
-#ifdef CONFIG_SMP
-               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               this_cpu_write(cpu_tlbstate.active_mm, next);
-#endif
-               cpumask_set_cpu(cpu, mm_cpumask(next));
-
-               /*
-                * Re-load page tables.
-                *
-                * This logic has an ordering constraint:
-                *
-                *  CPU 0: Write to a PTE for 'next'
-                *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
-                *  CPU 1: set bit 1 in next's mm_cpumask
-                *  CPU 1: load from the PTE that CPU 0 writes (implicit)
-                *
-                * We need to prevent an outcome in which CPU 1 observes
-                * the stale PTE value and CPU 0 observes bit 1 clear in
-                * mm_cpumask.  (If that occurs, then the IPI will never
-                * be sent, and CPU 1's TLB will contain a stale entry.)
-                *
-                * The bad outcome can occur if either CPU's load is
-                * reordered before that CPU's store, so both CPUs must
-                * execute full barriers to prevent this from happening.
-                *
-                * Thus, switch_mm needs a full barrier between the
-                * store to mm_cpumask and any operation that could load
-                * from next->pgd.  TLB fills are special and can happen
-                * due to instruction fetches or for no reason at all,
-                * and neither LOCK nor MFENCE orders them.
-                * Fortunately, load_cr3() is serializing and gives the
-                * ordering guarantee we need.
-                *
-                */
-               load_cr3(next->pgd);
-
-               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-
-               /* Stop flush ipis for the previous mm */
-               cpumask_clear_cpu(cpu, mm_cpumask(prev));
-
-               /* Load per-mm CR4 state */
-               load_mm_cr4(next);
-
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-               /*
-                * Load the LDT, if the LDT is different.
-                *
-                * It's possible that prev->context.ldt doesn't match
-                * the LDT register.  This can happen if leave_mm(prev)
-                * was called and then modify_ldt changed
-                * prev->context.ldt but suppressed an IPI to this CPU.
-                * In this case, prev->context.ldt != NULL, because we
-                * never set context.ldt to NULL while the mm still
-                * exists.  That means that next->context.ldt !=
-                * prev->context.ldt, because mms never share an LDT.
-                */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_mm_ldt(next);
-#endif
-       }
-#ifdef CONFIG_SMP
-         else {
-               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
-               BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
-
-               if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
-                       /*
-                        * On established mms, the mm_cpumask is only changed
-                        * from irq context, from ptep_clear_flush() while in
-                        * lazy tlb mode, and here. Irqs are blocked during
-                        * schedule, protecting us from simultaneous changes.
-                        */
-                       cpumask_set_cpu(cpu, mm_cpumask(next));
-
-                       /*
-                        * We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload CR3
-                        * to make sure to use no freed page tables.
-                        *
-                        * As above, load_cr3() is serializing and orders TLB
-                        * fills with respect to the mm_cpumask write.
-                        */
-                       load_cr3(next->pgd);
-                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
-                       load_mm_cr4(next);
-                       load_mm_ldt(next);
-               }
-       }
-#endif
-}
 
 #define activate_mm(prev, next)                        \
 do {                                           \
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index a4530e2..ce7a0c9 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -59,6 +59,108 @@ void leave_mm(int cpu)
 }
 EXPORT_SYMBOL_GPL(leave_mm);
 
+#endif /* CONFIG_SMP */
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+              struct task_struct *tsk)
+{
+       unsigned cpu = smp_processor_id();
+
+       if (likely(prev != next)) {
+#ifdef CONFIG_SMP
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               this_cpu_write(cpu_tlbstate.active_mm, next);
+#endif
+               cpumask_set_cpu(cpu, mm_cpumask(next));
+
+               /*
+                * Re-load page tables.
+                *
+                * This logic has an ordering constraint:
+                *
+                *  CPU 0: Write to a PTE for 'next'
+                *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
+                *  CPU 1: set bit 1 in next's mm_cpumask
+                *  CPU 1: load from the PTE that CPU 0 writes (implicit)
+                *
+                * We need to prevent an outcome in which CPU 1 observes
+                * the stale PTE value and CPU 0 observes bit 1 clear in
+                * mm_cpumask.  (If that occurs, then the IPI will never
+                * be sent, and CPU 1's TLB will contain a stale entry.)
+                *
+                * The bad outcome can occur if either CPU's load is
+                * reordered before that CPU's store, so both CPUs must
+                * execute full barriers to prevent this from happening.
+                *
+                * Thus, switch_mm needs a full barrier between the
+                * store to mm_cpumask and any operation that could load
+                * from next->pgd.  TLB fills are special and can happen
+                * due to instruction fetches or for no reason at all,
+                * and neither LOCK nor MFENCE orders them.
+                * Fortunately, load_cr3() is serializing and gives the
+                * ordering guarantee we need.
+                *
+                */
+               load_cr3(next->pgd);
+
+               trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+
+               /* Stop flush ipis for the previous mm */
+               cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
+               /* Load per-mm CR4 state */
+               load_mm_cr4(next);
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+               /*
+                * Load the LDT, if the LDT is different.
+                *
+                * It's possible that prev->context.ldt doesn't match
+                * the LDT register.  This can happen if leave_mm(prev)
+                * was called and then modify_ldt changed
+                * prev->context.ldt but suppressed an IPI to this CPU.
+                * In this case, prev->context.ldt != NULL, because we
+                * never set context.ldt to NULL while the mm still
+                * exists.  That means that next->context.ldt !=
+                * prev->context.ldt, because mms never share an LDT.
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_mm_ldt(next);
+#endif
+       }
+#ifdef CONFIG_SMP
+         else {
+               this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
+
+               if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+                       /*
+                        * On established mms, the mm_cpumask is only changed
+                        * from irq context, from ptep_clear_flush() while in
+                        * lazy tlb mode, and here. Irqs are blocked during
+                        * schedule, protecting us from simultaneous changes.
+                        */
+                       cpumask_set_cpu(cpu, mm_cpumask(next));
+
+                       /*
+                        * We were in lazy tlb mode and leave_mm disabled
+                        * tlb flush IPI delivery. We must reload CR3
+                        * to make sure to use no freed page tables.
+                        *
+                        * As above, load_cr3() is serializing and orders TLB
+                        * fills with respect to the mm_cpumask write.
+                        */
+                       load_cr3(next->pgd);
+                       trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+                       load_mm_cr4(next);
+                       load_mm_ldt(next);
+               }
+       }
+#endif
+}
+
+#ifdef CONFIG_SMP
+
 /*
  * The flush IPI assumes that a thread switch happens in this order:
  * [cpu0: the cpu that switches]
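
The ordering constraint documented in the comment above is the classic
"store buffering" pattern, and the bad outcome can be reproduced from
user space.  Below is a minimal C11 sketch (illustrative only, not
kernel code): "pte" and "mask_bit" are hypothetical stand-ins for a
page-table entry and one mm_cpumask bit, and memory_order_relaxed
stands in for the missing full barrier.  Build with -pthread.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pte;		/* 0 = old PTE, 1 = new PTE */
	static atomic_int mask_bit;	/* 0 = clear, 1 = set */
	static int r_mask, r_pte;

	/* "CPU 0": write the PTE, then check whether to send the IPI. */
	static void *flusher(void *arg)
	{
		atomic_store_explicit(&pte, 1, memory_order_relaxed);
		r_mask = atomic_load_explicit(&mask_bit, memory_order_relaxed);
		return NULL;
	}

	/* "CPU 1": set the mm_cpumask bit, then do the implicit TLB fill. */
	static void *switcher(void *arg)
	{
		atomic_store_explicit(&mask_bit, 1, memory_order_relaxed);
		r_pte = atomic_load_explicit(&pte, memory_order_relaxed);
		return NULL;
	}

	int main(void)
	{
		for (long i = 0; i < 1000000; i++) {
			pthread_t t0, t1;

			atomic_store(&pte, 0);
			atomic_store(&mask_bit, 0);
			pthread_create(&t0, NULL, flusher, NULL);
			pthread_create(&t1, NULL, switcher, NULL);
			pthread_join(t0, NULL);
			pthread_join(t1, NULL);
			if (r_mask == 0 && r_pte == 0) {
				/* No IPI would be sent and a stale PTE was cached. */
				printf("bad outcome on iteration %ld\n", i);
				return 0;
			}
		}
		puts("bad outcome not observed; try more iterations");
		return 0;
	}

Each thread's load may be satisfied before its own earlier store drains
from the store buffer, so the r_mask == 0 && r_pte == 0 result really
is allowed on x86.  For ordinary loads a LOCKed operation or MFENCE on
both sides would forbid it; TLB fills are ordered by neither, which is
why switch_mm() relies on the serializing load_cr3() instead.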