mm: send one IPI per CPU to TLB flush all entries after unmapping pages
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 119823d..3c602c2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1344,6 +1344,18 @@ enum perf_event_task_context {
        perf_nr_task_contexts,
 };
 
+/* Track pages that require TLB flushes */
+struct tlbflush_unmap_batch {
+       /*
+        * Each bit set is a CPU that potentially has a TLB entry for one of
+        * the PFNs being flushed. See set_tlb_ubc_flush_pending().
+        */
+       struct cpumask cpumask;
+
+       /* True if any bit in cpumask is set */
+       bool flush_required;
+};
+
 struct task_struct {
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
@@ -1700,6 +1712,10 @@ struct task_struct {
        unsigned long numa_pages_migrated;
 #endif /* CONFIG_NUMA_BALANCING */
 
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+       struct tlbflush_unmap_batch tlb_ubc;
+#endif
+
        struct rcu_head rcu;
 
        /*
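
For context, the sketch below shows how a batch structure like this is typically filled on the unmap side and then drained with a single round of IPIs. set_tlb_ubc_flush_pending() is the helper named in the struct comment above; the flush-side function body and the flush_local_tlb_all() callback are illustrative assumptions, not the exact code from this patch.

/*
 * Illustrative sketch, not the patch itself. flush_local_tlb_all() is a
 * hypothetical stand-in for the arch-specific "flush all entries" callback.
 */
static void flush_local_tlb_all(void *info)
{
	local_flush_tlb();	/* drop all non-global entries on this CPU */
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	/* Accumulate every CPU that may hold a stale entry for mm. */
	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
	tlb_ubc->flush_required = true;
}

static void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	/* One IPI per CPU in the mask, flushing all entries at once. */
	smp_call_function_many(&tlb_ubc->cpumask, flush_local_tlb_all,
			       NULL, true);

	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
}

Note that smp_call_function_many() targets only remote CPUs, so real flush code must also handle the calling CPU if its bit is set in the mask. The design trade-off is the one named in the subject line: rather than sending an IPI per unmapped page, each CPU in the accumulated mask receives one IPI that flushes all of its entries, covering the whole batch.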