Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ab8564b..95243d2 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
        /*
         * invalidate_range_start() and invalidate_range_end() must be
         * paired and are called only when the mmap_sem and/or the
-        * locks protecting the reverse maps are held. The subsystem
-        * must guarantee that no additional references are taken to
-        * the pages in the range established between the call to
-        * invalidate_range_start() and the matching call to
-        * invalidate_range_end().
+        * locks protecting the reverse maps are held. If the subsystem
+        * can't guarantee that no additional references are taken to
+        * the pages in the range, it has to implement the
+        * invalidate_range() notifier to remove any references taken
+        * after invalidate_range_start().
         *
         * Invalidation of multiple concurrent ranges may be
         * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
        void (*invalidate_range_end)(struct mmu_notifier *mn,
                                     struct mm_struct *mm,
                                     unsigned long start, unsigned long end);
+
+       /*
+        * invalidate_range() is either called between
+        * invalidate_range_start() and invalidate_range_end() when the
+        * VM has to free pages that were unmapped, but before the
+        * pages are actually freed, or outside of _start()/_end() when
+        * a (remote) TLB flush is necessary.
+        *
+        * If invalidate_range() is used to manage a non-CPU TLB with
+        * shared page-tables, it is not necessary to implement the
+        * invalidate_range_start()/end() notifiers, as
+        * invalidate_range() already catches the points in time when
+        * an external TLB range needs to be flushed.
+        *
+        * The invalidate_range() function is called under the ptl
+        * spin-lock and is not allowed to sleep.
+        *
+        * Note that this function might be called with just a sub-range
+        * of what was passed to invalidate_range_start()/end(), if
+        * called between those functions.
+        */
+       void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
+                                unsigned long start, unsigned long end);
 };
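
As a usage sketch (not part of this patch): a driver whose device walks the
CPU page-tables only needs the new invalidate_range() callback, since that
hook already fires whenever an external TLB range must be flushed. The
struct my_dev and my_dev_flush_tlb_range() below are hypothetical:

static void my_dev_invalidate_range(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	/* Hypothetical device: recover it from the embedded notifier. */
	struct my_dev *dev = container_of(mn, struct my_dev, mn);

	/* Called under the ptl spin-lock, so this must not sleep. */
	my_dev_flush_tlb_range(dev, start, end);
}

static const struct mmu_notifier_ops my_dev_mmu_ops = {
	.invalidate_range	= my_dev_invalidate_range,
};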
 
 /*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
 extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                  unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                __mmu_notifier_invalidate_range_end(mm, start, end);
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+       if (mm_has_notifiers(mm))
+               __mmu_notifier_invalidate_range(mm, start, end);
+}
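
For illustration, registration follows the usual notifier pattern; once a
notifier is registered, the mm_has_notifiers() check above forwards every
core-VM invalidation to the driver. The my_dev type is the same
hypothetical one as in the earlier sketch:

/* Hypothetical attach path; mmu_notifier_register() takes mmap_sem
 * itself, so it must not be called with mmap_sem already held.
 */
static int my_dev_attach_mm(struct my_dev *dev, struct mm_struct *mm)
{
	dev->mn.ops = &my_dev_mmu_ops;
	return mmu_notifier_register(&dev->mn, mm);
}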
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
        mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
        __young;                                                        \
 })
 
+#define ptep_clear_flush_notify(__vma, __address, __ptep)              \
+({                                                                     \
+       unsigned long ___addr = (__address) & PAGE_MASK;                \
+       struct mm_struct *___mm = (__vma)->vm_mm;                       \
+       pte_t ___pte;                                                   \
+                                                                       \
+       ___pte = ptep_clear_flush(__vma, __address, __ptep);            \
+       mmu_notifier_invalidate_range(___mm, ___addr,                   \
+                                       ___addr + PAGE_SIZE);           \
+                                                                       \
+       ___pte;                                                         \
+})
+
+#define pmdp_clear_flush_notify(__vma, __haddr, __pmd)                 \
+({                                                                     \
+       unsigned long ___haddr = (__haddr) & HPAGE_PMD_MASK;            \
+       struct mm_struct *___mm = (__vma)->vm_mm;                       \
+       pmd_t ___pmd;                                                   \
+                                                                       \
+       ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd);               \
+       mmu_notifier_invalidate_range(___mm, ___haddr,                  \
+                                     ___haddr + HPAGE_PMD_SIZE);       \
+                                                                       \
+       ___pmd;                                                         \
+})
+
+#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd)                \
+({                                                                     \
+       unsigned long ___haddr = (__haddr) & HPAGE_PMD_MASK;            \
+       pmd_t ___pmd;                                                   \
+                                                                       \
+       ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd);              \
+       mmu_notifier_invalidate_range(__mm, ___haddr,                   \
+                                     ___haddr + HPAGE_PMD_SIZE);       \
+                                                                       \
+       ___pmd;                                                         \
+})
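+
To show the call-site pattern these wrappers are meant for, here is a
hypothetical unmap path modelled on callers such as try_to_unmap_one();
the CPU TLB flush and the notifier invalidation happen in one step, before
the page can be freed or reused:

/* Hypothetical caller, running with the page-table lock held:
 * ptep_clear_flush_notify() clears the PTE, flushes the CPU TLB,
 * and then invalidates any external TLB for the same page.
 */
static pte_t unmap_one_page(struct vm_area_struct *vma,
			    unsigned long address, pte_t *ptep)
{
	return ptep_clear_flush_notify(vma, address, ptep);
}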
+
 /*
  * set_pte_at_notify() sets the pte _after_ running the notifier.
  * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
 {
 }
 
+static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+                                 unsigned long start, unsigned long end)
+{
+}
+
 static inline void mmu_notifier_mm_init(struct mm_struct *mm)
 {
 }
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
 
 #define ptep_clear_flush_young_notify ptep_clear_flush_young
 #define pmdp_clear_flush_young_notify pmdp_clear_flush_young
+#define ptep_clear_flush_notify ptep_clear_flush
+#define pmdp_clear_flush_notify pmdp_clear_flush
+#define pmdp_get_and_clear_notify pmdp_get_and_clear
 #define set_pte_at_notify set_pte_at
 
 #endif /* CONFIG_MMU_NOTIFIER */