diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 57b3e9b..da91df5 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -70,7 +70,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                 */
                if (trylock_page(page)) {
 #ifdef CONFIG_BALLOON_COMPACTION
-                       if (!PagePrivate(page)) {
+                       if (PageIsolated(page)) {
                                /* raced with isolation */
                                unlock_page(page);
                                continue;
@@ -106,110 +106,50 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 
 #ifdef CONFIG_BALLOON_COMPACTION
 
-static inline void __isolate_balloon_page(struct page *page)
+bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
 
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-       ClearPagePrivate(page);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+       return true;
 }
 
-static inline void __putback_balloon_page(struct page *page)
+void balloon_page_putback(struct page *page)
 {
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
 
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-       SetPagePrivate(page);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 }
 
-/* __isolate_lru_page() counterpart for a ballooned page */
-bool balloon_page_isolate(struct page *page)
-{
-       /*
-        * Avoid burning cycles with pages that are yet under __free_pages(),
-        * or just got freed under us.
-        *
-        * In case we 'win' a race for a balloon page being freed under us and
-        * raise its refcount preventing __free_pages() from doing its job
-        * the put_page() at the end of this block will take care of
-        * release this page, thus avoiding a nasty leakage.
-        */
-       if (likely(get_page_unless_zero(page))) {
-               /*
-                * As balloon pages are not isolated from LRU lists, concurrent
-                * compaction threads can race against page migration functions
-                * as well as race against the balloon driver releasing a page.
-                *
-                * In order to avoid having an already isolated balloon page
-                * being (wrongly) re-isolated while it is under migration,
-                * or to avoid attempting to isolate pages being released by
-                * the balloon driver, lets be sure we have the page lock
-                * before proceeding with the balloon page isolation steps.
-                */
-               if (likely(trylock_page(page))) {
-                       /*
-                        * A ballooned page, by default, has PagePrivate set.
-                        * Prevent concurrent compaction threads from isolating
-                        * an already isolated balloon page by clearing it.
-                        */
-                       if (balloon_page_movable(page)) {
-                               __isolate_balloon_page(page);
-                               unlock_page(page);
-                               return true;
-                       }
-                       unlock_page(page);
-               }
-               put_page(page);
-       }
-       return false;
-}
-
-/* putback_lru_page() counterpart for a ballooned page */
-void balloon_page_putback(struct page *page)
-{
-       /*
-        * 'lock_page()' stabilizes the page and prevents races against
-        * concurrent isolation threads attempting to re-isolate it.
-        */
-       lock_page(page);
-
-       if (__is_movable_balloon_page(page)) {
-               __putback_balloon_page(page);
-               /* drop the extra ref count taken for page isolation */
-               put_page(page);
-       } else {
-               WARN_ON(1);
-               dump_page(page, "not movable balloon page");
-       }
-       unlock_page(page);
-}
 
 /* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct page *newpage,
-                        struct page *page, enum migrate_mode mode)
+int balloon_page_migrate(struct address_space *mapping,
+               struct page *newpage, struct page *page,
+               enum migrate_mode mode)
 {
        struct balloon_dev_info *balloon = balloon_page_device(page);
-       int rc = -EAGAIN;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
-       if (WARN_ON(!__is_movable_balloon_page(page))) {
-               dump_page(page, "not movable balloon page");
-               return rc;
-       }
+       return balloon->migratepage(balloon, newpage, page, mode);
+}
 
-       if (balloon && balloon->migratepage)
-               rc = balloon->migratepage(balloon, newpage, page, mode);
+const struct address_space_operations balloon_aops = {
+       .migratepage = balloon_page_migrate,
+       .isolate_page = balloon_page_isolate,
+       .putback_page = balloon_page_putback,
+};
+EXPORT_SYMBOL_GPL(balloon_aops);
 
-       return rc;
-}
 #endif /* CONFIG_BALLOON_COMPACTION */
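
With this change the core migration path reaches balloon pages through the exported balloon_aops (.isolate_page/.putback_page/.migratepage), and balloon_page_migrate() dispatches unconditionally to the driver's migratepage callback. As a minimal sketch (not part of the patch), the hypothetical my_balloon_migratepage() below shows what such a callback might look like, assuming only the existing helpers from include/linux/balloon_compaction.h (balloon_page_insert(), balloon_page_delete()); the driver-specific host notification step is left as a comment, and a real driver is also expected to hook the callback up via b_dev_info->migratepage and point its balloon mapping's a_ops at the exported balloon_aops.

#include <linux/balloon_compaction.h>
#include <linux/migrate.h>

/* Hypothetical driver callback, installed as b_dev_info->migratepage. */
static int my_balloon_migratepage(struct balloon_dev_info *b_dev_info,
				  struct page *newpage, struct page *page,
				  enum migrate_mode mode)
{
	unsigned long flags;

	/*
	 * balloon_page_migrate() above has already checked that both
	 * pages are locked.  Account newpage as a balloon page and take
	 * the old page out of the isolated count.
	 */
	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_insert(b_dev_info, newpage);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	/* ... driver-specific step: tell the host about newpage / forget page ... */

	/* Drop the old page from the balloon's bookkeeping before it is freed. */
	balloon_page_delete(page);

	return MIGRATEPAGE_SUCCESS;
}

A real driver (virtio_balloon in this series, for instance) performs the host notification over its inflate/deflate paths and, depending on how it holds its inflated pages, may also drop its own reference on the old page before returning.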