diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 6e45a50..b3cbe19 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
 #include <linux/export.h>
 #include <linux/balloon_compaction.h>
 
-/*
- * balloon_devinfo_alloc - allocates a balloon device information descriptor.
- * @balloon_dev_descriptor: pointer to reference the balloon device which
- *                          this struct balloon_dev_info will be servicing.
- *
- * Driver must call it to properly allocate and initialize an instance of
- * struct balloon_dev_info which will be used to reference a balloon device
- * as well as to keep track of the balloon device page list.
- */
-struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor)
-{
-       struct balloon_dev_info *b_dev_info;
-       b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL);
-       if (!b_dev_info)
-               return ERR_PTR(-ENOMEM);
-
-       b_dev_info->balloon_device = balloon_dev_descriptor;
-       b_dev_info->mapping = NULL;
-       b_dev_info->isolated_pages = 0;
-       spin_lock_init(&b_dev_info->pages_lock);
-       INIT_LIST_HEAD(&b_dev_info->pages);
-
-       return b_dev_info;
-}
-EXPORT_SYMBOL_GPL(balloon_devinfo_alloc);
-
 /*
  * balloon_page_enqueue - allocates a new page and inserts it into the balloon
  *                       page list.
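With balloon_devinfo_alloc() gone, a balloon driver no longer allocates the descriptor here; it is expected to embed struct balloon_dev_info in its own device state and initialize it in place. A minimal sketch of that initializer, assuming an inline balloon_devinfo_init() helper in the reworked <linux/balloon_compaction.h> (the helper itself is not part of this diff; the field names follow the hunks below):

    /* Sketch: driver-side replacement for the removed balloon_devinfo_alloc(). */
    static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
    {
            balloon->isolated_pages = 0;
            spin_lock_init(&balloon->pages_lock);
            INIT_LIST_HEAD(&balloon->pages);
            balloon->migratepage = NULL;    /* driver hooks its callback here, see the last hunk */
    }
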
@@ -61,7 +35,8 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
         */
        BUG_ON(!trylock_page(page));
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-       balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages);
+       balloon_page_insert(b_dev_info, page);
+       __count_vm_event(BALLOON_INFLATE);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
        return page;
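balloon_page_enqueue() now hands the device descriptor straight to balloon_page_insert() and accounts the page under the new BALLOON_INFLATE vmstat event. The insert helper lives in the header and is not shown here; a hedged sketch of what it has to do, inferred from the PagePrivate and balloon_page_device() uses in the hunks below (the balloon-page marker name is an assumption):

    /* Sketch (assumption): tag the page and remember its owning device. */
    static inline void balloon_page_insert(struct balloon_dev_info *balloon,
                                           struct page *page)
    {
            __SetPageBalloon(page);         /* mark as a balloon page */
            SetPagePrivate(page);           /* "not isolated"; cleared on isolation */
            set_page_private(page, (unsigned long)balloon);
            list_add(&page->lru, &balloon->pages);
    }
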
@@ -93,18 +68,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
                 * to be released by the balloon driver.
                 */
                if (trylock_page(page)) {
+                       if (!PagePrivate(page)) {
+                               /* raced with isolation */
+                               unlock_page(page);
+                               continue;
+                       }
                        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
-                       /*
-                        * Raise the page refcount here to prevent any wrong
-                        * attempt to isolate this page, in case of coliding
-                        * with balloon_page_isolate() just after we release
-                        * the page lock.
-                        *
-                        * balloon_page_free() will take care of dropping
-                        * this extra refcount later.
-                        */
-                       get_page(page);
                        balloon_page_delete(page);
+                       __count_vm_event(BALLOON_DEFLATE);
                        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
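On the dequeue side the old get_page() trick is gone: balloon_page_isolate() clears PagePrivate while holding the page lock, so testing PagePrivate here, also under the page lock, is enough to detect a page that compaction has already pulled off balloon->pages and simply skip it. The delete helper is again a header change; a sketch under the assumption that it mirrors the insert sketch above:

    /* Sketch (assumption): undo balloon_page_insert(); isolated pages are not on the list. */
    static inline void balloon_page_delete(struct page *page)
    {
            __ClearPageBalloon(page);
            set_page_private(page, 0);
            if (PagePrivate(page)) {        /* not isolated: still on balloon->pages */
                    ClearPagePrivate(page);
                    list_del(&page->lru);
            }
    }
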
@@ -132,62 +103,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 EXPORT_SYMBOL_GPL(balloon_page_dequeue);
 
 #ifdef CONFIG_BALLOON_COMPACTION
-/*
- * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages.
- * @b_dev_info: holds the balloon device information descriptor.
- * @a_ops: balloon_mapping address_space_operations descriptor.
- *
- * Driver must call it to properly allocate and initialize an instance of
- * struct address_space which will be used as the special page->mapping for
- * balloon device enlisted page instances.
- */
-struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
-                               const struct address_space_operations *a_ops)
-{
-       struct address_space *mapping;
-
-       mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
-       if (!mapping)
-               return ERR_PTR(-ENOMEM);
-
-       /*
-        * Give a clean 'zeroed' status to all elements of this special
-        * balloon page->mapping struct address_space instance.
-        */
-       address_space_init_once(mapping);
-
-       /*
-        * Set mapping->flags appropriately, to allow balloon pages
-        * ->mapping identification.
-        */
-       mapping_set_balloon(mapping);
-       mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());
-
-       /* balloon's page->mapping->a_ops callback descriptor */
-       mapping->a_ops = a_ops;
-
-       /*
-        * Establish a pointer reference back to the balloon device descriptor
-        * this particular page->mapping will be servicing.
-        * This is used by compaction / migration procedures to identify and
-        * access the balloon device pageset while isolating / migrating pages.
-        *
-        * As some balloon drivers can register multiple balloon devices
-        * for a single guest, this also helps compaction / migration to
-        * properly deal with multiple balloon pagesets, when required.
-        */
-       mapping->private_data = b_dev_info;
-       b_dev_info->mapping = mapping;
-
-       return mapping;
-}
-EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
 
 static inline void __isolate_balloon_page(struct page *page)
 {
-       struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+       struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
+
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+       ClearPagePrivate(page);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
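With the special balloon ->mapping gone, the owning device is recovered straight from the page. balloon_page_device() is a header helper not shown in this diff; given the insert sketch above, it is presumably no more than:

    /* Presumed accessor replacing page->mapping->private_data. */
    static inline struct balloon_dev_info *balloon_page_device(struct page *page)
    {
            return (struct balloon_dev_info *)page_private(page);
    }

Clearing PagePrivate under pages_lock (with the page lock already held by the caller) is also what makes the race check added to balloon_page_dequeue() above work.
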
@@ -195,20 +118,16 @@ static inline void __isolate_balloon_page(struct page *page)
 
 static inline void __putback_balloon_page(struct page *page)
 {
-       struct balloon_dev_info *b_dev_info = page->mapping->private_data;
+       struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;
+
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+       SetPagePrivate(page);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 }
 
-static inline int __migrate_balloon_page(struct address_space *mapping,
-               struct page *newpage, struct page *page, enum migrate_mode mode)
-{
-       return page->mapping->a_ops->migratepage(mapping, newpage, page, mode);
-}
-
 /* __isolate_lru_page() counterpart for a ballooned page */
 bool balloon_page_isolate(struct page *page)
 {
@@ -235,12 +154,11 @@ bool balloon_page_isolate(struct page *page)
                 */
                if (likely(trylock_page(page))) {
                        /*
-                        * A ballooned page, by default, has just one refcount.
+                        * A ballooned page, by default, has PagePrivate set.
                         * Prevent concurrent compaction threads from isolating
-                        * an already isolated balloon page by refcount check.
+                        * an already isolated balloon page by clearing it.
                         */
-                       if (__is_movable_balloon_page(page) &&
-                           page_count(page) == 2) {
+                       if (balloon_page_movable(page)) {
                                __isolate_balloon_page(page);
                                unlock_page(page);
                                return true;
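The refcount heuristic is replaced by a single predicate. balloon_page_movable() comes from the header as well; judging by how isolation clears PagePrivate, it presumably combines a balloon-page marker with the PagePrivate "not yet isolated" bit (a sketch, not the literal header text):

    /* Sketch (assumption): movable == a balloon page that has not been isolated yet. */
    static inline bool balloon_page_movable(struct page *page)
    {
            return PageBalloon(page) && PagePrivate(page);
    }
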
@@ -276,7 +194,7 @@ void balloon_page_putback(struct page *page)
 int balloon_page_migrate(struct page *newpage,
                         struct page *page, enum migrate_mode mode)
 {
-       struct address_space *mapping;
+       struct balloon_dev_info *balloon = balloon_page_device(page);
        int rc = -EAGAIN;
 
        /*
@@ -292,9 +210,8 @@ int balloon_page_migrate(struct page *newpage,
                return rc;
        }
 
-       mapping = page->mapping;
-       if (mapping)
-               rc = __migrate_balloon_page(mapping, newpage, page, mode);
+       if (balloon && balloon->migratepage)
+               rc = balloon->migratepage(balloon, newpage, page, mode);
 
        unlock_page(newpage);
        return rc;
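
Migration no longer goes through address_space_operations: a driver registers a callback in its balloon_dev_info instead. A hypothetical driver-side wiring (my_balloon, my_balloon_migratepage and my_balloon_setup are invented names; the helper calls follow the sketches above):

    #include <linux/balloon_compaction.h>
    #include <linux/migrate.h>

    static struct balloon_dev_info my_balloon;

    /* Hypothetical callback: hand the new page to the balloon, drop the old one. */
    static int my_balloon_migratepage(struct balloon_dev_info *balloon,
                                      struct page *newpage, struct page *page,
                                      enum migrate_mode mode)
    {
            unsigned long flags;

            /* A real driver would also tell the host to back newpage instead of page. */
            spin_lock_irqsave(&balloon->pages_lock, flags);
            balloon_page_insert(balloon, newpage);  /* newpage joins balloon->pages */
            balloon->isolated_pages--;              /* old page leaves the isolated set */
            spin_unlock_irqrestore(&balloon->pages_lock, flags);

            balloon_page_delete(page);              /* old page is released by the migration core */
            return MIGRATEPAGE_SUCCESS;
    }

    static void my_balloon_setup(void)
    {
            balloon_devinfo_init(&my_balloon);
            my_balloon.migratepage = my_balloon_migratepage;
    }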