Merge tag 'nfsd-4.8' of git://linux-nfs.org/~bfields/linux
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 04176de..b0bc023 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -20,6 +20,7 @@
  *     page->freelist(index): links together all component pages of a zspage
  *             For a huge page, this is always 0, so we use this field
  *             to store the handle.
+ *     page->units: first object offset in a subpage of zspage
  *
  * Usage of struct page flags:
  *     PG_private: identifies the first component page
  */
 #define ZS_SIZE_CLASS_DELTA    (PAGE_SIZE >> CLASS_BITS)
 
-/*
- * We do not maintain any list for completely empty or full pages
- */
 enum fullness_group {
        ZS_EMPTY,
        ZS_ALMOST_EMPTY,
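
Aside on the hunk above: the PG_private convention documented in the header comment is what zsmalloc's first-page test keys off. A minimal sketch of that helper, roughly as it exists elsewhere in this file at this point, shown only for orientation:

	static int is_first_page(struct page *page)
	{
		return PagePrivate(page);
	}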
@@ -467,11 +465,6 @@ static struct zpool_driver zs_zpool_driver = {
 MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
-static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
-{
-       return pages_per_zspage * PAGE_SIZE / size;
-}
-
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
@@ -635,8 +628,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
                freeable = zs_can_compact(class);
                spin_unlock(&class->lock);
 
-               objs_per_zspage = get_maxobj_per_zspage(class->size,
-                               class->pages_per_zspage);
+               objs_per_zspage = class->objs_per_zspage;
                pages_used = obj_allocated / objs_per_zspage *
                                class->pages_per_zspage;
 
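
The stats path now just reads the cached value; the pages_used estimate itself is unchanged. A worked example with assumed numbers (5 objects per 4-page zspage, 100 allocated slots):

	#include <stdio.h>

	int main(void)
	{
		long obj_allocated = 100;	/* assumed */
		int objs_per_zspage = 5, pages_per_zspage = 4;

		/* whole zspages backing the allocations, times their footprint */
		long pages_used = obj_allocated / objs_per_zspage * pages_per_zspage;
		printf("%ld\n", pages_used);	/* prints 80 */
		return 0;
	}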
@@ -945,8 +937,8 @@ static void unpin_tag(unsigned long handle)
 static void reset_page(struct page *page)
 {
        __ClearPageMovable(page);
-       clear_bit(PG_private, &page->flags);
-       clear_bit(PG_private_2, &page->flags);
+       ClearPagePrivate(page);
+       ClearPagePrivate2(page);
        set_page_private(page, 0);
        page_mapcount_reset(page);
        ClearPageHugeObject(page);
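
The switch from open-coded clear_bit() to ClearPagePrivate()/ClearPagePrivate2() is cosmetic: the accessors generated by the PAGEFLAG() macros in include/linux/page-flags.h boil down to the same bitops. Approximately (a simplified sketch, not the exact macro expansion):

	/* What PAGEFLAG(Private, private, ...) generates, roughly: */
	static inline void ClearPagePrivate(struct page *page)
	{
		clear_bit(PG_private, &page->flags);
	}

	static inline void ClearPagePrivate2(struct page *page)
	{
		clear_bit(PG_private_2, &page->flags);
	}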
@@ -1014,8 +1006,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
        cache_free_zspage(pool, zspage);
 
-       zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
+       zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
        atomic_long_sub(class->pages_per_zspage,
                                        &pool->pages_allocated);
 }
@@ -1350,7 +1341,7 @@ static void zs_unregister_cpu_notifier(void)
        cpu_notifier_register_done();
 }
 
-static void init_zs_size_classes(void)
+static void __init init_zs_size_classes(void)
 {
        int nr;
 
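
Tagging init_zs_size_classes() with __init is safe because it is only called from the allocator's boot-time init path, and __init text is discarded once boot completes. A heavily simplified sketch of the mechanism from include/linux/init.h (the real macro adds further attributes):

	/* Place the function in .init.text, which the kernel frees after
	 * boot; calling an __init function afterwards is a bug. */
	#define __init	__attribute__((__section__(".init.text")))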
@@ -1361,16 +1352,14 @@ static void init_zs_size_classes(void)
        zs_size_classes = nr;
 }
 
-static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
+static bool can_merge(struct size_class *prev, int pages_per_zspage,
+                                       int objs_per_zspage)
 {
-       if (prev->pages_per_zspage != pages_per_zspage)
-               return false;
+       if (prev->pages_per_zspage == pages_per_zspage &&
+               prev->objs_per_zspage == objs_per_zspage)
+               return true;
 
-       if (get_maxobj_per_zspage(prev->size, prev->pages_per_zspage)
-               != get_maxobj_per_zspage(size, pages_per_zspage))
-               return false;
-
-       return true;
+       return false;
 }
 
 static bool zspage_full(struct size_class *class, struct zspage *zspage)
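
After the rework, two classes merge exactly when their zspages have identical geometry. A self-contained sketch with assumed sizes (4 KiB pages; 2064- and 2080-byte classes both fit one object in a one-page zspage, so they can share a size_class):

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096	/* assumed */

	struct size_class { int size, pages_per_zspage, objs_per_zspage; };

	static bool can_merge(struct size_class *prev, int pages_per_zspage,
			      int objs_per_zspage)
	{
		return prev->pages_per_zspage == pages_per_zspage &&
		       prev->objs_per_zspage == objs_per_zspage;
	}

	int main(void)
	{
		struct size_class prev = { 2064, 1, PAGE_SIZE / 2064 };

		/* a 2080-byte class also holds exactly one object per page */
		printf("%d\n", can_merge(&prev, 1, PAGE_SIZE / 2080)); /* 1 */
		return 0;
	}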
@@ -1541,6 +1530,7 @@ static unsigned long obj_malloc(struct size_class *class,
  * zs_malloc - Allocate block of given size from pool.
  * @pool: pool to allocate from
  * @size: size of block to allocate
+ * @gfp: gfp flags when allocating object
  *
  * On success, handle to the allocated object is returned,
  * otherwise 0.
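
With the now-documented @gfp parameter, callers pass allocation flags per call rather than the pool carrying them. A sketched call site (pool, len and the error path are illustrative, not from this patch):

	unsigned long handle;

	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (!handle)
		return -ENOMEM;	/* 0 means the allocation failed */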
@@ -1592,8 +1582,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        record_obj(handle, obj);
        atomic_long_add(class->pages_per_zspage,
                                &pool->pages_allocated);
-       zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
-                       class->size, class->pages_per_zspage));
+       zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
 
        /* We completely set up zspage so mark them as movable */
        SetZsPageMovable(pool, zspage);
@@ -1741,10 +1730,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
  * return handle.
  */
 static unsigned long find_alloced_obj(struct size_class *class,
-                                       struct page *page, int index)
+                                       struct page *page, int *obj_idx)
 {
        unsigned long head;
        int offset = 0;
+       int index = *obj_idx;
        unsigned long handle = 0;
        void *addr = kmap_atomic(page);
 
@@ -1765,6 +1755,9 @@ static unsigned long find_alloced_obj(struct size_class *class,
        }
 
        kunmap_atomic(addr);
+
+       *obj_idx = index;
+
        return handle;
 }
 
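
find_alloced_obj() now reports the index of the object it found back through @obj_idx, so the caller can resume the scan where it left off instead of restarting it. A toy userspace model of the same in/out-parameter pattern (the map and names are made up for illustration):

	#include <stdio.h>

	/* Scan @map from *idx; report the hit position through *idx so the
	 * caller can continue from there on the next call. */
	static unsigned long find_next_used(const unsigned long *map, int len,
					    int *idx)
	{
		int i = *idx;

		while (i < len && !map[i])
			i++;
		*idx = i;
		return i < len ? map[i] : 0;	/* 0: nothing left here */
	}

	int main(void)
	{
		unsigned long map[] = { 0, 7, 0, 9, 0 };
		int idx = 0;
		unsigned long h;

		while ((h = find_next_used(map, 5, &idx)) != 0) {
			printf("handle %lu at index %d\n", h, idx);
			idx++;	/* step past the object just handled */
		}
		return 0;
	}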
@@ -1776,7 +1769,7 @@ struct zs_compact_control {
        struct page *d_page;
         /* Starting object index within @s_page, used to find live objects
          * in the subpage. */
-       int index;
+       int obj_idx;
 };
 
 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
@@ -1786,16 +1779,16 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
        unsigned long handle;
        struct page *s_page = cc->s_page;
        struct page *d_page = cc->d_page;
-       unsigned long index = cc->index;
+       int obj_idx = cc->obj_idx;
        int ret = 0;
 
        while (1) {
-               handle = find_alloced_obj(class, s_page, index);
+               handle = find_alloced_obj(class, s_page, &obj_idx);
                if (!handle) {
                        s_page = get_next_page(s_page);
                        if (!s_page)
                                break;
-                       index = 0;
+                       obj_idx = 0;
                        continue;
                }
 
@@ -1809,7 +1802,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
                used_obj = handle_to_obj(handle);
                free_obj = obj_malloc(class, get_zspage(d_page), handle);
                zs_object_copy(class, free_obj, used_obj);
-               index++;
+               obj_idx++;
                /*
                 * record_obj updates handle's value to free_obj and it will
                 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
@@ -1824,7 +1817,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
 
        /* Remember last position in this iteration */
        cc->s_page = s_page;
-       cc->index = index;
+       cc->obj_idx = obj_idx;
 
        return ret;
 }
@@ -2181,8 +2174,7 @@ static int zs_register_migration(struct zs_pool *pool)
 static void zs_unregister_migration(struct zs_pool *pool)
 {
        flush_work(&pool->free_work);
-       if (pool->inode)
-               iput(pool->inode);
+       iput(pool->inode);
 }
 
 /*
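
Dropping the NULL check is fine because iput() already tolerates a NULL inode; abridged from fs/inode.c:

	void iput(struct inode *inode)
	{
		if (!inode)
			return;
		/* ... drop the reference, maybe evict the inode ... */
	}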
@@ -2261,8 +2253,7 @@ static unsigned long zs_can_compact(struct size_class *class)
                return 0;
 
        obj_wasted = obj_allocated - obj_used;
-       obj_wasted /= get_maxobj_per_zspage(class->size,
-                       class->pages_per_zspage);
+       obj_wasted /= class->objs_per_zspage;
 
        return obj_wasted * class->pages_per_zspage;
 }
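
zs_can_compact() estimates reclaimable pages as whole zspages' worth of free slots. A worked example with assumed numbers (8 objects per 2-page zspage, 100 slots allocated, 70 in use):

	#include <stdio.h>

	int main(void)
	{
		int objs_per_zspage = 8, pages_per_zspage = 2;
		long obj_allocated = 100, obj_used = 70;	/* assumed */

		long obj_wasted = obj_allocated - obj_used;	/* 30 free slots */
		obj_wasted /= objs_per_zspage;			/* 3 zspages' worth */
		printf("%ld\n", obj_wasted * pages_per_zspage);	/* prints 6 */
		return 0;
	}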
@@ -2279,7 +2270,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
                if (!zs_can_compact(class))
                        break;
 
-               cc.index = 0;
+               cc.obj_idx = 0;
                cc.s_page = get_first_page(src_zspage);
 
                while ((dst_zspage = isolate_zspage(class, false))) {
@@ -2398,7 +2389,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
 
 /**
  * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
+ * @name: pool name to be created
  *
  * This function must be called before anything when using
  * the zsmalloc allocator.
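
Matching the corrected kerneldoc, the only argument is a name. A sketched usage (the name and error handling are illustrative):

	struct zs_pool *pool;

	pool = zs_create_pool("my_pool");
	if (!pool)
		return -ENOMEM;
	/* ... zs_malloc()/zs_free() against the pool ... */
	zs_destroy_pool(pool);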
@@ -2438,6 +2429,7 @@ struct zs_pool *zs_create_pool(const char *name)
        for (i = zs_size_classes - 1; i >= 0; i--) {
                int size;
                int pages_per_zspage;
+               int objs_per_zspage;
                struct size_class *class;
                int fullness = 0;
 
@@ -2445,6 +2437,7 @@ struct zs_pool *zs_create_pool(const char *name)
                if (size > ZS_MAX_ALLOC_SIZE)
                        size = ZS_MAX_ALLOC_SIZE;
                pages_per_zspage = get_pages_per_zspage(size);
+               objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
                /*
                 * size_class is used for normal zsmalloc operation such
@@ -2456,7 +2449,7 @@ struct zs_pool *zs_create_pool(const char *name)
                 * previous size_class if possible.
                 */
                if (prev_class) {
-                       if (can_merge(prev_class, size, pages_per_zspage)) {
+                       if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
                                pool->size_class[i] = prev_class;
                                continue;
                        }
@@ -2469,8 +2462,7 @@ struct zs_pool *zs_create_pool(const char *name)
                class->size = size;
                class->index = i;
                class->pages_per_zspage = pages_per_zspage;
-               class->objs_per_zspage = class->pages_per_zspage *
-                                               PAGE_SIZE / class->size;
+               class->objs_per_zspage = objs_per_zspage;
                spin_lock_init(&class->lock);
                pool->size_class[i] = class;
                for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;