Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm...
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 1833fc9..08bd7a3 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -731,7 +731,8 @@ out:
  * to form a zspage for each size class. This is important
  * to reduce wastage due to unusable space left at end of
  * each zspage which is given as:
- *     wastage = Zp - Zp % size_class
+ *     wastage = Zp % class_size
+ *     usage = Zp - wastage
  * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
  *
  * For example, for size class of 3/8 * PAGE_SIZE, we should
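
Concretely, with PAGE_SIZE = 4096 the 3/8 * PAGE_SIZE class has class_size = 1536: a 1-page zspage wastes 4096 % 1536 = 1024 bytes, a 2-page zspage wastes 512, and a 3-page zspage wastes 0, so three linked pages are the right backing. A minimal user-space C sketch of that search, in the spirit of zsmalloc's get_pages_per_zspage(); the 4-page cap and the names are illustrative assumptions, not taken from this patch:

#include <stdio.h>

#define PAGE_SIZE 4096
#define MAX_PAGES_PER_ZSPAGE 4

/* Pick k in 1..MAX so that a k-page zspage has the highest used
 * percentage, i.e. the least tail wastage per the corrected formula. */
static int pages_per_zspage(int class_size)
{
    int k, best_pct = 0, best_k = 1;

    for (k = 1; k <= MAX_PAGES_PER_ZSPAGE; k++) {
        int zspage = k * PAGE_SIZE;
        int wastage = zspage % class_size;        /* wastage = Zp % class_size */
        int used_pct = (zspage - wastage) * 100 / zspage;

        if (used_pct > best_pct) {
            best_pct = used_pct;
            best_k = k;
        }
    }
    return best_k;
}

int main(void)
{
    printf("%d\n", pages_per_zspage(3 * PAGE_SIZE / 8));  /* prints 3 */
    return 0;
}
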
@@ -1397,11 +1398,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
        /* extra space in chunk to keep the handle */
        size += ZS_HANDLE_SIZE;
        class = pool->size_class[get_size_class_index(size)];
-       /* In huge class size, we store the handle into first_page->private */
-       if (class->huge) {
-               size -= ZS_HANDLE_SIZE;
-               class = pool->size_class[get_size_class_index(size)];
-       }
 
        spin_lock(&class->lock);
        first_page = find_get_zspage(class);
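
With this hunk, zs_malloc() always pads the request by ZS_HANDLE_SIZE before picking a class; huge classes no longer re-derive a smaller class from the unpadded size. A hedged user-space sketch of the index lookup that the padded size feeds into (the 32-byte minimum, 16-byte spacing and 8-byte handle are assumptions for a typical 4KiB-page build, not values from this patch):

#include <stdio.h>

#define ZS_MIN_ALLOC_SIZE   32   /* assumed minimum class size */
#define ZS_SIZE_CLASS_DELTA 16   /* assumed spacing: PAGE_SIZE >> 8 */
#define ZS_HANDLE_SIZE      8    /* assumed sizeof(unsigned long) */

/* Round the padded size up to the class granularity. */
static int get_size_class_index(int size)
{
    if (size <= ZS_MIN_ALLOC_SIZE)
        return 0;
    return (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1) /
           ZS_SIZE_CLASS_DELTA;
}

int main(void)
{
    int size = 100;                    /* caller's request */
    /* as in the patched zs_malloc(): pad first, then pick the class */
    printf("class index %d\n", get_size_class_index(size + ZS_HANDLE_SIZE));
    return 0;
}
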
@@ -1536,7 +1532,12 @@ static void zs_object_copy(unsigned long src, unsigned long dst,
                if (written == class->size)
                        break;
 
-               if (s_off + size >= PAGE_SIZE) {
+               s_off += size;
+               s_size -= size;
+               d_off += size;
+               d_size -= size;
+
+               if (s_off >= PAGE_SIZE) {
                        kunmap_atomic(d_addr);
                        kunmap_atomic(s_addr);
                        s_page = get_next_page(s_page);
@@ -1545,21 +1546,15 @@ static void zs_object_copy(unsigned long src, unsigned long dst,
                        d_addr = kmap_atomic(d_page);
                        s_size = class->size - written;
                        s_off = 0;
-               } else {
-                       s_off += size;
-                       s_size -= size;
                }
 
-               if (d_off + size >= PAGE_SIZE) {
+               if (d_off >= PAGE_SIZE) {
                        kunmap_atomic(d_addr);
                        d_page = get_next_page(d_page);
                        BUG_ON(!d_page);
                        d_addr = kmap_atomic(d_page);
                        d_size = class->size - written;
                        d_off = 0;
-               } else {
-                       d_off += size;
-                       d_size -= size;
                }
        }
 
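The reworked loop advances s_off/d_off unconditionally after every memcpy and then applies one plain s_off >= PAGE_SIZE (resp. d_off >= PAGE_SIZE) test per side, instead of predicting the crossing with off + size >= PAGE_SIZE in an if/else. A self-contained user-space model of the new control flow (pages simulated as rows of one contiguous array, kmap_atomic/kunmap_atomic elided, names local to the sketch; this is not the kernel code itself):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 64   /* tiny pages keep the demo readable */
#define NPAGES    4

/* Copy one object of class_size bytes (class_size <= PAGE_SIZE, as in
 * zsmalloc) that may straddle a page boundary on either side. */
static void object_copy(char dst[NPAGES][PAGE_SIZE], size_t d_off,
                        char src[NPAGES][PAGE_SIZE], size_t s_off,
                        size_t class_size)
{
    size_t s_page = s_off / PAGE_SIZE, d_page = d_off / PAGE_SIZE;
    size_t written = 0, s_size, d_size, size;

    s_off %= PAGE_SIZE;
    d_off %= PAGE_SIZE;
    /* bytes of this object available in the current page of each side */
    s_size = class_size < PAGE_SIZE - s_off ? class_size : PAGE_SIZE - s_off;
    d_size = class_size < PAGE_SIZE - d_off ? class_size : PAGE_SIZE - d_off;

    while (1) {
        size = s_size < d_size ? s_size : d_size;
        memcpy(&dst[d_page][d_off], &src[s_page][s_off], size);
        written += size;

        if (written == class_size)
            break;

        /* the patched flow: always advance, then one boundary test each */
        s_off += size;
        s_size -= size;
        d_off += size;
        d_size -= size;

        if (s_off >= PAGE_SIZE) {      /* source crossed into next page */
            s_page++;
            s_size = class_size - written;
            s_off = 0;
        }
        if (d_off >= PAGE_SIZE) {      /* destination crossed too */
            d_page++;
            d_size = class_size - written;
            d_off = 0;
        }
    }
}

int main(void)
{
    char src[NPAGES][PAGE_SIZE], dst[NPAGES][PAGE_SIZE];
    size_t i;

    for (i = 0; i < sizeof(src); i++)
        ((char *)src)[i] = (char)i;
    memset(dst, 0, sizeof(dst));

    object_copy(dst, 56, src, 40, 48); /* both sides cross a page */
    printf("%s\n", !memcmp((char *)dst + 56, (char *)src + 40, 48) ?
           "ok" : "mismatch");
    return 0;
}

The code that previously lived in the two else branches now runs on every iteration, which shortens the loop body without changing which bytes get copied.
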
@@ -1678,14 +1673,14 @@ static struct page *alloc_target_page(struct size_class *class)
 static void putback_zspage(struct zs_pool *pool, struct size_class *class,
                                struct page *first_page)
 {
-       int class_idx;
        enum fullness_group fullness;
 
        BUG_ON(!is_first_page(first_page));
 
-       get_zspage_mapping(first_page, &class_idx, &fullness);
+       fullness = get_fullness_group(first_page);
        insert_zspage(first_page, class, fullness);
-       fullness = fix_fullness_group(class, first_page);
+       set_zspage_mapping(first_page, class->index, fullness);
+
        if (fullness == ZS_EMPTY) {
                zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
                        class->size, class->pages_per_zspage));
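
putback_zspage() now derives the fullness group from the zspage's current occupancy and records it up front, rather than reinserting under the stale stored group and repairing it afterwards with fix_fullness_group(). A hedged sketch of that classification (the cutoffs are an assumption modelled on zsmalloc's fullness_threshold_frac of 4, not part of this hunk):

#include <stdio.h>

enum fullness_group {
    ZS_ALMOST_FULL,
    ZS_ALMOST_EMPTY,
    ZS_EMPTY,
    ZS_FULL,
};

#define FULLNESS_THRESHOLD_FRAC 4   /* assumed: matches the kernel's value */

/* Classify a zspage by how many of its object slots are in use. */
static enum fullness_group classify(unsigned int inuse,
                                    unsigned int max_objects)
{
    if (inuse == 0)
        return ZS_EMPTY;
    if (inuse == max_objects)
        return ZS_FULL;
    if (inuse <= 3 * max_objects / FULLNESS_THRESHOLD_FRAC)
        return ZS_ALMOST_EMPTY;     /* <= 3/4 full: a compaction source */
    return ZS_ALMOST_FULL;
}

int main(void)
{
    /* e.g. a zspage with 2 of 8 slots used is ZS_ALMOST_EMPTY (1) */
    printf("%d\n", classify(2, 8));
    return 0;
}

Recomputing before the insert means the list the zspage lands on and the class/fullness mapping recorded for it can never disagree, which is the point of the reordering.
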
@@ -1716,8 +1711,6 @@ static unsigned long __zs_compact(struct zs_pool *pool,
        struct page *dst_page = NULL;
        unsigned long nr_total_migrated = 0;
 
-       cond_resched();
-
        spin_lock(&class->lock);
        while ((src_page = isolate_source_page(class))) {
 
@@ -1777,8 +1770,6 @@ unsigned long zs_compact(struct zs_pool *pool)
                nr_migrated += __zs_compact(pool, class);
        }
 
-       synchronize_rcu();
-
        return nr_migrated;
 }
 EXPORT_SYMBOL_GPL(zs_compact);