diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index b644067..1f9c508 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
  * Released under the terms of GNU General Public License Version 2.0
  */
 
+#ifdef CONFIG_ZRAM_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/highmem.h>
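
Note on the new guard: DEBUG must be defined before <linux/kernel.h> is
pulled in so that pr_debug() expands to a real printk(KERN_DEBUG ...)
instead of a no-op, and <linux/module.h> is needed for the
EXPORT_SYMBOL_GPL() markers added further down. A minimal sketch of the
pattern, outside the driver (the xv_dump_index() helper is hypothetical,
shown only to illustrate the mechanics):

    /* Must precede kernel.h: pr_debug() compiles to real output only
     * when DEBUG is defined (or dynamic debug is enabled). */
    #ifdef CONFIG_ZRAM_DEBUG
    #define DEBUG
    #endif

    #include <linux/kernel.h>

    /* Hypothetical helper: prints only in CONFIG_ZRAM_DEBUG builds. */
    static void xv_dump_index(u32 slindex)
    {
            pr_debug("freelist index %u touched\n", slindex);
    }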
@@ -46,7 +52,7 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
 }
 
 /*
- * Given <page, offset> pair, provide a derefrencable pointer.
+ * Given <page, offset> pair, provide a dereferenceable pointer.
  * This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
@@ -200,61 +206,23 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
                nextblock->link.prev_page = page;
                nextblock->link.prev_offset = offset;
                put_ptr_atomic(nextblock, KM_USER1);
+               /* Next block exists: the freelist bits are already set. */
+               return;
        }
 
        __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
        __set_bit(flindex, &pool->flbitmap);
 }
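
For reference, the two __set_bit() calls above maintain a two-level
index: each bit of flbitmap records whether the corresponding word of
slbitmap has any bit set, so the allocator can skip BITS_PER_LONG empty
freelists at a time. A userspace sketch of the same bookkeeping
(illustrative only; the size and names are assumptions, not the
driver's):

    #include <limits.h>

    #define NUM_FREELISTS 512   /* illustrative size */
    #define BITS (sizeof(unsigned long) * CHAR_BIT)
    #define FL_WORDS (NUM_FREELISTS / BITS)

    static unsigned long flbitmap;           /* one bit per slbitmap word */
    static unsigned long slbitmap[FL_WORDS]; /* one bit per freelist */

    static void mark_nonempty(unsigned int slindex)
    {
            unsigned int flindex = slindex / BITS;

            slbitmap[flindex] |= 1UL << (slindex % BITS);
            flbitmap |= 1UL << flindex;
    }

    static void mark_empty(unsigned int slindex)
    {
            unsigned int flindex = slindex / BITS;

            slbitmap[flindex] &= ~(1UL << (slindex % BITS));
            if (!slbitmap[flindex])  /* whole word drained */
                    flbitmap &= ~(1UL << flindex);
    }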
 
-/*
- * Remove block from head of freelist. Index 'slindex' identifies the freelist.
- */
-static void remove_block_head(struct xv_pool *pool,
-                       struct block_header *block, u32 slindex)
-{
-       struct block_header *tmpblock;
-       u32 flindex = slindex / BITS_PER_LONG;
-
-       pool->freelist[slindex].page = block->link.next_page;
-       pool->freelist[slindex].offset = block->link.next_offset;
-       block->link.prev_page = NULL;
-       block->link.prev_offset = 0;
-
-       if (!pool->freelist[slindex].page) {
-               __clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
-               if (!pool->slbitmap[flindex])
-                       __clear_bit(flindex, &pool->flbitmap);
-       } else {
-               /*
-                * DEBUG ONLY: We need not reinitialize freelist head previous
-                * pointer to 0 - we never depend on its value. But just for
-                * sanity, lets do it.
-                */
-               tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-                               pool->freelist[slindex].offset, KM_USER1);
-               tmpblock->link.prev_page = NULL;
-               tmpblock->link.prev_offset = 0;
-               put_ptr_atomic(tmpblock, KM_USER1);
-       }
-}
-
 /*
  * Remove block from freelist. Index 'slindex' identifies the freelist.
  */
 static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
                        struct block_header *block, u32 slindex)
 {
-       u32 flindex;
+       u32 flindex = slindex / BITS_PER_LONG;
        struct block_header *tmpblock;
 
-       if (pool->freelist[slindex].page == page
-          && pool->freelist[slindex].offset == offset) {
-               remove_block_head(pool, block, slindex);
-               return;
-       }
-
-       flindex = slindex / BITS_PER_LONG;
-
        if (block->link.prev_page) {
                tmpblock = get_ptr_atomic(block->link.prev_page,
                                block->link.prev_offset, KM_USER1);
@@ -270,6 +238,35 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
                tmpblock->link.prev_offset = block->link.prev_offset;
                put_ptr_atomic(tmpblock, KM_USER1);
        }
+
+       /* Is this block at the head of the freelist? */
+       if (pool->freelist[slindex].page == page
+          && pool->freelist[slindex].offset == offset) {
+
+               pool->freelist[slindex].page = block->link.next_page;
+               pool->freelist[slindex].offset = block->link.next_offset;
+
+               if (pool->freelist[slindex].page) {
+                       struct block_header *tmpblock;
+                       tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
+                                       pool->freelist[slindex].offset,
+                                       KM_USER1);
+                       tmpblock->link.prev_page = NULL;
+                       tmpblock->link.prev_offset = 0;
+                       put_ptr_atomic(tmpblock, KM_USER1);
+               } else {
+                       /* This freelist bucket is empty */
+                       __clear_bit(slindex % BITS_PER_LONG,
+                                   &pool->slbitmap[flindex]);
+                       if (!pool->slbitmap[flindex])
+                               __clear_bit(flindex, &pool->flbitmap);
+               }
+       }
+
+       block->link.prev_page = NULL;
+       block->link.prev_offset = 0;
+       block->link.next_page = NULL;
+       block->link.next_offset = 0;
 }
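
With this change remove_block() covers the head-of-list case itself, so
the separate remove_block_head() helper above can go away. Note the
reordering as well: the block's neighbours are unlinked first, and only
then are the freelist head and, if the bucket drained, the bitmaps fixed
up. The shape of the logic is easier to see on an ordinary doubly linked
list; a minimal sketch with plain pointers standing in for the driver's
<page, offset> pairs:

    struct node {
            struct node *prev, *next;
    };

    /* Unlink 'n' from the list whose head pointer is *head. Mirrors
     * remove_block(): fix neighbours first, then the head if needed. */
    static void unlink_node(struct node **head, struct node *n)
    {
            if (n->prev)
                    n->prev->next = n->next;
            if (n->next)
                    n->next->prev = n->prev;

            if (*head == n) {
                    *head = n->next;
                    if (*head)
                            (*head)->prev = NULL;
                    /* else: list now empty; clear "non-empty" bookkeeping */
            }

            n->prev = n->next = NULL;
    }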
 
 /*
@@ -320,11 +317,13 @@ struct xv_pool *xv_create_pool(void)
 
        return pool;
 }
+EXPORT_SYMBOL_GPL(xv_create_pool);
 
 void xv_destroy_pool(struct xv_pool *pool)
 {
        kfree(pool);
 }
+EXPORT_SYMBOL_GPL(xv_destroy_pool);
 
 /**
  * xv_malloc - Allocate block of given size from pool.
@@ -378,7 +377,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 
        block = get_ptr_atomic(*page, *offset, KM_USER0);
 
-       remove_block_head(pool, block, index);
+       remove_block(pool, *page, *offset, block, index);
 
        /* Split the block if required */
        tmpoffset = *offset + size + XV_ALIGN;
@@ -413,6 +412,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(xv_malloc);
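
After remove_block() takes the block off its freelist, xv_malloc()
splits off any excess. A rough worked example, assuming XV_ALIGN is 4
(its value in the driver's internal header at this point): a 100-byte
request served from a block of size 200 at offset 0 gives
tmpoffset = 0 + 100 + 4 = 104, and the remaining 96 bytes
(200 - 100 - 4) become a new free block that is inserted back onto a
smaller freelist.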
 
 /*
  * Free block identified with <page, offset>
@@ -489,6 +489,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
        put_ptr_atomic(page_start, KM_USER0);
        spin_unlock(&pool->lock);
 }
+EXPORT_SYMBOL_GPL(xv_free);
 
 u32 xv_get_object_size(void *obj)
 {
@@ -497,6 +498,7 @@ u32 xv_get_object_size(void *obj)
        blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
        return blk->size;
 }
+EXPORT_SYMBOL_GPL(xv_get_object_size);
 
 /*
  * Returns total memory used by allocator (userdata + metadata)
@@ -505,3 +507,4 @@ u64 xv_get_total_size_bytes(struct xv_pool *pool)
 {
        return pool->total_pages << PAGE_SHIFT;
 }
+EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);
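
Taken together, the EXPORT_SYMBOL_GPL() markers make the allocator
callable from a separately built GPL module rather than only from
objects linked alongside it. A hypothetical consumer, only to show the
now-exported API surface (assuming the xv_malloc() signature with a
trailing gfp_t argument that zram uses in this tree; include path
illustrative, error handling trimmed):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include "xvmalloc.h"

    static struct xv_pool *pool;

    static int __init xvuser_init(void)
    {
            struct page *page;
            u32 offset;

            pool = xv_create_pool();
            if (!pool)
                    return -ENOMEM;

            /* Allocate 128 bytes; <page, offset> locates the block. */
            if (xv_malloc(pool, 128, &page, &offset, GFP_KERNEL)) {
                    xv_destroy_pool(pool);
                    return -ENOMEM;
            }

            pr_info("pool uses %llu bytes\n",
                    (unsigned long long)xv_get_total_size_bytes(pool));

            xv_free(pool, page, offset);
            return 0;
    }

    static void __exit xvuser_exit(void)
    {
            xv_destroy_pool(pool);
    }

    module_init(xvuser_init);
    module_exit(xvuser_exit);
    MODULE_LICENSE("GPL");   /* required for GPL-only exports */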