zsmalloc: implement zpool driver interface
[cascardo/linux.git] / mm / zsmalloc.c
index fe78189..4e2fc83 100644 (file)
@@ -92,6 +92,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/zsmalloc.h>
+#include <linux/zpool.h>
 
 /*
  * This must be power of 2 and greater than of equal to sizeof(link_free).
@@ -240,6 +241,81 @@ struct mapping_area {
        enum zs_mapmode vm_mm; /* mapping mode */
 };
 
+/* zpool driver */
+
+#ifdef CONFIG_ZPOOL
+
+static void *zs_zpool_create(gfp_t gfp, struct zpool_ops *zpool_ops)
+{
+       return zs_create_pool(gfp);
+}
+
+static void zs_zpool_destroy(void *pool)
+{
+       zs_destroy_pool(pool);
+}
+
+static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
+                       unsigned long *handle)
+{
+       *handle = zs_malloc(pool, size);
+       return *handle ? 0 : -ENOMEM; /* zs_malloc returns 0 on failure */
+}
+static void zs_zpool_free(void *pool, unsigned long handle)
+{
+       zs_free(pool, handle);
+}
+
+static int zs_zpool_shrink(void *pool, unsigned int pages,
+                       unsigned int *reclaimed)
+{
+       return -EINVAL;
+}
+
+static void *zs_zpool_map(void *pool, unsigned long handle,
+                       enum zpool_mapmode mm)
+{
+       enum zs_mapmode zs_mm;
+
+       switch (mm) {
+       case ZPOOL_MM_RO:
+               zs_mm = ZS_MM_RO;
+               break;
+       case ZPOOL_MM_WO:
+               zs_mm = ZS_MM_WO;
+               break;
+       case ZPOOL_MM_RW: /* fallthru */
+       default:
+               zs_mm = ZS_MM_RW;
+               break;
+       }
+
+       return zs_map_object(pool, handle, zs_mm);
+}
+static void zs_zpool_unmap(void *pool, unsigned long handle)
+{
+       zs_unmap_object(pool, handle);
+}
+
+static u64 zs_zpool_total_size(void *pool)
+{
+       return zs_get_total_size_bytes(pool);
+}
+
+static struct zpool_driver zs_zpool_driver = {
+       .type =         "zsmalloc",
+       .owner =        THIS_MODULE,
+       .create =       zs_zpool_create,
+       .destroy =      zs_zpool_destroy,
+       .malloc =       zs_zpool_malloc,
+       .free =         zs_zpool_free,
+       .shrink =       zs_zpool_shrink,
+       .map =          zs_zpool_map,
+       .unmap =        zs_zpool_unmap,
+       .total_size =   zs_zpool_total_size,
+};
+
+#endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
 static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -690,7 +766,7 @@ static inline void __zs_cpu_down(struct mapping_area *area)
 static inline void *__zs_map_object(struct mapping_area *area,
                                struct page *pages[2], int off, int size)
 {
-       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, &pages));
+       BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
        area->vm_addr = area->vm->addr;
        return area->vm_addr + off;
 }
@@ -814,6 +890,10 @@ static void zs_exit(void)
 {
        int cpu;
 
+#ifdef CONFIG_ZPOOL
+       zpool_unregister_driver(&zs_zpool_driver);
+#endif
+
        cpu_notifier_register_begin();
 
        for_each_online_cpu(cpu)
@@ -840,6 +920,10 @@ static int zs_init(void)
 
        cpu_notifier_register_done();
 
+#ifdef CONFIG_ZPOOL
+       zpool_register_driver(&zs_zpool_driver);
+#endif
+
        return 0;
 fail:
        zs_exit();