/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

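/*
 * The stat64 helpers below serialize updates with zram->stat64_lock:
 * a 64-bit load or store is not atomic on 32-bit architectures, so an
 * unlocked update could let a reader observe a torn counter value.
 */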
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

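/*
 * A page qualifies as "zero filled" when every machine word in it is
 * zero; such pages are recorded with the ZRAM_ZERO flag above instead
 * of being compressed and stored.
 */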
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram->table[index].handle;
	u16 size = zram->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram->stats.pages_zero--;
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram->stats.bad_compress--;

	zs_free(zram->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram->stats.good_compress--;

	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram->stats.pages_stored--;

	zram->table[index].handle = 0;
	zram->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

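/*
 * Note: pages whose compressed size would exceed max_zpage_size are
 * stored verbatim at PAGE_SIZE (see zram_bvec_write() below), which is
 * why a stored size equal to PAGE_SIZE is copied out here without
 * invoking the LZO decompressor.
 */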
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	unsigned long handle = zram->table[index].handle;

	if (!handle || zram_test_flag(zram, index, ZRAM_ZERO)) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	if (zram->table[index].size == PAGE_SIZE)
		memcpy(mem, cmem, PAGE_SIZE);
	else
		ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
						mem, &clen);
	zs_unmap_object(zram->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;

	page = bvec->bv_page;

	if (unlikely(!zram->table[index].handle) ||
			zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		goto out_cleanup;
	}

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

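/*
 * Writes smaller than a page are handled read-modify-write: the old
 * page is decompressed into a temporary buffer, the incoming bytes are
 * merged in at the sub-page offset, and the result is compressed and
 * stored back as a whole page.
 */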
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		/* user_mem is already unmapped (NULL) for partial IO */
		if (user_mem)
			kunmap_atomic(user_mem);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram->stats.pages_zero++;
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		zram->stats.bad_compress++;
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(zram->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		src = kmap_atomic(page);
	memcpy(cmem, src, clen);
	if ((clen == PAGE_SIZE) && !is_partial_io(bvec))
		kunmap_atomic(src);

	zs_unmap_object(zram->mem_pool, handle);

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram->stats.pages_stored++;
	if (clen <= PAGE_SIZE / 2)
		zram->stats.good_compress++;

	return 0;

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

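/*
 * Advance the (page index, intra-page offset) cursor by one bio_vec:
 * the offset wraps modulo PAGE_SIZE, and the index moves to the next
 * page whenever the segment reaches or crosses a page boundary.
 */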
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

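/*
 * Worked example of the sector-to-page math below, assuming 4 KiB
 * pages and 512-byte sectors (SECTORS_PER_PAGE == 8,
 * SECTORS_PER_PAGE_SHIFT == 3, SECTOR_SHIFT == 9): a bio starting at
 * sector 11 maps to page index 11 >> 3 == 1 and byte offset
 * (11 & 7) << 9 == 1536 within that page.
 */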
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else {
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) < 0)
				goto out;
		}

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

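/*
 * Example, assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096: a 512-byte bio at
 * sector 1 fails both alignment checks above (its start sector is not
 * a multiple of ZRAM_SECTOR_PER_LOGICAL_BLOCK and its size is not a
 * multiple of the logical block size), so it is counted as invalid I/O.
 */
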
/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

void __zram_reset_device(struct zram *zram)
{
	size_t index;

	if (!zram->init_done)
		return;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = zram->table[index].handle;
		if (!handle)
			continue;

		zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	set_capacity(zram->disk, 0);
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

/* zram->init_lock should be held */
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

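/*
 * Called by the swap subsystem when a swap slot backed by this device
 * is freed: the compressed page can be released immediately instead of
 * lingering until the slot is overwritten.
 */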
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
	return num_devices;
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

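/*
 * Typical usage (a sketch; the disksize value and the swap setup are
 * only examples):
 *
 *   modprobe zram num_devices=4
 *   echo $((256*1024*1024)) > /sys/block/zram0/disksize
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 */
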
module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");