/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

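/*
 * ZRAM_ATTR_RO(name) generates a read-only sysfs show method for the
 * atomic64 counter zram->stats.<name>. For example, ZRAM_ATTR_RO(num_reads)
 * (instantiated below) defines zram_attr_num_reads_show() and the matching
 * dev_attr_num_reads attribute, exposed as /sys/block/zram<id>/num_reads.
 */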
#define ZRAM_ATTR_RO(name)                                              \
static ssize_t zram_attr_##name##_show(struct device *d,                \
                                struct device_attribute *attr, char *b) \
{                                                                       \
        struct zram *zram = dev_to_zram(d);                             \
        return scnprintf(b, PAGE_SIZE, "%llu\n",                        \
                (u64)atomic64_read(&zram->stats.name));                 \
}                                                                       \
static struct device_attribute dev_attr_##name =                        \
        __ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
        return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta = zram->meta;

        down_read(&zram->init_lock);
        if (init_done(zram))
                val = zs_get_total_size_bytes(meta->mem_pool);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
}

static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));
        up_write(&zram->init_lock);
        return len;
}

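/*
 * A typical configuration sequence from user space (the compressor and
 * stream count can only be changed while the device is uninitialized,
 * i.e. before disksize is set):
 *
 *   echo lzo > /sys/block/zram0/comp_algorithm
 *   echo 4 > /sys/block/zram0/max_comp_streams
 *   echo 512M > /sys/block/zram0/disksize
 */
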
/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        u64 start, end, bound;

        /* unaligned request */
        if (unlikely(bio->bi_iter.bi_sector &
                     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return 0;
        if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return 0;

        start = bio->bi_iter.bi_sector;
        end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return 0;

        /* I/O request is valid */
        return 1;
}
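
/*
 * Example: assuming the usual 4KB logical block from zram_drv.h (eight
 * 512-byte sectors), a bio starting at sector 7 fails the alignment
 * check (7 & 7 != 0), while an 8-sector bio ending exactly at the last
 * sector of the device satisfies end == bound and is accepted.
 */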

static void zram_meta_free(struct zram_meta *meta)
{
        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                goto out;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto free_meta;
        }

        meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto free_table;
        }

        rwlock_init(&meta->tb_lock);
        return meta;

free_table:
        vfree(meta->table);
free_meta:
        kfree(meta);
        meta = NULL;
out:
        return meta;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
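
/*
 * Example: with *offset == 2048 and a 2048-byte bvec, the sum reaches
 * PAGE_SIZE (on 4KB-page systems), so the page index advances and the
 * offset wraps back to 0 for the next segment.
 */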

static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear the zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        meta->table[index].size = 0;
}

static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        u16 size;

        read_lock(&meta->tb_lock);
        handle = meta->table[index].handle;
        size = meta->table[index].size;

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        read_unlock(&meta->tb_lock);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                atomic64_inc(&zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;

        page = bvec->bv_page;

        read_lock(&meta->tb_lock);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                read_unlock(&meta->tb_lock);
                handle_zero_page(bvec);
                return 0;
        }
        read_unlock(&meta->tb_lock);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_info("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm;
        bool locked = false;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        locked = true;
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                write_lock(&zram->meta->tb_lock);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                write_unlock(&zram->meta->tb_lock);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }

        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        locked = false;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        write_lock(&zram->meta->tb_lock);
        zram_free_page(zram, index);

        meta->table[index].handle = handle;
        meta->table[index].size = clen;
        write_unlock(&zram->meta->tb_lock);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (locked)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        if (ret)
                atomic64_inc(&zram->stats.failed_writes);
        return ret;
}
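
/*
 * Write path summary: partial writes are handled read-modify-write,
 * zero-filled pages are recorded as a table flag with no backing
 * allocation, and pages that compress poorly (clen > max_zpage_size)
 * are stored uncompressed; the new zsmalloc handle replaces the old
 * one under the write side of meta->tb_lock.
 */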

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio)
{
        int ret;
        int rw = bio_data_dir(bio);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        return ret;
}

/*
 * zram_bio_discard - handler for discard requests
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                             int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;

        /*
         * zram manages data in physical block size units. Because the logical
         * block size isn't identical to the physical block size on some
         * architectures, we could get a discard request pointing to a specific
         * offset within a certain physical block.  Although we can handle this
         * request by reading that physical block and decompressing and
         * partially zeroing and re-compressing and then re-storing it, this
         * isn't reasonable because our intent with a discard request is to
         * save memory.  So skipping this logical block is appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                /*
                 * Discard requests can be large, so the lock hold times could
                 * be lengthy.  Take the lock once per page instead.
                 */
                write_lock(&zram->meta->tb_lock);
                zram_free_page(zram, index);
                write_unlock(&zram->meta->tb_lock);
                index++;
                n -= PAGE_SIZE;
        }
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
        size_t index;
        struct zram_meta *meta;

        down_write(&zram->init_lock);
        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zcomp_destroy(zram->comp);
        zram->max_comp_streams = 1;

        zram_meta_free(zram->meta);
        zram->meta = NULL;
        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);

        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because the disk's capacity is protected by init_lock,
         * so revalidate_disk always sees up-to-date capacity.
         */
        if (reset_capacity)
                revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_info("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because the disk's capacity is protected by init_lock,
         * so revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta);
        return err;
}
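
/*
 * The disksize string is parsed with memparse(), so the usual size
 * suffixes work from user space, e.g.:
 *
 *   echo 512M > /sys/block/zram0/disksize
 */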

static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);

        if (!bdev)
                return -ENOMEM;

        /* Do not reset an active device! */
        if (bdev->bd_holders) {
                ret = -EBUSY;
                goto out;
        }

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                goto out;

        if (!do_reset) {
                ret = -EINVAL;
                goto out;
        }

        /* Make sure all pending I/O is finished */
        fsync_bdev(bdev);
        bdput(bdev);

        zram_reset_device(zram, true);
        return len;

out:
        bdput(bdev);
        return ret;
}
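
/*
 * Example: echo 1 > /sys/block/zram0/reset
 * This fails with -EBUSY while the device has holders, e.g. while it is
 * mounted or in use as a swap device.
 */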

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio, 0);
                return;
        }

        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}
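
/*
 * Example of the split above: a 4KB bvec arriving at offset 2048 within
 * a zram page is handled as two 2KB operations, one completing page
 * `index` and one starting at offset 0 of page `index + 1`.
 */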

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        down_read(&zram->init_lock);
        if (unlikely(!init_done(zram)))
                goto error;

        if (!valid_io_request(zram, bio)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto error;
        }

        __zram_make_request(zram, bio);
        up_read(&zram->init_lock);

        return;

error:
        up_read(&zram->init_lock);
        bio_io_error(bio);
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        write_lock(&meta->tb_lock);
        zram_free_page(zram, index);
        write_unlock(&meta->tb_lock);
        atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
                disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
                max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
                comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

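/*
 * Together with the DEVICE_ATTR definitions above, these attributes appear
 * as files under /sys/block/zram<id>/ once the attribute group is
 * registered in create_device() below.
 */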
static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
        int ret = -ENOMEM;

        init_rwsem(&zram->init_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_warn("Error allocating disk structure for device %d\n",
                        device_id);
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE-aligned
         * and n*PAGE_SIZE-sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
        /*
         * zram_bio_discard() will clear all logical blocks if the logical
         * block size is identical to the physical block size (PAGE_SIZE).
         * But if it is different, we will skip discarding some parts of
         * logical blocks in the part of the request range which isn't
         * aligned to physical block size.  So we can't ensure that all
         * discarded logical blocks are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warn("Error creating sysfs group\n");
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;
        return 0;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(zram->queue);
out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        del_gendisk(zram->disk);
        put_disk(zram->disk);

        blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warn("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warn("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        /* Allocate the device array and initialize each one */
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        pr_info("Created %u device(s) ...\n", num_devices);

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                /*
                 * Shouldn't access zram->disk after destroy_device
                 * because destroy_device already released zram->disk.
                 */
                zram_reset_device(zram, false);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
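
/*
 * Example: modprobe zram num_devices=4
 * creates /dev/zram0 through /dev/zram3; zram_init() rejects values
 * above max_num_devices with -EINVAL.
 */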

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");