zram: remove good and bad compress stats
author Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Mon, 7 Apr 2014 22:38:02 +0000 (15:38 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Apr 2014 23:35:59 +0000 (16:35 -0700)
Remove the `good' and `bad' compressed sub-request stats.  A RW request
may result in a number of RW sub-requests.  zram used to account `good'
compressed sub-requests (those with a compressed size less than 50% of the
original size) and `bad' compressed sub-requests (those with a compressed
size greater than 75% of the original size), leaving sub-requests with a
compressed size between 50% and 75% of the original size unaccounted and
unreported.  zram already accounts each sub-request's compressed size, so
the real device compression ratio can be calculated from that.
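If an explicit ratio is wanted, it can be derived from the counters zram
already maintains.  A minimal sketch, not part of this patch (the helper
name is hypothetical; compr_size and pages_stored are the existing fields
of struct zram_stats, and div64_u64() comes from linux/math64.h):

	/* Hypothetical helper, not in this patch: derive the effective
	 * compression ratio (in percent) from stats zram already keeps. */
	static u64 zram_compr_ratio_percent(struct zram *zram)
	{
		u64 orig = (u64)atomic_read(&zram->stats.pages_stored) * PAGE_SIZE;
		u64 compr = atomic64_read(&zram->stats.compr_size);

		return orig ? div64_u64(compr * 100, orig) : 0;
	}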

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/block/zram/zram_drv.c
drivers/block/zram/zram_drv.h

index 3f0d6de..995304e 100644
@@ -293,7 +293,6 @@ static void zram_free_page(struct zram *zram, size_t index)
 {
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;
-       u16 size = meta->table[index].size;
 
        if (unlikely(!handle)) {
                /*
@@ -307,14 +306,8 @@ static void zram_free_page(struct zram *zram, size_t index)
                return;
        }
 
-       if (unlikely(size > max_zpage_size))
-               atomic_dec(&zram->stats.bad_compress);
-
        zs_free(meta->mem_pool, handle);
 
-       if (size <= PAGE_SIZE / 2)
-               atomic_dec(&zram->stats.good_compress);
-
        atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
        atomic_dec(&zram->stats.pages_stored);
 
@@ -478,7 +471,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        }
 
        if (unlikely(clen > max_zpage_size)) {
-               atomic_inc(&zram->stats.bad_compress);
                clen = PAGE_SIZE;
                src = NULL;
                if (is_partial_io(bvec))
@@ -518,9 +510,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_size);
        atomic_inc(&zram->stats.pages_stored);
-       if (clen <= PAGE_SIZE / 2)
-               atomic_inc(&zram->stats.good_compress);
-
 out:
        if (locked)
                mutex_unlock(&meta->buffer_lock);
index e81e9cd..2f173cb 100644
@@ -78,8 +78,6 @@ struct zram_stats {
        atomic64_t notify_free; /* no. of swap slot free notifications */
        atomic_t pages_zero;            /* no. of zero filled pages */
        atomic_t pages_stored;  /* no. of pages currently stored */
-       atomic_t good_compress; /* % of pages with compression ratio<=50% */
-       atomic_t bad_compress;  /* % of pages with compression ratio>=75% */
 };
 
 struct zram_meta {