dm cache: fix some issues with the new discard range support
drivers/md/dm-cache-target.c
1 /*
2  * Copyright (C) 2012 Red Hat. All rights reserved.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm.h"
8 #include "dm-bio-prison.h"
9 #include "dm-bio-record.h"
10 #include "dm-cache-metadata.h"
11
12 #include <linux/dm-io.h>
13 #include <linux/dm-kcopyd.h>
14 #include <linux/init.h>
15 #include <linux/mempool.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
19
20 #define DM_MSG_PREFIX "cache"
21
22 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
23         "A percentage of time allocated for copying to and/or from cache");
24
25 /*----------------------------------------------------------------*/
26
27 /*
28  * Glossary:
29  *
30  * oblock: index of an origin block
31  * cblock: index of a cache block
32  * promotion: movement of a block from origin to cache
33  * demotion: movement of a block from cache to origin
34  * migration: movement of a block between the origin and cache device,
35  *            either direction
36  */
37
38 /*----------------------------------------------------------------*/
39
40 static size_t bitset_size_in_bytes(unsigned nr_entries)
41 {
42         return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
43 }
44
45 static unsigned long *alloc_bitset(unsigned nr_entries)
46 {
47         size_t s = bitset_size_in_bytes(nr_entries);
48         return vzalloc(s);
49 }
50
51 static void clear_bitset(void *bitset, unsigned nr_entries)
52 {
53         size_t s = bitset_size_in_bytes(nr_entries);
54         memset(bitset, 0, s);
55 }
56
57 static void free_bitset(unsigned long *bits)
58 {
59         vfree(bits);
60 }
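/*
 * Sizing note: the bitsets are allocated in whole unsigned longs.  For
 * example (illustrative numbers only), with BITS_PER_LONG == 64 a bitset
 * of 1000 entries needs dm_div_up(1000, 64) == 16 longs, i.e. 128 bytes,
 * which vzalloc() returns already zeroed.
 */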
61
62 /*----------------------------------------------------------------*/
63
64 /*
65  * There are a couple of places where we let a bio run, but want to do some
66  * work before calling its endio function.  We do this by temporarily
67  * changing the endio fn.
68  */
69 struct dm_hook_info {
70         bio_end_io_t *bi_end_io;
71         void *bi_private;
72 };
73
74 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
75                         bio_end_io_t *bi_end_io, void *bi_private)
76 {
77         h->bi_end_io = bio->bi_end_io;
78         h->bi_private = bio->bi_private;
79
80         bio->bi_end_io = bi_end_io;
81         bio->bi_private = bi_private;
82 }
83
84 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
85 {
86         bio->bi_end_io = h->bi_end_io;
87         bio->bi_private = h->bi_private;
88
89         /*
90          * Must bump bi_remaining to allow bio to complete with
91          * restored bi_end_io.
92          */
93         atomic_inc(&bio->bi_remaining);
94 }
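/*
 * The hook/unhook pair above is used by writethrough_endio() and
 * overwrite_endio() further down: the bio is sent to one device with a
 * temporary endio fn, and once that completes the original endio is
 * restored (bumping bi_remaining) before the bio is finished or reissued.
 */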
95
96 /*----------------------------------------------------------------*/
97
98 #define MIGRATION_POOL_SIZE 128
99 #define COMMIT_PERIOD HZ
100 #define MIGRATION_COUNT_WINDOW 10
101
102 /*
103  * The block size of the device holding cache data must be
104  * between 32KB and 1GB.
105  */
106 #define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
107 #define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
108
109 /*
110  * FIXME: the cache is read/write for the time being.
111  */
112 enum cache_metadata_mode {
113         CM_WRITE,               /* metadata may be changed */
114         CM_READ_ONLY,           /* metadata may not be changed */
115 };
116
117 enum cache_io_mode {
118         /*
119          * Data is written to cached blocks only.  These blocks are marked
120          * dirty.  If you lose the cache device you will lose data.
121          * Potential performance increase for both reads and writes.
122          */
123         CM_IO_WRITEBACK,
124
125         /*
126          * Data is written to both cache and origin.  Blocks are never
127          * dirty.  Potential performance benefit for reads only.
128          */
129         CM_IO_WRITETHROUGH,
130
131         /*
132          * A degraded mode useful for various cache coherency situations
133          * (eg, rolling back snapshots).  Reads and writes always go to the
134          * origin.  If a write goes to a cached oblock, then the cache
135          * block is invalidated.
136          */
137         CM_IO_PASSTHROUGH
138 };
139
140 struct cache_features {
141         enum cache_metadata_mode mode;
142         enum cache_io_mode io_mode;
143 };
144
145 struct cache_stats {
146         atomic_t read_hit;
147         atomic_t read_miss;
148         atomic_t write_hit;
149         atomic_t write_miss;
150         atomic_t demotion;
151         atomic_t promotion;
152         atomic_t copies_avoided;
153         atomic_t cache_cell_clash;
154         atomic_t commit_count;
155         atomic_t discard_count;
156 };
157
158 /*
159  * Defines a range of cblocks: begin to (end - 1) are in the range; end is
160  * the one-past-the-end value.
161  */
162 struct cblock_range {
163         dm_cblock_t begin;
164         dm_cblock_t end;
165 };
166
167 struct invalidation_request {
168         struct list_head list;
169         struct cblock_range *cblocks;
170
171         atomic_t complete;
172         int err;
173
174         wait_queue_head_t result_wait;
175 };
176
177 struct cache {
178         struct dm_target *ti;
179         struct dm_target_callbacks callbacks;
180
181         struct dm_cache_metadata *cmd;
182
183         /*
184          * Metadata is written to this device.
185          */
186         struct dm_dev *metadata_dev;
187
188         /*
189          * The slower of the two data devices.  Typically a spindle.
190          */
191         struct dm_dev *origin_dev;
192
193         /*
194          * The faster of the two data devices.  Typically an SSD.
195          */
196         struct dm_dev *cache_dev;
197
198         /*
199          * Size of the origin device in _complete_ blocks and native sectors.
200          */
201         dm_oblock_t origin_blocks;
202         sector_t origin_sectors;
203
204         /*
205          * Size of the cache device in blocks.
206          */
207         dm_cblock_t cache_size;
208
209         /*
210          * Fields for converting from sectors to blocks.
211          */
212         uint32_t sectors_per_block;
213         int sectors_per_block_shift;
214
215         spinlock_t lock;
216         struct bio_list deferred_bios;
217         struct bio_list deferred_flush_bios;
218         struct bio_list deferred_writethrough_bios;
219         struct list_head quiesced_migrations;
220         struct list_head completed_migrations;
221         struct list_head need_commit_migrations;
222         sector_t migration_threshold;
223         wait_queue_head_t migration_wait;
224         atomic_t nr_migrations;
225
226         wait_queue_head_t quiescing_wait;
227         atomic_t quiescing;
228         atomic_t quiescing_ack;
229
230         /*
231          * cache_size entries, dirty if set
232          */
233         atomic_t nr_dirty;
234         unsigned long *dirty_bitset;
235
236         /*
237          * origin_blocks entries, discarded if set.
238          */
239         dm_dblock_t discard_nr_blocks;
240         unsigned long *discard_bitset;
241         uint32_t discard_block_size; /* a power of 2 times sectors per block */
242
243         /*
244          * Rather than reconstructing the table line for the status we just
245          * save it and regurgitate.
246          */
247         unsigned nr_ctr_args;
248         const char **ctr_args;
249
250         struct dm_kcopyd_client *copier;
251         struct workqueue_struct *wq;
252         struct work_struct worker;
253
254         struct delayed_work waker;
255         unsigned long last_commit_jiffies;
256
257         struct dm_bio_prison *prison;
258         struct dm_deferred_set *all_io_ds;
259
260         mempool_t *migration_pool;
261         struct dm_cache_migration *next_migration;
262
263         struct dm_cache_policy *policy;
264         unsigned policy_nr_args;
265
266         bool need_tick_bio:1;
267         bool sized:1;
268         bool invalidate:1;
269         bool commit_requested:1;
270         bool loaded_mappings:1;
271         bool loaded_discards:1;
272
273         /*
274          * Cache features such as write-through.
275          */
276         struct cache_features features;
277
278         struct cache_stats stats;
279
280         /*
281          * Invalidation fields.
282          */
283         spinlock_t invalidation_lock;
284         struct list_head invalidation_requests;
285 };
286
287 struct per_bio_data {
288         bool tick:1;
289         unsigned req_nr:2;
290         struct dm_deferred_entry *all_io_entry;
291         struct dm_hook_info hook_info;
292
293         /*
294          * writethrough fields.  These MUST remain at the end of this
295          * structure and the 'cache' member must be the first of them, as it
296          * is used to determine the offset of the writethrough fields.
297          */
298         struct cache *cache;
299         dm_cblock_t cblock;
300         struct dm_bio_details bio_details;
301 };
302
303 struct dm_cache_migration {
304         struct list_head list;
305         struct cache *cache;
306
307         unsigned long start_jiffies;
308         dm_oblock_t old_oblock;
309         dm_oblock_t new_oblock;
310         dm_cblock_t cblock;
311
312         bool err:1;
313         bool discard:1;
314         bool writeback:1;
315         bool demote:1;
316         bool promote:1;
317         bool requeue_holder:1;
318         bool invalidate:1;
319
320         struct dm_bio_prison_cell *old_ocell;
321         struct dm_bio_prison_cell *new_ocell;
322 };
323
324 /*
325  * Processing a bio in the worker thread may require these memory
326  * allocations.  We prealloc to avoid deadlocks (the same worker thread
327  * frees them back to the mempool).
328  */
329 struct prealloc {
330         struct dm_cache_migration *mg;
331         struct dm_bio_prison_cell *cell1;
332         struct dm_bio_prison_cell *cell2;
333 };
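/*
 * Two cells are needed because a single bio can require two blocks to be
 * detained at once: e.g. a POLICY_REPLACE decision in process_bio()
 * detains both the new oblock and the old oblock being demoted.
 */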
334
335 static void wake_worker(struct cache *cache)
336 {
337         queue_work(cache->wq, &cache->worker);
338 }
339
340 /*----------------------------------------------------------------*/
341
342 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
343 {
344         /* FIXME: change to use a local slab. */
345         return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
346 }
347
348 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
349 {
350         dm_bio_prison_free_cell(cache->prison, cell);
351 }
352
353 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
354 {
355         if (!p->mg) {
356                 p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
357                 if (!p->mg)
358                         return -ENOMEM;
359         }
360
361         if (!p->cell1) {
362                 p->cell1 = alloc_prison_cell(cache);
363                 if (!p->cell1)
364                         return -ENOMEM;
365         }
366
367         if (!p->cell2) {
368                 p->cell2 = alloc_prison_cell(cache);
369                 if (!p->cell2)
370                         return -ENOMEM;
371         }
372
373         return 0;
374 }
375
376 static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
377 {
378         if (p->cell2)
379                 free_prison_cell(cache, p->cell2);
380
381         if (p->cell1)
382                 free_prison_cell(cache, p->cell1);
383
384         if (p->mg)
385                 mempool_free(p->mg, cache->migration_pool);
386 }
387
388 static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
389 {
390         struct dm_cache_migration *mg = p->mg;
391
392         BUG_ON(!mg);
393         p->mg = NULL;
394
395         return mg;
396 }
397
398 /*
399  * You must have a cell within the prealloc struct to return.  If not, this
400  * function will BUG() rather than returning NULL.
401  */
402 static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
403 {
404         struct dm_bio_prison_cell *r = NULL;
405
406         if (p->cell1) {
407                 r = p->cell1;
408                 p->cell1 = NULL;
409
410         } else if (p->cell2) {
411                 r = p->cell2;
412                 p->cell2 = NULL;
413         } else
414                 BUG();
415
416         return r;
417 }
418
419 /*
420  * You can't have more than two cells in a prealloc struct.  BUG() will be
421  * called if you try to overfill.
422  */
423 static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
424 {
425         if (!p->cell2)
426                 p->cell2 = cell;
427
428         else if (!p->cell1)
429                 p->cell1 = cell;
430
431         else
432                 BUG();
433 }
434
435 /*----------------------------------------------------------------*/
436
437 static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key *key)
438 {
439         key->virtual = 0;
440         key->dev = 0;
441         key->block_begin = from_oblock(begin);
442         key->block_end = from_oblock(end);
443 }
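/*
 * Keys cover the half open interval [begin, end) in oblock units, so a
 * single cell can detain an arbitrary run of origin blocks.  This is what
 * lets a discard spanning many blocks be held by one cell (see
 * bio_detain_range() and process_discard_bio() below).
 */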
444
445 /*
446  * The caller hands in a preallocated cell, and a free function for it.
447  * The cell will be freed if there's an error, or if it wasn't used because
448  * a cell with that key already exists.
449  */
450 typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
451
452 static int bio_detain_range(struct cache *cache, dm_oblock_t oblock_begin, dm_oblock_t oblock_end,
453                             struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
454                             cell_free_fn free_fn, void *free_context,
455                             struct dm_bio_prison_cell **cell_result)
456 {
457         int r;
458         struct dm_cell_key key;
459
460         build_key(oblock_begin, oblock_end, &key);
461         r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
462         if (r)
463                 free_fn(free_context, cell_prealloc);
464
465         return r;
466 }
467
468 static int bio_detain(struct cache *cache, dm_oblock_t oblock,
469                       struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
470                       cell_free_fn free_fn, void *free_context,
471                       struct dm_bio_prison_cell **cell_result)
472 {
473         dm_oblock_t end = to_oblock(from_oblock(oblock) + 1ULL);
474         return bio_detain_range(cache, oblock, end, bio,
475                                 cell_prealloc, free_fn, free_context, cell_result);
476 }
477
478 static int get_cell(struct cache *cache,
479                     dm_oblock_t oblock,
480                     struct prealloc *structs,
481                     struct dm_bio_prison_cell **cell_result)
482 {
483         int r;
484         struct dm_cell_key key;
485         struct dm_bio_prison_cell *cell_prealloc;
486
487         cell_prealloc = prealloc_get_cell(structs);
488
489         build_key(oblock, to_oblock(from_oblock(oblock) + 1ULL), &key);
490         r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
491         if (r)
492                 prealloc_put_cell(structs, cell_prealloc);
493
494         return r;
495 }
496
497 /*----------------------------------------------------------------*/
498
499 static bool is_dirty(struct cache *cache, dm_cblock_t b)
500 {
501         return test_bit(from_cblock(b), cache->dirty_bitset);
502 }
503
504 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
505 {
506         if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
507                 atomic_inc(&cache->nr_dirty);
508                 policy_set_dirty(cache->policy, oblock);
509         }
510 }
511
512 static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
513 {
514         if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
515                 policy_clear_dirty(cache->policy, oblock);
516                 if (atomic_dec_return(&cache->nr_dirty) == 0)
517                         dm_table_event(cache->ti->table);
518         }
519 }
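/*
 * dm_table_event() above raises a device-mapper event when the last dirty
 * block is cleaned, which userspace can wait on (e.g. to know when a
 * writeback has drained).
 */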
520
521 /*----------------------------------------------------------------*/
522
523 static bool block_size_is_power_of_two(struct cache *cache)
524 {
525         return cache->sectors_per_block_shift >= 0;
526 }
527
528 /* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
529 #if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
530 __always_inline
531 #endif
532 static dm_block_t block_div(dm_block_t b, uint32_t n)
533 {
534         do_div(b, n);
535
536         return b;
537 }
538
539 static dm_block_t oblocks_per_dblock(struct cache *cache)
540 {
541         dm_block_t oblocks = cache->discard_block_size;
542
543         if (block_size_is_power_of_two(cache))
544                 oblocks >>= cache->sectors_per_block_shift;
545         else
546                 oblocks = block_div(oblocks, cache->sectors_per_block);
547
548         return oblocks;
549 }
550
551 static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
552 {
553         return to_dblock(block_div(from_oblock(oblock),
554                                    oblocks_per_dblock(cache)));
555 }
556
557 static dm_oblock_t dblock_to_oblock(struct cache *cache, dm_dblock_t dblock)
558 {
559         return to_oblock(from_dblock(dblock) * oblocks_per_dblock(cache));
560 }
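/*
 * Example of the conversions above (illustrative numbers only): with
 * sectors_per_block == 128 and discard_block_size == 1024 sectors,
 * oblocks_per_dblock() == 8, so oblock 21 maps to dblock 2 and dblock 2
 * maps back to oblock 16 (the first oblock it covers).
 */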
561
562 static void set_discard(struct cache *cache, dm_dblock_t b)
563 {
564         unsigned long flags;
565
566         BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks));
567         atomic_inc(&cache->stats.discard_count);
568
569         spin_lock_irqsave(&cache->lock, flags);
570         set_bit(from_dblock(b), cache->discard_bitset);
571         spin_unlock_irqrestore(&cache->lock, flags);
572 }
573
574 static void clear_discard(struct cache *cache, dm_dblock_t b)
575 {
576         unsigned long flags;
577
578         spin_lock_irqsave(&cache->lock, flags);
579         clear_bit(from_dblock(b), cache->discard_bitset);
580         spin_unlock_irqrestore(&cache->lock, flags);
581 }
582
583 static bool is_discarded(struct cache *cache, dm_dblock_t b)
584 {
585         int r;
586         unsigned long flags;
587
588         spin_lock_irqsave(&cache->lock, flags);
589         r = test_bit(from_dblock(b), cache->discard_bitset);
590         spin_unlock_irqrestore(&cache->lock, flags);
591
592         return r;
593 }
594
595 static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
596 {
597         int r;
598         unsigned long flags;
599
600         spin_lock_irqsave(&cache->lock, flags);
601         r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
602                      cache->discard_bitset);
603         spin_unlock_irqrestore(&cache->lock, flags);
604
605         return r;
606 }
607
608 /*----------------------------------------------------------------*/
609
610 static void load_stats(struct cache *cache)
611 {
612         struct dm_cache_statistics stats;
613
614         dm_cache_metadata_get_stats(cache->cmd, &stats);
615         atomic_set(&cache->stats.read_hit, stats.read_hits);
616         atomic_set(&cache->stats.read_miss, stats.read_misses);
617         atomic_set(&cache->stats.write_hit, stats.write_hits);
618         atomic_set(&cache->stats.write_miss, stats.write_misses);
619 }
620
621 static void save_stats(struct cache *cache)
622 {
623         struct dm_cache_statistics stats;
624
625         stats.read_hits = atomic_read(&cache->stats.read_hit);
626         stats.read_misses = atomic_read(&cache->stats.read_miss);
627         stats.write_hits = atomic_read(&cache->stats.write_hit);
628         stats.write_misses = atomic_read(&cache->stats.write_miss);
629
630         dm_cache_metadata_set_stats(cache->cmd, &stats);
631 }
632
633 /*----------------------------------------------------------------
634  * Per bio data
635  *--------------------------------------------------------------*/
636
637 /*
638  * If using writeback, leave out struct per_bio_data's writethrough fields.
639  */
640 #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
641 #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
642
643 static bool writethrough_mode(struct cache_features *f)
644 {
645         return f->io_mode == CM_IO_WRITETHROUGH;
646 }
647
648 static bool writeback_mode(struct cache_features *f)
649 {
650         return f->io_mode == CM_IO_WRITEBACK;
651 }
652
653 static bool passthrough_mode(struct cache_features *f)
654 {
655         return f->io_mode == CM_IO_PASSTHROUGH;
656 }
657
658 static size_t get_per_bio_data_size(struct cache *cache)
659 {
660         return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
661 }
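/*
 * PB_DATA_SIZE_WB is offsetof(struct per_bio_data, cache), so in
 * writeback and passthrough modes the trailing writethrough fields are
 * not allocated at all; pb->cache, pb->cblock and pb->bio_details may
 * only be touched when writethrough_mode() is true.
 */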
662
663 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
664 {
665         struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
666         BUG_ON(!pb);
667         return pb;
668 }
669
670 static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
671 {
672         struct per_bio_data *pb = get_per_bio_data(bio, data_size);
673
674         pb->tick = false;
675         pb->req_nr = dm_bio_get_target_bio_nr(bio);
676         pb->all_io_entry = NULL;
677
678         return pb;
679 }
680
681 /*----------------------------------------------------------------
682  * Remapping
683  *--------------------------------------------------------------*/
684 static void remap_to_origin(struct cache *cache, struct bio *bio)
685 {
686         bio->bi_bdev = cache->origin_dev->bdev;
687 }
688
689 static void remap_to_cache(struct cache *cache, struct bio *bio,
690                            dm_cblock_t cblock)
691 {
692         sector_t bi_sector = bio->bi_iter.bi_sector;
693         sector_t block = from_cblock(cblock);
694
695         bio->bi_bdev = cache->cache_dev->bdev;
696         if (!block_size_is_power_of_two(cache))
697                 bio->bi_iter.bi_sector =
698                         (block * cache->sectors_per_block) +
699                         sector_div(bi_sector, cache->sectors_per_block);
700         else
701                 bio->bi_iter.bi_sector =
702                         (block << cache->sectors_per_block_shift) |
703                         (bi_sector & (cache->sectors_per_block - 1));
704 }
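/*
 * Worked example of the remap arithmetic (illustrative numbers only):
 * with sectors_per_block == 128 (shift 7), cblock == 5 and
 * bi_sector == 1027, the offset within the block is 1027 & 127 == 3 and
 * the remapped sector is (5 << 7) | 3 == 643.  The non power-of-two
 * branch computes the same value with a multiply and sector_div().
 */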
705
706 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
707 {
708         unsigned long flags;
709         size_t pb_data_size = get_per_bio_data_size(cache);
710         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
711
712         spin_lock_irqsave(&cache->lock, flags);
713         if (cache->need_tick_bio &&
714             !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
715                 pb->tick = true;
716                 cache->need_tick_bio = false;
717         }
718         spin_unlock_irqrestore(&cache->lock, flags);
719 }
720
721 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
722                                   dm_oblock_t oblock)
723 {
724         check_if_tick_bio_needed(cache, bio);
725         remap_to_origin(cache, bio);
726         if (bio_data_dir(bio) == WRITE)
727                 clear_discard(cache, oblock_to_dblock(cache, oblock));
728 }
729
730 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
731                                  dm_oblock_t oblock, dm_cblock_t cblock)
732 {
733         check_if_tick_bio_needed(cache, bio);
734         remap_to_cache(cache, bio, cblock);
735         if (bio_data_dir(bio) == WRITE) {
736                 set_dirty(cache, oblock, cblock);
737                 clear_discard(cache, oblock_to_dblock(cache, oblock));
738         }
739 }
740
741 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
742 {
743         sector_t block_nr = bio->bi_iter.bi_sector;
744
745         if (!block_size_is_power_of_two(cache))
746                 (void) sector_div(block_nr, cache->sectors_per_block);
747         else
748                 block_nr >>= cache->sectors_per_block_shift;
749
750         return to_oblock(block_nr);
751 }
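/*
 * Continuing the example above: bi_sector 1027 with a 128 sector block
 * size lands in oblock 8 (1027 >> 7).
 */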
752
753 static int bio_triggers_commit(struct cache *cache, struct bio *bio)
754 {
755         return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
756 }
757
758 /*
759  * You must increment the deferred set whilst the prison cell is held.  To
760  * encourage this, we ask for 'cell' to be passed in.
761  */
762 static void inc_ds(struct cache *cache, struct bio *bio,
763                    struct dm_bio_prison_cell *cell)
764 {
765         size_t pb_data_size = get_per_bio_data_size(cache);
766         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
767
768         BUG_ON(!cell);
769         BUG_ON(pb->all_io_entry);
770
771         pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
772 }
773
774 static void issue(struct cache *cache, struct bio *bio)
775 {
776         unsigned long flags;
777
778         if (!bio_triggers_commit(cache, bio)) {
779                 generic_make_request(bio);
780                 return;
781         }
782
783         /*
784          * Batch together any bios that trigger commits and then issue a
785          * single commit for them in do_worker().
786          */
787         spin_lock_irqsave(&cache->lock, flags);
788         cache->commit_requested = true;
789         bio_list_add(&cache->deferred_flush_bios, bio);
790         spin_unlock_irqrestore(&cache->lock, flags);
791 }
792
793 static void inc_and_issue(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell *cell)
794 {
795         inc_ds(cache, bio, cell);
796         issue(cache, bio);
797 }
798
799 static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
800 {
801         unsigned long flags;
802
803         spin_lock_irqsave(&cache->lock, flags);
804         bio_list_add(&cache->deferred_writethrough_bios, bio);
805         spin_unlock_irqrestore(&cache->lock, flags);
806
807         wake_worker(cache);
808 }
809
810 static void writethrough_endio(struct bio *bio, int err)
811 {
812         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
813
814         dm_unhook_bio(&pb->hook_info, bio);
815
816         if (err) {
817                 bio_endio(bio, err);
818                 return;
819         }
820
821         dm_bio_restore(&pb->bio_details, bio);
822         remap_to_cache(pb->cache, bio, pb->cblock);
823
824         /*
825          * We can't issue this bio directly, since we're in interrupt
826          * context.  So it gets put on a bio list for processing by the
827          * worker thread.
828          */
829         defer_writethrough_bio(pb->cache, bio);
830 }
831
832 /*
833  * When running in writethrough mode we need to send writes to clean blocks
834  * to both the cache and origin devices.  In future we'd like to clone the
835  * bio and send the copies in parallel, but for now we're doing them in
836  * series as this is easier.
837  */
838 static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
839                                        dm_oblock_t oblock, dm_cblock_t cblock)
840 {
841         struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
842
843         pb->cache = cache;
844         pb->cblock = cblock;
845         dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
846         dm_bio_record(&pb->bio_details, bio);
847
848         remap_to_origin_clear_discard(pb->cache, bio, oblock);
849 }
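/*
 * So a writethrough write to a clean block takes two trips: it is first
 * remapped to the origin with writethrough_endio() hooked in; when the
 * origin write completes, the endio restores the bio, remaps it to the
 * cache block recorded in pb->cblock and defers it to the worker thread
 * for reissue (we cannot submit from interrupt context).
 */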
850
851 /*----------------------------------------------------------------
852  * Migration processing
853  *
854  * Migration covers moving data from the origin device to the cache, or
855  * vice versa.
856  *--------------------------------------------------------------*/
857 static void free_migration(struct dm_cache_migration *mg)
858 {
859         mempool_free(mg, mg->cache->migration_pool);
860 }
861
862 static void inc_nr_migrations(struct cache *cache)
863 {
864         atomic_inc(&cache->nr_migrations);
865 }
866
867 static void dec_nr_migrations(struct cache *cache)
868 {
869         atomic_dec(&cache->nr_migrations);
870
871         /*
872          * Wake the worker in case we're suspending the target.
873          */
874         wake_up(&cache->migration_wait);
875 }
876
877 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
878                          bool holder)
879 {
880         (holder ? dm_cell_release : dm_cell_release_no_holder)
881                 (cache->prison, cell, &cache->deferred_bios);
882         free_prison_cell(cache, cell);
883 }
884
885 static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
886                        bool holder)
887 {
888         unsigned long flags;
889
890         spin_lock_irqsave(&cache->lock, flags);
891         __cell_defer(cache, cell, holder);
892         spin_unlock_irqrestore(&cache->lock, flags);
893
894         wake_worker(cache);
895 }
896
897 static void cleanup_migration(struct dm_cache_migration *mg)
898 {
899         struct cache *cache = mg->cache;
900         free_migration(mg);
901         dec_nr_migrations(cache);
902 }
903
904 static void migration_failure(struct dm_cache_migration *mg)
905 {
906         struct cache *cache = mg->cache;
907
908         if (mg->writeback) {
909                 DMWARN_LIMIT("writeback failed; couldn't copy block");
910                 set_dirty(cache, mg->old_oblock, mg->cblock);
911                 cell_defer(cache, mg->old_ocell, false);
912
913         } else if (mg->demote) {
914                 DMWARN_LIMIT("demotion failed; couldn't copy block");
915                 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
916
917                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
918                 if (mg->promote)
919                         cell_defer(cache, mg->new_ocell, true);
920         } else {
921                 DMWARN_LIMIT("promotion failed; couldn't copy block");
922                 policy_remove_mapping(cache->policy, mg->new_oblock);
923                 cell_defer(cache, mg->new_ocell, true);
924         }
925
926         cleanup_migration(mg);
927 }
928
929 static void migration_success_pre_commit(struct dm_cache_migration *mg)
930 {
931         unsigned long flags;
932         struct cache *cache = mg->cache;
933
934         if (mg->writeback) {
935                 clear_dirty(cache, mg->old_oblock, mg->cblock);
936                 cell_defer(cache, mg->old_ocell, false);
937                 cleanup_migration(mg);
938                 return;
939
940         } else if (mg->demote) {
941                 if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
942                         DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
943                         policy_force_mapping(cache->policy, mg->new_oblock,
944                                              mg->old_oblock);
945                         if (mg->promote)
946                                 cell_defer(cache, mg->new_ocell, true);
947                         cleanup_migration(mg);
948                         return;
949                 }
950         } else {
951                 if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
952                         DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
953                         policy_remove_mapping(cache->policy, mg->new_oblock);
954                         cleanup_migration(mg);
955                         return;
956                 }
957         }
958
959         spin_lock_irqsave(&cache->lock, flags);
960         list_add_tail(&mg->list, &cache->need_commit_migrations);
961         cache->commit_requested = true;
962         spin_unlock_irqrestore(&cache->lock, flags);
963 }
964
965 static void migration_success_post_commit(struct dm_cache_migration *mg)
966 {
967         unsigned long flags;
968         struct cache *cache = mg->cache;
969
970         if (mg->writeback) {
971                 DMWARN("writeback unexpectedly triggered commit");
972                 return;
973
974         } else if (mg->demote) {
975                 cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
976
977                 if (mg->promote) {
978                         mg->demote = false;
979
980                         spin_lock_irqsave(&cache->lock, flags);
981                         list_add_tail(&mg->list, &cache->quiesced_migrations);
982                         spin_unlock_irqrestore(&cache->lock, flags);
983
984                 } else {
985                         if (mg->invalidate)
986                                 policy_remove_mapping(cache->policy, mg->old_oblock);
987                         cleanup_migration(mg);
988                 }
989
990         } else {
991                 clear_dirty(cache, mg->new_oblock, mg->cblock);
992                 if (mg->requeue_holder)
993                         cell_defer(cache, mg->new_ocell, true);
994                 else {
995                         bio_endio(mg->new_ocell->holder, 0);
996                         cell_defer(cache, mg->new_ocell, false);
997                 }
998                 cleanup_migration(mg);
999         }
1000 }
1001
1002 static void copy_complete(int read_err, unsigned long write_err, void *context)
1003 {
1004         unsigned long flags;
1005         struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
1006         struct cache *cache = mg->cache;
1007
1008         if (read_err || write_err)
1009                 mg->err = true;
1010
1011         spin_lock_irqsave(&cache->lock, flags);
1012         list_add_tail(&mg->list, &cache->completed_migrations);
1013         spin_unlock_irqrestore(&cache->lock, flags);
1014
1015         wake_worker(cache);
1016 }
1017
1018 static void issue_copy(struct dm_cache_migration *mg)
1019 {
1020         int r;
1021         struct dm_io_region o_region, c_region;
1022         struct cache *cache = mg->cache;
1023         sector_t cblock = from_cblock(mg->cblock);
1024
1025         o_region.bdev = cache->origin_dev->bdev;
1026         o_region.count = cache->sectors_per_block;
1027
1028         c_region.bdev = cache->cache_dev->bdev;
1029         c_region.sector = cblock * cache->sectors_per_block;
1030         c_region.count = cache->sectors_per_block;
1031
1032         if (mg->writeback || mg->demote) {
1033                 /* demote */
1034                 o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
1035                 r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
1036         } else {
1037                 /* promote */
1038                 o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
1039                 r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
1040         }
1041
1042         if (r < 0) {
1043                 DMERR_LIMIT("issuing migration failed");
1044                 migration_failure(mg);
1045         }
1046 }
1047
1048 static void overwrite_endio(struct bio *bio, int err)
1049 {
1050         struct dm_cache_migration *mg = bio->bi_private;
1051         struct cache *cache = mg->cache;
1052         size_t pb_data_size = get_per_bio_data_size(cache);
1053         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1054         unsigned long flags;
1055
1056         dm_unhook_bio(&pb->hook_info, bio);
1057
1058         if (err)
1059                 mg->err = true;
1060
1061         mg->requeue_holder = false;
1062
1063         spin_lock_irqsave(&cache->lock, flags);
1064         list_add_tail(&mg->list, &cache->completed_migrations);
1065         spin_unlock_irqrestore(&cache->lock, flags);
1066
1067         wake_worker(cache);
1068 }
1069
1070 static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1071 {
1072         size_t pb_data_size = get_per_bio_data_size(mg->cache);
1073         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1074
1075         dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1076         remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
1077
1078         /*
1079          * No need to inc_ds() here, since the cell will be held for the
1080          * duration of the io.
1081          */
1082         generic_make_request(bio);
1083 }
1084
1085 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1086 {
1087         return (bio_data_dir(bio) == WRITE) &&
1088                 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1089 }
1090
1091 static void avoid_copy(struct dm_cache_migration *mg)
1092 {
1093         atomic_inc(&mg->cache->stats.copies_avoided);
1094         migration_success_pre_commit(mg);
1095 }
1096
1097 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1098                                      dm_dblock_t *b, dm_dblock_t *e)
1099 {
1100         sector_t sb = bio->bi_iter.bi_sector;
1101         sector_t se = bio_end_sector(bio);
1102
1103         *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size));
1104
1105         if (se - sb < cache->discard_block_size)
1106                 *e = *b;
1107         else
1108                 *e = to_dblock(block_div(se, cache->discard_block_size));
1109 }
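/*
 * The range is rounded inwards, so only discard blocks completely covered
 * by the bio are included.  For example (illustrative numbers only), with
 * discard_block_size == 2048 sectors a discard of sectors [1000, 5000)
 * gives b == 1 and e == 2: just dblock 1 (sectors 2048-4095).  A bio
 * smaller than one discard block yields an empty range (b == e).
 */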
1110
1111 static void issue_discard(struct dm_cache_migration *mg)
1112 {
1113         dm_dblock_t b, e;
1114         struct bio *bio = mg->new_ocell->holder;
1115
1116         calc_discard_block_range(mg->cache, bio, &b, &e);
1117         while (b != e) {
1118                 set_discard(mg->cache, b);
1119                 b = to_dblock(from_dblock(b) + 1);
1120         }
1121
1122         bio_endio(bio, 0);
1123         cell_defer(mg->cache, mg->new_ocell, false);
1124         free_migration(mg);
1125 }
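/*
 * Note that a discard only marks the relevant bits in the discard bitset
 * and then completes the bio; it is not passed down to the origin or
 * cache devices here.
 */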
1126
1127 static void issue_copy_or_discard(struct dm_cache_migration *mg)
1128 {
1129         bool avoid;
1130         struct cache *cache = mg->cache;
1131
1132         if (mg->discard) {
1133                 issue_discard(mg);
1134                 return;
1135         }
1136
1137         if (mg->writeback || mg->demote)
1138                 avoid = !is_dirty(cache, mg->cblock) ||
1139                         is_discarded_oblock(cache, mg->old_oblock);
1140         else {
1141                 struct bio *bio = mg->new_ocell->holder;
1142
1143                 avoid = is_discarded_oblock(cache, mg->new_oblock);
1144
1145                 if (!avoid && bio_writes_complete_block(cache, bio)) {
1146                         issue_overwrite(mg, bio);
1147                         return;
1148                 }
1149         }
1150
1151         avoid ? avoid_copy(mg) : issue_copy(mg);
1152 }
1153
1154 static void complete_migration(struct dm_cache_migration *mg)
1155 {
1156         if (mg->err)
1157                 migration_failure(mg);
1158         else
1159                 migration_success_pre_commit(mg);
1160 }
1161
1162 static void process_migrations(struct cache *cache, struct list_head *head,
1163                                void (*fn)(struct dm_cache_migration *))
1164 {
1165         unsigned long flags;
1166         struct list_head list;
1167         struct dm_cache_migration *mg, *tmp;
1168
1169         INIT_LIST_HEAD(&list);
1170         spin_lock_irqsave(&cache->lock, flags);
1171         list_splice_init(head, &list);
1172         spin_unlock_irqrestore(&cache->lock, flags);
1173
1174         list_for_each_entry_safe(mg, tmp, &list, list)
1175                 fn(mg);
1176 }
1177
1178 static void __queue_quiesced_migration(struct dm_cache_migration *mg)
1179 {
1180         list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
1181 }
1182
1183 static void queue_quiesced_migration(struct dm_cache_migration *mg)
1184 {
1185         unsigned long flags;
1186         struct cache *cache = mg->cache;
1187
1188         spin_lock_irqsave(&cache->lock, flags);
1189         __queue_quiesced_migration(mg);
1190         spin_unlock_irqrestore(&cache->lock, flags);
1191
1192         wake_worker(cache);
1193 }
1194
1195 static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
1196 {
1197         unsigned long flags;
1198         struct dm_cache_migration *mg, *tmp;
1199
1200         spin_lock_irqsave(&cache->lock, flags);
1201         list_for_each_entry_safe(mg, tmp, work, list)
1202                 __queue_quiesced_migration(mg);
1203         spin_unlock_irqrestore(&cache->lock, flags);
1204
1205         wake_worker(cache);
1206 }
1207
1208 static void check_for_quiesced_migrations(struct cache *cache,
1209                                           struct per_bio_data *pb)
1210 {
1211         struct list_head work;
1212
1213         if (!pb->all_io_entry)
1214                 return;
1215
1216         INIT_LIST_HEAD(&work);
1217         dm_deferred_entry_dec(pb->all_io_entry, &work);
1218
1219         if (!list_empty(&work))
1220                 queue_quiesced_migrations(cache, &work);
1221 }
1222
1223 static void quiesce_migration(struct dm_cache_migration *mg)
1224 {
1225         if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
1226                 queue_quiesced_migration(mg);
1227 }
1228
1229 static void promote(struct cache *cache, struct prealloc *structs,
1230                     dm_oblock_t oblock, dm_cblock_t cblock,
1231                     struct dm_bio_prison_cell *cell)
1232 {
1233         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1234
1235         mg->err = false;
1236         mg->discard = false;
1237         mg->writeback = false;
1238         mg->demote = false;
1239         mg->promote = true;
1240         mg->requeue_holder = true;
1241         mg->invalidate = false;
1242         mg->cache = cache;
1243         mg->new_oblock = oblock;
1244         mg->cblock = cblock;
1245         mg->old_ocell = NULL;
1246         mg->new_ocell = cell;
1247         mg->start_jiffies = jiffies;
1248
1249         inc_nr_migrations(cache);
1250         quiesce_migration(mg);
1251 }
1252
1253 static void writeback(struct cache *cache, struct prealloc *structs,
1254                       dm_oblock_t oblock, dm_cblock_t cblock,
1255                       struct dm_bio_prison_cell *cell)
1256 {
1257         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1258
1259         mg->err = false;
1260         mg->discard = false;
1261         mg->writeback = true;
1262         mg->demote = false;
1263         mg->promote = false;
1264         mg->requeue_holder = true;
1265         mg->invalidate = false;
1266         mg->cache = cache;
1267         mg->old_oblock = oblock;
1268         mg->cblock = cblock;
1269         mg->old_ocell = cell;
1270         mg->new_ocell = NULL;
1271         mg->start_jiffies = jiffies;
1272
1273         inc_nr_migrations(cache);
1274         quiesce_migration(mg);
1275 }
1276
1277 static void demote_then_promote(struct cache *cache, struct prealloc *structs,
1278                                 dm_oblock_t old_oblock, dm_oblock_t new_oblock,
1279                                 dm_cblock_t cblock,
1280                                 struct dm_bio_prison_cell *old_ocell,
1281                                 struct dm_bio_prison_cell *new_ocell)
1282 {
1283         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1284
1285         mg->err = false;
1286         mg->discard = false;
1287         mg->writeback = false;
1288         mg->demote = true;
1289         mg->promote = true;
1290         mg->requeue_holder = true;
1291         mg->invalidate = false;
1292         mg->cache = cache;
1293         mg->old_oblock = old_oblock;
1294         mg->new_oblock = new_oblock;
1295         mg->cblock = cblock;
1296         mg->old_ocell = old_ocell;
1297         mg->new_ocell = new_ocell;
1298         mg->start_jiffies = jiffies;
1299
1300         inc_nr_migrations(cache);
1301         quiesce_migration(mg);
1302 }
1303
1304 /*
1305  * Invalidate a cache entry.  No writeback occurs; any changes in the cache
1306  * block are thrown away.
1307  */
1308 static void invalidate(struct cache *cache, struct prealloc *structs,
1309                        dm_oblock_t oblock, dm_cblock_t cblock,
1310                        struct dm_bio_prison_cell *cell)
1311 {
1312         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1313
1314         mg->err = false;
1315         mg->discard = false;
1316         mg->writeback = false;
1317         mg->demote = true;
1318         mg->promote = false;
1319         mg->requeue_holder = true;
1320         mg->invalidate = true;
1321         mg->cache = cache;
1322         mg->old_oblock = oblock;
1323         mg->cblock = cblock;
1324         mg->old_ocell = cell;
1325         mg->new_ocell = NULL;
1326         mg->start_jiffies = jiffies;
1327
1328         inc_nr_migrations(cache);
1329         quiesce_migration(mg);
1330 }
1331
1332 static void discard(struct cache *cache, struct prealloc *structs,
1333                     struct dm_bio_prison_cell *cell)
1334 {
1335         struct dm_cache_migration *mg = prealloc_get_migration(structs);
1336
1337         mg->err = false;
1338         mg->discard = true;
1339         mg->writeback = false;
1340         mg->demote = false;
1341         mg->promote = false;
1342         mg->requeue_holder = false;
1343         mg->invalidate = false;
1344         mg->cache = cache;
1345         mg->old_ocell = NULL;
1346         mg->new_ocell = cell;
1347         mg->start_jiffies = jiffies;
1348
1349         quiesce_migration(mg);
1350 }
1351
1352 /*----------------------------------------------------------------
1353  * bio processing
1354  *--------------------------------------------------------------*/
1355 static void defer_bio(struct cache *cache, struct bio *bio)
1356 {
1357         unsigned long flags;
1358
1359         spin_lock_irqsave(&cache->lock, flags);
1360         bio_list_add(&cache->deferred_bios, bio);
1361         spin_unlock_irqrestore(&cache->lock, flags);
1362
1363         wake_worker(cache);
1364 }
1365
1366 static void process_flush_bio(struct cache *cache, struct bio *bio)
1367 {
1368         size_t pb_data_size = get_per_bio_data_size(cache);
1369         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1370
1371         BUG_ON(bio->bi_iter.bi_size);
1372         if (!pb->req_nr)
1373                 remap_to_origin(cache, bio);
1374         else
1375                 remap_to_cache(cache, bio, 0);
1376
1377         /*
1378          * REQ_FLUSH is not directed at any particular block so we don't
1379          * need to inc_ds().  REQ_FUA bios are split into a write + REQ_FLUSH
1380          * by dm-core.
1381          */
1382         issue(cache, bio);
1383 }
1384
1385 static void process_discard_bio(struct cache *cache, struct prealloc *structs,
1386                                 struct bio *bio)
1387 {
1388         int r;
1389         dm_dblock_t b, e;
1390         struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
1391
1392         calc_discard_block_range(cache, bio, &b, &e);
1393         if (b == e) {
1394                 bio_endio(bio, 0);
1395                 return;
1396         }
1397
1398         cell_prealloc = prealloc_get_cell(structs);
1399         r = bio_detain_range(cache, dblock_to_oblock(cache, b), dblock_to_oblock(cache, e), bio, cell_prealloc,
1400                              (cell_free_fn) prealloc_put_cell,
1401                              structs, &new_ocell);
1402         if (r > 0)
1403                 return;
1404
1405         discard(cache, structs, new_ocell);
1406 }
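/*
 * The whole discard range is detained in a single cell (via
 * bio_detain_range() above), so the worker's issue_discard() can update
 * the bitset for every block in the range before releasing it.
 */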
1407
1408 static bool spare_migration_bandwidth(struct cache *cache)
1409 {
1410         sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
1411                 cache->sectors_per_block;
1412         return current_volume < cache->migration_threshold;
1413 }
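/*
 * For example (illustrative numbers only), with 128 sector blocks and
 * three migrations already in flight, current_volume is 4 * 128 == 512
 * sectors, so a new migration is only allowed while migration_threshold
 * exceeds 512.
 */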
1414
1415 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1416 {
1417         atomic_inc(bio_data_dir(bio) == READ ?
1418                    &cache->stats.read_hit : &cache->stats.write_hit);
1419 }
1420
1421 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1422 {
1423         atomic_inc(bio_data_dir(bio) == READ ?
1424                    &cache->stats.read_miss : &cache->stats.write_miss);
1425 }
1426
1427 static void process_bio(struct cache *cache, struct prealloc *structs,
1428                         struct bio *bio)
1429 {
1430         int r;
1431         bool release_cell = true;
1432         dm_oblock_t block = get_bio_block(cache, bio);
1433         struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
1434         struct policy_result lookup_result;
1435         bool discarded_block = is_discarded_oblock(cache, block);
1436         bool passthrough = passthrough_mode(&cache->features);
1437         bool can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
1438
1439         /*
1440          * Check to see if that block is currently migrating.
1441          */
1442         cell_prealloc = prealloc_get_cell(structs);
1443         r = bio_detain(cache, block, bio, cell_prealloc,
1444                        (cell_free_fn) prealloc_put_cell,
1445                        structs, &new_ocell);
1446         if (r > 0)
1447                 return;
1448
1449         r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
1450                        bio, &lookup_result);
1451
1452         if (r == -EWOULDBLOCK)
1453                 /* migration has been denied */
1454                 lookup_result.op = POLICY_MISS;
1455
1456         switch (lookup_result.op) {
1457         case POLICY_HIT:
1458                 if (passthrough) {
1459                         inc_miss_counter(cache, bio);
1460
1461                         /*
1462                          * Passthrough always maps to the origin,
1463                          * invalidating any cache blocks that are written
1464                          * to.
1465                          */
1466
1467                         if (bio_data_dir(bio) == WRITE) {
1468                                 atomic_inc(&cache->stats.demotion);
1469                                 invalidate(cache, structs, block, lookup_result.cblock, new_ocell);
1470                                 release_cell = false;
1471
1472                         } else {
1473                                 /* FIXME: factor out issue_origin() */
1474                                 remap_to_origin_clear_discard(cache, bio, block);
1475                                 inc_and_issue(cache, bio, new_ocell);
1476                         }
1477                 } else {
1478                         inc_hit_counter(cache, bio);
1479
1480                         if (bio_data_dir(bio) == WRITE &&
1481                             writethrough_mode(&cache->features) &&
1482                             !is_dirty(cache, lookup_result.cblock)) {
1483                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
1484                                 inc_and_issue(cache, bio, new_ocell);
1485
1486                         } else  {
1487                                 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
1488                                 inc_and_issue(cache, bio, new_ocell);
1489                         }
1490                 }
1491
1492                 break;
1493
1494         case POLICY_MISS:
1495                 inc_miss_counter(cache, bio);
1496                 remap_to_origin_clear_discard(cache, bio, block);
1497                 inc_and_issue(cache, bio, new_ocell);
1498                 break;
1499
1500         case POLICY_NEW:
1501                 atomic_inc(&cache->stats.promotion);
1502                 promote(cache, structs, block, lookup_result.cblock, new_ocell);
1503                 release_cell = false;
1504                 break;
1505
1506         case POLICY_REPLACE:
1507                 cell_prealloc = prealloc_get_cell(structs);
1508                 r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
1509                                (cell_free_fn) prealloc_put_cell,
1510                                structs, &old_ocell);
1511                 if (r > 0) {
1512                         /*
1513                          * We have to be careful to avoid lock inversion of
1514                          * the cells.  So we back off, and wait for the
1515                          * old_ocell to become free.
1516                          */
1517                         policy_force_mapping(cache->policy, block,
1518                                              lookup_result.old_oblock);
1519                         atomic_inc(&cache->stats.cache_cell_clash);
1520                         break;
1521                 }
1522                 atomic_inc(&cache->stats.demotion);
1523                 atomic_inc(&cache->stats.promotion);
1524
1525                 demote_then_promote(cache, structs, lookup_result.old_oblock,
1526                                     block, lookup_result.cblock,
1527                                     old_ocell, new_ocell);
1528                 release_cell = false;
1529                 break;
1530
1531         default:
1532                 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
1533                             (unsigned) lookup_result.op);
1534                 bio_io_error(bio);
1535         }
1536
1537         if (release_cell)
1538                 cell_defer(cache, new_ocell, false);
1539 }
1540
1541 static int need_commit_due_to_time(struct cache *cache)
1542 {
1543         return jiffies < cache->last_commit_jiffies ||
1544                jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
1545 }
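/*
 * The first comparison catches jiffies wrapping (or last_commit_jiffies
 * being in the future), the second enforces the COMMIT_PERIOD.
 */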
1546
1547 static int commit_if_needed(struct cache *cache)
1548 {
1549         int r = 0;
1550
1551         if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
1552             dm_cache_changed_this_transaction(cache->cmd)) {
1553                 atomic_inc(&cache->stats.commit_count);
1554                 cache->commit_requested = false;
1555                 r = dm_cache_commit(cache->cmd, false);
1556                 cache->last_commit_jiffies = jiffies;
1557         }
1558
1559         return r;
1560 }
1561
1562 static void process_deferred_bios(struct cache *cache)
1563 {
1564         unsigned long flags;
1565         struct bio_list bios;
1566         struct bio *bio;
1567         struct prealloc structs;
1568
1569         memset(&structs, 0, sizeof(structs));
1570         bio_list_init(&bios);
1571
1572         spin_lock_irqsave(&cache->lock, flags);
1573         bio_list_merge(&bios, &cache->deferred_bios);
1574         bio_list_init(&cache->deferred_bios);
1575         spin_unlock_irqrestore(&cache->lock, flags);
1576
1577         while (!bio_list_empty(&bios)) {
1578                 /*
1579                  * If we've got no free migration structs, and processing
1580                  * this bio might require one, we pause until there are some
1581                  * prepared mappings to process.
1582                  */
1583                 if (prealloc_data_structs(cache, &structs)) {
1584                         spin_lock_irqsave(&cache->lock, flags);
1585                         bio_list_merge(&cache->deferred_bios, &bios);
1586                         spin_unlock_irqrestore(&cache->lock, flags);
1587                         break;
1588                 }
1589
1590                 bio = bio_list_pop(&bios);
1591
1592                 if (bio->bi_rw & REQ_FLUSH)
1593                         process_flush_bio(cache, bio);
1594                 else if (bio->bi_rw & REQ_DISCARD)
1595                         process_discard_bio(cache, &structs, bio);
1596                 else
1597                         process_bio(cache, &structs, bio);
1598         }
1599
1600         prealloc_free_structs(cache, &structs);
1601 }
1602
1603 static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
1604 {
1605         unsigned long flags;
1606         struct bio_list bios;
1607         struct bio *bio;
1608
1609         bio_list_init(&bios);
1610
1611         spin_lock_irqsave(&cache->lock, flags);
1612         bio_list_merge(&bios, &cache->deferred_flush_bios);
1613         bio_list_init(&cache->deferred_flush_bios);
1614         spin_unlock_irqrestore(&cache->lock, flags);
1615
1616         /*
1617          * These bios have already been through inc_ds()
1618          */
1619         while ((bio = bio_list_pop(&bios)))
1620                 submit_bios ? generic_make_request(bio) : bio_io_error(bio);
1621 }
1622
1623 static void process_deferred_writethrough_bios(struct cache *cache)
1624 {
1625         unsigned long flags;
1626         struct bio_list bios;
1627         struct bio *bio;
1628
1629         bio_list_init(&bios);
1630
1631         spin_lock_irqsave(&cache->lock, flags);
1632         bio_list_merge(&bios, &cache->deferred_writethrough_bios);
1633         bio_list_init(&cache->deferred_writethrough_bios);
1634         spin_unlock_irqrestore(&cache->lock, flags);
1635
1636         /*
1637          * These bios have already been through inc_ds()
1638          */
1639         while ((bio = bio_list_pop(&bios)))
1640                 generic_make_request(bio);
1641 }
1642
1643 static void writeback_some_dirty_blocks(struct cache *cache)
1644 {
1645         int r = 0;
1646         dm_oblock_t oblock;
1647         dm_cblock_t cblock;
1648         struct prealloc structs;
1649         struct dm_bio_prison_cell *old_ocell;
1650
1651         memset(&structs, 0, sizeof(structs));
1652
1653         while (spare_migration_bandwidth(cache)) {
1654                 if (prealloc_data_structs(cache, &structs))
1655                         break;
1656
1657                 r = policy_writeback_work(cache->policy, &oblock, &cblock);
1658                 if (r)
1659                         break;
1660
1661                 r = get_cell(cache, oblock, &structs, &old_ocell);
1662                 if (r) {
1663                         policy_set_dirty(cache->policy, oblock);
1664                         break;
1665                 }
1666
1667                 writeback(cache, &structs, oblock, cblock, old_ocell);
1668         }
1669
1670         prealloc_free_structs(cache, &structs);
1671 }
1672
1673 /*----------------------------------------------------------------
1674  * Invalidations.
1675  * Dropping something from the cache *without* writing back.
1676  *--------------------------------------------------------------*/
1677
1678 static void process_invalidation_request(struct cache *cache, struct invalidation_request *req)
1679 {
1680         int r = 0;
1681         uint64_t begin = from_cblock(req->cblocks->begin);
1682         uint64_t end = from_cblock(req->cblocks->end);
1683
1684         while (begin != end) {
1685                 r = policy_remove_cblock(cache->policy, to_cblock(begin));
1686                 if (!r) {
1687                         r = dm_cache_remove_mapping(cache->cmd, to_cblock(begin));
1688                         if (r)
1689                                 break;
1690
1691                 } else if (r == -ENODATA) {
1692                         /* harmless, already unmapped */
1693                         r = 0;
1694
1695                 } else {
1696                         DMERR("policy_remove_cblock failed");
1697                         break;
1698                 }
1699
1700                 begin++;
1701         }
1702
1703         cache->commit_requested = true;
1704
1705         req->err = r;
1706         atomic_set(&req->complete, 1);
1707
1708         wake_up(&req->result_wait);
1709 }
1710
1711 static void process_invalidation_requests(struct cache *cache)
1712 {
1713         struct list_head list;
1714         struct invalidation_request *req, *tmp;
1715
1716         INIT_LIST_HEAD(&list);
1717         spin_lock(&cache->invalidation_lock);
1718         list_splice_init(&cache->invalidation_requests, &list);
1719         spin_unlock(&cache->invalidation_lock);
1720
1721         list_for_each_entry_safe (req, tmp, &list, list)
1722                 process_invalidation_request(cache, req);
1723 }
1724
1725 /*----------------------------------------------------------------
1726  * Main worker loop
1727  *--------------------------------------------------------------*/
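/*
 * Quiescing handshake: cache_postsuspend() sets 'quiescing' and waits on
 * quiescing_wait; the worker notices via is_quiescing(), stops taking on
 * new bios, and acknowledges through ack_quiescing(), which wakes the
 * suspender.
 */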
1728 static bool is_quiescing(struct cache *cache)
1729 {
1730         return atomic_read(&cache->quiescing);
1731 }
1732
1733 static void ack_quiescing(struct cache *cache)
1734 {
1735         if (is_quiescing(cache)) {
1736                 atomic_inc(&cache->quiescing_ack);
1737                 wake_up(&cache->quiescing_wait);
1738         }
1739 }
1740
1741 static void wait_for_quiescing_ack(struct cache *cache)
1742 {
1743         wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1744 }
1745
1746 static void start_quiescing(struct cache *cache)
1747 {
1748         atomic_inc(&cache->quiescing);
1749         wait_for_quiescing_ack(cache);
1750 }
1751
1752 static void stop_quiescing(struct cache *cache)
1753 {
1754         atomic_set(&cache->quiescing, 0);
1755         atomic_set(&cache->quiescing_ack, 0);
1756 }
1757
1758 static void wait_for_migrations(struct cache *cache)
1759 {
1760         wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
1761 }
1762
1763 static void stop_worker(struct cache *cache)
1764 {
1765         cancel_delayed_work(&cache->waker);
1766         flush_workqueue(cache->wq);
1767 }
1768
1769 static void requeue_deferred_io(struct cache *cache)
1770 {
1771         struct bio *bio;
1772         struct bio_list bios;
1773
1774         bio_list_init(&bios);
1775         bio_list_merge(&bios, &cache->deferred_bios);
1776         bio_list_init(&cache->deferred_bios);
1777
1778         while ((bio = bio_list_pop(&bios)))
1779                 bio_endio(bio, DM_ENDIO_REQUEUE);
1780 }
1781
1782 static int more_work(struct cache *cache)
1783 {
1784         if (is_quiescing(cache))
1785                 return !list_empty(&cache->quiesced_migrations) ||
1786                         !list_empty(&cache->completed_migrations) ||
1787                         !list_empty(&cache->need_commit_migrations);
1788         else
1789                 return !bio_list_empty(&cache->deferred_bios) ||
1790                         !bio_list_empty(&cache->deferred_flush_bios) ||
1791                         !bio_list_empty(&cache->deferred_writethrough_bios) ||
1792                         !list_empty(&cache->quiesced_migrations) ||
1793                         !list_empty(&cache->completed_migrations) ||
1794                         !list_empty(&cache->need_commit_migrations) ||
1795                         cache->invalidate;
1796 }
1797
1798 static void do_worker(struct work_struct *ws)
1799 {
1800         struct cache *cache = container_of(ws, struct cache, worker);
1801
1802         do {
1803                 if (!is_quiescing(cache)) {
1804                         writeback_some_dirty_blocks(cache);
1805                         process_deferred_writethrough_bios(cache);
1806                         process_deferred_bios(cache);
1807                         process_invalidation_requests(cache);
1808                 }
1809
1810                 process_migrations(cache, &cache->quiesced_migrations, issue_copy_or_discard);
1811                 process_migrations(cache, &cache->completed_migrations, complete_migration);
1812
1813                 if (commit_if_needed(cache)) {
1814                         process_deferred_flush_bios(cache, false);
1815                         process_migrations(cache, &cache->need_commit_migrations, migration_failure);
1816
1817                         /*
1818                          * FIXME: rollback metadata or just go into a
1819                          * failure mode and error everything
1820                          */
1821                 } else {
1822                         process_deferred_flush_bios(cache, true);
1823                         process_migrations(cache, &cache->need_commit_migrations,
1824                                            migration_success_post_commit);
1825                 }
1826
1827                 ack_quiescing(cache);
1828
1829         } while (more_work(cache));
1830 }
1831
1832 /*
1833  * We want to commit periodically so that not too much
1834  * unwritten metadata builds up.
1835  */
1836 static void do_waker(struct work_struct *ws)
1837 {
1838         struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
1839         policy_tick(cache->policy);
1840         wake_worker(cache);
1841         queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
1842 }
1843
1844 /*----------------------------------------------------------------*/
1845
1846 static int is_congested(struct dm_dev *dev, int bdi_bits)
1847 {
1848         struct request_queue *q = bdev_get_queue(dev->bdev);
1849         return bdi_congested(&q->backing_dev_info, bdi_bits);
1850 }
1851
1852 static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1853 {
1854         struct cache *cache = container_of(cb, struct cache, callbacks);
1855
1856         return is_congested(cache->origin_dev, bdi_bits) ||
1857                 is_congested(cache->cache_dev, bdi_bits);
1858 }
1859
1860 /*----------------------------------------------------------------
1861  * Target methods
1862  *--------------------------------------------------------------*/
1863
1864 /*
1865  * This function gets called on the error paths of the constructor, so we
1866  * have to cope with a partially initialised struct.
1867  */
1868 static void destroy(struct cache *cache)
1869 {
1870         unsigned i;
1871
1872         if (cache->next_migration)
1873                 mempool_free(cache->next_migration, cache->migration_pool);
1874
1875         if (cache->migration_pool)
1876                 mempool_destroy(cache->migration_pool);
1877
1878         if (cache->all_io_ds)
1879                 dm_deferred_set_destroy(cache->all_io_ds);
1880
1881         if (cache->prison)
1882                 dm_bio_prison_destroy(cache->prison);
1883
1884         if (cache->wq)
1885                 destroy_workqueue(cache->wq);
1886
1887         if (cache->dirty_bitset)
1888                 free_bitset(cache->dirty_bitset);
1889
1890         if (cache->discard_bitset)
1891                 free_bitset(cache->discard_bitset);
1892
1893         if (cache->copier)
1894                 dm_kcopyd_client_destroy(cache->copier);
1895
1896         if (cache->cmd)
1897                 dm_cache_metadata_close(cache->cmd);
1898
1899         if (cache->metadata_dev)
1900                 dm_put_device(cache->ti, cache->metadata_dev);
1901
1902         if (cache->origin_dev)
1903                 dm_put_device(cache->ti, cache->origin_dev);
1904
1905         if (cache->cache_dev)
1906                 dm_put_device(cache->ti, cache->cache_dev);
1907
1908         if (cache->policy)
1909                 dm_cache_policy_destroy(cache->policy);
1910
1911         for (i = 0; i < cache->nr_ctr_args ; i++)
1912                 kfree(cache->ctr_args[i]);
1913         kfree(cache->ctr_args);
1914
1915         kfree(cache);
1916 }
1917
1918 static void cache_dtr(struct dm_target *ti)
1919 {
1920         struct cache *cache = ti->private;
1921
1922         destroy(cache);
1923 }
1924
1925 static sector_t get_dev_size(struct dm_dev *dev)
1926 {
1927         return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
1928 }
1929
1930 /*----------------------------------------------------------------*/
1931
1932 /*
1933  * Construct a cache device mapping.
1934  *
1935  * cache <metadata dev> <cache dev> <origin dev> <block size>
1936  *       <#feature args> [<feature arg>]*
1937  *       <policy> <#policy args> [<policy arg>]*
1938  *
1939  * metadata dev    : fast device holding the persistent metadata
1940  * cache dev       : fast device holding cached data blocks
1941  * origin dev      : slow device holding original data blocks
1942  * block size      : cache unit size in sectors
1943  *
1944  * #feature args   : number of feature arguments passed
1945  * feature args    : writethrough or passthrough.  (The default is writeback.)
1946  *
1947  * policy          : the replacement policy to use
1948  * #policy args    : an even number of policy arguments corresponding
1949  *                   to key/value pairs passed to the policy
1950  * policy args     : key/value pairs passed to the policy
1951  *                   E.g. 'sequential_threshold 1024'
1952  *                   See cache-policies.txt for details.
1953  *
1954  * Optional feature arguments are:
1955  *   writethrough  : write through caching that prohibits cache block
1956  *                   content from being different from origin block content.
1957  *                   Without this argument, the default behaviour is to write
1958  *                   back cache block contents later for performance reasons,
1959  *                   so they may differ from the corresponding origin blocks.
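 *
 *   passthrough   : a degraded mode useful when the cache contents are
 *                   not known to be coherent with the origin (e.g. if
 *                   they may be stale).  All reads are served from the
 *                   origin; writes invalidate the corresponding cache
 *                   block.  The cache must contain only clean blocks
 *                   for this mode to be selected.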
1960  */
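/*
 * Purely illustrative constructor line (the device names, sizes and
 * policy arguments below are hypothetical, not taken from this file);
 * a table entry is a single line, wrapped here for readability:
 *
 *   0 41943040 cache /dev/mapper/fast-meta /dev/mapper/fast-blocks
 *       /dev/sdb 512 1 writeback default 2 sequential_threshold 1024
 *
 * i.e. a 41943040 sector (20GiB) origin, 512 sector (256KiB) cache
 * blocks, one feature argument selecting writeback, and the 'default'
 * policy given a single key/value pair.
 */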
1961 struct cache_args {
1962         struct dm_target *ti;
1963
1964         struct dm_dev *metadata_dev;
1965
1966         struct dm_dev *cache_dev;
1967         sector_t cache_sectors;
1968
1969         struct dm_dev *origin_dev;
1970         sector_t origin_sectors;
1971
1972         uint32_t block_size;
1973
1974         const char *policy_name;
1975         int policy_argc;
1976         const char **policy_argv;
1977
1978         struct cache_features features;
1979 };
1980
1981 static void destroy_cache_args(struct cache_args *ca)
1982 {
1983         if (ca->metadata_dev)
1984                 dm_put_device(ca->ti, ca->metadata_dev);
1985
1986         if (ca->cache_dev)
1987                 dm_put_device(ca->ti, ca->cache_dev);
1988
1989         if (ca->origin_dev)
1990                 dm_put_device(ca->ti, ca->origin_dev);
1991
1992         kfree(ca);
1993 }
1994
1995 static bool at_least_one_arg(struct dm_arg_set *as, char **error)
1996 {
1997         if (!as->argc) {
1998                 *error = "Insufficient args";
1999                 return false;
2000         }
2001
2002         return true;
2003 }
2004
2005 static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
2006                               char **error)
2007 {
2008         int r;
2009         sector_t metadata_dev_size;
2010         char b[BDEVNAME_SIZE];
2011
2012         if (!at_least_one_arg(as, error))
2013                 return -EINVAL;
2014
2015         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2016                           &ca->metadata_dev);
2017         if (r) {
2018                 *error = "Error opening metadata device";
2019                 return r;
2020         }
2021
2022         metadata_dev_size = get_dev_size(ca->metadata_dev);
2023         if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
2024                 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2025                        bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
2026
2027         return 0;
2028 }
2029
2030 static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
2031                            char **error)
2032 {
2033         int r;
2034
2035         if (!at_least_one_arg(as, error))
2036                 return -EINVAL;
2037
2038         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2039                           &ca->cache_dev);
2040         if (r) {
2041                 *error = "Error opening cache device";
2042                 return r;
2043         }
2044         ca->cache_sectors = get_dev_size(ca->cache_dev);
2045
2046         return 0;
2047 }
2048
2049 static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
2050                             char **error)
2051 {
2052         int r;
2053
2054         if (!at_least_one_arg(as, error))
2055                 return -EINVAL;
2056
2057         r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
2058                           &ca->origin_dev);
2059         if (r) {
2060                 *error = "Error opening origin device";
2061                 return r;
2062         }
2063
2064         ca->origin_sectors = get_dev_size(ca->origin_dev);
2065         if (ca->ti->len > ca->origin_sectors) {
2066                 *error = "Device size larger than cached device";
2067                 return -EINVAL;
2068         }
2069
2070         return 0;
2071 }
2072
2073 static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
2074                             char **error)
2075 {
2076         unsigned long block_size;
2077
2078         if (!at_least_one_arg(as, error))
2079                 return -EINVAL;
2080
2081         if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
2082             block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2083             block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
2084             block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
2085                 *error = "Invalid data block size";
2086                 return -EINVAL;
2087         }
2088
2089         if (block_size > ca->cache_sectors) {
2090                 *error = "Data block size is larger than the cache device";
2091                 return -EINVAL;
2092         }
2093
2094         ca->block_size = block_size;
2095
2096         return 0;
2097 }
2098
2099 static void init_features(struct cache_features *cf)
2100 {
2101         cf->mode = CM_WRITE;
2102         cf->io_mode = CM_IO_WRITEBACK;
2103 }
2104
2105 static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2106                           char **error)
2107 {
2108         static struct dm_arg _args[] = {
2109                 {0, 1, "Invalid number of cache feature arguments"},
2110         };
2111
2112         int r;
2113         unsigned argc;
2114         const char *arg;
2115         struct cache_features *cf = &ca->features;
2116
2117         init_features(cf);
2118
2119         r = dm_read_arg_group(_args, as, &argc, error);
2120         if (r)
2121                 return -EINVAL;
2122
2123         while (argc--) {
2124                 arg = dm_shift_arg(as);
2125
2126                 if (!strcasecmp(arg, "writeback"))
2127                         cf->io_mode = CM_IO_WRITEBACK;
2128
2129                 else if (!strcasecmp(arg, "writethrough"))
2130                         cf->io_mode = CM_IO_WRITETHROUGH;
2131
2132                 else if (!strcasecmp(arg, "passthrough"))
2133                         cf->io_mode = CM_IO_PASSTHROUGH;
2134
2135                 else {
2136                         *error = "Unrecognised cache feature requested";
2137                         return -EINVAL;
2138                 }
2139         }
2140
2141         return 0;
2142 }
2143
2144 static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
2145                         char **error)
2146 {
2147         static struct dm_arg _args[] = {
2148                 {0, 1024, "Invalid number of policy arguments"},
2149         };
2150
2151         int r;
2152
2153         if (!at_least_one_arg(as, error))
2154                 return -EINVAL;
2155
2156         ca->policy_name = dm_shift_arg(as);
2157
2158         r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
2159         if (r)
2160                 return -EINVAL;
2161
2162         ca->policy_argv = (const char **)as->argv;
2163         dm_consume_args(as, ca->policy_argc);
2164
2165         return 0;
2166 }
2167
2168 static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
2169                             char **error)
2170 {
2171         int r;
2172         struct dm_arg_set as;
2173
2174         as.argc = argc;
2175         as.argv = argv;
2176
2177         r = parse_metadata_dev(ca, &as, error);
2178         if (r)
2179                 return r;
2180
2181         r = parse_cache_dev(ca, &as, error);
2182         if (r)
2183                 return r;
2184
2185         r = parse_origin_dev(ca, &as, error);
2186         if (r)
2187                 return r;
2188
2189         r = parse_block_size(ca, &as, error);
2190         if (r)
2191                 return r;
2192
2193         r = parse_features(ca, &as, error);
2194         if (r)
2195                 return r;
2196
2197         r = parse_policy(ca, &as, error);
2198         if (r)
2199                 return r;
2200
2201         return 0;
2202 }
2203
2204 /*----------------------------------------------------------------*/
2205
2206 static struct kmem_cache *migration_cache;
2207
2208 #define NOT_CORE_OPTION 1
2209
2210 static int process_config_option(struct cache *cache, const char *key, const char *value)
2211 {
2212         unsigned long tmp;
2213
2214         if (!strcasecmp(key, "migration_threshold")) {
2215                 if (kstrtoul(value, 10, &tmp))
2216                         return -EINVAL;
2217
2218                 cache->migration_threshold = tmp;
2219                 return 0;
2220         }
2221
2222         return NOT_CORE_OPTION;
2223 }
2224
2225 static int set_config_value(struct cache *cache, const char *key, const char *value)
2226 {
2227         int r = process_config_option(cache, key, value);
2228
2229         if (r == NOT_CORE_OPTION)
2230                 r = policy_set_config_value(cache->policy, key, value);
2231
2232         if (r)
2233                 DMWARN("bad config value for %s: %s", key, value);
2234
2235         return r;
2236 }
2237
2238 static int set_config_values(struct cache *cache, int argc, const char **argv)
2239 {
2240         int r = 0;
2241
2242         if (argc & 1) {
2243                 DMWARN("Odd number of policy arguments given; they should be <key> <value> pairs.");
2244                 return -EINVAL;
2245         }
2246
2247         while (argc) {
2248                 r = set_config_value(cache, argv[0], argv[1]);
2249                 if (r)
2250                         break;
2251
2252                 argc -= 2;
2253                 argv += 2;
2254         }
2255
2256         return r;
2257 }
2258
2259 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
2260                                char **error)
2261 {
2262         struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
2263                                                            cache->cache_size,
2264                                                            cache->origin_sectors,
2265                                                            cache->sectors_per_block);
2266         if (IS_ERR(p)) {
2267                 *error = "Error creating cache's policy";
2268                 return PTR_ERR(p);
2269         }
2270         cache->policy = p;
2271
2272         return 0;
2273 }
2274
2275 /*
2276  * We want the discard block size to be a power of two, at least the size
2277  * of the cache block size, and have no more than 2^14 discard blocks
2278  * across the origin.
2279  */
2280 #define MAX_DISCARD_BLOCKS (1 << 14)
2281
2282 static bool too_many_discard_blocks(sector_t discard_block_size,
2283                                     sector_t origin_size)
2284 {
2285         (void) sector_div(origin_size, discard_block_size);
2286
2287         return origin_size > MAX_DISCARD_BLOCKS;
2288 }
2289
2290 static sector_t calculate_discard_block_size(sector_t cache_block_size,
2291                                              sector_t origin_size)
2292 {
2293         sector_t discard_block_size;
2294
2295         discard_block_size = roundup_pow_of_two(cache_block_size);
2296
2297         if (origin_size)
2298                 while (too_many_discard_blocks(discard_block_size, origin_size))
2299                         discard_block_size *= 2;
2300
2301         return discard_block_size;
2302 }
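/*
 * Worked example (hypothetical sizes): with 512 sector cache blocks and
 * a 1TiB origin (2^31 sectors), roundup_pow_of_two(512) = 512 gives
 * 2^31 / 512 = 2^22 discard blocks.  That exceeds MAX_DISCARD_BLOCKS
 * (2^14), so the size is doubled until 2^31 / 2^17 = 2^14, i.e. a
 * discard block size of 2^17 sectors (64MiB).
 */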
2303
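/*
 * Warn when the in-core mapping table itself becomes large.  Purely for
 * illustration: a 1TiB cache carved into 512KiB blocks has 2^21
 * mappings, well over the 2^20 warning threshold used below.
 */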
2304 static void set_cache_size(struct cache *cache, dm_cblock_t size)
2305 {
2306         dm_block_t nr_blocks = from_cblock(size);
2307
2308         if (nr_blocks > (1 << 20) && cache->cache_size != size)
2309                 DMWARN_LIMIT("You have created a cache device with a lot of individual cache blocks (%llu)\n"
2310                              "All these mappings can consume a lot of kernel memory, and take some time to read/write.\n"
2311                              "Please consider increasing the cache block size to reduce the overall cache block count.",
2312                              (unsigned long long) nr_blocks);
2313
2314         cache->cache_size = size;
2315 }
2316
2317 #define DEFAULT_MIGRATION_THRESHOLD 2048
2318
2319 static int cache_create(struct cache_args *ca, struct cache **result)
2320 {
2321         int r = 0;
2322         char **error = &ca->ti->error;
2323         struct cache *cache;
2324         struct dm_target *ti = ca->ti;
2325         dm_block_t origin_blocks;
2326         struct dm_cache_metadata *cmd;
2327         bool may_format = ca->features.mode == CM_WRITE;
2328
2329         cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2330         if (!cache)
2331                 return -ENOMEM;
2332
2333         cache->ti = ca->ti;
2334         ti->private = cache;
2335         ti->num_flush_bios = 2;
2336         ti->flush_supported = true;
2337
2338         ti->num_discard_bios = 1;
2339         ti->discards_supported = true;
2340         ti->discard_zeroes_data_unsupported = true;
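        /*
         * Discard bios are deferred to the worker and handled as whole
         * ranges there, so core dm need not split them per cache block.
         */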
2341         ti->split_discard_bios = false;
2342
2343         cache->features = ca->features;
2344         ti->per_bio_data_size = get_per_bio_data_size(cache);
2345
2346         cache->callbacks.congested_fn = cache_is_congested;
2347         dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2348
2349         cache->metadata_dev = ca->metadata_dev;
2350         cache->origin_dev = ca->origin_dev;
2351         cache->cache_dev = ca->cache_dev;
2352
2353         ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2354
2355         /* FIXME: factor out this whole section */
2356         origin_blocks = cache->origin_sectors = ca->origin_sectors;
2357         origin_blocks = block_div(origin_blocks, ca->block_size);
2358         cache->origin_blocks = to_oblock(origin_blocks);
2359
2360         cache->sectors_per_block = ca->block_size;
2361         if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2362                 r = -EINVAL;
2363                 goto bad;
2364         }
2365
2366         if (ca->block_size & (ca->block_size - 1)) {
2367                 dm_block_t cache_size = ca->cache_sectors;
2368
2369                 cache->sectors_per_block_shift = -1;
2370                 cache_size = block_div(cache_size, ca->block_size);
2371                 set_cache_size(cache, to_cblock(cache_size));
2372         } else {
2373                 cache->sectors_per_block_shift = __ffs(ca->block_size);
2374                 set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
2375         }
2376
2377         r = create_cache_policy(cache, ca, error);
2378         if (r)
2379                 goto bad;
2380
2381         cache->policy_nr_args = ca->policy_argc;
2382         cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2383
2384         r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2385         if (r) {
2386                 *error = "Error setting cache policy's config values";
2387                 goto bad;
2388         }
2389
2390         cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2391                                      ca->block_size, may_format,
2392                                      dm_cache_policy_get_hint_size(cache->policy));
2393         if (IS_ERR(cmd)) {
2394                 *error = "Error creating metadata object";
2395                 r = PTR_ERR(cmd);
2396                 goto bad;
2397         }
2398         cache->cmd = cmd;
2399
2400         if (passthrough_mode(&cache->features)) {
2401                 bool all_clean;
2402
2403                 r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
2404                 if (r) {
2405                         *error = "dm_cache_metadata_all_clean() failed";
2406                         goto bad;
2407                 }
2408
2409                 if (!all_clean) {
2410                         *error = "Cannot enter passthrough mode unless all blocks are clean";
2411                         r = -EINVAL;
2412                         goto bad;
2413                 }
2414         }
2415
2416         spin_lock_init(&cache->lock);
2417         bio_list_init(&cache->deferred_bios);
2418         bio_list_init(&cache->deferred_flush_bios);
2419         bio_list_init(&cache->deferred_writethrough_bios);
2420         INIT_LIST_HEAD(&cache->quiesced_migrations);
2421         INIT_LIST_HEAD(&cache->completed_migrations);
2422         INIT_LIST_HEAD(&cache->need_commit_migrations);
2423         atomic_set(&cache->nr_migrations, 0);
2424         init_waitqueue_head(&cache->migration_wait);
2425
2426         init_waitqueue_head(&cache->quiescing_wait);
2427         atomic_set(&cache->quiescing, 0);
2428         atomic_set(&cache->quiescing_ack, 0);
2429
2430         r = -ENOMEM;
2431         atomic_set(&cache->nr_dirty, 0);
2432         cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2433         if (!cache->dirty_bitset) {
2434                 *error = "could not allocate dirty bitset";
2435                 goto bad;
2436         }
2437         clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2438
2439         cache->discard_block_size =
2440                 calculate_discard_block_size(cache->sectors_per_block,
2441                                              cache->origin_sectors);
2442         cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors,
2443                                                               cache->discard_block_size));
2444         cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2445         if (!cache->discard_bitset) {
2446                 *error = "could not allocate discard bitset";
2447                 goto bad;
2448         }
2449         clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2450
2451         cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2452         if (IS_ERR(cache->copier)) {
2453                 *error = "could not create kcopyd client";
2454                 r = PTR_ERR(cache->copier);
2455                 goto bad;
2456         }
2457
2458         cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2459         if (!cache->wq) {
2460                 *error = "could not create workqueue";
2461                 goto bad;
2462         }
2463         INIT_WORK(&cache->worker, do_worker);
2464         INIT_DELAYED_WORK(&cache->waker, do_waker);
2465         cache->last_commit_jiffies = jiffies;
2466
2467         cache->prison = dm_bio_prison_create();
2468         if (!cache->prison) {
2469                 *error = "could not create bio prison";
2470                 goto bad;
2471         }
2472
2473         cache->all_io_ds = dm_deferred_set_create();
2474         if (!cache->all_io_ds) {
2475                 *error = "could not create all_io deferred set";
2476                 goto bad;
2477         }
2478
2479         cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2480                                                          migration_cache);
2481         if (!cache->migration_pool) {
2482                 *error = "Error creating cache's migration mempool";
2483                 goto bad;
2484         }
2485
2486         cache->next_migration = NULL;
2487
2488         cache->need_tick_bio = true;
2489         cache->sized = false;
2490         cache->invalidate = false;
2491         cache->commit_requested = false;
2492         cache->loaded_mappings = false;
2493         cache->loaded_discards = false;
2494
2495         load_stats(cache);
2496
2497         atomic_set(&cache->stats.demotion, 0);
2498         atomic_set(&cache->stats.promotion, 0);
2499         atomic_set(&cache->stats.copies_avoided, 0);
2500         atomic_set(&cache->stats.cache_cell_clash, 0);
2501         atomic_set(&cache->stats.commit_count, 0);
2502         atomic_set(&cache->stats.discard_count, 0);
2503
2504         spin_lock_init(&cache->invalidation_lock);
2505         INIT_LIST_HEAD(&cache->invalidation_requests);
2506
2507         *result = cache;
2508         return 0;
2509
2510 bad:
2511         destroy(cache);
2512         return r;
2513 }
2514
2515 static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2516 {
2517         unsigned i;
2518         const char **copy;
2519
2520         copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2521         if (!copy)
2522                 return -ENOMEM;
2523         for (i = 0; i < argc; i++) {
2524                 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2525                 if (!copy[i]) {
2526                         while (i--)
2527                                 kfree(copy[i]);
2528                         kfree(copy);
2529                         return -ENOMEM;
2530                 }
2531         }
2532
2533         cache->nr_ctr_args = argc;
2534         cache->ctr_args = copy;
2535
2536         return 0;
2537 }
2538
2539 static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2540 {
2541         int r = -EINVAL;
2542         struct cache_args *ca;
2543         struct cache *cache = NULL;
2544
2545         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2546         if (!ca) {
2547                 ti->error = "Error allocating memory for cache";
2548                 return -ENOMEM;
2549         }
2550         ca->ti = ti;
2551
2552         r = parse_cache_args(ca, argc, argv, &ti->error);
2553         if (r)
2554                 goto out;
2555
2556         r = cache_create(ca, &cache);
2557         if (r)
2558                 goto out;
2559
2560         r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2561         if (r) {
2562                 destroy(cache);
2563                 goto out;
2564         }
2565
2566         ti->private = cache;
2567
2568 out:
2569         destroy_cache_args(ca);
2570         return r;
2571 }
2572
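/*
 * __cache_map() does the mapping work proper; cache_map() below adds
 * remapped bios to the all_io deferred set and releases the cell, while
 * bios returned as DM_MAPIO_SUBMITTED are left to the worker thread.
 */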
2573 static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_prison_cell **cell)
2574 {
2575         int r;
2576         dm_oblock_t block = get_bio_block(cache, bio);
2577         size_t pb_data_size = get_per_bio_data_size(cache);
2578         bool can_migrate = false;
2579         bool discarded_block;
2580         struct policy_result lookup_result;
2581         struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
2582
2583         if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
2584                 /*
2585                  * This can only occur if the io goes to a partial block at
2586                  * the end of the origin device.  We don't cache these.
2587                  * Just remap to the origin and carry on.
2588                  */
2589                 remap_to_origin(cache, bio);
2590                 return DM_MAPIO_REMAPPED;
2591         }
2592
2593         if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2594                 defer_bio(cache, bio);
2595                 return DM_MAPIO_SUBMITTED;
2596         }
2597
2598         /*
2599          * Check to see if that block is currently migrating.
2600          */
2601         *cell = alloc_prison_cell(cache);
2602         if (!*cell) {
2603                 defer_bio(cache, bio);
2604                 return DM_MAPIO_SUBMITTED;
2605         }
2606
2607         r = bio_detain(cache, block, bio, *cell,
2608                        (cell_free_fn) free_prison_cell,
2609                        cache, cell);
2610         if (r) {
2611                 if (r < 0)
2612                         defer_bio(cache, bio);
2613
2614                 return DM_MAPIO_SUBMITTED;
2615         }
2616
2617         discarded_block = is_discarded_oblock(cache, block);
2618
2619         r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2620                        bio, &lookup_result);
2621         if (r == -EWOULDBLOCK) {
2622                 cell_defer(cache, *cell, true);
2623                 return DM_MAPIO_SUBMITTED;
2624
2625         } else if (r) {
2626                 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2627                 cell_defer(cache, *cell, false);
2628                 bio_io_error(bio);
2629                 return DM_MAPIO_SUBMITTED;
2630         }
2631
2632         r = DM_MAPIO_REMAPPED;
2633         switch (lookup_result.op) {
2634         case POLICY_HIT:
2635                 if (passthrough_mode(&cache->features)) {
2636                         if (bio_data_dir(bio) == WRITE) {
2637                                 /*
2638                                  * We need to invalidate this block, so
2639                                  * defer for the worker thread.
2640                                  */
2641                                 cell_defer(cache, *cell, true);
2642                                 r = DM_MAPIO_SUBMITTED;
2643
2644                         } else {
2645                                 inc_miss_counter(cache, bio);
2646                                 remap_to_origin_clear_discard(cache, bio, block);
2647                         }
2648
2649                 } else {
2650                         inc_hit_counter(cache, bio);
2651                         if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
2652                             !is_dirty(cache, lookup_result.cblock))
2653                                 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2654                         else
2655                                 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
2656                 }
2657                 break;
2658
2659         case POLICY_MISS:
2660                 inc_miss_counter(cache, bio);
2661                 if (pb->req_nr != 0) {
2662                         /*
2663                          * This is a duplicate writethrough io that is no
2664                          * longer needed because the block has been demoted.
2665                          */
2666                         bio_endio(bio, 0);
2667                         cell_defer(cache, *cell, false);
2668                         r = DM_MAPIO_SUBMITTED;
2669
2670                 } else
2671                         remap_to_origin_clear_discard(cache, bio, block);
2672
2673                 break;
2674
2675         default:
2676                 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2677                             (unsigned) lookup_result.op);
2678                 cell_defer(cache, *cell, false);
2679                 bio_io_error(bio);
2680                 r = DM_MAPIO_SUBMITTED;
2681         }
2682
2683         return r;
2684 }
2685
2686 static int cache_map(struct dm_target *ti, struct bio *bio)
2687 {
2688         int r;
2689         struct dm_bio_prison_cell *cell;
2690         struct cache *cache = ti->private;
2691
2692         r = __cache_map(cache, bio, &cell);
2693         if (r == DM_MAPIO_REMAPPED) {
2694                 inc_ds(cache, bio, cell);
2695                 cell_defer(cache, cell, false);
2696         }
2697
2698         return r;
2699 }
2700
2701 static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2702 {
2703         struct cache *cache = ti->private;
2704         unsigned long flags;
2705         size_t pb_data_size = get_per_bio_data_size(cache);
2706         struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
2707
2708         if (pb->tick) {
2709                 policy_tick(cache->policy);
2710
2711                 spin_lock_irqsave(&cache->lock, flags);
2712                 cache->need_tick_bio = true;
2713                 spin_unlock_irqrestore(&cache->lock, flags);
2714         }
2715
2716         check_for_quiesced_migrations(cache, pb);
2717
2718         return 0;
2719 }
2720
2721 static int write_dirty_bitset(struct cache *cache)
2722 {
2723         unsigned i, r;
2724
2725         for (i = 0; i < from_cblock(cache->cache_size); i++) {
2726                 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2727                                        is_dirty(cache, to_cblock(i)));
2728                 if (r)
2729                         return r;
2730         }
2731
2732         return 0;
2733 }
2734
2735 static int write_discard_bitset(struct cache *cache)
2736 {
2737         unsigned i, r;
2738
2739         r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2740                                            cache->discard_nr_blocks);
2741         if (r) {
2742                 DMERR("could not resize on-disk discard bitset");
2743                 return r;
2744         }
2745
2746         for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2747                 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2748                                          is_discarded(cache, to_dblock(i)));
2749                 if (r)
2750                         return r;
2751         }
2752
2753         return 0;
2754 }
2755
2756 /*
2757  * returns true on success
2758  */
2759 static bool sync_metadata(struct cache *cache)
2760 {
2761         int r1, r2, r3, r4;
2762
2763         r1 = write_dirty_bitset(cache);
2764         if (r1)
2765                 DMERR("could not write dirty bitset");
2766
2767         r2 = write_discard_bitset(cache);
2768         if (r2)
2769                 DMERR("could not write discard bitset");
2770
2771         save_stats(cache);
2772
2773         r3 = dm_cache_write_hints(cache->cmd, cache->policy);
2774         if (r3)
2775                 DMERR("could not write hints");
2776
2777         /*
2778          * If writing the above metadata failed, we still commit, but don't
2779          * set the clean shutdown flag.  This will effectively force every
2780          * dirty bit to be set on reload.
2781          */
2782         r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2783         if (r4)
2784                 DMERR("could not write cache metadata.  Data loss may occur.");
2785
2786         return !r1 && !r2 && !r3 && !r4;
2787 }
2788
2789 static void cache_postsuspend(struct dm_target *ti)
2790 {
2791         struct cache *cache = ti->private;
2792
2793         start_quiescing(cache);
2794         wait_for_migrations(cache);
2795         stop_worker(cache);
2796         requeue_deferred_io(cache);
2797         stop_quiescing(cache);
2798
2799         (void) sync_metadata(cache);
2800 }
2801
2802 static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2803                         bool dirty, uint32_t hint, bool hint_valid)
2804 {
2805         int r;
2806         struct cache *cache = context;
2807
2808         r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2809         if (r)
2810                 return r;
2811
2812         if (dirty)
2813                 set_dirty(cache, oblock, cblock);
2814         else
2815                 clear_dirty(cache, oblock, cblock);
2816
2817         return 0;
2818 }
2819
2820 static int load_discard(void *context, sector_t discard_block_size,
2821                         dm_dblock_t dblock, bool discard)
2822 {
2823         struct cache *cache = context;
2824
2825         /* FIXME: handle mis-matched block size */
2826
2827         if (discard)
2828                 set_discard(cache, dblock);
2829         else
2830                 clear_discard(cache, dblock);
2831
2832         return 0;
2833 }
2834
2835 static dm_cblock_t get_cache_dev_size(struct cache *cache)
2836 {
2837         sector_t size = get_dev_size(cache->cache_dev);
2838         (void) sector_div(size, cache->sectors_per_block);
2839         return to_cblock(size);
2840 }
2841
2842 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2843 {
2844         if (from_cblock(new_size) > from_cblock(cache->cache_size))
2845                 return true;
2846
2847         /*
2848          * We can't drop a dirty block when shrinking the cache.
2849          */
2850         while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2851                 if (is_dirty(cache, new_size)) {
2852                         DMERR("unable to shrink cache; cache block %llu is dirty",
2853                               (unsigned long long) from_cblock(new_size));
2854                         return false;
2855                 }
2856                 new_size = to_cblock(from_cblock(new_size) + 1);
2857         }
2858
2859         return true;
2860 }
2861
2862 static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2863 {
2864         int r;
2865
2866         r = dm_cache_resize(cache->cmd, new_size);
2867         if (r) {
2868                 DMERR("could not resize cache metadata");
2869                 return r;
2870         }
2871
2872         set_cache_size(cache, new_size);
2873
2874         return 0;
2875 }
2876
2877 static int cache_preresume(struct dm_target *ti)
2878 {
2879         int r = 0;
2880         struct cache *cache = ti->private;
2881         dm_cblock_t csize = get_cache_dev_size(cache);
2882
2883         /*
2884          * Check to see if the cache has resized.
2885          */
2886         if (!cache->sized) {
2887                 r = resize_cache_dev(cache, csize);
2888                 if (r)
2889                         return r;
2890
2891                 cache->sized = true;
2892
2893         } else if (csize != cache->cache_size) {
2894                 if (!can_resize(cache, csize))
2895                         return -EINVAL;
2896
2897                 r = resize_cache_dev(cache, csize);
2898                 if (r)
2899                         return r;
2900         }
2901
2902         if (!cache->loaded_mappings) {
2903                 r = dm_cache_load_mappings(cache->cmd, cache->policy,
2904                                            load_mapping, cache);
2905                 if (r) {
2906                         DMERR("could not load cache mappings");
2907                         return r;
2908                 }
2909
2910                 cache->loaded_mappings = true;
2911         }
2912
2913         if (!cache->loaded_discards) {
2914                 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2915                 if (r) {
2916                         DMERR("could not load origin discards");
2917                         return r;
2918                 }
2919
2920                 cache->loaded_discards = true;
2921         }
2922
2923         return r;
2924 }
2925
2926 static void cache_resume(struct dm_target *ti)
2927 {
2928         struct cache *cache = ti->private;
2929
2930         cache->need_tick_bio = true;
2931         do_waker(&cache->waker.work);
2932 }
2933
2934 /*
2935  * Status format:
2936  *
2937  * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
2938  * <cache block size> <#used cache blocks>/<#total cache blocks>
2939  * <#read hits> <#read misses> <#write hits> <#write misses>
2940  * <#demotions> <#promotions> <#dirty>
2941  * <#features> <features>*
2942  * <#core args> <core args>
2943  * <policy name> <#policy args> <policy args>*
2944  */
2945 static void cache_status(struct dm_target *ti, status_type_t type,
2946                          unsigned status_flags, char *result, unsigned maxlen)
2947 {
2948         int r = 0;
2949         unsigned i;
2950         ssize_t sz = 0;
2951         dm_block_t nr_free_blocks_metadata = 0;
2952         dm_block_t nr_blocks_metadata = 0;
2953         char buf[BDEVNAME_SIZE];
2954         struct cache *cache = ti->private;
2955         dm_cblock_t residency;
2956
2957         switch (type) {
2958         case STATUSTYPE_INFO:
2959                 /* Commit to ensure statistics aren't out-of-date */
2960                 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2961                         r = dm_cache_commit(cache->cmd, false);
2962                         if (r)
2963                                 DMERR("could not commit metadata for accurate status");
2964                 }
2965
2966                 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2967                                                            &nr_free_blocks_metadata);
2968                 if (r) {
2969                         DMERR("could not get metadata free block count");
2970                         goto err;
2971                 }
2972
2973                 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2974                 if (r) {
2975                         DMERR("could not get metadata device size");
2976                         goto err;
2977                 }
2978
2979                 residency = policy_residency(cache->policy);
2980
2981                 DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
2982                        (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
2983                        (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2984                        (unsigned long long)nr_blocks_metadata,
2985                        cache->sectors_per_block,
2986                        (unsigned long long) from_cblock(residency),
2987                        (unsigned long long) from_cblock(cache->cache_size),
2988                        (unsigned) atomic_read(&cache->stats.read_hit),
2989                        (unsigned) atomic_read(&cache->stats.read_miss),
2990                        (unsigned) atomic_read(&cache->stats.write_hit),
2991                        (unsigned) atomic_read(&cache->stats.write_miss),
2992                        (unsigned) atomic_read(&cache->stats.demotion),
2993                        (unsigned) atomic_read(&cache->stats.promotion),
2994                        (unsigned long) atomic_read(&cache->nr_dirty));
2995
2996                 if (writethrough_mode(&cache->features))
2997                         DMEMIT("1 writethrough ");
2998
2999                 else if (passthrough_mode(&cache->features))
3000                         DMEMIT("1 passthrough ");
3001
3002                 else if (writeback_mode(&cache->features))
3003                         DMEMIT("1 writeback ");
3004
3005                 else {
3006                         DMERR("internal error: unknown io mode: %d", (int) cache->features.io_mode);
3007                         goto err;
3008                 }
3009
3010                 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
3011
3012                 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy));
3013                 if (sz < maxlen) {
3014                         r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
3015                         if (r)
3016                                 DMERR("policy_emit_config_values returned %d", r);
3017                 }
3018
3019                 break;
3020
3021         case STATUSTYPE_TABLE:
3022                 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
3023                 DMEMIT("%s ", buf);
3024                 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
3025                 DMEMIT("%s ", buf);
3026                 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
3027                 DMEMIT("%s", buf);
3028
3029                 for (i = 0; i < cache->nr_ctr_args - 1; i++)
3030                         DMEMIT(" %s", cache->ctr_args[i]);
3031                 if (cache->nr_ctr_args)
3032                         DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
3033         }
3034
3035         return;
3036
3037 err:
3038         DMEMIT("Error");
3039 }
3040
3041 /*
3042  * A cache block range can take two forms:
3043  *
3044  * i) A single cblock, e.g. '3456'
3045  * ii) A begin and end cblock with a dash between, e.g. 123-234
3046  */
3047 static int parse_cblock_range(struct cache *cache, const char *str,
3048                               struct cblock_range *result)
3049 {
3050         char dummy;
3051         uint64_t b, e;
3052         int r;
3053
3054         /*
3055          * Try and parse form (ii) first.
3056          */
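        /*
         * The trailing %c means r == 2 only when nothing follows the
         * second number, i.e. the string is exactly "<begin>-<end>".
         */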
3057         r = sscanf(str, "%llu-%llu%c", &b, &e, &dummy);
3058         if (r < 0)
3059                 return r;
3060
3061         if (r == 2) {
3062                 result->begin = to_cblock(b);
3063                 result->end = to_cblock(e);
3064                 return 0;
3065         }
3066
3067         /*
3068          * That didn't work, try form (i).
3069          */
3070         r = sscanf(str, "%llu%c", &b, &dummy);
3071         if (r < 0)
3072                 return r;
3073
3074         if (r == 1) {
3075                 result->begin = to_cblock(b);
3076                 result->end = to_cblock(from_cblock(result->begin) + 1u);
3077                 return 0;
3078         }
3079
3080         DMERR("invalid cblock range '%s'", str);
3081         return -EINVAL;
3082 }
3083
3084 static int validate_cblock_range(struct cache *cache, struct cblock_range *range)
3085 {
3086         uint64_t b = from_cblock(range->begin);
3087         uint64_t e = from_cblock(range->end);
3088         uint64_t n = from_cblock(cache->cache_size);
3089
3090         if (b >= n) {
3091                 DMERR("begin cblock out of range: %llu >= %llu", b, n);
3092                 return -EINVAL;
3093         }
3094
3095         if (e > n) {
3096                 DMERR("end cblock out of range: %llu > %llu", e, n);
3097                 return -EINVAL;
3098         }
3099
3100         if (b >= e) {
3101                 DMERR("invalid cblock range: %llu >= %llu", b, e);
3102                 return -EINVAL;
3103         }
3104
3105         return 0;
3106 }
3107
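/*
 * Hand the range to the worker thread and wait synchronously for it to
 * be processed (req.complete is set by process_invalidation_request()).
 */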
3108 static int request_invalidation(struct cache *cache, struct cblock_range *range)
3109 {
3110         struct invalidation_request req;
3111
3112         INIT_LIST_HEAD(&req.list);
3113         req.cblocks = range;
3114         atomic_set(&req.complete, 0);
3115         req.err = 0;
3116         init_waitqueue_head(&req.result_wait);
3117
3118         spin_lock(&cache->invalidation_lock);
3119         list_add(&req.list, &cache->invalidation_requests);
3120         spin_unlock(&cache->invalidation_lock);
3121         wake_worker(cache);
3122
3123         wait_event(req.result_wait, atomic_read(&req.complete));
3124         return req.err;
3125 }
3126
3127 static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
3128                                               const char **cblock_ranges)
3129 {
3130         int r = 0;
3131         unsigned i;
3132         struct cblock_range range;
3133
3134         if (!passthrough_mode(&cache->features)) {
3135                 DMERR("cache has to be in passthrough mode for invalidation");
3136                 return -EPERM;
3137         }
3138
3139         for (i = 0; i < count; i++) {
3140                 r = parse_cblock_range(cache, cblock_ranges[i], &range);
3141                 if (r)
3142                         break;
3143
3144                 r = validate_cblock_range(cache, &range);
3145                 if (r)
3146                         break;
3147
3148                 /*
3149                  * Pass begin and end origin blocks to the worker and wake it.
3150                  */
3151                 r = request_invalidation(cache, &range);
3152                 if (r)
3153                         break;
3154         }
3155
3156         return r;
3157 }
3158
3159 /*
3160  * Supports
3161  *      "<key> <value>"
3162  * and
3163  *     "invalidate_cblocks [(<begin>)|(<begin>-<end>)]*"
3164  *
3165  * The key migration_threshold is supported by the cache target core.
3166  */
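/*
 * Illustrative usage from userspace (the device name is hypothetical):
 *
 *   dmsetup message my-cache 0 migration_threshold 4096
 *   dmsetup message my-cache 0 invalidate_cblocks 2345 3456-4567
 *
 * The second form requires the cache to be in passthrough mode.
 */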
3167 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
3168 {
3169         struct cache *cache = ti->private;
3170
3171         if (!argc)
3172                 return -EINVAL;
3173
3174         if (!strcasecmp(argv[0], "invalidate_cblocks"))
3175                 return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1);
3176
3177         if (argc != 2)
3178                 return -EINVAL;
3179
3180         return set_config_value(cache, argv[0], argv[1]);
3181 }
3182
3183 static int cache_iterate_devices(struct dm_target *ti,
3184                                  iterate_devices_callout_fn fn, void *data)
3185 {
3186         int r = 0;
3187         struct cache *cache = ti->private;
3188
3189         r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
3190         if (!r)
3191                 r = fn(ti, cache->origin_dev, 0, ti->len, data);
3192
3193         return r;
3194 }
3195
3196 /*
3197  * We assume I/O is going to the origin (which is the volume
3198  * more likely to have restrictions e.g. by being striped).
3199  * (Looking up the exact location of the data would be expensive
3200  * and could always be out of date by the time the bio is submitted.)
3201  */
3202 static int cache_bvec_merge(struct dm_target *ti,
3203                             struct bvec_merge_data *bvm,
3204                             struct bio_vec *biovec, int max_size)
3205 {
3206         struct cache *cache = ti->private;
3207         struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
3208
3209         if (!q->merge_bvec_fn)
3210                 return max_size;
3211
3212         bvm->bi_bdev = cache->origin_dev->bdev;
3213         return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3214 }
3215
3216 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
3217 {
3218         /*
3219          * FIXME: these limits may be incompatible with the cache device
3220          */
3221         limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
3222                                             cache->origin_sectors);
3223         limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
3224 }
3225
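/*
 * For illustration (hypothetical numbers): with 512 sector (256KiB)
 * cache blocks, a stacked io_opt of 2048 sectors (1MiB) is a whole
 * number of blocks and is left alone, whereas 768 sectors (384KiB) is
 * not, so io_min and io_opt are both reset to the cache block size.
 */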
3226 static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
3227 {
3228         struct cache *cache = ti->private;
3229         uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
3230
3231         /*
3232          * If the system-determined stacked limits are compatible with the
3233          * cache's blocksize (io_opt is a whole multiple of the block size) do not override them.
3234          */
3235         if (io_opt_sectors < cache->sectors_per_block ||
3236             do_div(io_opt_sectors, cache->sectors_per_block)) {
3237                 blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT);
3238                 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
3239         }
3240         set_discard_limits(cache, limits);
3241 }
3242
3243 /*----------------------------------------------------------------*/
3244
3245 static struct target_type cache_target = {
3246         .name = "cache",
3247         .version = {1, 6, 0},
3248         .module = THIS_MODULE,
3249         .ctr = cache_ctr,
3250         .dtr = cache_dtr,
3251         .map = cache_map,
3252         .end_io = cache_end_io,
3253         .postsuspend = cache_postsuspend,
3254         .preresume = cache_preresume,
3255         .resume = cache_resume,
3256         .status = cache_status,
3257         .message = cache_message,
3258         .iterate_devices = cache_iterate_devices,
3259         .merge = cache_bvec_merge,
3260         .io_hints = cache_io_hints,
3261 };
3262
3263 static int __init dm_cache_init(void)
3264 {
3265         int r;
3266
3267         r = dm_register_target(&cache_target);
3268         if (r) {
3269                 DMERR("cache target registration failed: %d", r);
3270                 return r;
3271         }
3272
3273         migration_cache = KMEM_CACHE(dm_cache_migration, 0);
3274         if (!migration_cache) {
3275                 dm_unregister_target(&cache_target);
3276                 return -ENOMEM;
3277         }
3278
3279         return 0;
3280 }
3281
3282 static void __exit dm_cache_exit(void)
3283 {
3284         dm_unregister_target(&cache_target);
3285         kmem_cache_destroy(migration_cache);
3286 }
3287
3288 module_init(dm_cache_init);
3289 module_exit(dm_cache_exit);
3290
3291 MODULE_DESCRIPTION(DM_NAME " cache target");
3292 MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
3293 MODULE_LICENSE("GPL");