/*
 * Copyright (C) 2015 Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 *
 * Implementation of a generic nvm manager for Open-Channel SSDs.
 */

#include "gennvm.h"

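/*
 * gennvm_get_area - reserve a contiguous range of @len sectors.
 *
 * Walks the begin-sorted area_list and first-fit allocates the first
 * hole large enough, returning its start in @lba. A hypothetical
 * caller (names here are illustrative, not from this file) could be:
 *
 *	sector_t lba;
 *	int ret;
 *
 *	ret = dev->mt->get_area(dev, &lba, nr_sects);
 *	if (ret)
 *		return ret;
 */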
static int gennvm_get_area(struct nvm_dev *dev, sector_t *lba, sector_t len)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area, *prev, *next;
	sector_t begin = 0;
	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;

	if (len > max_sectors)
		return -EINVAL;

	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
	if (!area)
		return -ENOMEM;

	prev = NULL;

	spin_lock(&dev->lock);
	list_for_each_entry(next, &gn->area_list, list) {
		if (begin + len > next->begin) {
			begin = next->end;
			prev = next;
			continue;
		}
		break;
	}

	if ((begin + len) > max_sectors) {
		spin_unlock(&dev->lock);
		kfree(area);
		return -EINVAL;
	}

	area->begin = *lba = begin;
	area->end = begin + len;

	if (prev) /* insert into sorted order */
		list_add(&area->list, &prev->list);
	else
		list_add(&area->list, &gn->area_list);
	spin_unlock(&dev->lock);

	return 0;
}

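/*
 * gennvm_put_area - release an area previously reserved with
 * gennvm_get_area(). Matches on the begin sector; a begin address that
 * was never handed out is silently ignored.
 */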
static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
{
	struct gen_nvm *gn = dev->mp;
	struct gennvm_area *area;

	spin_lock(&dev->lock);
	list_for_each_entry(area, &gn->area_list, list) {
		if (area->begin != begin)
			continue;

		list_del(&area->list);
		spin_unlock(&dev->lock);
		kfree(area);
		return;
	}
	spin_unlock(&dev->lock);
}

static void gennvm_blocks_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	int i;

	gennvm_for_each_lun(gn, lun, i) {
		if (!lun->vlun.blocks)
			break;
		vfree(lun->vlun.blocks);
	}
}

static void gennvm_luns_free(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;

	kfree(gn->luns);
}

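/*
 * gennvm_luns_init - allocate and initialize the flat LUN array.
 *
 * LUNs are laid out channel-major: global id i maps to channel
 * i / luns_per_chnl and lun i % luns_per_chnl, so with e.g. 4 LUNs per
 * channel, id 6 is channel 1, lun 2. Two blocks per LUN are held back
 * as a garbage collection reserve (see gennvm_get_blk_unlocked()).
 */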
static int gennvm_luns_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	int i;

	gn->luns = kcalloc(dev->nr_luns, sizeof(struct gen_lun), GFP_KERNEL);
	if (!gn->luns)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock_init(&lun->vlun.lock);
		INIT_LIST_HEAD(&lun->free_list);
		INIT_LIST_HEAD(&lun->used_list);
		INIT_LIST_HEAD(&lun->bb_list);

		lun->reserved_blocks = 2; /* for GC only */
		lun->vlun.id = i;
		lun->vlun.lun_id = i % dev->luns_per_chnl;
		lun->vlun.chnl_id = i / dev->luns_per_chnl;
		lun->vlun.nr_free_blocks = dev->blks_per_lun;
		lun->vlun.nr_open_blocks = 0;
		lun->vlun.nr_closed_blocks = 0;
		lun->vlun.nr_bad_blocks = 0;
	}
	return 0;
}

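/*
 * gennvm_block_bb - fold a device bad block table into a LUN.
 *
 * nvm_bb_tbl_fold() collapses the per-plane table to one entry per
 * block; every entry not reported as free is then moved onto the
 * LUN's bb_list and subtracted from the free block count.
 */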
static int gennvm_block_bb(struct gen_nvm *gn, struct ppa_addr ppa,
							u8 *blks, int nr_blks)
{
	struct nvm_dev *dev = gn->dev;
	struct gen_lun *lun;
	struct nvm_block *blk;
	int i;

	nr_blks = nvm_bb_tbl_fold(dev, blks, nr_blks);
	if (nr_blks < 0)
		return nr_blks;

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blks; i++) {
		if (blks[i] == 0)
			continue;

		blk = &lun->vlun.blocks[i];
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		lun->vlun.nr_free_blocks--;
	}

	return 0;
}

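/*
 * gennvm_block_map - L2P table scan callback.
 *
 * Called by dev->ops->get_l2p_tbl() for each span of the
 * logical-to-physical table. Every block backing a mapped sector is
 * moved to the used list and flagged open; the FTL on top is expected
 * to re-establish the exact block state later.
 */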
static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
{
	struct nvm_dev *dev = private;
	struct gen_nvm *gn = dev->mp;
	u64 elba = slba + nlb;
	struct gen_lun *lun;
	struct nvm_block *blk;
	u64 i;
	int lun_id;

	if (unlikely(elba > dev->total_secs)) {
		pr_err("gennvm: L2P data from device is out of bounds!\n");
		return -EINVAL;
	}

	for (i = 0; i < nlb; i++) {
		u64 pba = le64_to_cpu(entries[i]);

		if (unlikely(pba >= dev->total_secs && pba != U64_MAX)) {
			pr_err("gennvm: L2P data entry is out of bounds!\n");
			return -EINVAL;
		}

		/* Address zero is a special one. The first page on a disk is
		 * protected. It often holds internal device boot
		 * information.
		 */
		if (!pba)
			continue;

		/* resolve block from physical address */
		lun_id = div_u64(pba, dev->sec_per_lun);
		lun = &gn->luns[lun_id];

		/* Calculate block offset into lun */
		pba = pba - (dev->sec_per_lun * lun_id);
		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];

		if (!blk->state) {
			/* at this point, we don't know anything about the
			 * block. It's up to the FTL on top to re-establish the
			 * block state. The block is assumed to be open.
			 */
			list_move_tail(&blk->list, &lun->used_list);
			blk->state = NVM_BLK_ST_OPEN;
			lun->vlun.nr_free_blocks--;
			lun->vlun.nr_open_blocks++;
		}
	}

	return 0;
}

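/*
 * gennvm_blocks_init - build the per-LUN block arrays.
 *
 * All blocks start on the free list, except block 0 of lun 0, which is
 * reserved for the device. Where the device exports them, the bad
 * block table and the L2P table are then used to seed the bad and
 * open block lists.
 */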
static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn)
{
	struct gen_lun *lun;
	struct nvm_block *block;
	sector_t lun_iter, blk_iter, cur_block_id = 0;
	int ret, nr_blks;
	u8 *blks;

	nr_blks = dev->blks_per_lun * dev->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	gennvm_for_each_lun(gn, lun, lun_iter) {
		lun->vlun.blocks = vzalloc(sizeof(struct nvm_block) *
							dev->blks_per_lun);
		if (!lun->vlun.blocks) {
			kfree(blks);
			return -ENOMEM;
		}

		for (blk_iter = 0; blk_iter < dev->blks_per_lun; blk_iter++) {
			block = &lun->vlun.blocks[blk_iter];

			INIT_LIST_HEAD(&block->list);

			block->lun = &lun->vlun;
			block->id = cur_block_id++;

			/* First block is reserved for device */
			if (unlikely(lun_iter == 0 && blk_iter == 0)) {
				lun->vlun.nr_free_blocks--;
				continue;
			}

			list_add_tail(&block->list, &lun->free_list);
		}

		if (dev->ops->get_bb_tbl) {
			struct ppa_addr ppa;

			ppa.ppa = 0;
			ppa.g.ch = lun->vlun.chnl_id;
			ppa.g.lun = lun->vlun.lun_id;

			ret = nvm_get_bb_tbl(dev, ppa, blks);
			if (ret)
				pr_err("gennvm: could not get BB table\n");

			ret = gennvm_block_bb(gn, ppa, blks, nr_blks);
			if (ret)
				pr_err("gennvm: BB table map failed\n");
		}
	}

	if ((dev->identity.dom & NVM_RSP_L2P) && dev->ops->get_l2p_tbl) {
		ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs,
							gennvm_block_map, dev);
		if (ret) {
			pr_err("gennvm: could not read L2P table.\n");
			pr_warn("gennvm: falling back to default block initialization\n");
		}
	}

	kfree(blks);
	return 0;
}

static void gennvm_free(struct nvm_dev *dev)
{
	gennvm_blocks_free(dev);
	gennvm_luns_free(dev);
	kfree(dev->mp);
	dev->mp = NULL;
}

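/*
 * gennvm_register - attach the manager to a device.
 *
 * Note the nvmm_type convention: registration returns 1 on success and
 * a negative errno on failure.
 */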
static int gennvm_register(struct nvm_dev *dev)
{
	struct gen_nvm *gn;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	gn = kzalloc(sizeof(struct gen_nvm), GFP_KERNEL);
	if (!gn) {
		module_put(THIS_MODULE);
		return -ENOMEM;
	}

	gn->dev = dev;
	gn->nr_luns = dev->nr_luns;
	INIT_LIST_HEAD(&gn->area_list);
	dev->mp = gn;

	ret = gennvm_luns_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize luns\n");
		goto err;
	}

	ret = gennvm_blocks_init(dev, gn);
	if (ret) {
		pr_err("gennvm: could not initialize blocks\n");
		goto err;
	}

	return 1;
err:
	gennvm_free(dev);
	module_put(THIS_MODULE);
	return ret;
}

static void gennvm_unregister(struct nvm_dev *dev)
{
	gennvm_free(dev);
	module_put(THIS_MODULE);
}

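/*
 * gennvm_get_blk_unlocked - take the first block off the free list.
 *
 * The caller must hold vlun->lock. Normal allocations fail once only
 * the reserved blocks remain; garbage collection (NVM_IOTYPE_GC) may
 * dip into the reserve so that it can always make forward progress.
 */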
static struct nvm_block *gennvm_get_blk_unlocked(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);
	struct nvm_block *blk = NULL;
	int is_gc = flags & NVM_IOTYPE_GC;

	assert_spin_locked(&vlun->lock);

	if (list_empty(&lun->free_list)) {
		pr_err_ratelimited("gennvm: lun %u has no free pages available\n",
								lun->vlun.id);
		goto out;
	}

	if (!is_gc && lun->vlun.nr_free_blocks < lun->reserved_blocks)
		goto out;

	blk = list_first_entry(&lun->free_list, struct nvm_block, list);
	list_move_tail(&blk->list, &lun->used_list);
	blk->state = NVM_BLK_ST_OPEN;

	lun->vlun.nr_free_blocks--;
	lun->vlun.nr_open_blocks++;

out:
	return blk;
}

static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev,
				struct nvm_lun *vlun, unsigned long flags)
{
	struct nvm_block *blk;

	spin_lock(&vlun->lock);
	blk = gennvm_get_blk_unlocked(dev, vlun, flags);
	spin_unlock(&vlun->lock);
	return blk;
}

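/*
 * gennvm_put_blk_unlocked - hand a block back to its LUN.
 *
 * The caller must hold vlun->lock. Open and closed blocks return to
 * the free list; bad blocks, and blocks in any unknown state (treated
 * as bad), end up on the bb_list.
 */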
static void gennvm_put_blk_unlocked(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;
	struct gen_lun *lun = container_of(vlun, struct gen_lun, vlun);

	assert_spin_locked(&vlun->lock);

	if (blk->state & NVM_BLK_ST_OPEN) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_open_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_CLOSED) {
		list_move_tail(&blk->list, &lun->free_list);
		lun->vlun.nr_closed_blocks--;
		lun->vlun.nr_free_blocks++;
		blk->state = NVM_BLK_ST_FREE;
	} else if (blk->state & NVM_BLK_ST_BAD) {
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	} else {
		WARN_ON_ONCE(1);
		pr_err("gennvm: erroneous block type (%lu -> %u)\n",
							blk->id, blk->state);
		list_move_tail(&blk->list, &lun->bb_list);
		lun->vlun.nr_bad_blocks++;
		blk->state = NVM_BLK_ST_BAD;
	}
}

static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
{
	struct nvm_lun *vlun = blk->lun;

	spin_lock(&vlun->lock);
	gennvm_put_blk_unlocked(dev, blk);
	spin_unlock(&vlun->lock);
}

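/*
 * gennvm_mark_blk - set a block's state from a physical address, e.g.
 * after a failed write. Only the state is changed here; the block
 * migrates to the bb_list when the target puts it back.
 */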
static void gennvm_mark_blk(struct nvm_dev *dev, struct ppa_addr ppa, int type)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	struct nvm_block *blk;

	pr_debug("gennvm: ppa (ch: %u lun: %u blk: %u pg: %u) -> %u\n",
			ppa.g.ch, ppa.g.lun, ppa.g.blk, ppa.g.pg, type);

	if (unlikely(ppa.g.ch >= dev->nr_chnls ||
					ppa.g.lun >= dev->luns_per_chnl ||
					ppa.g.blk >= dev->blks_per_lun)) {
		WARN_ON_ONCE(1);
		pr_err("gennvm: ppa broken (ch: %u >= %u lun: %u >= %u blk: %u >= %u)\n",
				ppa.g.ch, dev->nr_chnls,
				ppa.g.lun, dev->luns_per_chnl,
				ppa.g.blk, dev->blks_per_lun);
		return;
	}

	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
	blk = &lun->vlun.blocks[ppa.g.blk];

	/* will be moved to bb list on put_blk from target */
	blk->state = type;
}

/*
 * mark block bad in gennvm. It is expected that the target recovers separately
 */
static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	int bit = -1;
	int max_secs = dev->ops->max_phys_sect;
	void *comp_bits = &rqd->ppa_status;

	nvm_addr_to_generic_mode(dev, rqd);

	/* look up blocks and mark them as bad */
	if (rqd->nr_ppas == 1) {
		gennvm_mark_blk(dev, rqd->ppa_addr, NVM_BLK_ST_BAD);
		return;
	}

	while ((bit = find_next_bit(comp_bits, max_secs, bit + 1)) < max_secs)
		gennvm_mark_blk(dev, rqd->ppa_list[bit], NVM_BLK_ST_BAD);
}

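/*
 * gennvm_end_io - completion hook interposed by gennvm_submit_io().
 * Failed writes mark the affected blocks bad before the completion is
 * forwarded to the owning target.
 */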
static void gennvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_instance *ins = rqd->ins;

	if (rqd->error == NVM_RSP_ERR_FAILWRITE)
		gennvm_mark_blk_bad(rqd->dev, rqd);

	ins->tt->end_io(rqd);
}

static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	/* Convert address space */
	nvm_generic_to_addr_mode(dev, rqd);

	rqd->dev = dev;
	rqd->end_io = gennvm_end_io;
	return dev->ops->submit_io(dev, rqd);
}

static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
							unsigned long flags)
{
	struct ppa_addr addr = block_to_ppa(dev, blk);

	return nvm_erase_ppa(dev, &addr, 1);
}

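/*
 * LUN reservation: dev->lun_map holds one bit per LUN. Reserving an
 * already-taken LUN returns non-zero; releasing a LUN that was never
 * reserved trips the WARN_ON.
 */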
static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
{
	return test_and_set_bit(lunid, dev->lun_map);
}

static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
{
	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
}

static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
{
	struct gen_nvm *gn = dev->mp;

	if (unlikely(lunid >= dev->nr_luns))
		return NULL;

	return &gn->luns[lunid].vlun;
}

static void gennvm_lun_info_print(struct nvm_dev *dev)
{
	struct gen_nvm *gn = dev->mp;
	struct gen_lun *lun;
	unsigned int i;

	gennvm_for_each_lun(gn, lun, i) {
		spin_lock(&lun->vlun.lock);

		pr_info("%s: lun%8u\t%u\t%u\t%u\t%u\n",
				dev->name, i,
				lun->vlun.nr_free_blocks,
				lun->vlun.nr_open_blocks,
				lun->vlun.nr_closed_blocks,
				lun->vlun.nr_bad_blocks);

		spin_unlock(&lun->vlun.lock);
	}
}

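/*
 * Ops table through which the lightnvm core dispatches all media
 * management: block allocation, I/O submission, LUN and area
 * bookkeeping.
 */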
static struct nvmm_type gennvm = {
	.name			= "gennvm",
	.version		= {0, 1, 0},

	.register_mgr		= gennvm_register,
	.unregister_mgr		= gennvm_unregister,

	.get_blk_unlocked	= gennvm_get_blk_unlocked,
	.put_blk_unlocked	= gennvm_put_blk_unlocked,

	.get_blk		= gennvm_get_blk,
	.put_blk		= gennvm_put_blk,

	.submit_io		= gennvm_submit_io,
	.erase_blk		= gennvm_erase_blk,

	.mark_blk		= gennvm_mark_blk,

	.get_lun		= gennvm_get_lun,
	.reserve_lun		= gennvm_reserve_lun,
	.release_lun		= gennvm_release_lun,
	.lun_info_print		= gennvm_lun_info_print,

	.get_area		= gennvm_get_area,
	.put_area		= gennvm_put_area,
};

static int __init gennvm_module_init(void)
{
	return nvm_register_mgr(&gennvm);
}

static void __exit gennvm_module_exit(void)
{
	nvm_unregister_mgr(&gennvm);
}

module_init(gennvm_module_init);
module_exit(gennvm_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic media manager for Open-Channel SSDs");