/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"

struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};

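/*
 * Check whether any part of the I/O range [sector, sector + len) overlaps
 * a known bad-block entry.  @len is in bytes; badblocks_check() works in
 * 512-byte sectors.
 */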
static bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}

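/*
 * Ask the bus provider to clear poison in the given range, drop any
 * sectors that were actually cleared from the badblocks list, and
 * invalidate stale CPU cache lines for the range.  @offset is relative
 * to the device mapping, so data_offset is subtracted back out to get a
 * disk-relative sector for the badblocks bookkeeping.
 */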
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}

	invalidate_pmem(pmem->virt_addr + offset, len);
}

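/*
 * Copy one bio segment between @page and persistent memory.  Reads from
 * known-bad ranges fail with -EIO; writes proceed regardless, with a
 * clear-poison-and-rewrite sequence for ranges previously marked bad
 * (see the comment in the write path below).
 */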
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}

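/*
 * Entry point for all block I/O to the device: handle the bio's segments
 * synchronously, fail the bio on the first segment error, and order
 * writes with wmb_pmem() before completion.
 */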
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

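/*
 * DAX entry point: translate a sector to a kernel virtual address and a
 * pfn_t for direct, page-cache-bypassing access, and return how many
 * bytes remain addressable past @offset.
 */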
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

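/*
 * Allocate the pmem_device, reserve and map the physical range (with a
 * struct page memmap when pages are required), and allocate the request
 * queue.  All allocations are device-managed except the queue, which is
 * cleaned up explicitly on error and in pmem_detach_disk().
 */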
static struct pmem_device *pmem_alloc(struct device *dev,
		struct resource *res, int id)
{
	struct pmem_device *pmem;
	struct request_queue *q;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return ERR_PTR(-ENOMEM);

	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, pmem->phys_addr, pmem->size,
			dev_name(dev))) {
		dev_warn(dev, "could not reserve region [0x%pa:0x%zx]\n",
				&pmem->phys_addr, pmem->size);
		return ERR_PTR(-EBUSY);
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return ERR_PTR(-ENOMEM);

	pmem->pfn_flags = PFN_DEV;
	if (pmem_should_map_pages(dev)) {
		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
				pmem->phys_addr, pmem->size,
				ARCH_MEMREMAP_PMEM);

	if (IS_ERR(pmem->virt_addr)) {
		blk_cleanup_queue(q);
		return (void __force *) pmem->virt_addr;
	}

	pmem->pmem_queue = q;
	return pmem;
}

static void pmem_detach_disk(struct pmem_device *pmem)
{
	if (!pmem->pmem_disk)
		return;

	del_gendisk(pmem->pmem_disk);
	put_disk(pmem->pmem_disk);
	blk_cleanup_queue(pmem->pmem_queue);
}

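/*
 * Finish block device setup: configure the queue, allocate and populate
 * the gendisk, size the capacity net of any pfn metadata and section
 * padding, and (re)populate badblocks over the data region before
 * add_disk().
 */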
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns, struct pmem_device *pmem)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	int nid = dev_to_node(dev);
	struct resource bb_res;
	struct gendisk *disk;

	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
	pmem->pmem_queue->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk) {
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENOMEM;
	}

	disk->fops		= &pmem_fops;
	disk->queue		= pmem->pmem_queue;
	disk->flags		= GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	devm_exit_badblocks(dev, &pmem->bb);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	bb_res.start = nsio->res.start + pmem->data_offset;
	bb_res.end = nsio->res.end;
	if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		bb_res.start += __le32_to_cpu(pfn_sb->start_pad);
		bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc);
	}
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb,
			&bb_res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}

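/*
 * Raw byte access for a claiming device (BTT/PFN metadata I/O),
 * bypassing the block layer.  Reads are rejected with -EIO if the
 * 512-byte-aligned extent of the request overlaps known poison.
 */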
static int pmem_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct pmem_device *pmem = dev_get_drvdata(ndns->claim);

	if (unlikely(offset + size > pmem->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&pmem->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, pmem->virt_addr + offset, size);
	} else {
		memcpy_to_pmem(pmem->virt_addr + offset, buf, size);
		wmb_pmem();
	}

	return 0;
}

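/*
 * Initialize a pfn info block (or accept a pre-existing valid one):
 * compute section-alignment padding against 'System RAM', reserve room
 * for the memmap when in PFN_MODE_PMEM, and write the resulting
 * superblock out through the namespace.
 */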
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
	struct pmem_device *pmem = dev_get_drvdata(&nd_pfn->dev);
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = 0, end_trunc = 0;
	resource_size_t start, size;
	struct nd_namespace_io *nsio;
	struct nd_region *nd_region;
	struct nd_pfn_sb *pfn_sb;
	unsigned long npfns;
	phys_addr_t offset;
	u64 checksum;
	int rc;

	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
	if (!pfn_sb)
		return -ENOMEM;

	nd_pfn->pfn_sb = pfn_sb;
	rc = nd_pfn_validate(nd_pfn);
	if (rc == -ENODEV)
		/* no info block, do init */;
	else
		return rc;

	nd_region = to_nd_region(nd_pfn->dev.parent);
	if (nd_region->ro) {
		dev_info(&nd_pfn->dev,
				"%s is read-only, unable to init metadata\n",
				dev_name(&nd_region->dev));
		return -ENXIO;
	}

	memset(pfn_sb, 0, sizeof(*pfn_sb));

	/*
	 * Check if pmem collides with 'System RAM' when section aligned and
	 * trim it accordingly.
	 */
	nsio = to_nd_namespace_io(&ndns->dev);
	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
	size = resource_size(&nsio->res);
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
			IORES_DESC_NONE) == REGION_MIXED) {
		start = nsio->res.start;
		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
	}

	start = nsio->res.start;
	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
			IORES_DESC_NONE) == REGION_MIXED) {
		size = resource_size(&nsio->res);
		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
	}

	if (start_pad + end_trunc)
		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
				dev_name(&ndns->dev), start_pad + end_trunc);

	/*
	 * Note, we use 64 here for the standard size of struct page,
	 * debugging options may cause it to be larger in which case the
	 * implementation will limit the pfns advertised through
	 * ->direct_access() to those that are included in the memmap.
	 */
	start += start_pad;
	npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
	if (nd_pfn->mode == PFN_MODE_PMEM)
		offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
			- start;
	else if (nd_pfn->mode == PFN_MODE_RAM)
		offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
	else
		return -ENXIO;

	if (offset + start_pad + end_trunc >= pmem->size) {
		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
				dev_name(&ndns->dev));
		return -ENXIO;
	}

	npfns = (pmem->size - offset - start_pad - end_trunc) / SZ_4K;
	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
	pfn_sb->dataoff = cpu_to_le64(offset);
	pfn_sb->npfns = cpu_to_le64(npfns);
	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
	pfn_sb->version_major = cpu_to_le16(1);
	pfn_sb->version_minor = cpu_to_le16(1);
	pfn_sb->start_pad = cpu_to_le32(start_pad);
	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
	pfn_sb->checksum = cpu_to_le64(checksum);

	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
}

static void nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
{
	struct pmem_device *pmem;

	/* free pmem disk */
	pmem = dev_get_drvdata(&nd_pfn->dev);
	pmem_detach_disk(pmem);
}

/*
 * We hotplug memory at section granularity, pad the reserved area from
 * the previous section base to the namespace base address.
 */
static unsigned long init_altmap_base(resource_size_t base)
{
	unsigned long base_pfn = PHYS_PFN(base);

	return PFN_SECTION_ALIGN_DOWN(base_pfn);
}

static unsigned long init_altmap_reserve(resource_size_t base)
{
	unsigned long reserve = PHYS_PFN(SZ_8K);
	unsigned long base_pfn = PHYS_PFN(base);

	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
	return reserve;
}

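/*
 * Re-map the namespace according to a validated pfn superblock: set up a
 * vmem_altmap so the struct page memmap can live in pmem itself
 * (PFN_MODE_PMEM), replace the linear mapping with
 * devm_memremap_pages(), and attach the disk over the remaining data
 * area.
 */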
static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
{
	int rc;
	struct resource res;
	struct request_queue *q;
	struct pmem_device *pmem;
	struct vmem_altmap *altmap;
	struct device *dev = &nd_pfn->dev;
	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
	struct nd_namespace_common *ndns = nd_pfn->ndns;
	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	resource_size_t base = nsio->res.start + start_pad;
	struct vmem_altmap __altmap = {
		.base_pfn = init_altmap_base(base),
		.reserve = init_altmap_reserve(base),
	};

	pmem = dev_get_drvdata(dev);
	pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
	pmem->pfn_pad = start_pad + end_trunc;
	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
	if (nd_pfn->mode == PFN_MODE_RAM) {
		if (pmem->data_offset < SZ_8K)
			return -EINVAL;
		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
		altmap = NULL;
	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
		nd_pfn->npfns = (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ PAGE_SIZE;
		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
			dev_info(&nd_pfn->dev,
					"number of pfns truncated from %lld to %ld\n",
					le64_to_cpu(nd_pfn->pfn_sb->npfns),
					nd_pfn->npfns);
		altmap = &__altmap;
		altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
		altmap->alloc = 0;
	} else {
		rc = -ENXIO;
		goto err;
	}

	/* establish pfn range for lookup, and switch to direct map */
	q = pmem->pmem_queue;
	memcpy(&res, &nsio->res, sizeof(res));
	res.start += start_pad;
	res.end -= end_trunc;
	devm_memunmap(dev, (void __force *) pmem->virt_addr);
	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
			&q->q_usage_counter, altmap);
	pmem->pfn_flags |= PFN_MAP;
	if (IS_ERR(pmem->virt_addr)) {
		rc = PTR_ERR(pmem->virt_addr);
		goto err;
	}

	/* attach pmem disk in "pfn-mode" */
	rc = pmem_attach_disk(dev, ndns, pmem);
	if (rc)
		goto err;

	return rc;
 err:
	nvdimm_namespace_detach_pfn(nd_pfn);
	return rc;
}

static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
{
	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
	int rc;

	if (!nd_pfn->uuid || !nd_pfn->ndns)
		return -ENODEV;

	rc = nd_pfn_init(nd_pfn);
	if (rc)
		return rc;
	/* we need a valid pfn_sb before we can init a vmem_altmap */
	return __nvdimm_namespace_attach_pfn(nd_pfn);
}

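/*
 * Bus probe: map the namespace, then hand off to the BTT or PFN
 * personality when one has claimed it, or probe for their info blocks
 * before falling back to attaching a raw pmem disk.
 */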
static int nd_pmem_probe(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct pmem_device *pmem;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	nsio = to_nd_namespace_io(&ndns->dev);
	pmem = pmem_alloc(dev, &nsio->res, nd_region->id);
	if (IS_ERR(pmem))
		return PTR_ERR(pmem);

	dev_set_drvdata(dev, pmem);
	ndns->rw_bytes = pmem_rw_bytes;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res);

	if (is_nd_btt(dev)) {
		/* btt allocates its own request_queue */
		blk_cleanup_queue(pmem->pmem_queue);
		pmem->pmem_queue = NULL;
		return nvdimm_namespace_attach_btt(ndns);
	}

	if (is_nd_pfn(dev))
		return nvdimm_namespace_attach_pfn(ndns);

	if (nd_btt_probe(dev, ndns, pmem) == 0
			|| nd_pfn_probe(dev, ndns, pmem) == 0) {
		/*
		 * We'll come back as either btt-pmem, or pfn-pmem, so
		 * drop the queue allocation for now.
		 */
		blk_cleanup_queue(pmem->pmem_queue);
		return -ENXIO;
	}

	return pmem_attach_disk(dev, ndns, pmem);
}

static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else if (is_nd_pfn(dev))
		nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
	else
		pmem_detach_disk(pmem);

	return 0;
}

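/*
 * Region notification callback: on NVDIMM_REVALIDATE_POISON, rebuild the
 * badblocks list over the (possibly offset/truncated) data resource of
 * whichever personality is active.
 */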
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");