/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
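/*
 * Overview (summary of the code below): this driver exposes a persistent
 * memory namespace as a standard block device.  I/O is serviced with
 * memcpy-based transfers between bio pages and the memremap()'d
 * namespace, and the device implements ->direct_access() so DAX-capable
 * filesystems can bypass the page cache entirely.
 */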
#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "pfn.h"
#include "nd.h"
struct pmem_device {
	struct request_queue	*pmem_queue;
	struct gendisk		*pmem_disk;

	/* One contiguous memory region per device */
	phys_addr_t		phys_addr;
	/* when non-zero this device is hosting a 'pfn' instance */
	phys_addr_t		data_offset;
	u64			pfn_flags;
	void __pmem		*virt_addr;
	/* immutable base size of the namespace */
	size_t			size;
	/* trim size when namespace capacity has been section aligned */
	u32			pfn_pad;
	struct badblocks	bb;
};
static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = disk_to_dev(pmem->pmem_disk);
	sector_t sector;
	long cleared;

	sector = (offset - pmem->data_offset) / 512;
	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);

	if (cleared > 0 && cleared / 512) {
		dev_dbg(dev, "%s: %llx clear %ld sector%s\n",
				__func__, (unsigned long long) sector,
				cleared / 512, cleared / 512 > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared / 512);
	}
	invalidate_pmem(pmem->virt_addr + offset, len);
}
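/*
 * Per-segment I/O helper.  Reads that overlap a known-bad (poisoned)
 * range fail with -EIO so the error is reported rather than consumed;
 * writes always proceed, and a write over a bad range additionally asks
 * the nvdimm core to clear the poison and then rewrites the data.
 */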
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	void *mem = kmap_atomic(page);
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void __pmem *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (rw == READ) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = memcpy_from_pmem(mem + off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		memcpy_to_pmem(pmem_addr, mem + off, len);
		if (unlikely(bad_pmem)) {
			pmem_clear_poison(pmem, pmem_off, len);
			memcpy_to_pmem(pmem_addr, mem + off, len);
		}
	}

	kunmap_atomic(mem);
	return rc;
}
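/*
 * pmem is a bio-based driver: the queue is created with
 * blk_alloc_queue_node() and this make_request function is wired in via
 * blk_queue_make_request(), so bios are serviced synchronously here
 * without ever being queued as requests.  The wmb_pmem() on the write
 * path makes the copied data durable before the bio completes.
 */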
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_data_dir(bio),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio_data_dir(bio))
		wmb_pmem();

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int rw)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
	if (rw & WRITE)
		wmb_pmem();

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}
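/*
 * DAX entry point: translate a sector to a kernel virtual address and
 * pfn within the persistent memory range, returning how many bytes are
 * addressable from that offset.  Callers typically reach this through
 * the generic bdev_direct_access() wrapper; a minimal sketch of such a
 * caller (not part of this file) for kernels of this vintage:
 *
 *	struct blk_dax_ctl dax = { .sector = 0, .size = PAGE_SIZE };
 *	long avail = bdev_direct_access(bdev, &dax);
 *	// on success, dax.addr and dax.pfn describe the mapping
 */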
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	return pmem->size - pmem->pfn_pad - offset;
}
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_release_disk(void *disk)
{
	del_gendisk(disk);
	put_disk(disk);
}
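/*
 * Build and register the gendisk for a namespace.  Everything is
 * allocated with devm_* helpers so teardown happens automatically, in
 * reverse order, on driver unbind.  Three mapping modes are handled
 * below: a 'pfn' namespace (struct page metadata stored in pmem via a
 * vmem_altmap), a page-mapped namespace (struct page in regular RAM),
 * and a plain memremap() with no struct page coverage.
 */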
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;
	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);
	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (!arch_has_wmb_pmem())
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}
	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;
	pmem->pmem_queue = q;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
	/*
	 * At release time the queue must be dead before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action(dev, pmem_release_queue, q)) {
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = (void __pmem *) addr;
	blk_queue_make_request(pmem->pmem_queue, pmem_make_request);
	blk_queue_physical_block_size(pmem->pmem_queue, PAGE_SIZE);
	blk_queue_max_hw_sectors(pmem->pmem_queue, UINT_MAX);
	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);
	pmem->pmem_queue->queuedata = pmem;
	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	if (devm_add_action(dev, pmem_release_disk, disk)) {
		put_disk(disk);
		return -ENOMEM;
	}

	disk->fops = &pmem_fops;
	disk->queue = pmem->pmem_queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	disk->driverfs_dev = dev;
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	pmem->pmem_disk = disk;
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, res);
	disk->bb = &pmem->bb;
	add_disk(disk);
	revalidate_disk(disk);

	return 0;
}
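/*
 * Probe is personality-aware: a namespace may carry a BTT or PFN info
 * block.  If one is found, the device re-registers as that personality
 * and probe runs again, so the -ENXIO below is not a hard failure,
 * merely "not claimed in this pass".
 */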
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}
static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	return 0;
}
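/*
 * Poison-list notification: when the nvdimm core discovers new media
 * errors it fires NVDIMM_REVALIDATE_POISON; the handler re-derives the
 * namespace span (adjusting for any BTT/PFN metadata offset and end
 * truncation) and repopulates the badblocks list for that range.
 */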
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct pmem_device *pmem = dev_get_drvdata(dev);
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

		ndns = nd_pfn->ndns;
		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
	} else
		ndns = to_ndns(dev);

	nsio = to_nd_namespace_io(&ndns->dev);
	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
}
MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);

static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};
static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);
MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");