Merge branch 'for-4.9/dax' into libnvdimm-for-next
drivers/dax/pmem.c
/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/percpu-refcount.h>
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include "../nvdimm/pfn.h"
#include "../nvdimm/nd.h"
#include "dax.h"

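/*
 * Driver-private state: the parent device, the percpu_ref handed to
 * devm_memremap_pages(), and a completion signalled by the ref's
 * release callback once the last reference is dropped.
 */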
struct dax_pmem {
	struct device *dev;
	struct percpu_ref ref;
	struct completion cmp;
};

static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
{
	return container_of(ref, struct dax_pmem, ref);
}

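/* percpu_ref release callback: runs when the reference count hits zero */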
static void dax_pmem_percpu_release(struct percpu_ref *ref)
{
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	complete(&dax_pmem->cmp);
}

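/* devm action: tear down the percpu_ref once all references are gone */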
static void dax_pmem_percpu_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	/* wait for the release callback before freeing the ref's counters */
	wait_for_completion(&dax_pmem->cmp);
	percpu_ref_exit(ref);
}

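/* devm action: kill the ref, dropping the initial reference and refusing new ones */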
static void dax_pmem_percpu_kill(void *data)
{
	struct percpu_ref *ref = data;
	struct dax_pmem *dax_pmem = to_dax_pmem(ref);

	dev_dbg(dax_pmem->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}

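/*
 * Bind a pmem namespace configured for device-DAX: parse its 'pfn' info
 * block, map the namespace with devm_memremap_pages(), and register a
 * dax_region/dax_dev pair over the data portion of the range.
 */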
static int dax_pmem_probe(struct device *dev)
{
	int rc;
	void *addr;
	struct resource res;
	struct dax_dev *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct dax_pmem *dax_pmem;
	struct nd_region *nd_region;
	struct nd_namespace_io *nsio;
	struct dax_region *dax_region;
	struct nd_namespace_common *ndns;
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
	struct vmem_altmap __altmap, *altmap = NULL;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	/* parse the 'pfn' info block via ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return rc;
	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap);
	devm_nsio_disable(dev, nsio);

	pfn_sb = nd_pfn->pfn_sb;

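	/* claim the namespace range so no other driver can map it */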
	if (!devm_request_mem_region(dev, nsio->res.start,
				resource_size(&nsio->res), dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
		return -EBUSY;
	}

	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

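	/*
	 * Set up the completion and the percpu_ref handed to
	 * devm_memremap_pages(); dax_pmem_percpu_release() completes
	 * ->cmp once the last reference is dropped.
	 */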
	dax_pmem->dev = dev;
	init_completion(&dax_pmem->cmp);
	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
			&dax_pmem->ref);
	if (rc)
		return rc;

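	/* map the namespace and allocate struct pages for the pmem range */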
	addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
			&dax_pmem->ref);
	if (rc)
		return rc;

	/* adjust the dax_region resource to the start of data */
	res.start += le64_to_cpu(pfn_sb->dataoff);

	nd_region = to_nd_region(dev->parent);
	dax_region = alloc_dax_region(dev, nd_region->id, &res,
			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
	if (!dax_region)
		return -ENOMEM;

	/* TODO: support for subdividing a dax region... */
	dax_dev = devm_create_dax_dev(dax_region, &res, 1);

	/* child dax_dev instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);

	return PTR_ERR_OR_ZERO(dax_dev);
}

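/* register on the libnvdimm bus as the driver for DAX-mode pmem namespaces */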
static struct nd_device_driver dax_pmem_driver = {
	.probe = dax_pmem_probe,
	.drv = {
		.name = "dax_pmem",
	},
	.type = ND_DRIVER_DAX_PMEM,
};

static int __init dax_pmem_init(void)
{
	return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_init);

static void __exit dax_pmem_exit(void)
{
	driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);