/*
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "dax.h"

static int dax_major;
static struct class *dax_class;
static DEFINE_IDA(dax_minor_ida);

/**
 * struct dax_region - mapping infrastructure for dax devices
 * @id: kernel-wide unique region id for a memory range
 * @ida: instance id allocator for child dax devices
 * @base: linear address corresponding to @res
 * @kref: to pin while other agents have a need to do lookups
 * @dev: parent device backing this region
 * @align: allocation and mapping alignment for child dax devices
 * @res: physical address range of the region
 * @pfn_flags: identify whether the pfns are paged back or not
 */
struct dax_region {
        int id;
        struct ida ida;
        void *base;
        struct kref kref;
        struct device *dev;
        unsigned int align;
        struct resource res;
        unsigned long pfn_flags;
};

/**
 * struct dax_dev - subdivision of a dax region
 * @region: parent dax_region
 * @dev: device backing the character device
 * @kref: enable this data to be tracked in filp->private_data
 * @alive: !alive + rcu grace period == no new mappings can be established
 * @id: child id in the region
 * @num_resources: number of physical address extents in this device
 * @res: array of physical address ranges
 */
struct dax_dev {
        struct dax_region *region;
        struct device *dev;
        struct kref kref;
        bool alive;
        int id;
        int num_resources;
        struct resource res[0];
};

static void dax_region_free(struct kref *kref)
{
        struct dax_region *dax_region;

        dax_region = container_of(kref, struct dax_region, kref);
        kfree(dax_region);
}

void dax_region_put(struct dax_region *dax_region)
{
        kref_put(&dax_region->kref, dax_region_free);
}
EXPORT_SYMBOL_GPL(dax_region_put);

static void dax_dev_free(struct kref *kref)
{
        struct dax_dev *dax_dev;

        dax_dev = container_of(kref, struct dax_dev, kref);
        dax_region_put(dax_dev->region);
        kfree(dax_dev);
}

static void dax_dev_put(struct dax_dev *dax_dev)
{
        kref_put(&dax_dev->kref, dax_dev_free);
}

struct dax_region *alloc_dax_region(struct device *parent, int region_id,
                struct resource *res, unsigned int align, void *addr,
                unsigned long pfn_flags)
{
        struct dax_region *dax_region;

        dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
        if (!dax_region)
                return NULL;

        memcpy(&dax_region->res, res, sizeof(*res));
        dax_region->pfn_flags = pfn_flags;
        kref_init(&dax_region->kref);
        dax_region->id = region_id;
        ida_init(&dax_region->ida);
        dax_region->align = align;
        dax_region->dev = parent;
        dax_region->base = addr;

        return dax_region;
}
EXPORT_SYMBOL_GPL(alloc_dax_region);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_dev *dax_dev = dev_get_drvdata(dev);
        unsigned long long size = 0;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++)
                size += resource_size(&dax_dev->res[i]);

        return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static struct attribute *dax_device_attributes[] = {
        &dev_attr_size.attr,
        NULL,
};

static const struct attribute_group dax_device_attribute_group = {
        .attrs = dax_device_attributes,
};

static const struct attribute_group *dax_attribute_groups[] = {
        &dax_device_attribute_group,
        NULL,
};

static void unregister_dax_dev(void *_dev)
{
        struct device *dev = _dev;
        struct dax_dev *dax_dev = dev_get_drvdata(dev);
        struct dax_region *dax_region = dax_dev->region;

        dev_dbg(dev, "%s\n", __func__);

        /*
         * Note, rcu is not protecting the liveness of dax_dev; rcu is
         * ensuring that any fault handlers that might have seen
         * dax_dev->alive == true have completed. Any fault handlers
         * that start after synchronize_rcu() has started will abort
         * upon seeing dax_dev->alive == false.
         */
        dax_dev->alive = false;
        synchronize_rcu();

        get_device(dev);
        device_unregister(dev);
        ida_simple_remove(&dax_region->ida, dax_dev->id);
        ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
        put_device(dev);
        dax_dev_put(dax_dev);
}

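/*
 * The reader side of the ->alive protocol lives in the fault paths:
 * dax_dev_fault() and dax_dev_pmd_fault() wrap their work in
 * rcu_read_lock()/rcu_read_unlock(), so by the time synchronize_rcu()
 * above returns, any fault that observed ->alive == true has finished
 * and later faults fail check_vma().
 */
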
int devm_create_dax_dev(struct dax_region *dax_region, struct resource *res,
                int count)
{
        struct device *parent = dax_region->dev;
        struct dax_dev *dax_dev;
        struct device *dev;
        int rc, minor;
        dev_t dev_t;

        dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL);
        if (!dax_dev)
                return -ENOMEM;

        memcpy(dax_dev->res, res, sizeof(*res) * count);
        dax_dev->num_resources = count;
        kref_init(&dax_dev->kref);
        dax_dev->alive = true;
        dax_dev->region = dax_region;
        kref_get(&dax_region->kref);

        dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
        if (dax_dev->id < 0) {
                rc = dax_dev->id;
                goto err_id;
        }

        minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
        if (minor < 0) {
                rc = minor;
                goto err_minor;
        }

        dev_t = MKDEV(dax_major, minor);
        dev = device_create_with_groups(dax_class, parent, dev_t, dax_dev,
                        dax_attribute_groups, "dax%d.%d", dax_region->id,
                        dax_dev->id);
        if (IS_ERR(dev)) {
                rc = PTR_ERR(dev);
                goto err_create;
        }
        dax_dev->dev = dev;

        rc = devm_add_action(dax_region->dev, unregister_dax_dev, dev);
        if (rc) {
                unregister_dax_dev(dev);
                return rc;
        }

        return 0;

 err_create:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        ida_simple_remove(&dax_region->ida, dax_dev->id);
 err_id:
        dax_dev_put(dax_dev);

        return rc;
}
EXPORT_SYMBOL_GPL(devm_create_dax_dev);

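/*
 * Example usage from a hypothetical parent driver (a sketch for
 * illustration only; the pdev, region_id, res, and kaddr names are
 * assumptions, not taken from this file):
 *
 *	struct dax_region *dax_region;
 *	int rc;
 *
 *	dax_region = alloc_dax_region(&pdev->dev, region_id, &res,
 *			PMD_SIZE, kaddr, PFN_DEV | PFN_MAP);
 *	if (!dax_region)
 *		return -ENOMEM;
 *	rc = devm_create_dax_dev(dax_region, &res, 1);
 *	dax_region_put(dax_region);
 *	return rc;
 *
 * devm_create_dax_dev() takes its own reference on the region, so the
 * caller may drop its reference once the child device exists.
 */
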
/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_dev_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags)
{
        unsigned long off, off_end, off_align, len_align, addr_align, align;
        struct dax_dev *dax_dev = filp ? filp->private_data : NULL;
        struct dax_region *dax_region;

        if (!dax_dev || addr)
                goto out;

        dax_region = dax_dev->region;
        align = dax_region->align;
        off = pgoff << PAGE_SHIFT;
        off_end = off + len;
        off_align = round_up(off, align);

        if ((off_end <= off_align) || ((off_end - off_align) < align))
                goto out;

        len_align = len + align;
        if ((off + len_align) < off)
                goto out;

        addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
                        pgoff, flags);
        if (!IS_ERR_VALUE(addr_align)) {
                addr_align += (off - addr_align) & (align - 1);
                return addr_align;
        }
 out:
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

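/*
 * Worked example of the over-allocation above, with illustrative
 * numbers only: for align = 2M, len = 4M, and off = pgoff << PAGE_SHIFT
 * = 6M, the mm is asked for len + align = 6M of virtual space.  If it
 * returns addr_align = 0x7f0000100000 (1M past a 2M boundary), then
 * (off - addr_align) & (align - 1) = 1M, so the start is bumped to
 * 0x7f0000200000.  That address is congruent to off modulo 2M, which
 * is what permits PMD-sized mappings, and the requested 4M still fits
 * inside the 6M the mm handed back.
 */
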
static int __match_devt(struct device *dev, const void *data)
{
        const dev_t *devt = data;

        return dev->devt == *devt;
}

static struct device *dax_dev_find(dev_t dev_t)
{
        return class_find_device(dax_class, NULL, &dev_t, __match_devt);
}

static int dax_dev_open(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev = NULL;
        struct device *dev;

        dev = dax_dev_find(inode->i_rdev);
        if (!dev)
                return -ENXIO;

        device_lock(dev);
        dax_dev = dev_get_drvdata(dev);
        if (dax_dev) {
                dev_dbg(dev, "%s\n", __func__);
                filp->private_data = dax_dev;
                kref_get(&dax_dev->kref);
                inode->i_flags = S_DAX;
        }
        device_unlock(dev);

        if (!dax_dev) {
                put_device(dev);
                return -ENXIO;
        }

        return 0;
}

static int dax_dev_release(struct inode *inode, struct file *filp)
{
        struct dax_dev *dax_dev = filp->private_data;
        struct device *dev = dax_dev->dev;

        dev_dbg(dax_dev->dev, "%s\n", __func__);
        dax_dev_put(dax_dev);
        put_device(dev);

        return 0;
}

static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                const char *func)
{
        struct dax_region *dax_region = dax_dev->region;
        struct device *dev = dax_dev->dev;
        unsigned long mask;

        if (!dax_dev->alive)
                return -ENXIO;

        /* prevent private / writable mappings from being established */
        if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) {
                dev_info(dev, "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }

        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
                dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
        }

        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
                dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }

        if (!vma_is_dax(vma)) {
                dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }

        return 0;
}

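/*
 * Translate a file page offset into a physical address by walking the
 * device's resource extents in order; returns -1 if @pgoff (extended
 * by @size bytes) does not fall inside any extent.
 */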
static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff,
                unsigned long size)
{
        struct resource *res;
        phys_addr_t phys;
        int i;

        for (i = 0; i < dax_dev->num_resources; i++) {
                res = &dax_dev->res[i];
                phys = pgoff * PAGE_SIZE + res->start;
                if (phys >= res->start && phys <= res->end)
                        break;
                pgoff -= PHYS_PFN(resource_size(res));
        }

        if (i < dax_dev->num_resources) {
                res = &dax_dev->res[i];
                if (phys + size - 1 <= res->end)
                        return phys;
        }

        return -1;
}

static int __dax_dev_fault(struct dax_dev *dax_dev, struct vm_area_struct *vma,
                struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long) vmf->virtual_address;
        struct device *dev = dax_dev->dev;
        struct dax_region *dax_region;
        int rc = VM_FAULT_SIGBUS;
        phys_addr_t phys;
        pfn_t pfn;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PAGE_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                vmf->pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        rc = vm_insert_mixed(vma, vaddr, pfn);

        if (rc == -ENOMEM)
                return VM_FAULT_OOM;
        if (rc < 0 && rc != -EBUSY)
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

static int dax_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int rc;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (vmf->flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);

        rcu_read_lock();
        rc = __dax_dev_fault(dax_dev, vma, vmf);
        rcu_read_unlock();

        return rc;
}

static int __dax_dev_pmd_fault(struct dax_dev *dax_dev,
                struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd,
                unsigned int flags)
{
        unsigned long pmd_addr = addr & PMD_MASK;
        struct device *dev = dax_dev->dev;
        struct dax_region *dax_region;
        phys_addr_t phys;
        pgoff_t pgoff;
        pfn_t pfn;

        if (check_vma(dax_dev, vma, __func__))
                return VM_FAULT_SIGBUS;

        dax_region = dax_dev->region;
        if (dax_region->align > PMD_SIZE) {
                dev_dbg(dev, "%s: alignment > fault size\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        /* dax pmd mappings require pfn_t_devmap() */
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
                dev_dbg(dev, "%s: region lacks devmap flags\n", __func__);
                return VM_FAULT_SIGBUS;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
        if (phys == -1) {
                dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                                pgoff);
                return VM_FAULT_SIGBUS;
        }

        pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

        return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
                        flags & FAULT_FLAG_WRITE);
}

static int dax_dev_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, unsigned int flags)
{
        int rc;
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__,
                        current->comm, (flags & FAULT_FLAG_WRITE)
                        ? "write" : "read", vma->vm_start, vma->vm_end);

        rcu_read_lock();
        rc = __dax_dev_pmd_fault(dax_dev, vma, addr, pmd, flags);
        rcu_read_unlock();

        return rc;
}

static void dax_dev_vm_open(struct vm_area_struct *vma)
{
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(dax_dev->dev, "%s\n", __func__);
        kref_get(&dax_dev->kref);
}

static void dax_dev_vm_close(struct vm_area_struct *vma)
{
        struct file *filp = vma->vm_file;
        struct dax_dev *dax_dev = filp->private_data;

        dev_dbg(dax_dev->dev, "%s\n", __func__);
        dax_dev_put(dax_dev);
}

static const struct vm_operations_struct dax_dev_vm_ops = {
        .fault = dax_dev_fault,
        .pmd_fault = dax_dev_pmd_fault,
        .open = dax_dev_vm_open,
        .close = dax_dev_vm_close,
};

static int dax_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct dax_dev *dax_dev = filp->private_data;
        int rc;

        dev_dbg(dax_dev->dev, "%s\n", __func__);

        rc = check_vma(dax_dev, vma, __func__);
        if (rc)
                return rc;

        kref_get(&dax_dev->kref);
        vma->vm_ops = &dax_dev_vm_ops;
        vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        return 0;
}

static const struct file_operations dax_fops = {
        .llseek = noop_llseek,
        .owner = THIS_MODULE,
        .open = dax_dev_open,
        .release = dax_dev_release,
        .get_unmapped_area = dax_dev_get_unmapped_area,
        .mmap = dax_dev_mmap,
};

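/*
 * Userspace consumes a dax device by mmap()ing its character node with
 * a shared mapping; check_vma() rejects private writable mappings and
 * vmas that are not aligned to the region alignment.  A sketch, with
 * an illustrative device path:
 *
 *	int fd = open("/dev/dax0.0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, 0);
 *
 * where len (and any non-zero offset) is a multiple of the region
 * alignment.
 */
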
static int __init dax_init(void)
{
        int rc;

        rc = register_chrdev(0, "dax", &dax_fops);
        if (rc < 0)
                return rc;
        dax_major = rc;

        dax_class = class_create(THIS_MODULE, "dax");
        if (IS_ERR(dax_class)) {
                unregister_chrdev(dax_major, "dax");
                return PTR_ERR(dax_class);
        }

        return 0;
}

static void __exit dax_exit(void)
{
        class_destroy(dax_class);
        unregister_chrdev(dax_major, "dax");
        ida_destroy(&dax_minor_ida);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_init);
module_exit(dax_exit);