/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

#define DRIVER_VERSION "0.10"
#define DRIVER_AUTHOR "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC "VFIO platform base module"

static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);

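/*
 * Device-specific reset handlers register themselves on reset_list, keyed
 * by their "compatible" string. driver_lock protects the list as well as
 * the per-device open/release reference counting below.
 */
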
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
					struct module **module)
{
	struct vfio_platform_reset_node *iter;
	vfio_platform_reset_fn_t reset_fn = NULL;

	mutex_lock(&driver_lock);
	list_for_each_entry(iter, &reset_list, link) {
		if (!strcmp(iter->compat, compat) &&
		    try_module_get(iter->owner)) {
			*module = iter->owner;
			reset_fn = iter->of_reset;
			break;
		}
	}
	mutex_unlock(&driver_lock);
	return reset_fn;
}

static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
				    struct device *dev)
{
	struct acpi_device *adev;

	if (acpi_disabled)
		return -ENOENT;

	adev = ACPI_COMPANION(dev);
	if (!adev) {
		pr_err("VFIO: ACPI companion device not found for %s\n",
		       vdev->name);
		return -ENODEV;
	}

	vdev->acpihid = acpi_device_hid(adev);
	return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}

static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
{
	return vdev->of_reset ? true : false;
}

static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
						    &vdev->reset_module);
	if (!vdev->of_reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
							    &vdev->reset_module);
	}
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->of_reset)
		module_put(vdev->reset_module);
}

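/*
 * Regions are discovered through the bus-specific get_resource() callback;
 * only MMIO regions whose start address and size are page aligned are
 * flagged as mmap-capable.
 */
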
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res =
			vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
			    !(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
				    const char **extra_dbg)
{
	if (vdev->of_reset) {
		dev_info(vdev->device, "reset\n");
		return vdev->of_reset(vdev);
	}

	dev_warn(vdev->device, "no reset function found!\n");
	return -EINVAL;
}

static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_platform_call_reset(vdev, NULL);
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(vdev->parent_module);
}

static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(vdev->parent_module))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		vfio_platform_call_reset(vdev, NULL);
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(vdev->parent_module);
	return ret;
}

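/*
 * Each ioctl below follows the usual VFIO argument convention: copy in the
 * fixed part of the argument (minsz bytes), reject callers whose argsz is
 * smaller than that, and copy the same fixed part back out on success.
 */
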
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vfio_platform_has_reset(vdev))
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vfio_platform_call_reset(vdev, NULL);
	}

	return -ENOTTY;
}

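/*
 * The MMIO read/write helpers map a region lazily on first access and split
 * each transfer into the widest naturally aligned accesses (4, 2 or 1 bytes)
 * that the current offset and remaining count allow.
 */
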
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val = ioread32(reg->ioaddr + off);

			if (copy_to_user(buf, &val, 4))
				goto err;
			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val = ioread16(reg->ioaddr + off);

			if (copy_to_user(buf, &val, 2))
				goto err;
			filled = 2;
		} else {
			u8 val = ioread8(reg->ioaddr + off);

			if (copy_to_user(buf, &val, 1))
				goto err;
			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr =
			ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);
			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);
			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);
			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
						buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

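/*
 * For mmap, the region index is encoded in the upper bits of the file offset
 * (above VFIO_PLATFORM_OFFSET_SHIFT), while the remaining page offset selects
 * the page within that region.
 */
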
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

int vfio_platform_of_probe(struct vfio_platform_device *vdev,
			   struct device *dev)
{
	int ret;

	ret = device_property_read_string(dev, "compatible",
					  &vdev->compat);
	if (ret)
		pr_err("VFIO: cannot retrieve compat for %s\n",
		       vdev->name);

	return ret;
}

/*
 * There can be two kernel build combinations. One build where
 * ACPI is not selected in Kconfig and another one with the ACPI Kconfig.
 *
 * In the first case, vfio_platform_acpi_probe will return since
 * acpi_disabled is 1. DT users will not see any kind of messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI are compiled in and the system
 * may boot with either type of firmware.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe
 * routine terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All other checks
 * are valid checks. We cannot claim that this system is DT.
 */
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	ret = vfio_platform_acpi_probe(vdev, dev);
	if (ret)
		ret = vfio_platform_of_probe(vdev, dev);

	if (ret)
		return ret;

	vdev->device = dev;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		vfio_iommu_group_put(group, dev);
		return ret;
	}

	vfio_platform_get_reset(vdev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		vfio_iommu_group_put(dev->iommu_group, dev);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);

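/*
 * Usage sketch: a bus-specific front end allocates a vfio_platform_device,
 * fills in the fields consumed above (name, flags, get_resource,
 * parent_module, ...) and hands the device to the common code, e.g.:
 *
 *	vdev->name = pdev->name;
 *	vdev->get_resource = get_platform_resource;
 *	vdev->parent_module = THIS_MODULE;
 *	ret = vfio_platform_probe_common(vdev, &pdev->dev);
 *
 * pdev and get_platform_resource() are hypothetical names standing in for
 * whatever the front end provides; on removal it calls
 * vfio_platform_remove_common(&pdev->dev) and frees vdev.
 */
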
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);

void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *iter, *temp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(iter, temp, &reset_list, link) {
		if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
			list_del(&iter->link);
			break;
		}
	}

	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);

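/*
 * Registration sketch: a device-specific reset module would typically fill
 * a vfio_platform_reset_node and register it so the compatible-string lookup
 * above can find it, for example:
 *
 *	static struct vfio_platform_reset_node foo_reset_node = {
 *		.owner = THIS_MODULE,
 *		.compat = "vendor,foo",
 *		.of_reset = foo_reset,
 *	};
 *
 *	__vfio_platform_register_reset(&foo_reset_node);
 *	...
 *	vfio_platform_unregister_reset("vendor,foo", foo_reset);
 *
 * "vendor,foo" and foo_reset() are hypothetical; such a module also needs a
 * MODULE_ALIAS("vfio-reset:vendor,foo") so that the request_module() call in
 * vfio_platform_get_reset() can load it on demand.
 */
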
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);