mm: introduce find_dev_pagemap()
[cascardo/linux.git] / kernel / memremap.c
index 7658d32..61cfbf4 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
+#include <linux/radix-tree.h>
+#include <linux/memremap.h>
 #include <linux/device.h>
 #include <linux/types.h>
+#include <linux/pfn_t.h>
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/memory_hotplug.h>
@@ -147,23 +150,64 @@ void devm_memunmap(struct device *dev, void *addr)
 }
 EXPORT_SYMBOL(devm_memunmap);
 
/*
 * phys_to_pfn_t() - convert an address to a flag-carrying pfn_t
 * @addr: address to convert (NOTE(review): parameter type is dma_addr_t,
 *        but the name says "phys" -- confirm which address space callers
 *        actually pass)
 * @flags: PFN_* flags to encode alongside the page frame number
 *
 * Shifts @addr down to a page frame number and packs it together with
 * @flags via __pfn_to_pfn_t().
 */
pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
+
 #ifdef CONFIG_ZONE_DEVICE
+static DEFINE_MUTEX(pgmap_lock);
+static RADIX_TREE(pgmap_radix, GFP_KERNEL);
+#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
+#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+
 struct page_map {
        struct resource res;
+       struct percpu_ref *ref;
+       struct dev_pagemap pgmap;
 };
 
-static void devm_memremap_pages_release(struct device *dev, void *res)
/*
 * pgmap_radix_release() - drop all radix-tree entries covering @res
 * @res: the device-memory resource being torn down
 *
 * devm_memremap_pages() inserts one radix slot per memory section spanned
 * by @res; walk the same section-granular keys and delete them under
 * pgmap_lock.  Deleting a key that was never inserted (e.g. after a
 * partial setup failure) is harmless -- radix_tree_delete() simply
 * returns NULL for missing entries.
 */
static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}
+
/*
 * devm_memremap_pages_release() - devres teardown for devm_memremap_pages()
 * @dev: device that owned the mapping (unused here, devres signature)
 * @data: the struct page_map allocated at map time
 *
 * Unpublish the radix-tree bookkeeping first so find_dev_pagemap() stops
 * returning this mapping, then tear down the arch mapping.  The aligned
 * start/size must mirror the rounding done on the setup path so the exact
 * same section-aligned region is removed.
 */
static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;

	pgmap_radix_release(res);

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
}
+
/*
 * find_dev_pagemap() - look up the dev_pagemap covering @phys
 * @phys: physical address to resolve
 *
 * Assumes rcu_read_lock() held at entry (enforced by the WARN below):
 * entries are deleted on teardown and the backing page_map is freed by
 * devres, so the returned pointer is only safe to dereference within the
 * caller's RCU read-side critical section.
 *
 * Returns the matching dev_pagemap, or NULL if @phys is not covered by
 * any registered device mapping.
 */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	/* keys are section numbers, matching the insertion granularity */
	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}
 
 void *devm_memremap_pages(struct device *dev, struct resource *res)
 {
        int is_ram = region_intersects(res->start, resource_size(res),
                        "System RAM");
+       resource_size_t key, align_start, align_size;
        struct page_map *page_map;
        int error, nid;
 
@@ -183,18 +227,50 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
 
        memcpy(&page_map->res, res, sizeof(*res));
 
+       page_map->pgmap.dev = dev;
+       mutex_lock(&pgmap_lock);
+       error = 0;
+       for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+               struct dev_pagemap *dup;
+
+               rcu_read_lock();
+               dup = find_dev_pagemap(key);
+               rcu_read_unlock();
+               if (dup) {
+                       dev_err(dev, "%s: %pr collides with mapping for %s\n",
+                                       __func__, res, dev_name(dup->dev));
+                       error = -EBUSY;
+                       break;
+               }
+               error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
+                               page_map);
+               if (error) {
+                       dev_err(dev, "%s: failed: %d\n", __func__, error);
+                       break;
+               }
+       }
+       mutex_unlock(&pgmap_lock);
+       if (error)
+               goto err_radix;
+
        nid = dev_to_node(dev);
        if (nid < 0)
                nid = numa_mem_id();
 
-       error = arch_add_memory(nid, res->start, resource_size(res), true);
-       if (error) {
-               devres_free(page_map);
-               return ERR_PTR(error);
-       }
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       error = arch_add_memory(nid, align_start, align_size, true);
+       if (error)
+               goto err_add_memory;
 
        devres_add(dev, page_map);
        return __va(res->start);
+
+ err_add_memory:
+ err_radix:
+       pgmap_radix_release(res);
+       devres_free(page_map);
+       return ERR_PTR(error);
 }
 EXPORT_SYMBOL(devm_memremap_pages);
 #endif /* CONFIG_ZONE_DEVICE */