xen: find unused contiguous memory area
[cascardo/linux.git] / arch / x86 / xen / setup.c
index ab6c36e..973d294 100644 (file)
@@ -223,7 +223,7 @@ static int __init xen_free_mfn(unsigned long mfn)
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
-       unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
+                       unsigned long end_pfn, unsigned long nr_pages)
 {
        unsigned long pfn, end;
        int ret;
@@ -243,7 +243,7 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
 
                if (ret == 1) {
-                       (*released)++;
+                       xen_released_pages++;
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
                } else
@@ -359,8 +359,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
  */
 static unsigned long __init xen_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-       unsigned long remap_pfn, unsigned long *released,
-       unsigned long *remapped)
+       unsigned long remap_pfn)
 {
        unsigned long pfn;
        unsigned long i = 0;
@@ -385,7 +384,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
-                               cur_pfn + left, nr_pages, released);
+                                               cur_pfn + left, nr_pages);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
@@ -397,7 +396,6 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
-               *remapped += size;
        }
 
        /*
@@ -412,14 +410,11 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
        return remap_pfn;
 }
 
-static void __init xen_set_identity_and_remap(unsigned long nr_pages,
-                       unsigned long *released, unsigned long *remapped)
+static void __init xen_set_identity_and_remap(unsigned long nr_pages)
 {
        phys_addr_t start = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry = xen_e820_map;
-       unsigned long num_released = 0;
-       unsigned long num_remapped = 0;
        int i;
 
        /*
@@ -445,16 +440,12 @@ static void __init xen_set_identity_and_remap(unsigned long nr_pages,
                        if (start_pfn < end_pfn)
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                start_pfn, end_pfn, nr_pages,
-                                               last_pfn, &num_released,
-                                               &num_remapped);
+                                               last_pfn);
                        start = end;
                }
        }
 
-       *released = num_released;
-       *remapped = num_remapped;
-
-       pr_info("Released %ld page(s)\n", num_released);
+       pr_info("Released %ld page(s)\n", xen_released_pages);
 }
 
 /*
@@ -560,6 +551,85 @@ static void __init xen_ignore_unusable(void)
        }
 }
 
+static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
+{
+       unsigned long extra = 0;
+       const struct e820entry *entry = xen_e820_map;
+       int i;
+
+       for (i = 0; i < xen_e820_map_entries; i++, entry++) {
+               unsigned long start_pfn = PFN_DOWN(entry->addr);
+               unsigned long end_pfn = PFN_UP(entry->addr + entry->size);
+
+               if (start_pfn >= max_pfn)
+                       break;
+               if (entry->type == E820_RAM)
+                       continue;
+               if (end_pfn >= max_pfn)
+                       end_pfn = max_pfn;
+               extra += end_pfn - start_pfn;
+       }
+
+       return extra;
+}
+
+bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
+{
+       struct e820entry *entry;
+       unsigned mapcnt;
+       phys_addr_t end;
+
+       if (!size)
+               return false;
+
+       end = start + size;
+       entry = xen_e820_map;
+
+       for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
+               if (entry->type == E820_RAM && entry->addr <= start &&
+                   (entry->addr + entry->size) >= end)
+                       return false;
+
+               entry++;
+       }
+
+       return true;
+}
+
+/*
+ * Find a free area in physical memory that is not yet reserved and that
+ * complies with the E820 map.
+ * Used to relocate pre-allocated areas such as the initrd or the p2m list
+ * which conflict with the E820 map about to be put in use.
+ * Return 0 if no suitable area is found; otherwise return the physical
+ * address of the area, which is already reserved for the caller's
+ * convenience.
+ */
+phys_addr_t __init xen_find_free_area(phys_addr_t size)
+{
+       unsigned mapcnt;
+       phys_addr_t addr, start;
+       struct e820entry *entry = xen_e820_map;
+
+       for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
+               if (entry->type != E820_RAM || entry->size < size)
+                       continue;
+               start = entry->addr;
+               for (addr = start; addr < start + size; addr += PAGE_SIZE) {
+                       if (!memblock_is_reserved(addr))
+                               continue;
+                       start = addr + PAGE_SIZE;
+                       if (start + size > entry->addr + entry->size)
+                               break;
+               }
+               if (addr >= start + size) {
+                       memblock_reserve(start, size);
+                       return start;
+               }
+       }
+
+       return 0;
+}
+
 /*
  * Reserve Xen mfn_list.
  * See comment above "struct start_info" in <xen/interface/xen.h>
@@ -601,12 +671,12 @@ static void __init xen_reserve_xen_mfnlist(void)
 char * __init xen_memory_setup(void)
 {
        unsigned long max_pfn = xen_start_info->nr_pages;
-       phys_addr_t mem_end;
+       phys_addr_t mem_end, addr, size, chunk_size;
+       u32 type;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long extra_pages = 0;
-       unsigned long remapped_pages;
        int i;
        int op;
 
@@ -653,15 +723,8 @@ char * __init xen_memory_setup(void)
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;
 
-       /*
-        * Set identity map on non-RAM pages and prepare remapping the
-        * underlying RAM.
-        */
-       xen_set_identity_and_remap(max_pfn, &xen_released_pages,
-                                  &remapped_pages);
-
-       extra_pages += xen_released_pages;
-       extra_pages += remapped_pages;
+       /* How many extra pages do we need due to remapping? */
+       extra_pages += xen_count_remap_pages(max_pfn);
 
        /*
         * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
@@ -677,29 +740,35 @@ char * __init xen_memory_setup(void)
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);
        i = 0;
+       addr = xen_e820_map[0].addr;
+       size = xen_e820_map[0].size;
        while (i < xen_e820_map_entries) {
-               phys_addr_t addr = xen_e820_map[i].addr;
-               phys_addr_t size = xen_e820_map[i].size;
-               u32 type = xen_e820_map[i].type;
+               chunk_size = size;
+               type = xen_e820_map[i].type;
 
                if (type == E820_RAM) {
                        if (addr < mem_end) {
-                               size = min(size, mem_end - addr);
+                               chunk_size = min(size, mem_end - addr);
                        } else if (extra_pages) {
-                               size = min(size, PFN_PHYS(extra_pages));
-                               extra_pages -= PFN_DOWN(size);
-                               xen_add_extra_mem(addr, size);
-                               xen_max_p2m_pfn = PFN_DOWN(addr + size);
+                               chunk_size = min(size, PFN_PHYS(extra_pages));
+                               extra_pages -= PFN_DOWN(chunk_size);
+                               xen_add_extra_mem(addr, chunk_size);
+                               xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
                        } else
                                type = E820_UNUSABLE;
                }
 
-               xen_align_and_add_e820_region(addr, size, type);
+               xen_align_and_add_e820_region(addr, chunk_size, type);
 
-               xen_e820_map[i].addr += size;
-               xen_e820_map[i].size -= size;
-               if (xen_e820_map[i].size == 0)
+               addr += chunk_size;
+               size -= chunk_size;
+               if (size == 0) {
                        i++;
+                       if (i < xen_e820_map_entries) {
+                               addr = xen_e820_map[i].addr;
+                               size = xen_e820_map[i].size;
+                       }
+               }
        }
 
        /*
@@ -709,7 +778,7 @@ char * __init xen_memory_setup(void)
         * PFNs above MAX_P2M_PFN are considered identity mapped as
         * well.
         */
-       set_phys_range_identity(xen_e820_map[i - 1].addr / PAGE_SIZE, ~0ul);
+       set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
 
        /*
         * In domU, the ISA region is normal, usable memory, but we
@@ -723,6 +792,12 @@ char * __init xen_memory_setup(void)
 
        xen_reserve_xen_mfnlist();
 
+       /*
+        * Set identity map on non-RAM pages and prepare remapping the
+        * underlying RAM.
+        */
+       xen_set_identity_and_remap(max_pfn);
+
        return "Xen";
 }