s390/mm: make arch_add_memory() NUMA aware
author Gerald Schaefer <gerald.schaefer@de.ibm.com>
Fri, 8 May 2015 15:40:43 +0000 (17:40 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 3 Aug 2015 08:06:20 +0000 (10:06 +0200)
With NUMA support for s390, arch_add_memory() needs to respect the nid
parameter.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
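
For illustration, here is a stand-alone sketch of the range-splitting idea behind the new loop in arch_add_memory(). The boundary PFNs and the add_chunk() helper are made-up stand-ins (not the kernel's memblock_end_of_DRAM()/MAX_DMA_ADDRESS values or __add_pages()); they only show how a hot-added range is cut at each zone boundary and credited to the requested node.

/*
 * Stand-alone illustration of the range-splitting idea used in the new
 * arch_add_memory() loop.  The boundary PFNs below are example values,
 * not the real memblock/MAX_DMA_ADDRESS limits, and add_chunk() stands
 * in for __add_pages().
 */
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_MOVABLE };

static const char *zone_name[] = { "ZONE_DMA", "ZONE_NORMAL", "ZONE_MOVABLE" };

/* Stand-in for __add_pages(): just report which zone gets which chunk. */
static int add_chunk(int nid, enum zone_type zone,
		     unsigned long start_pfn, unsigned long nr_pages)
{
	printf("node %d: %-12s pfns %#lx..%#lx (%lu pages)\n",
	       nid, zone_name[zone], start_pfn,
	       start_pfn + nr_pages - 1, nr_pages);
	return 0;
}

int main(void)
{
	/* Illustrative zone boundaries (in pages), not real s390 values. */
	unsigned long dma_end_pfn = 0x80000;	 /* end of ZONE_DMA        */
	unsigned long normal_end_pfn = 0x200000; /* end of online memory   */

	/* Hot-added range straddling all three zones. */
	unsigned long start_pfn = 0x70000;
	unsigned long size_pages = 0x1a0000;
	unsigned long nr_pages;
	enum zone_type zone;
	int nid = 0, rc = 0;

	/* Same shape as the new loop: clamp each chunk to the next boundary. */
	while (size_pages > 0) {
		if (start_pfn < dma_end_pfn) {
			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
				   dma_end_pfn - start_pfn : size_pages;
			zone = ZONE_DMA;
		} else if (start_pfn < normal_end_pfn) {
			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
				   normal_end_pfn - start_pfn : size_pages;
			zone = ZONE_NORMAL;
		} else {
			nr_pages = size_pages;
			zone = ZONE_MOVABLE;
		}
		rc = add_chunk(nid, zone, start_pfn, nr_pages);
		if (rc)
			break;
		start_pfn += nr_pages;
		size_pages -= nr_pages;
	}
	return rc;
}

Built with any C compiler, this prints one line per chunk: the ZONE_DMA part first, then the ZONE_NORMAL part, then whatever remains for ZONE_MOVABLE, all attributed to the given node id.
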
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 76e8737..dc4db08 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/initrd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -170,37 +171,36 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-       unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+       unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
-       struct zone *zone;
-       int rc;
+       unsigned long nr_pages;
+       int rc, zone_enum;
 
        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;
-       for_each_zone(zone) {
-               if (zone_idx(zone) != ZONE_MOVABLE) {
-                       /* Add range within existing zone limits */
-                       zone_start_pfn = zone->zone_start_pfn;
-                       zone_end_pfn = zone->zone_start_pfn +
-                                      zone->spanned_pages;
+
+       while (size_pages > 0) {
+               if (start_pfn < dma_end_pfn) {
+                       nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
+                                  dma_end_pfn - start_pfn : size_pages;
+                       zone_enum = ZONE_DMA;
+               } else if (start_pfn < normal_end_pfn) {
+                       nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
+                                  normal_end_pfn - start_pfn : size_pages;
+                       zone_enum = ZONE_NORMAL;
                } else {
-                       /* Add remaining range to ZONE_MOVABLE */
-                       zone_start_pfn = start_pfn;
-                       zone_end_pfn = start_pfn + size_pages;
+                       nr_pages = size_pages;
+                       zone_enum = ZONE_MOVABLE;
                }
-               if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
-                       continue;
-               nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
-                          zone_end_pfn - start_pfn : size_pages;
-               rc = __add_pages(nid, zone, start_pfn, nr_pages);
+               rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
+                                start_pfn, nr_pages);
                if (rc)
                        break;
                start_pfn += nr_pages;
                size_pages -= nr_pages;
-               if (!size_pages)
-                       break;
        }
        if (rc)
                vmem_remove_mapping(start, size);