Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/mm: make arch_add_memory() NUMA aware

With NUMA support for s390, arch_add_memory() needs to respect the nid
parameter.

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Gerald Schaefer; committed by Martin Schwidefsky.
Commit 199071f1 (parent ecf46abd).

Diffstat: 19 insertions(+), 19 deletions(-)
arch/s390/mm/init.c
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/initrd.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -171,37 +170,35 @@
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size)
 {
-	unsigned long zone_start_pfn, zone_end_pfn, nr_pages;
+	unsigned long normal_end_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	unsigned long dma_end_pfn = PFN_DOWN(MAX_DMA_ADDRESS);
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
-	struct zone *zone;
-	int rc;
+	unsigned long nr_pages;
+	int rc, zone_enum;
 
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
-	for_each_zone(zone) {
-		if (zone_idx(zone) != ZONE_MOVABLE) {
-			/* Add range within existing zone limits */
-			zone_start_pfn = zone->zone_start_pfn;
-			zone_end_pfn = zone->zone_start_pfn +
-				       zone->spanned_pages;
+
+	while (size_pages > 0) {
+		if (start_pfn < dma_end_pfn) {
+			nr_pages = (start_pfn + size_pages > dma_end_pfn) ?
+				   dma_end_pfn - start_pfn : size_pages;
+			zone_enum = ZONE_DMA;
+		} else if (start_pfn < normal_end_pfn) {
+			nr_pages = (start_pfn + size_pages > normal_end_pfn) ?
+				   normal_end_pfn - start_pfn : size_pages;
+			zone_enum = ZONE_NORMAL;
 		} else {
-			/* Add remaining range to ZONE_MOVABLE */
-			zone_start_pfn = start_pfn;
-			zone_end_pfn = start_pfn + size_pages;
+			nr_pages = size_pages;
+			zone_enum = ZONE_MOVABLE;
 		}
-		if (start_pfn < zone_start_pfn || start_pfn >= zone_end_pfn)
-			continue;
-		nr_pages = (start_pfn + size_pages > zone_end_pfn) ?
-			   zone_end_pfn - start_pfn : size_pages;
-		rc = __add_pages(nid, zone, start_pfn, nr_pages);
+		rc = __add_pages(nid, NODE_DATA(nid)->node_zones + zone_enum,
+				 start_pfn, nr_pages);
 		if (rc)
 			break;
 		start_pfn += nr_pages;
 		size_pages -= nr_pages;
-		if (!size_pages)
-			break;
 	}
 	if (rc)
 		vmem_remove_mapping(start, size);