[S390] Have s390 use add_active_range() and free_area_init_nodes.

Size zones and holes in an architecture-independent manner for s390.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky (7676bef9, cb601d41).
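
The generic interface adopted here works in two steps: the architecture registers every populated page-frame range with add_active_range(), then hands only the per-zone upper PFN bounds to free_area_init_nodes(), which sizes the zones and derives the holes itself. That is what makes the hand-maintained zholes_size[]/add_memory_hole() bookkeeping below removable. A minimal sketch of the pattern, with illustrative pfn variables rather than the exact s390 code:

	/* register one populated pfn range on node 0 (s390 is non-NUMA) */
	add_active_range(0, start_pfn, end_pfn);

	/* pass only the highest pfn each zone may reach; holes are
	   derived from the active ranges registered above */
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);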

27 insertions(+), 67 deletions(-)
arch/s390/Kconfig (+3)
@@ -233,6 +233,9 @@
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
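
ARCH_POPULATES_NODE_MAP is not user-visible; def_bool y simply forces it on, and its effect is to compile the active-range machinery into the core mm code. The declarations the patch relies on are guarded in include/linux/mm.h of this era roughly as follows (a trimmed sketch of the header, not part of the patch):

	#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	extern void add_active_range(unsigned int nid, unsigned long start_pfn,
				     unsigned long end_pfn);
	extern void free_area_init_nodes(unsigned long *max_zone_pfn);
	extern void free_bootmem_with_active_regions(int nid,
						     unsigned long max_low_pfn);
	#endif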
arch/s390/defconfig (+1)
@@ -118,6 +118,7 @@
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
+CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
arch/s390/kernel/setup.c (+13, -42)
@@ -70,7 +70,6 @@
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -357,21 +358,6 @@
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -478,7 +494,6 @@
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -526,46 +543,34 @@
 #endif
 
 	/*
-	 * Initialize the boot-time allocator (with low memory only):
+	 * Initialize the boot-time allocator
 	 */
 	bootmap_size = init_bootmem(start_pfn, end_pfn);
 
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
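
In the new chunk loop only the storage-key initialization stays architecture-specific: each read-write chunk is registered with add_active_range(), and the per-chunk free_bootmem() calls plus the add_memory_hole() bookkeeping collapse into a single free_bootmem_with_active_regions() followed by reserve_bootmem() for the low memory holding the kernel. The pfn conversions come from include/linux/pfn.h, where PFN_DOWN(x) is x >> PAGE_SHIFT and PFN_PHYS(pfn) is pfn << PAGE_SHIFT. A worked example with hypothetical chunk values (4 KB pages, so PAGE_SHIFT == 12):

	unsigned long addr = 0x100000;	/* hypothetical chunk start */
	unsigned long size = 0x200000;	/* hypothetical chunk size */
	unsigned long start_chunk = PFN_DOWN(addr);			/* 0x100 */
	unsigned long end_chunk = start_chunk + PFN_DOWN(size) - 1;	/* 0x2ff */

	add_active_range(0, start_chunk, end_chunk);
	/* storage keys are still set per page: PFN_PHYS(0x100) == 0x100000 */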
arch/s390/mm/init.c (+10, -25)
@@ -82,7 +82,6 @@
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -98,16 +99,15 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = max_low_pfn;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
 	/* unmap whole virtual address space */
 
@@ -151,7 +153,6 @@
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-	return;
 }
 
 #else /* CONFIG_64BIT */
@@ -166,26 +169,16 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
@@ -224,8 +237,6 @@
 	__raw_local_irq_ssm(ssm_mask);
 
 	local_flush_tlb();
-
-	return;
 }
 #endif /* CONFIG_64BIT */
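
The semantics change along with the function name: the old zones_size[] held per-zone sizes, with holes passed separately in zholes_size[], while max_zone_pfns[] holds the highest pfn each zone may reach and holes fall out of the registered active ranges. Each zone spans from the previous entry's limit to its own, which is why the 31-bit variant sets both ZONE_DMA and ZONE_NORMAL to max_low_pfn: everything lands in ZONE_DMA and ZONE_NORMAL comes out empty. A sketch with hypothetical 64-bit numbers, assuming MAX_DMA_ADDRESS is 2 GB and 3 GB of RAM with 4 KB pages:

	/* max_low_pfn == 0xc0000 for 3 GB of RAM */
	max_zone_pfns[ZONE_DMA]    = 0x80000;	/* pfns 0x0     .. 0x7ffff */
	max_zone_pfns[ZONE_NORMAL] = 0xc0000;	/* pfns 0x80000 .. 0xbffff */
	free_area_init_nodes(max_zone_pfns);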