[S390] revert add_active_range() usage patch.

Commit 7676bef9c183fd573822cac9992927ef596d584c breaks DCSS support on
s390. DCSS needs initialized struct pages to work. With the usage of
add_active_range() only the struct pages for physically present pages
are initialized.
This could be fixed if the DCSS driver initialized the struct pages
itself, but that does not work either, because the mem_map array does
not include holes after the last present memory area and therefore
there is nothing that could be initialized.
To fix this and to avoid some dirty hacks, revert this patch for now.
It will be added again later when we move to a virtual mem_map.

Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens and committed by Martin Schwidefsky (bcc8bcb1, d1ed6a3e).

4 files changed, 63 insertions(+), 26 deletions(-)

arch/s390/Kconfig (-3)
···
           This allows you to specify the maximum frame size a function may
           have without the compiler complaining about it.
 
-config ARCH_POPULATES_NODE_MAP
-        def_bool y
-
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"

arch/s390/defconfig (-1)
···
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set

arch/s390/kernel/setup.c (+41 -12)
···
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
···
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+        if (end <= dma_pfn)
+                zholes_size[ZONE_DMA] += end - start + 1;
+        else if (start > dma_pfn)
+                zholes_size[ZONE_NORMAL] += end - start + 1;
+        else {
+                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+                zholes_size[ZONE_NORMAL] += end - dma_pfn;
+        }
+}
+
 static int __init early_parse_mem(char *p)
 {
         memory_end = memparse(p, &p);
···
 {
         unsigned long bootmap_size;
         unsigned long start_pfn, end_pfn, init_pfn;
+        unsigned long last_rw_end;
         int i;
 
         /*
···
         /*
          * Register RAM areas with the bootmem allocator.
          */
+        last_rw_end = start_pfn;
 
         for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-                unsigned long start_chunk, end_chunk, pfn;
+                unsigned long start_chunk, end_chunk;
 
                 if (memory_chunk[i].type != CHUNK_READ_WRITE)
                         continue;
-                start_chunk = PFN_DOWN(memory_chunk[i].addr);
-                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-                end_chunk = min(end_chunk, end_pfn);
-                if (start_chunk >= end_chunk)
-                        continue;
-                add_active_range(0, start_chunk, end_chunk);
-                pfn = max(start_chunk, start_pfn);
-                for (; pfn <= end_chunk; pfn++)
-                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+                start_chunk >>= PAGE_SHIFT;
+                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+                end_chunk >>= PAGE_SHIFT;
+                if (start_chunk < start_pfn)
+                        start_chunk = start_pfn;
+                if (end_chunk > end_pfn)
+                        end_chunk = end_pfn;
+                if (start_chunk < end_chunk) {
+                        /* Initialize storage key for RAM pages */
+                        for (init_pfn = start_chunk ; init_pfn < end_chunk;
+                             init_pfn++)
+                                page_set_storage_key(init_pfn << PAGE_SHIFT,
+                                                     PAGE_DEFAULT_KEY);
+                        free_bootmem(start_chunk << PAGE_SHIFT,
+                                     (end_chunk - start_chunk) << PAGE_SHIFT);
+                        if (last_rw_end < start_chunk)
+                                add_memory_hole(last_rw_end, start_chunk - 1);
+                        last_rw_end = end_chunk;
+                }
         }
 
         psw_set_key(PAGE_DEFAULT_KEY);
 
-        free_bootmem_with_active_regions(0, max_pfn);
-        reserve_bootmem(0, PFN_PHYS(start_pfn));
+        if (last_rw_end < end_pfn - 1)
+                add_memory_hole(last_rw_end, end_pfn - 1);
 
         /*
          * Reserve the bootmem bitmap itself as well. We do this in two
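
For reference, the add_memory_hole() accounting that setup.c regains above splits an inclusive PFN range at the DMA boundary and charges each part to the matching zone. Below is a minimal user-space sketch of just that arithmetic; in the kernel, dma_pfn is derived from MAX_DMA_ADDRESS >> PAGE_SHIFT rather than passed in, and the PFN values and the assumed 2 GB / 4 KB-page boundary are invented for illustration, not taken from the patch.

/* Stand-alone illustration of the restored zone-hole accounting. */
#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

static unsigned long zholes_size[MAX_NR_ZONES];

/* Charge the inclusive PFN range [start, end] as a hole, split at dma_pfn. */
static void add_memory_hole(unsigned long start, unsigned long end,
                            unsigned long dma_pfn)
{
        if (end <= dma_pfn)
                zholes_size[ZONE_DMA] += end - start + 1;
        else if (start > dma_pfn)
                zholes_size[ZONE_NORMAL] += end - start + 1;
        else {
                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
                zholes_size[ZONE_NORMAL] += end - dma_pfn;
        }
}

int main(void)
{
        /* Assumed boundary for the example: 2 GB with 4 KB pages. */
        unsigned long dma_pfn = 0x80000;

        add_memory_hole(0x10000, 0x1ffff, dma_pfn);     /* hole below the boundary */
        add_memory_hole(0x70000, 0x9ffff, dma_pfn);     /* hole straddling it */

        printf("ZONE_DMA holes:    %lu pages\n", zholes_size[ZONE_DMA]);
        printf("ZONE_NORMAL holes: %lu pages\n", zholes_size[ZONE_NORMAL]);
        return 0;
}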

arch/s390/mm/init.c (+22 -10)
···
         printk("%d pages swap cached\n",cached);
 }
 
+extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
···
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
+        unsigned long zones_size[MAX_NR_ZONES];
 
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = max_low_pfn;
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        memset(zones_size, 0, sizeof(zones_size));
+        zones_size[ZONE_DMA] = max_low_pfn;
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+                            zholes_size);
 
         /* unmap whole virtual address space */
 
···
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
         static const int ssm_mask = 0x04000000L;
+        unsigned long zones_size[MAX_NR_ZONES];
+        unsigned long dma_pfn, high_pfn;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+        memset(zones_size, 0, sizeof(zones_size));
+        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+        high_pfn = max_low_pfn;
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        if (dma_pfn > high_pfn)
+                zones_size[ZONE_DMA] = high_pfn;
+        else {
+                zones_size[ZONE_DMA] = dma_pfn;
+                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+        }
+
+        /* Initialize mem_map[]. */
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
         /*
          * map whole physical memory to virtual memory (identity mapping)
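
To make the effect of the two arrays visible: zones_size[] gives free_area_init_node() the full span up to max_low_pfn, so struct pages are created even for holes (which is what DCSS relies on), while zholes_size[] only reduces the count of present pages. The following user-space sketch just prints that relationship; all numbers are invented for the example and the assumed 2 GB DMA boundary is illustrative only.

/* Illustration of spanned vs. present pages per zone. */
#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
        unsigned long max_low_pfn = 0x100000;   /* pretend 4 GB of RAM, 4 KB pages */
        unsigned long dma_pfn = 0x80000;        /* assumed 2 GB DMA boundary */
        unsigned long zones_size[MAX_NR_ZONES] = { 0 };
        /* Example hole counts; in the kernel these come from add_memory_hole(). */
        unsigned long zholes_size[MAX_NR_ZONES] = { 0x1000, 0x40000 };
        int zone;

        /* Same zone split as the 64-bit paging_init() above. */
        if (dma_pfn > max_low_pfn)
                zones_size[ZONE_DMA] = max_low_pfn;
        else {
                zones_size[ZONE_DMA] = dma_pfn;
                zones_size[ZONE_NORMAL] = max_low_pfn - dma_pfn;
        }

        for (zone = ZONE_DMA; zone < MAX_NR_ZONES; zone++)
                printf("zone %d: %lu pfns spanned (struct pages initialized), "
                       "%lu present\n", zone, zones_size[zone],
                       zones_size[zone] - zholes_size[zone]);
        return 0;
}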