[S390] revert add_active_range() usage patch.

Commit 7676bef9c183fd573822cac9992927ef596d584c breaks DCSS support on
s390. DCSS needs initialized struct pages to work. With the usage of
add_active_range(), only the struct pages for physically present pages
are initialized.
This could be fixed if the DCSS driver initialized the struct pages
itself, but that does not work either: the mem_map array does not
include holes after the last present memory area, so there is nothing
there that could be initialized.
To fix this and to avoid dirty hacks, revert this patch for now.
It will be added again once we move to a virtual mem_map.

Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

Authored by Heiko Carstens, committed by Martin Schwidefsky (bcc8bcb1, d1ed6a3e)

4 files changed, 63 insertions(+), 26 deletions(-)
arch/s390/Kconfig (-3)

--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -236,9 +236,6 @@
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
arch/s390/defconfig (-1)

--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -119,7 +119,6 @@
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
arch/s390/kernel/setup.c (+41 -12)

--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -70,6 +70,7 @@
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -358,6 +357,21 @@
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+	if (end <= dma_pfn)
+		zholes_size[ZONE_DMA] += end - start + 1;
+	else if (start > dma_pfn)
+		zholes_size[ZONE_NORMAL] += end - start + 1;
+	else {
+		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+		zholes_size[ZONE_NORMAL] += end - dma_pfn;
+	}
+}
+
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -494,6 +478,7 @@
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
+	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -550,27 +533,39 @@
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
+	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
+		unsigned long start_chunk, end_chunk;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		add_active_range(0, start_chunk, end_chunk);
-		pfn = max(start_chunk, start_pfn);
-		for (; pfn <= end_chunk; pfn++)
-			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+		start_chunk >>= PAGE_SHIFT;
+		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+		end_chunk >>= PAGE_SHIFT;
+		if (start_chunk < start_pfn)
+			start_chunk = start_pfn;
+		if (end_chunk > end_pfn)
+			end_chunk = end_pfn;
+		if (start_chunk < end_chunk) {
+			/* Initialize storage key for RAM pages */
+			for (init_pfn = start_chunk ; init_pfn < end_chunk;
+			     init_pfn++)
+				page_set_storage_key(init_pfn << PAGE_SHIFT,
+						     PAGE_DEFAULT_KEY);
+			free_bootmem(start_chunk << PAGE_SHIFT,
+				     (end_chunk - start_chunk) << PAGE_SHIFT);
+			if (last_rw_end < start_chunk)
+				add_memory_hole(last_rw_end, start_chunk - 1);
+			last_rw_end = end_chunk;
+		}
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-	reserve_bootmem(0, PFN_PHYS(start_pfn));
+	if (last_rw_end < end_pfn - 1)
+		add_memory_hole(last_rw_end, end_pfn - 1);
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
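
For reference, the restored bootmem registration walks the read/write memory chunks and records the gaps between them in zholes_size[], splitting each gap at the DMA boundary the way add_memory_hole() above does. The standalone userspace sketch below mimics only that hole accounting (no storage keys, no free_bootmem()); the chunk layout, the 2GB DMA limit and the 1.5GB end_pfn are made-up example values, not taken from the patch.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define ZONE_DMA	0
#define ZONE_NORMAL	1

static unsigned long zholes_size[2];
static unsigned long dma_pfn = 0x80000000UL >> PAGE_SHIFT;	/* pretend 2GB DMA limit */

/* same zone split as the kernel's add_memory_hole() in the diff above */
static void add_memory_hole(unsigned long start, unsigned long end)
{
	if (end <= dma_pfn)
		zholes_size[ZONE_DMA] += end - start + 1;
	else if (start > dma_pfn)
		zholes_size[ZONE_NORMAL] += end - start + 1;
	else {
		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
		zholes_size[ZONE_NORMAL] += end - dma_pfn;
	}
}

int main(void)
{
	/* hypothetical read/write chunks: { start address, size } in bytes */
	struct { unsigned long addr, size; } chunk[] = {
		{ 0x00000000UL, 0x20000000UL },	/* 512MB at 0 */
		{ 0x40000000UL, 0x10000000UL },	/* 256MB at 1GB, hole in between */
	};
	unsigned long start_pfn = 0, end_pfn = 0x60000000UL >> PAGE_SHIFT;
	unsigned long last_rw_end = start_pfn;
	unsigned int i;

	for (i = 0; i < sizeof(chunk) / sizeof(chunk[0]); i++) {
		unsigned long start_chunk = (chunk[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long end_chunk = (chunk[i].addr + chunk[i].size) >> PAGE_SHIFT;

		/* clamp the chunk to the usable pfn range */
		if (start_chunk < start_pfn)
			start_chunk = start_pfn;
		if (end_chunk > end_pfn)
			end_chunk = end_pfn;
		if (start_chunk >= end_chunk)
			continue;
		if (last_rw_end < start_chunk)
			add_memory_hole(last_rw_end, start_chunk - 1);
		last_rw_end = end_chunk;
	}
	/* pages beyond the last chunk still get struct pages, recorded as a hole */
	if (last_rw_end < end_pfn - 1)
		add_memory_hole(last_rw_end, end_pfn - 1);

	printf("ZONE_DMA holes:    %lu pages\n", zholes_size[ZONE_DMA]);
	printf("ZONE_NORMAL holes: %lu pages\n", zholes_size[ZONE_NORMAL]);
	return 0;
}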
arch/s390/mm/init.c (+22 -10)

--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -84,6 +84,7 @@
 	printk("%d pages swap cached\n",cached);
 }
 
+extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -101,15 +100,16 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long zones_size[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = max_low_pfn;
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	memset(zones_size, 0, sizeof(zones_size));
+	zones_size[ZONE_DMA] = max_low_pfn;
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+			    zholes_size);
 
 	/* unmap whole virtual address space */
 
@@ -170,16 +168,26 @@
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+	memset(zones_size, 0, sizeof(zones_size));
+	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	if (dma_pfn > high_pfn)
+		zones_size[ZONE_DMA] = high_pfn;
+	else {
+		zones_size[ZONE_DMA] = dma_pfn;
+		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+	}
+
+	/* Initialize mem_map[]. */
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
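
Likewise, the restored paging_init() above sizes the zones all the way up to max_low_pfn and hands the accumulated zholes_size[] to free_area_init_node(), so mem_map gets struct pages even for holes after the last present memory area, which is what DCSS relies on. A small illustrative sketch of that zone split, again with made-up pfn values consistent with the example above:

#include <stdio.h>

#define ZONE_DMA	0
#define ZONE_NORMAL	1
#define MAX_NR_ZONES	2

int main(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
	unsigned long zholes_size[MAX_NR_ZONES] = { 0x30000, 0 };	/* result of the hole walk above */
	unsigned long dma_pfn = 0x80000;	/* stand-in for MAX_DMA_ADDRESS >> PAGE_SHIFT */
	unsigned long high_pfn = 0x60000;	/* stand-in for max_low_pfn */

	/* zones span everything up to max_low_pfn; holes are only subtracted */
	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* in the kernel, free_area_init_node() would now receive both arrays */
	printf("ZONE_DMA:    spanned %lu pages, present %lu pages\n",
	       zones_size[ZONE_DMA],
	       zones_size[ZONE_DMA] - zholes_size[ZONE_DMA]);
	printf("ZONE_NORMAL: spanned %lu pages, present %lu pages\n",
	       zones_size[ZONE_NORMAL],
	       zones_size[ZONE_NORMAL] - zholes_size[ZONE_NORMAL]);
	return 0;
}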