xtensa: fix high memory/reserved memory collision

Xtensa memory initialization code frees high memory pages without
checking whether they are in the reserved memory regions or not. That
results in invalid value of totalram_pages and duplicate page usage by
CMA and highmem. It produces a bunch of BUGs at startup looking like
this:

BUG: Bad page state in process swapper pfn:70800
page:be60c000 count:0 mapcount:-127 mapping: (null) index:0x1
flags: 0x80000000()
raw: 80000000 00000000 00000001 ffffff80 00000000 be60c014 be60c014 0000000a
page dumped because: nonzero mapcount
Modules linked in:
CPU: 0 PID: 1 Comm: swapper Tainted: G B 4.16.0-rc1-00015-g7928b2cbe55b-dirty #23
Stack:
bd839d33 00000000 00000018 ba97b64c a106578c bd839d70 be60c000 00000000
a1378054 bd86a000 00000003 ba97b64c a1066166 bd839da0 be60c000 ffe00000
a1066b58 bd839dc0 be504000 00000000 000002f4 bd838000 00000000 0000001e
Call Trace:
[<a1065734>] bad_page+0xac/0xd0
[<a106578c>] free_pages_check_bad+0x34/0x4c
[<a1066166>] __free_pages_ok+0xae/0x14c
[<a1066b58>] __free_pages+0x30/0x64
[<a1365de5>] init_cma_reserved_pageblock+0x35/0x44
[<a13682dc>] cma_init_reserved_areas+0xf4/0x148
[<a10034b8>] do_one_initcall+0x80/0xf8
[<a1361c16>] kernel_init_freeable+0xda/0x13c
[<a125b59d>] kernel_init+0x9/0xd0
[<a1004304>] ret_from_kernel_thread+0xc/0x18

Only free high memory pages that are not reserved.

Cc: stable@vger.kernel.org
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

Changed files: 1 file changed, 63 insertions(+), 7 deletions(-)
arch/xtensa/mm/init.c
··· 79 79 free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL); 80 80 } 81 81 82 + #ifdef CONFIG_HIGHMEM 83 + static void __init free_area_high(unsigned long pfn, unsigned long end) 84 + { 85 + for (; pfn < end; pfn++) 86 + free_highmem_page(pfn_to_page(pfn)); 87 + } 88 + 89 + static void __init free_highpages(void) 90 + { 91 + unsigned long max_low = max_low_pfn; 92 + struct memblock_region *mem, *res; 93 + 94 + reset_all_zones_managed_pages(); 95 + /* set highmem page free */ 96 + for_each_memblock(memory, mem) { 97 + unsigned long start = memblock_region_memory_base_pfn(mem); 98 + unsigned long end = memblock_region_memory_end_pfn(mem); 99 + 100 + /* Ignore complete lowmem entries */ 101 + if (end <= max_low) 102 + continue; 103 + 104 + if (memblock_is_nomap(mem)) 105 + continue; 106 + 107 + /* Truncate partial highmem entries */ 108 + if (start < max_low) 109 + start = max_low; 110 + 111 + /* Find and exclude any reserved regions */ 112 + for_each_memblock(reserved, res) { 113 + unsigned long res_start, res_end; 114 + 115 + res_start = memblock_region_reserved_base_pfn(res); 116 + res_end = memblock_region_reserved_end_pfn(res); 117 + 118 + if (res_end < start) 119 + continue; 120 + if (res_start < start) 121 + res_start = start; 122 + if (res_start > end) 123 + res_start = end; 124 + if (res_end > end) 125 + res_end = end; 126 + if (res_start != start) 127 + free_area_high(start, res_start); 128 + start = res_end; 129 + if (start == end) 130 + break; 131 + } 132 + 133 + /* And now free anything which remains */ 134 + if (start < end) 135 + free_area_high(start, end); 136 + } 137 + } 138 + #else 139 + static void __init free_highpages(void) 140 + { 141 + } 142 + #endif 143 + 82 144 /* 83 145 * Initialize memory pages. 
84 146 */ 85 147 86 148 void __init mem_init(void) 87 149 { 88 - #ifdef CONFIG_HIGHMEM 89 - unsigned long tmp; 90 - 91 - reset_all_zones_managed_pages(); 92 - for (tmp = max_low_pfn; tmp < max_pfn; tmp++) 93 - free_highmem_page(pfn_to_page(tmp)); 94 - #endif 150 + free_highpages(); 95 151 96 152 max_mapnr = max_pfn - ARCH_PFN_OFFSET; 97 153 high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);