memblock, bootmem: Round pfn properly for memory and reserved regions

We need to round memory regions correctly -- specifically, reserved
regions must be rounded in the more expansive direction (lower limit
down, upper limit up), whereas usable memory regions must be rounded
in the more restrictive direction (lower limit up, upper limit down).

This introduces two sets of inlines:

memblock_region_memory_base_pfn()
memblock_region_memory_end_pfn()
memblock_region_reserved_base_pfn()
memblock_region_reserved_end_pfn()

Although they are antisymmetric (and therefore technically duplicates
of one another), using the distinct inlines explicitly documents the
programmer's intention.

The lack of proper rounding caused a bug on ARM, which was then found
to also affect other architectures.

Reported-by: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CB4CDFD.4020105@kernel.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Authored by Yinghai Lu and committed by H. Peter Anvin (c7fc2de0, 8e4029ee)

+29 -30
+4 -4
arch/arm/mm/init.c
@@ -182,8 +182,8 @@
	 * Reserve the memblock reserved regions in bootmem.
	 */
	for_each_memblock(reserved, reg) {
-		phys_addr_t start = memblock_region_base_pfn(reg);
-		phys_addr_t end = memblock_region_end_pfn(reg);
+		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
+		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
		if (start >= start_pfn && end <= end_pfn)
			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
					     (end - start) << PAGE_SHIFT,
@@ -251,7 +251,7 @@
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
-		memory_present(0, memblock_region_base_pfn(reg),
-			       memblock_region_end_pfn(reg));
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif
+7 -7
arch/powerpc/mm/mem.c
@@ -148,8 +148,8 @@
	int ret = -1;

	for_each_memblock(memory, reg) {
-		tstart = max(start_pfn, memblock_region_base_pfn(reg));
-		tend = min(end_pfn, memblock_region_end_pfn(reg));
+		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
@@ -195,7 +195,7 @@
	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
		add_active_range(0, start_pfn, end_pfn);
	}
@@ -236,9 +236,9 @@

	for_each_memblock(memory, reg) {
		if (prev &&
-		    memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg))
-			register_nosave_region(memblock_region_end_pfn(prev),
-					       memblock_region_base_pfn(reg));
+		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+			register_nosave_region(memblock_region_memory_end_pfn(prev),
+					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
+2 -2
arch/powerpc/mm/numa.c
@@ -811,8 +811,8 @@
		       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
+2 -2
arch/sh/mm/init.c
@@ -244,7 +244,7 @@
	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}
+2 -2
arch/sparc/mm/init_64.c
@@ -1294,7 +1294,7 @@
		if (!reg->size)
			continue;

-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
		add_active_range(0, start_pfn, end_pfn);
	}
+12 -13
include/linux/memblock.h
@@ -111,40 +111,39 @@
 */

/**
- * memblock_region_base_pfn - Return the lowest pfn intersecting with the region
+ * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
 * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
-	return reg->base >> PAGE_SHIFT;
+	return PFN_UP(reg->base);
}

/**
- * memblock_region_last_pfn - Return the highest pfn intersecting with the region
+ * memblock_region_memory_end_pfn - Return the end_pfn this region
 * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
-	return (reg->base + reg->size - 1) >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base + reg->size);
}

/**
- * memblock_region_end_pfn - Return the pfn of the first page following the region
- *                           but not intersecting it
+ * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
 * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
-	return memblock_region_last_pfn(reg) + 1;
+	return PFN_DOWN(reg->base);
}

/**
- * memblock_region_pages - Return the number of pages covering a region
+ * memblock_region_reserved_end_pfn - Return the end_pfn this region
 * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
+static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
-	return memblock_region_end_pfn(reg) - memblock_region_end_pfn(reg);
+	return PFN_UP(reg->base + reg->size);
}

#define for_each_memblock(memblock_type, region) \