memblock, bootmem: Round pfn properly for memory and reserved regions

We need to round memory regions correctly -- specifically, we need to
round reserved regions in the more expansive direction (lower limit
down, upper limit up), whereas usable memory regions need to be
rounded in the more restrictive direction (lower limit up, upper
limit down).

This introduces two sets of inlines:

memblock_region_memory_base_pfn()
memblock_region_memory_end_pfn()
memblock_region_reserved_base_pfn()
memblock_region_reserved_end_pfn()

Although they are antisymmetric (and therefore technically
duplicates), the use of the different inlines explicitly documents
the programmer's intention.
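
To make the two rounding rules concrete, here is a minimal userspace
sketch (not part of the patch; PAGE_SHIFT is assumed to be 12 and
PFN_UP()/PFN_DOWN() are redefined locally to mirror <linux/pfn.h>):

/*
 * Standalone illustration of the rounding rules above; the macros are
 * local copies of the kernel definitions so this compiles on its own.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	/* A region that is not page aligned on either end: 0x1800..0x3800 */
	unsigned long base = 0x1800, size = 0x2000;

	/* Usable memory: shrink to the pages fully contained in the region. */
	printf("memory:   pfn %lu .. %lu\n", PFN_UP(base), PFN_DOWN(base + size));

	/* Reserved: expand to cover every page the region touches. */
	printf("reserved: pfn %lu .. %lu\n", PFN_DOWN(base), PFN_UP(base + size));
	return 0;
}

For this example region the memory view is pfn 2..3 and the reserved
view is pfn 1..4: usable memory only counts pages lying entirely
inside the region, while the reserved view covers every page the
region touches.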

The lack of proper rounding caused a bug on ARM, which was then found
to also affect other architectures.

Reported-by: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <4CB4CDFD.4020105@kernel.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>

Authored by Yinghai Lu and committed by H. Peter Anvin (c7fc2de0, 8e4029ee)

6 files changed, 29 insertions(+), 30 deletions(-)

arch/arm/mm/init.c (+4 -4)

@@ -182,8 +182,8 @@
 	 * Reserve the memblock reserved regions in bootmem.
 	 */
 	for_each_memblock(reserved, reg) {
-		phys_addr_t start = memblock_region_base_pfn(reg);
-		phys_addr_t end = memblock_region_end_pfn(reg);
+		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
+		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
 		if (start >= start_pfn && end <= end_pfn)
 			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
 					     (end - start) << PAGE_SHIFT,
@@ -251,7 +251,7 @@
 	struct memblock_region *reg;
 
 	for_each_memblock(memory, reg)
-		memory_present(0, memblock_region_base_pfn(reg),
-			       memblock_region_end_pfn(reg));
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif
arch/powerpc/mm/mem.c (+7 -7)

@@ -148,8 +148,8 @@
 	int ret = -1;
 
 	for_each_memblock(memory, reg) {
-		tstart = max(start_pfn, memblock_region_base_pfn(reg));
-		tend = min(end_pfn, memblock_region_end_pfn(reg));
+		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
 		if (tstart >= tend)
 			continue;
 		ret = (*func)(tstart, tend - tstart, arg);
@@ -195,7 +195,7 @@
 	/* Add active regions with valid PFNs */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
@@ -237,8 +237,8 @@
 	for_each_memblock(memory, reg) {
 		if (prev &&
-		    memblock_region_end_pfn(prev) < memblock_region_base_pfn(reg))
-			register_nosave_region(memblock_region_end_pfn(prev),
-					       memblock_region_base_pfn(reg));
+		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+			register_nosave_region(memblock_region_memory_end_pfn(prev),
+					       memblock_region_memory_base_pfn(reg));
 		prev = reg;
 	}
 	return 0;
arch/powerpc/mm/numa.c (+2 -2)

@@ -811,8 +811,8 @@
 		       (top_of_ram - total_ram) >> 20);
 
 	for_each_memblock(memory, reg) {
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
arch/sh/mm/init.c (+2 -2)

@@ -244,7 +244,7 @@
 	/* Add active regions with valid PFNs. */
 	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		__add_active_range(0, start_pfn, end_pfn);
 	}
arch/sparc/mm/init_64.c (+2 -2)

@@ -1294,7 +1294,7 @@
 		if (!reg->size)
 			continue;
 
-		start_pfn = memblock_region_base_pfn(reg);
-		end_pfn = memblock_region_end_pfn(reg);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
include/linux/memblock.h (+12 -13)

@@ -111,40 +111,39 @@
  */
 
 /**
- * memblock_region_base_pfn - Return the lowest pfn intersecting with the region
+ * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
  * @reg: memblock_region structure
  */
-static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
 {
-	return reg->base >> PAGE_SHIFT;
+	return PFN_UP(reg->base);
 }
 
 /**
- * memblock_region_last_pfn - Return the highest pfn intersecting with the region
+ * memblock_region_memory_end_pfn - Return the end_pfn this region
  * @reg: memblock_region structure
  */
-static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
 {
-	return (reg->base + reg->size - 1) >> PAGE_SHIFT;
+	return PFN_DOWN(reg->base + reg->size);
 }
 
 /**
- * memblock_region_end_pfn - Return the pfn of the first page following the region
- *                           but not intersecting it
+ * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
  * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
+static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
 {
-	return memblock_region_last_pfn(reg) + 1;
+	return PFN_DOWN(reg->base);
 }
 
 /**
- * memblock_region_pages - Return the number of pages covering a region
+ * memblock_region_reserved_end_pfn - Return the end_pfn this region
  * @reg: memblock_region structure
 */
-static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
+static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
 {
-	return memblock_region_end_pfn(reg) - memblock_region_end_pfn(reg);
+	return PFN_UP(reg->base + reg->size);
 }
 
 #define for_each_memblock(memblock_type, region) \
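
One casualty of the rename is memblock_region_pages(), which is dropped
rather than renamed (its old body subtracted memblock_region_end_pfn()
from itself, so it always returned 0). If a page count is needed, it
can be derived from the new inlines; the helper below is a hypothetical
sketch, not part of this patch:

/*
 * Hypothetical helper, not part of this patch: the number of whole
 * pages of usable memory inside a region, built on the new inlines.
 */
static inline unsigned long memblock_region_memory_pages(const struct memblock_region *reg)
{
	unsigned long start = memblock_region_memory_base_pfn(reg);
	unsigned long end = memblock_region_memory_end_pfn(reg);

	return end > start ? end - start : 0;
}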