Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: Use bootmem ontop of lmb

Rework the bootmem allocator to use the lmb framework.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>

Authored by Matt Fleming; committed by Paul Mundt.
Commit c601a51a (parent 47220f62)

+61 -21
+1
arch/sh/Kconfig
···
 	select EMBEDDED
 	select HAVE_CLK
 	select HAVE_IDE
+	select HAVE_LMB
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_IOREMAP_PROT if MMU
+6
arch/sh/include/asm/lmb.h
··· (new file)
+#ifndef __ASM_SH_LMB_H
+#define __ASM_SH_LMB_H
+
+#define LMB_REAL_LIMIT	0
+
+#endif /* __ASM_SH_LMB_H */
+54 -21
arch/sh/kernel/setup.c
···
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/lmb.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
···
 void __init setup_bootmem_allocator(unsigned long free_pfn)
 {
 	unsigned long bootmap_size;
+	unsigned long bootmap_pages, bootmem_paddr;
+	u64 total_pages = (lmb_end_of_DRAM() - __MEMORY_START) >> PAGE_SHIFT;
+	int i;
+
+	bootmap_pages = bootmem_bootmap_pages(total_pages);
+
+	bootmem_paddr = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
 
 	/*
 	 * Find a proper area for the bootmem bitmap. After this
 	 * bootstrap step all allocations (until the page allocator
 	 * is intact) must be done via bootmem_alloc().
 	 */
-	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
+	bootmap_size = init_bootmem_node(NODE_DATA(0),
+					 bootmem_paddr >> PAGE_SHIFT,
 					 min_low_pfn, max_low_pfn);
 
-	__add_active_range(0, min_low_pfn, max_low_pfn);
+	/* Add active regions with valid PFNs. */
+	for (i = 0; i < lmb.memory.cnt; i++) {
+		unsigned long start_pfn, end_pfn;
+		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		__add_active_range(0, start_pfn, end_pfn);
+	}
+
+	/*
+	 * Add all physical memory to the bootmem map and mark each
+	 * area as present.
+	 */
 	register_bootmem_low_pages();
 
-	node_set_online(0);
-
-	/*
-	 * Reserve the kernel text and
-	 * Reserve the bootmem bitmap. We do this in two steps (first step
-	 * was init_bootmem()), because this catches the (definitely buggy)
-	 * case of us accidentally initializing the bootmem allocator with
-	 * an invalid RAM area.
-	 */
-	reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
-			(PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
-			(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
-			BOOTMEM_DEFAULT);
-
-	/*
-	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
-	 */
-	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
+	/* Reserve the sections we're already using. */
+	for (i = 0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb.reserved.region[i].base,
+				lmb_size_bytes(&lmb.reserved, i),
 				BOOTMEM_DEFAULT);
+
+	node_set_online(0);
 
 	sparse_memory_present_with_active_regions(0);
···
 static void __init setup_memory(void)
 {
 	unsigned long start_pfn;
+	u64 base = min_low_pfn << PAGE_SHIFT;
+	u64 size = (max_low_pfn << PAGE_SHIFT) - base;
 
 	/*
 	 * Partially used pages are not usable - thus
 	 * we are rounding upwards:
 	 */
 	start_pfn = PFN_UP(__pa(_end));
+
+	lmb_add(base, size);
+
+	/*
+	 * Reserve the kernel text and
+	 * Reserve the bootmem bitmap. We do this in two steps (first step
+	 * was init_bootmem()), because this catches the (definitely buggy)
+	 * case of us accidentally initializing the bootmem allocator with
+	 * an invalid RAM area.
+	 */
+	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
+		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
+
+	/*
+	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
+	 */
+	if (CONFIG_ZERO_PAGE_OFFSET != 0)
+		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+
+	lmb_analyze();
+	lmb_dump_all();
+
 	setup_bootmem_allocator(start_pfn);
 }
 #else
···
 	nodes_clear(node_online_map);
 
 	/* Setup bootmem with available RAM */
+	lmb_init();
 	setup_memory();
 	sparse_init();