Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: drop sysmem and switch to memblock

Memblock is the standard kernel boot-time memory tracker/allocator. Use
it instead of the custom sysmem allocator. This allows using kmemleak,
CMA and device tree memory reservation.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+40 -292
+3
arch/xtensa/Kconfig
··· 13 13 select GENERIC_IRQ_SHOW 14 14 select GENERIC_PCI_IOMAP 15 15 select GENERIC_SCHED_CLOCK 16 + select HAVE_DEBUG_KMEMLEAK 16 17 select HAVE_DMA_API_DEBUG 17 18 select HAVE_EXIT_THREAD 18 19 select HAVE_FUNCTION_TRACER 19 20 select HAVE_FUTEX_CMPXCHG if !MMU 20 21 select HAVE_HW_BREAKPOINT if PERF_EVENTS 21 22 select HAVE_IRQ_TIME_ACCOUNTING 23 + select HAVE_MEMBLOCK 22 24 select HAVE_OPROFILE 23 25 select HAVE_PERF_EVENTS 24 26 select IRQ_DOMAIN 25 27 select MODULES_USE_ELF_RELA 28 + select NO_BOOTMEM 26 29 select PERF_USE_VMALLOC 27 30 select VIRT_TO_BUS 28 31 help
+1 -20
arch/xtensa/include/asm/sysmem.h
··· 11 11 #ifndef _XTENSA_SYSMEM_H 12 12 #define _XTENSA_SYSMEM_H 13 13 14 - #define SYSMEM_BANKS_MAX 31 14 + #include <linux/memblock.h> 15 15 16 - struct meminfo { 17 - unsigned long start; 18 - unsigned long end; 19 - }; 20 - 21 - /* 22 - * Bank array is sorted by .start. 23 - * Banks don't overlap and there's at least one page gap 24 - * between adjacent bank entries. 25 - */ 26 - struct sysmem_info { 27 - int nr_banks; 28 - struct meminfo bank[SYSMEM_BANKS_MAX]; 29 - }; 30 - 31 - extern struct sysmem_info sysmem; 32 - 33 - int add_sysmem_bank(unsigned long start, unsigned long end); 34 - int mem_reserve(unsigned long, unsigned long, int); 35 16 void bootmem_init(void); 36 17 void zones_init(void); 37 18
+21 -15
arch/xtensa/kernel/setup.c
··· 7 7 * 8 8 * Copyright (C) 1995 Linus Torvalds 9 9 * Copyright (C) 2001 - 2005 Tensilica Inc. 10 + * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. 10 11 * 11 12 * Chris Zankel <chris@zankel.net> 12 13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> ··· 25 24 #include <linux/percpu.h> 26 25 #include <linux/clk-provider.h> 27 26 #include <linux/cpu.h> 27 + #include <linux/of.h> 28 28 #include <linux/of_fdt.h> 29 29 #include <linux/of_platform.h> 30 30 ··· 116 114 if (mi->type != MEMORY_TYPE_CONVENTIONAL) 117 115 return -1; 118 116 119 - return add_sysmem_bank(mi->start, mi->end); 117 + return memblock_add(mi->start, mi->end - mi->start); 120 118 } 121 119 122 120 __tagtable(BP_TAG_MEMORY, parse_tag_mem); ··· 230 228 void __init early_init_dt_add_memory_arch(u64 base, u64 size) 231 229 { 232 230 size &= PAGE_MASK; 233 - add_sysmem_bank(base, base + size); 231 + memblock_add(base, size); 234 232 } 235 233 236 234 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) ··· 442 440 early_initcall(check_s32c1i); 443 441 #endif /* CONFIG_S32C1I_SELFTEST */ 444 442 443 + static inline int mem_reserve(unsigned long start, unsigned long end) 444 + { 445 + return memblock_reserve(start, end - start); 446 + } 445 447 446 448 void __init setup_arch(char **cmdline_p) 447 449 { ··· 457 451 #ifdef CONFIG_BLK_DEV_INITRD 458 452 if (initrd_start < initrd_end) { 459 453 initrd_is_mapped = mem_reserve(__pa(initrd_start), 460 - __pa(initrd_end), 0) == 0; 454 + __pa(initrd_end)) == 0; 461 455 initrd_below_start_ok = 1; 462 456 } else { 463 457 initrd_start = 0; 464 458 } 465 459 #endif 466 460 467 - mem_reserve(__pa(&_stext),__pa(&_end), 1); 461 + mem_reserve(__pa(&_stext), __pa(&_end)); 468 462 469 463 mem_reserve(__pa(&_WindowVectors_text_start), 470 - __pa(&_WindowVectors_text_end), 0); 464 + __pa(&_WindowVectors_text_end)); 471 465 472 466 mem_reserve(__pa(&_DebugInterruptVector_literal_start), 473 - __pa(&_DebugInterruptVector_text_end), 0); 467 + 
__pa(&_DebugInterruptVector_text_end)); 474 468 475 469 mem_reserve(__pa(&_KernelExceptionVector_literal_start), 476 - __pa(&_KernelExceptionVector_text_end), 0); 470 + __pa(&_KernelExceptionVector_text_end)); 477 471 478 472 mem_reserve(__pa(&_UserExceptionVector_literal_start), 479 - __pa(&_UserExceptionVector_text_end), 0); 473 + __pa(&_UserExceptionVector_text_end)); 480 474 481 475 mem_reserve(__pa(&_DoubleExceptionVector_literal_start), 482 - __pa(&_DoubleExceptionVector_text_end), 0); 476 + __pa(&_DoubleExceptionVector_text_end)); 483 477 484 478 #if XCHAL_EXCM_LEVEL >= 2 485 479 mem_reserve(__pa(&_Level2InterruptVector_text_start), 486 - __pa(&_Level2InterruptVector_text_end), 0); 480 + __pa(&_Level2InterruptVector_text_end)); 487 481 #endif 488 482 #if XCHAL_EXCM_LEVEL >= 3 489 483 mem_reserve(__pa(&_Level3InterruptVector_text_start), 490 - __pa(&_Level3InterruptVector_text_end), 0); 484 + __pa(&_Level3InterruptVector_text_end)); 491 485 #endif 492 486 #if XCHAL_EXCM_LEVEL >= 4 493 487 mem_reserve(__pa(&_Level4InterruptVector_text_start), 494 - __pa(&_Level4InterruptVector_text_end), 0); 488 + __pa(&_Level4InterruptVector_text_end)); 495 489 #endif 496 490 #if XCHAL_EXCM_LEVEL >= 5 497 491 mem_reserve(__pa(&_Level5InterruptVector_text_start), 498 - __pa(&_Level5InterruptVector_text_end), 0); 492 + __pa(&_Level5InterruptVector_text_end)); 499 493 #endif 500 494 #if XCHAL_EXCM_LEVEL >= 6 501 495 mem_reserve(__pa(&_Level6InterruptVector_text_start), 502 - __pa(&_Level6InterruptVector_text_end), 0); 496 + __pa(&_Level6InterruptVector_text_end)); 503 497 #endif 504 498 505 499 #ifdef CONFIG_SMP 506 500 mem_reserve(__pa(&_SecondaryResetVector_text_start), 507 - __pa(&_SecondaryResetVector_text_end), 0); 501 + __pa(&_SecondaryResetVector_text_end)); 508 502 #endif 509 503 parse_early_param(); 510 504 bootmem_init();
+15 -257
arch/xtensa/mm/init.c
··· 8 8 * for more details. 9 9 * 10 10 * Copyright (C) 2001 - 2005 Tensilica Inc. 11 - * Copyright (C) 2014 Cadence Design Systems Inc. 11 + * Copyright (C) 2014 - 2016 Cadence Design Systems Inc. 12 12 * 13 13 * Chris Zankel <chris@zankel.net> 14 14 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> ··· 31 31 #include <asm/sections.h> 32 32 #include <asm/sysmem.h> 33 33 34 - struct sysmem_info sysmem __initdata; 35 - 36 - static void __init sysmem_dump(void) 37 - { 38 - unsigned i; 39 - 40 - pr_debug("Sysmem:\n"); 41 - for (i = 0; i < sysmem.nr_banks; ++i) 42 - pr_debug(" 0x%08lx - 0x%08lx (%ldK)\n", 43 - sysmem.bank[i].start, sysmem.bank[i].end, 44 - (sysmem.bank[i].end - sysmem.bank[i].start) >> 10); 45 - } 46 - 47 - /* 48 - * Find bank with maximal .start such that bank.start <= start 49 - */ 50 - static inline struct meminfo * __init find_bank(unsigned long start) 51 - { 52 - unsigned i; 53 - struct meminfo *it = NULL; 54 - 55 - for (i = 0; i < sysmem.nr_banks; ++i) 56 - if (sysmem.bank[i].start <= start) 57 - it = sysmem.bank + i; 58 - else 59 - break; 60 - return it; 61 - } 62 - 63 - /* 64 - * Move all memory banks starting at 'from' to a new place at 'to', 65 - * adjust nr_banks accordingly. 66 - * Both 'from' and 'to' must be inside the sysmem.bank. 67 - * 68 - * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank). 69 - */ 70 - static int __init move_banks(struct meminfo *to, struct meminfo *from) 71 - { 72 - unsigned n = sysmem.nr_banks - (from - sysmem.bank); 73 - 74 - if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX) 75 - return -ENOMEM; 76 - if (to != from) 77 - memmove(to, from, n * sizeof(struct meminfo)); 78 - sysmem.nr_banks += to - from; 79 - return 0; 80 - } 81 - 82 - /* 83 - * Add new bank to sysmem. Resulting sysmem is the union of bytes of the 84 - * original sysmem and the new bank. 
85 - * 86 - * Returns: 0 (success), < 0 (error) 87 - */ 88 - int __init add_sysmem_bank(unsigned long start, unsigned long end) 89 - { 90 - unsigned i; 91 - struct meminfo *it = NULL; 92 - unsigned long sz; 93 - unsigned long bank_sz = 0; 94 - 95 - if (start == end || 96 - (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) { 97 - pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n", 98 - start, end - start); 99 - return -EINVAL; 100 - } 101 - 102 - start = PAGE_ALIGN(start); 103 - end &= PAGE_MASK; 104 - sz = end - start; 105 - 106 - it = find_bank(start); 107 - 108 - if (it) 109 - bank_sz = it->end - it->start; 110 - 111 - if (it && bank_sz >= start - it->start) { 112 - if (end - it->start > bank_sz) 113 - it->end = end; 114 - else 115 - return 0; 116 - } else { 117 - if (!it) 118 - it = sysmem.bank; 119 - else 120 - ++it; 121 - 122 - if (it - sysmem.bank < sysmem.nr_banks && 123 - it->start - start <= sz) { 124 - it->start = start; 125 - if (it->end - it->start < sz) 126 - it->end = end; 127 - else 128 - return 0; 129 - } else { 130 - if (move_banks(it + 1, it) < 0) { 131 - pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n", 132 - start, end - start); 133 - return -EINVAL; 134 - } 135 - it->start = start; 136 - it->end = end; 137 - return 0; 138 - } 139 - } 140 - sz = it->end - it->start; 141 - for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i) 142 - if (sysmem.bank[i].start - it->start <= sz) { 143 - if (sz < sysmem.bank[i].end - it->start) 144 - it->end = sysmem.bank[i].end; 145 - } else { 146 - break; 147 - } 148 - 149 - move_banks(it + 1, sysmem.bank + i); 150 - return 0; 151 - } 152 - 153 - /* 154 - * mem_reserve(start, end, must_exist) 155 - * 156 - * Reserve some memory from the memory pool. 157 - * If must_exist is set and a part of the region being reserved does not exist 158 - * memory map is not altered. 
159 - * 160 - * Parameters: 161 - * start Start of region, 162 - * end End of region, 163 - * must_exist Must exist in memory pool. 164 - * 165 - * Returns: 166 - * 0 (success) 167 - * < 0 (error) 168 - */ 169 - 170 - int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) 171 - { 172 - struct meminfo *it; 173 - struct meminfo *rm = NULL; 174 - unsigned long sz; 175 - unsigned long bank_sz = 0; 176 - 177 - start = start & PAGE_MASK; 178 - end = PAGE_ALIGN(end); 179 - sz = end - start; 180 - if (!sz) 181 - return -EINVAL; 182 - 183 - it = find_bank(start); 184 - 185 - if (it) 186 - bank_sz = it->end - it->start; 187 - 188 - if ((!it || end - it->start > bank_sz) && must_exist) { 189 - pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n", 190 - start, end); 191 - return -EINVAL; 192 - } 193 - 194 - if (it && start - it->start <= bank_sz) { 195 - if (start == it->start) { 196 - if (end - it->start < bank_sz) { 197 - it->start = end; 198 - return 0; 199 - } else { 200 - rm = it; 201 - } 202 - } else { 203 - it->end = start; 204 - if (end - it->start < bank_sz) 205 - return add_sysmem_bank(end, 206 - it->start + bank_sz); 207 - ++it; 208 - } 209 - } 210 - 211 - if (!it) 212 - it = sysmem.bank; 213 - 214 - for (; it < sysmem.bank + sysmem.nr_banks; ++it) { 215 - if (it->end - start <= sz) { 216 - if (!rm) 217 - rm = it; 218 - } else { 219 - if (it->start - start < sz) 220 - it->start = end; 221 - break; 222 - } 223 - } 224 - 225 - if (rm) 226 - move_banks(rm, it); 227 - 228 - return 0; 229 - } 230 - 231 - 232 34 /* 233 35 * Initialize the bootmem system and give it all low memory we have available. 234 36 */ 235 37 236 38 void __init bootmem_init(void) 237 39 { 238 - unsigned long pfn; 239 - unsigned long bootmap_start, bootmap_size; 240 - int i; 241 - 242 - /* Reserve all memory below PLATFORM_DEFAULT_MEM_START, as memory 40 + /* Reserve all memory below PHYS_OFFSET, as memory 243 41 * accounting doesn't work for pages below that address. 
244 42 * 245 - * If PLATFORM_DEFAULT_MEM_START is zero reserve page at address 0: 43 + * If PHYS_OFFSET is zero reserve page at address 0: 246 44 * successful allocations should never return NULL. 247 45 */ 248 - if (PLATFORM_DEFAULT_MEM_START) 46 + if (PHYS_OFFSET) 249 - mem_reserve(0, PLATFORM_DEFAULT_MEM_START, 0); 47 + memblock_reserve(0, PHYS_OFFSET); 250 48 else 251 - mem_reserve(0, 1, 0); 49 + memblock_reserve(0, 1); 252 50
*/ 60 + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); 272 61 273 - bootmap_size = bootmem_bootmap_pages(max_low_pfn - min_low_pfn); 274 - bootmap_size <<= PAGE_SHIFT; 275 - bootmap_start = ~0; 276 - 277 - for (i=0; i<sysmem.nr_banks; i++) 278 - if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) { 279 - bootmap_start = sysmem.bank[i].start; 280 - break; 281 - } 282 - 283 - if (bootmap_start == ~0UL) 284 - panic("Cannot find %ld bytes for bootmap\n", bootmap_size); 285 - 286 - /* Reserve the bootmem bitmap area */ 287 - 288 - mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1); 289 - bootmap_size = init_bootmem_node(NODE_DATA(0), 290 - bootmap_start >> PAGE_SHIFT, 291 - min_low_pfn, 292 - max_low_pfn); 293 - 294 - /* Add all remaining memory pieces into the bootmem map */ 295 - 296 - for (i = 0; i < sysmem.nr_banks; i++) { 297 - if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { 298 - unsigned long end = min(max_low_pfn << PAGE_SHIFT, 299 - sysmem.bank[i].end); 300 - free_bootmem(sysmem.bank[i].start, 301 - end - sysmem.bank[i].start); 302 - } 303 - } 304 - 62 + memblock_dump_all(); 305 63 } 306 64 307 65 ··· 152 394 switch (*p) { 153 395 case '@': 154 396 start_at = memparse(p + 1, &p); 155 - add_sysmem_bank(start_at, start_at + mem_size); 397 + memblock_add(start_at, mem_size); 156 398 break; 157 399 158 400 case '$': 159 401 start_at = memparse(p + 1, &p); 160 - mem_reserve(start_at, start_at + mem_size, 0); 402 + memblock_reserve(start_at, mem_size); 161 403 break; 162 404 163 405 case 0: 164 - mem_reserve(mem_size, 0, 0); 406 + memblock_reserve(mem_size, -mem_size); 165 407 break; 166 408 167 409 default: