[Blackfin] arch: move the init sections to the end of memory

Move the init sections to the end of memory so that after they
are freed, run time memory is all contiguous - this should help decrease
memory fragmentation.

When doing this, we also pack some of the other sections a little closer
together, to make sure we don't waste memory. To make this happen,
we need to rename the .data.init_task section to .init_task.data, so
it doesn't get picked up by the linker script glob.

Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>

authored by Mike Frysinger and committed by Bryan Wu b7627acc 80f31c8a

+36 -27
+1 -1
arch/blackfin/kernel/init_task.c
··· 57 57 * "init_task" linker map entry. 58 58 */ 59 59 union thread_union init_thread_union 60 - __attribute__ ((__section__(".data.init_task"))) = { 60 + __attribute__ ((__section__(".init_task.data"))) = { 61 61 INIT_THREAD_INFO(init_task)};
+7 -7
arch/blackfin/kernel/setup.c
··· 427 427 static __init void memory_setup(void) 428 428 { 429 429 _rambase = (unsigned long)_stext; 430 - _ramstart = (unsigned long)__bss_stop; 430 + _ramstart = (unsigned long)_end; 431 431 432 432 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 433 433 console_init(); ··· 489 489 } 490 490 491 491 /* Relocate MTD image to the top of memory after the uncached memory area */ 492 - dma_memcpy((char *)memory_end, __bss_stop, mtd_size); 492 + dma_memcpy((char *)memory_end, _end, mtd_size); 493 493 494 494 memory_mtd_start = memory_end; 495 495 _ebss = memory_mtd_start; /* define _ebss for compatible */ ··· 528 528 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); 529 529 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); 530 530 531 - printk( KERN_INFO "Memory map:\n" 531 + printk(KERN_INFO "Memory map:\n" 532 532 KERN_INFO " text = 0x%p-0x%p\n" 533 533 KERN_INFO " rodata = 0x%p-0x%p\n" 534 + KERN_INFO " bss = 0x%p-0x%p\n" 534 535 KERN_INFO " data = 0x%p-0x%p\n" 535 536 KERN_INFO " stack = 0x%p-0x%p\n" 536 537 KERN_INFO " init = 0x%p-0x%p\n" 537 - KERN_INFO " bss = 0x%p-0x%p\n" 538 538 KERN_INFO " available = 0x%p-0x%p\n" 539 539 #ifdef CONFIG_MTD_UCLINUX 540 540 KERN_INFO " rootfs = 0x%p-0x%p\n" ··· 544 544 #endif 545 545 , _stext, _etext, 546 546 __start_rodata, __end_rodata, 547 + __bss_start, __bss_stop, 547 548 _sdata, _edata, 548 549 (void *)&init_thread_union, 549 550 (void *)((int)(&init_thread_union) + 0x2000), 550 - __init_begin, __init_end, 551 - __bss_start, __bss_stop, 552 - (void *)_ramstart, (void *)memory_end 551 + __init_begin, __init_end, 552 + (void *)_ramstart, (void *)memory_end 553 553 #ifdef CONFIG_MTD_UCLINUX 554 554 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) 555 555 #endif
+28 -19
arch/blackfin/kernel/vmlinux.lds.S
··· 41 41 SECTIONS 42 42 { 43 43 . = CONFIG_BOOT_LOAD; 44 + /* Neither the text, ro_data or bss section need to be aligned 45 + * So pack them back to back 46 + */ 44 47 .text : 45 48 { 46 49 __text = .; ··· 61 58 *(__ex_table) 62 59 ___stop___ex_table = .; 63 60 64 - . = ALIGN(4); 65 61 __etext = .; 66 62 } 67 63 68 - RO_DATA(PAGE_SIZE) 64 + /* Just in case the first read only is a 32-bit access */ 65 + RO_DATA(4) 66 + 67 + .bss : 68 + { 69 + . = ALIGN(4); 70 + ___bss_start = .; 71 + *(.bss .bss.*) 72 + *(COMMON) 73 + ___bss_stop = .; 74 + } 69 75 70 76 .data : 71 77 { 72 - /* make sure the init_task is aligned to the 73 - * kernel thread size so we can locate the kernel 74 - * stack properly and quickly. 75 - */ 76 78 __sdata = .; 77 - . = ALIGN(THREAD_SIZE); 78 - *(.data.init_task) 79 - 79 + /* This gets done first, so the glob doesn't suck it in */ 80 80 . = ALIGN(32); 81 81 *(.data.cacheline_aligned) 82 82 ··· 87 81 *(.data.*) 88 82 CONSTRUCTORS 89 83 84 + /* make sure the init_task is aligned to the 85 + * kernel thread size so we can locate the kernel 86 + * stack properly and quickly. 87 + */ 90 88 . = ALIGN(THREAD_SIZE); 89 + *(.init_task.data) 90 + 91 91 __edata = .; 92 92 } 93 93 94 + /* The init section should be last, so when we free it, it goes into 95 + * the general memory pool, and (hopefully) will decrease fragmentation 96 + * a tiny bit. The init section has a _requirement_ that it be 97 + * PAGE_SIZE aligned 98 + */ 99 + . = ALIGN(PAGE_SIZE); 94 100 ___init_begin = .; 95 101 96 102 .init.text : ··· 197 179 . = ALIGN(PAGE_SIZE); 198 180 ___init_end = .; 199 181 200 - .bss : 201 - { 202 - . = ALIGN(4); 203 - ___bss_start = .; 204 - *(.bss .bss.*) 205 - *(COMMON) 206 - . = ALIGN(4); 207 - ___bss_stop = .; 208 - __end = .; 209 - } 182 + __end =.; 210 183 211 184 STABS_DEBUG 212 185