[Blackfin] arch: move the init sections to the end of memory

Move the init sections to the end of memory so that after they
are freed, run time memory is all contiguous - this should help decrease
memory fragmentation.

When doing this, we also pack some of the other sections a little closer
together, to make sure we don't waste memory. To make this happen,
we need to rename the .data.init_task section to .init_task.data, so
it doesn't get picked up by the linker script glob.

Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <bryan.wu@analog.com>

authored by Mike Frysinger and committed by Bryan Wu b7627acc 80f31c8a

+36 -27
+1 -1
arch/blackfin/kernel/init_task.c
··· 57 * "init_task" linker map entry. 58 */ 59 union thread_union init_thread_union 60 - __attribute__ ((__section__(".data.init_task"))) = { 61 INIT_THREAD_INFO(init_task)};
··· 57 * "init_task" linker map entry. 58 */ 59 union thread_union init_thread_union 60 + __attribute__ ((__section__(".init_task.data"))) = { 61 INIT_THREAD_INFO(init_task)};
+7 -7
arch/blackfin/kernel/setup.c
··· 427 static __init void memory_setup(void) 428 { 429 _rambase = (unsigned long)_stext; 430 - _ramstart = (unsigned long)__bss_stop; 431 432 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 433 console_init(); ··· 489 } 490 491 /* Relocate MTD image to the top of memory after the uncached memory area */ 492 - dma_memcpy((char *)memory_end, __bss_stop, mtd_size); 493 494 memory_mtd_start = memory_end; 495 _ebss = memory_mtd_start; /* define _ebss for compatible */ ··· 528 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); 529 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); 530 531 - printk( KERN_INFO "Memory map:\n" 532 KERN_INFO " text = 0x%p-0x%p\n" 533 KERN_INFO " rodata = 0x%p-0x%p\n" 534 KERN_INFO " data = 0x%p-0x%p\n" 535 KERN_INFO " stack = 0x%p-0x%p\n" 536 KERN_INFO " init = 0x%p-0x%p\n" 537 - KERN_INFO " bss = 0x%p-0x%p\n" 538 KERN_INFO " available = 0x%p-0x%p\n" 539 #ifdef CONFIG_MTD_UCLINUX 540 KERN_INFO " rootfs = 0x%p-0x%p\n" ··· 544 #endif 545 , _stext, _etext, 546 __start_rodata, __end_rodata, 547 _sdata, _edata, 548 (void *)&init_thread_union, 549 (void *)((int)(&init_thread_union) + 0x2000), 550 - __init_begin, __init_end, 551 - __bss_start, __bss_stop, 552 - (void *)_ramstart, (void *)memory_end 553 #ifdef CONFIG_MTD_UCLINUX 554 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) 555 #endif
··· 427 static __init void memory_setup(void) 428 { 429 _rambase = (unsigned long)_stext; 430 + _ramstart = (unsigned long)_end; 431 432 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 433 console_init(); ··· 489 } 490 491 /* Relocate MTD image to the top of memory after the uncached memory area */ 492 + dma_memcpy((char *)memory_end, _end, mtd_size); 493 494 memory_mtd_start = memory_end; 495 _ebss = memory_mtd_start; /* define _ebss for compatible */ ··· 528 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20); 529 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20); 530 531 + printk(KERN_INFO "Memory map:\n" 532 KERN_INFO " text = 0x%p-0x%p\n" 533 KERN_INFO " rodata = 0x%p-0x%p\n" 534 + KERN_INFO " bss = 0x%p-0x%p\n" 535 KERN_INFO " data = 0x%p-0x%p\n" 536 KERN_INFO " stack = 0x%p-0x%p\n" 537 KERN_INFO " init = 0x%p-0x%p\n" 538 KERN_INFO " available = 0x%p-0x%p\n" 539 #ifdef CONFIG_MTD_UCLINUX 540 KERN_INFO " rootfs = 0x%p-0x%p\n" ··· 544 #endif 545 , _stext, _etext, 546 __start_rodata, __end_rodata, 547 + __bss_start, __bss_stop, 548 _sdata, _edata, 549 (void *)&init_thread_union, 550 (void *)((int)(&init_thread_union) + 0x2000), 551 + __init_begin, __init_end, 552 + (void *)_ramstart, (void *)memory_end 553 #ifdef CONFIG_MTD_UCLINUX 554 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size) 555 #endif
+28 -19
arch/blackfin/kernel/vmlinux.lds.S
··· 41 SECTIONS 42 { 43 . = CONFIG_BOOT_LOAD; 44 .text : 45 { 46 __text = .; ··· 61 *(__ex_table) 62 ___stop___ex_table = .; 63 64 - . = ALIGN(4); 65 __etext = .; 66 } 67 68 - RO_DATA(PAGE_SIZE) 69 70 .data : 71 { 72 - /* make sure the init_task is aligned to the 73 - * kernel thread size so we can locate the kernel 74 - * stack properly and quickly. 75 - */ 76 __sdata = .; 77 - . = ALIGN(THREAD_SIZE); 78 - *(.data.init_task) 79 - 80 . = ALIGN(32); 81 *(.data.cacheline_aligned) 82 ··· 87 *(.data.*) 88 CONSTRUCTORS 89 90 . = ALIGN(THREAD_SIZE); 91 __edata = .; 92 } 93 94 ___init_begin = .; 95 96 .init.text : ··· 197 . = ALIGN(PAGE_SIZE); 198 ___init_end = .; 199 200 - .bss : 201 - { 202 - . = ALIGN(4); 203 - ___bss_start = .; 204 - *(.bss .bss.*) 205 - *(COMMON) 206 - . = ALIGN(4); 207 - ___bss_stop = .; 208 - __end = .; 209 - } 210 211 STABS_DEBUG 212
··· 41 SECTIONS 42 { 43 . = CONFIG_BOOT_LOAD; 44 + /* Neither the text, ro_data or bss section need to be aligned 45 + * So pack them back to back 46 + */ 47 .text : 48 { 49 __text = .; ··· 58 *(__ex_table) 59 ___stop___ex_table = .; 60 61 __etext = .; 62 } 63 64 + /* Just in case the first read only is a 32-bit access */ 65 + RO_DATA(4) 66 + 67 + .bss : 68 + { 69 + . = ALIGN(4); 70 + ___bss_start = .; 71 + *(.bss .bss.*) 72 + *(COMMON) 73 + ___bss_stop = .; 74 + } 75 76 .data : 77 { 78 __sdata = .; 79 + /* This gets done first, so the glob doesn't suck it in */ 80 . = ALIGN(32); 81 *(.data.cacheline_aligned) 82 ··· 81 *(.data.*) 82 CONSTRUCTORS 83 84 + /* make sure the init_task is aligned to the 85 + * kernel thread size so we can locate the kernel 86 + * stack properly and quickly. 87 + */ 88 . = ALIGN(THREAD_SIZE); 89 + *(.init_task.data) 90 + 91 __edata = .; 92 } 93 94 + /* The init section should be last, so when we free it, it goes into 95 + * the general memory pool, and (hopefully) will decrease fragmentation 96 + * a tiny bit. The init section has a _requirement_ that it be 97 + * PAGE_SIZE aligned 98 + */ 99 + . = ALIGN(PAGE_SIZE); 100 ___init_begin = .; 101 102 .init.text : ··· 179 . = ALIGN(PAGE_SIZE); 180 ___init_end = .; 181 182 + __end =.; 183 184 STABS_DEBUG 185