Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

microblaze: Use LOAD_OFFSET macro to get correct LMA for all sections

Currently, vmlinux has LMA==VMA for all sections, which is wrong for MMU
kernels. Previous patches in this series defined the LOAD_OFFSET constant;
this patch now makes use of it in our link script.

Other minor changes in this patch:
* brace/indenting cleanup of some sections
* put __fdt_* symbols in their own section, and apply LOAD_OFFSET fixup

Signed-off-by: John Williams <john.williams@petalogix.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>

+19 -15
arch/microblaze/kernel/vmlinux.lds.S
···
 12  12    OUTPUT_ARCH(microblaze)
 13  13    ENTRY(_start)
 14  14
 15      - #include <asm-generic/vmlinux.lds.h>
 16  15    #include <asm/page.h>
     16  + #include <asm-generic/vmlinux.lds.h>
 17  17    #include <asm/thread_info.h>
 18  18
 19  19    jiffies = jiffies_64 + 4;
 20  20
 21  21    SECTIONS {
 22  22        . = CONFIG_KERNEL_START;
 23      -     .text : {
     23  +     .text : AT(ADDR(.text) - LOAD_OFFSET) {
 24  24            _text = . ;
 25  25            _stext = . ;
 26  26            *(.text .text.*)
···
 35  35        }
 36  36
 37  37        . = ALIGN (4) ;
 38      -     _fdt_start = . ;                /* place for fdt blob */
 39      -     . = . + 0x4000;
 40      -     _fdt_end = . ;
     38  +     __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
     39  +         _fdt_start = . ;            /* place for fdt blob */
     40  +         *(__fdt_blob) ;             /* Any link-placed DTB */
     41  +         . = _fdt_start + 0x4000;    /* Pad up to 16kbyte */
     42  +         _fdt_end = . ;
     43  +     }
 41  44
 42  45        . = ALIGN(16);
 43  46        RODATA
···
 50  47         * sdata2 section can go anywhere, but must be word aligned
 51  48         * and SDA2_BASE must point to the middle of it
 52  49         */
 53      -     .sdata2 : {
     50  +     .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
 54  51            _ssrw = .;
 55  52            . = ALIGN(4096); /* page aligned when MMU used - origin 0x8 */
 56  53            *(.sdata2)
···
 71  68
 72  69        /* Under the microblaze ABI, .sdata and .sbss must be contiguous */
 73  70        . = ALIGN(8);
 74      -     .sdata : {
     71  +     .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
 75  72            _ssro = .;
 76  73            *(.sdata)
 77  74        }
 78  75
 79      -     .sbss : {
     76  +     .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {
 80  77            _ssbss = .;
 81  78            *(.sbss)
 82  79            _esbss = .;
···
 89  86
 90  87        INIT_TEXT_SECTION(PAGE_SIZE)
 91  88
 92      -     .init.data : {
     89  +     .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 93  90            INIT_DATA
 94  91        }
 95  92
 96  93        . = ALIGN(4);
 97      -     .init.ivt : {
     94  +     .init.ivt : AT(ADDR(.init.ivt) - LOAD_OFFSET) {
 98  95            __ivt_start = .;
 99  96            *(.init.ivt)
100  97            __ivt_end = .;
101  98        }
102  99
103      -     .init.setup : {
    100  +     .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
104 101           INIT_SETUP(0)
105 102       }
106 103
107      -     .initcall.init : {
    104  +     .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET ) {
108 105           INIT_CALLS
109 106       }
110 107
111      -     .con_initcall.init : {
    108  +     .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
112 109           CON_INITCALL
113 110       }
114 111
···
116 113
117 114       __init_end_before_initramfs = .;
118 115
119      -     .init.ramfs ALIGN(4096) : {
    116  +     .init.ramfs ALIGN(4096) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
120 117           __initramfs_start = .;
121 118           *(.init.ramfs)
122 119           __initramfs_end = .;
···
132 129       }
133 130       __init_end = .;
134 131
135      -     .bss ALIGN (4096) : { /* page aligned when MMU used */
    132  +     .bss ALIGN (4096) : AT(ADDR(.bss) - LOAD_OFFSET) {
    133  +         /* page aligned when MMU used */
136 134           __bss_start = . ;
137 135           *(.bss*)
138 136           *(COMMON)