Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: calculate on-chip lengths at link time rather than run time

Since these section sizes never change at runtime, push the calculation out to
the linker script and avoid the needless runtime computation.

Signed-off-by: Mike Frysinger <vapier@gentoo.org>

+44 -39
+12 -4
arch/blackfin/include/asm/sections.h
··· 13 13 extern unsigned long _ramstart, _ramend, _rambase; 14 14 extern unsigned long memory_start, memory_end, physical_mem_end; 15 15 16 - extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[], 17 - _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], 18 - _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[], 19 - _ebss_l2[], _l2_lma_start[]; 16 + /* 17 + * The weak markings on the lengths might seem weird, but this is required 18 + * in order to make gcc accept the fact that these may actually have a value 19 + * of 0 (since they aren't actually addresses, but sizes of sections). 20 + */ 21 + extern char _stext_l1[], _etext_l1[], _text_l1_lma[], __weak _text_l1_len[]; 22 + extern char _sdata_l1[], _edata_l1[], _sbss_l1[], _ebss_l1[], 23 + _data_l1_lma[], __weak _data_l1_len[]; 24 + extern char _sdata_b_l1[], _edata_b_l1[], _sbss_b_l1[], _ebss_b_l1[], 25 + _data_b_l1_lma[], __weak _data_b_l1_len[]; 26 + extern char _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], 27 + _sbss_l2[], _ebss_l2[], _l2_lma[], __weak _l2_len[]; 20 28 21 29 #include <asm/mem_map.h> 22 30
+16 -23
arch/blackfin/kernel/setup.c
··· 178 178 179 179 void __init bfin_relocate_l1_mem(void) 180 180 { 181 - unsigned long l1_code_length; 182 - unsigned long l1_data_a_length; 183 - unsigned long l1_data_b_length; 184 - unsigned long l2_length; 181 + unsigned long text_l1_len = (unsigned long)_text_l1_len; 182 + unsigned long data_l1_len = (unsigned long)_data_l1_len; 183 + unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len; 184 + unsigned long l2_len = (unsigned long)_l2_len; 185 185 186 186 early_shadow_stamp(); 187 187 ··· 201 201 202 202 blackfin_dma_early_init(); 203 203 204 - /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ 205 - l1_code_length = _etext_l1 - _stext_l1; 206 - if (l1_code_length) 207 - early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length); 204 + /* if necessary, copy L1 text to L1 instruction SRAM */ 205 + if (L1_CODE_LENGTH && text_l1_len) 206 + early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len); 208 207 209 - /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */ 210 - l1_data_a_length = _sbss_l1 - _sdata_l1; 211 - if (l1_data_a_length) 212 - early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); 208 + /* if necessary, copy L1 data to L1 data bank A SRAM */ 209 + if (L1_DATA_A_LENGTH && data_l1_len) 210 + early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len); 213 211 214 - /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */ 215 - l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; 216 - if (l1_data_b_length) 217 - early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + 218 - l1_data_a_length, l1_data_b_length); 212 + /* if necessary, copy L1 data B to L1 data bank B SRAM */ 213 + if (L1_DATA_B_LENGTH && data_b_l1_len) 214 + early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len); 219 215 220 216 early_dma_memcpy_done(); 221 217 222 - /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */ 223 - if (L2_LENGTH != 0) { 224 - l2_length = _sbss_l2 - _stext_l2; 225 - if (l2_length) 226 - memcpy(_stext_l2, _l2_lma_start, l2_length); 227 - } 218 + /* if necessary, copy L2 text/data to L2 SRAM */ 219 + if (L2_LENGTH && l2_len) 220 + memcpy(_stext_l2, _l2_lma, l2_len); 228 221 } 229 222 230 223 /* add_memory_region to memmap */
+16 -12
arch/blackfin/kernel/vmlinux.lds.S
··· 123 123 EXIT_DATA 124 124 } 125 125 126 - __l1_lma_start = .; 127 - 128 126 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) 129 127 { 130 128 . = ALIGN(4); ··· 134 136 . = ALIGN(4); 135 137 __etext_l1 = .; 136 138 } 137 - ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!") 139 + __text_l1_lma = LOADADDR(.text_l1); 140 + __text_l1_len = SIZEOF(.text_l1); 141 + ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!") 138 142 139 - .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) 143 + .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len) 140 144 { 141 145 . = ALIGN(4); 142 146 __sdata_l1 = .; ··· 154 154 . = ALIGN(4); 155 155 __ebss_l1 = .; 156 156 } 157 - ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!") 157 + __data_l1_lma = LOADADDR(.data_l1); 158 + __data_l1_len = SIZEOF(.data_l1); 159 + ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!") 158 160 159 - .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) 161 + .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len) 160 162 { 161 163 . = ALIGN(4); 162 164 __sdata_b_l1 = .; ··· 171 169 . = ALIGN(4); 172 170 __ebss_b_l1 = .; 173 171 } 174 - ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!") 172 + __data_b_l1_lma = LOADADDR(.data_b_l1); 173 + __data_b_l1_len = SIZEOF(.data_b_l1); 174 + ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!") 175 175 176 - __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1); 177 - 178 - .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1)) 176 + .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len) 179 177 { 180 178 . = ALIGN(4); 181 179 __stext_l2 = .; ··· 197 195 . = ALIGN(4); 198 196 __ebss_l2 = .; 199 197 } 200 - ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!") 198 + __l2_lma = LOADADDR(.text_data_l2); 199 + __l2_len = SIZEOF(.text_data_l2); 200 + ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!") 201 201 202 202 /* Force trailing alignment of our init section so that when we 203 203 * free our init memory, we don't leave behind a partial page. 204 204 */ 205 - . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2); 205 + . = __l2_lma + __l2_len; 206 206 . = ALIGN(PAGE_SIZE); 207 207 ___init_end = .; 208 208