Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Blackfin: SMP: work around anomaly 05000491

In order to safely work around anomaly 05000491, we have to execute IFLUSH
from L1 instruction sram. The trouble with multi-core systems is that all
L1 sram is visible only to the active core. So we can't just place the
functions into L1 and call them directly. We need to set up a jump table and
place the entry point in external memory. This will call the right func
based on the active core.

In the process, convert from the manual relocation of a small bit of code
into Core B's L1 to the more general framework we already have in place
for loading arbitrary pieces of code into L1.

Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

authored by

Sonic Zhang and committed by
Mike Frysinger
c6345ab1 6f546bc3

+74 -12
-1
arch/blackfin/Kconfig
··· 850 850 config ICACHE_FLUSH_L1 851 851 bool "Locate icache flush funcs in L1 Inst Memory" 852 852 default y 853 - depends on !SMP 854 853 help 855 854 If enabled, the Blackfin icache flushing functions are linked 856 855 into L1 instruction memory.
+6 -1
arch/blackfin/include/asm/smp.h
··· 17 17 18 18 #define raw_smp_processor_id() blackfin_core_id() 19 19 20 - extern char coreb_trampoline_start, coreb_trampoline_end; 20 + extern void bfin_relocate_coreb_l1_mem(void); 21 + 22 + #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) 23 + asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); 24 + extern unsigned long blackfin_iflush_l1_entry[NR_CPUS]; 25 + #endif 21 26 22 27 struct corelock_slot { 23 28 int lock;
+37
arch/blackfin/kernel/setup.c
··· 215 215 216 216 early_dma_memcpy_done(); 217 217 218 + #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) 219 + blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1; 220 + #endif 221 + 218 222 /* if necessary, copy L2 text/data to L2 SRAM */ 219 223 if (L2_LENGTH && l2_len) 220 224 memcpy(_stext_l2, _l2_lma, l2_len); 221 225 } 226 + 227 + #ifdef CONFIG_SMP 228 + void __init bfin_relocate_coreb_l1_mem(void) 229 + { 230 + unsigned long text_l1_len = (unsigned long)_text_l1_len; 231 + unsigned long data_l1_len = (unsigned long)_data_l1_len; 232 + unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len; 233 + 234 + blackfin_dma_early_init(); 235 + 236 + /* if necessary, copy L1 text to L1 instruction SRAM */ 237 + if (L1_CODE_LENGTH && text_l1_len) 238 + early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma, 239 + text_l1_len); 240 + 241 + /* if necessary, copy L1 data to L1 data bank A SRAM */ 242 + if (L1_DATA_A_LENGTH && data_l1_len) 243 + early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma, 244 + data_l1_len); 245 + 246 + /* if necessary, copy L1 data B to L1 data bank B SRAM */ 247 + if (L1_DATA_B_LENGTH && data_b_l1_len) 248 + early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma, 249 + data_b_l1_len); 250 + 251 + early_dma_memcpy_done(); 252 + 253 + #ifdef CONFIG_ICACHE_FLUSH_L1 254 + blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 - 255 + (unsigned long)_stext_l1 + COREB_L1_CODE_START; 256 + #endif 257 + } 258 + #endif 222 259 223 260 #ifdef CONFIG_ROMKERNEL 224 261 void __init bfin_relocate_xip_data(void)
+1
arch/blackfin/kernel/vmlinux.lds.S
··· 176 176 { 177 177 . = ALIGN(4); 178 178 __stext_l1 = .; 179 + *(.l1.text.head) 179 180 *(.l1.text) 180 181 #ifdef CONFIG_SCHEDULE_L1 181 182 SCHED_TEXT
+5 -2
arch/blackfin/mach-bf561/secondary.S
··· 13 13 #include <asm/asm-offsets.h> 14 14 #include <asm/trace.h> 15 15 16 - __INIT 16 + /* 17 + * This code must come first as CoreB is hardcoded (in hardware) 18 + * to start at the beginning of its L1 instruction memory. 19 + */ 20 + .section .l1.text.head 17 21 18 22 /* Lay the initial stack into the L1 scratch area of Core B */ 19 23 #define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12) ··· 164 160 .LWAIT_HERE: 165 161 jump .LWAIT_HERE; 166 162 ENDPROC(_coreb_trampoline_start) 167 - ENTRY(_coreb_trampoline_end) 168 163 169 164 #ifdef CONFIG_HOTPLUG_CPU 170 165 .section ".text"
+1 -8
arch/blackfin/mach-bf561/smp.c
··· 30 30 31 31 void __init platform_prepare_cpus(unsigned int max_cpus) 32 32 { 33 - int len; 34 - 35 - len = &coreb_trampoline_end - &coreb_trampoline_start + 1; 36 - BUG_ON(len > L1_CODE_LENGTH); 37 - 38 - dma_memcpy((void *)COREB_L1_CODE_START, &coreb_trampoline_start, len); 33 + bfin_relocate_coreb_l1_mem(); 39 34 40 35 /* Both cores ought to be present on a bf561! */ 41 36 cpu_set(0, cpu_present_map); /* CoreA */ 42 37 cpu_set(1, cpu_present_map); /* CoreB */ 43 - 44 - printk(KERN_INFO "CoreB bootstrap code to SRAM %p via DMA.\n", (void *)COREB_L1_CODE_START); 45 38 } 46 39 47 40 int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
+20
arch/blackfin/mach-common/cache.S
··· 69 69 #endif 70 70 71 71 /* Invalidate all instruction cache lines assocoiated with this memory area */ 72 + #ifdef CONFIG_SMP 73 + # define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1 74 + #endif 72 75 ENTRY(_blackfin_icache_flush_range) 73 76 do_flush IFLUSH 74 77 ENDPROC(_blackfin_icache_flush_range) 78 + 79 + #ifdef CONFIG_SMP 80 + .text 81 + # undef _blackfin_icache_flush_range 82 + ENTRY(_blackfin_icache_flush_range) 83 + p0.L = LO(DSPID); 84 + p0.H = HI(DSPID); 85 + r3 = [p0]; 86 + r3 = r3.b (z); 87 + p2 = r3; 88 + p0.L = _blackfin_iflush_l1_entry; 89 + p0.H = _blackfin_iflush_l1_entry; 90 + p0 = p0 + (p2 << 2); 91 + p1 = [p0]; 92 + jump (p1); 93 + ENDPROC(_blackfin_icache_flush_range) 94 + #endif 75 95 76 96 #ifdef CONFIG_DCACHE_FLUSH_L1 77 97 .section .l1.text
+4
arch/blackfin/mach-common/smp.c
··· 40 40 */ 41 41 struct corelock_slot corelock __attribute__ ((__section__(".l2.bss"))); 42 42 43 + #ifdef CONFIG_ICACHE_FLUSH_L1 44 + unsigned long blackfin_iflush_l1_entry[NR_CPUS]; 45 + #endif 46 + 43 47 void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb, 44 48 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb, 45 49 *init_saved_dcplb_fault_addr_coreb;