Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/fsl_booke: smp support for booting a relocatable kernel above 64M

When booting above 64M for a secondary cpu, we also face the
same issue as the boot cpu: PAGE_OFFSET maps to two different
physical addresses for the init tlb and the final map. So we have to
use switch_to_as1/restore_to_as0 to convert between these two
maps. When restoring to as0 for a secondary cpu, we only need to
return to the caller. So add a new parameter to function
restore_to_as0 for this purpose.

Use LOAD_REG_ADDR_PIC to get the address of variables which may
be used before we set the final map in cams for the secondary cpu.
Move the setting of cams a bit earlier in order to avoid the
unnecessary use of LOAD_REG_ADDR_PIC.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>

authored by

Kevin Hao and committed by
Scott Wood
0be7d969 7d2471f9

+35 -18
+29 -14
arch/powerpc/kernel/head_fsl_booke.S
··· 216 216 /* Check to see if we're the second processor, and jump 217 217 * to the secondary_start code if so 218 218 */ 219 - lis r24, boot_cpuid@h 220 - ori r24, r24, boot_cpuid@l 219 + LOAD_REG_ADDR_PIC(r24, boot_cpuid) 221 220 lwz r24, 0(r24) 222 221 cmpwi r24, -1 223 222 mfspr r24,SPRN_PIR ··· 1145 1146 /* When we get here, r24 needs to hold the CPU # */ 1146 1147 .globl __secondary_start 1147 1148 __secondary_start: 1149 + LOAD_REG_ADDR_PIC(r3, tlbcam_index) 1150 + lwz r3,0(r3) 1151 + mtctr r3 1152 + li r26,0 /* r26 safe? */ 1153 + 1154 + bl switch_to_as1 1155 + mr r27,r3 /* tlb entry */ 1156 + /* Load each CAM entry */ 1157 + 1: mr r3,r26 1158 + bl loadcam_entry 1159 + addi r26,r26,1 1160 + bdnz 1b 1161 + mr r3,r27 /* tlb entry */ 1162 + LOAD_REG_ADDR_PIC(r4, memstart_addr) 1163 + lwz r4,0(r4) 1164 + mr r5,r25 /* phys kernel start */ 1165 + rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */ 1166 + subf r4,r5,r4 /* memstart_addr - phys kernel start */ 1167 + li r5,0 /* no device tree */ 1168 + li r6,0 /* not boot cpu */ 1169 + bl restore_to_as0 1170 + 1171 + 1148 1172 lis r3,__secondary_hold_acknowledge@h 1149 1173 ori r3,r3,__secondary_hold_acknowledge@l 1150 1174 stw r24,0(r3) ··· 1175 1153 li r3,0 1176 1154 mr r4,r24 /* Why? */ 1177 1155 bl call_setup_cpu 1178 - 1179 - lis r3,tlbcam_index@ha 1180 - lwz r3,tlbcam_index@l(r3) 1181 - mtctr r3 1182 - li r26,0 /* r26 safe? */ 1183 - 1184 - /* Load each CAM entry */ 1185 - 1: mr r3,r26 1186 - bl loadcam_entry 1187 - addi r26,r26,1 1188 - bdnz 1b 1189 1156 1190 1157 /* get current_thread_info and current */ 1191 1158 lis r1,secondary_ti@ha ··· 1264 1253 * r3 - the tlb entry which should be invalidated 1265 1254 * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) 1266 1255 * r5 - device tree virtual address. If r4 is 0, r5 is ignored. 
1256 + * r6 - boot cpu 1267 1257 */ 1268 1258 _GLOBAL(restore_to_as0) 1269 1259 mflr r0 ··· 1280 1268 */ 1281 1269 add r9,r9,r4 1282 1270 add r5,r5,r4 1271 + add r0,r0,r4 1283 1272 1284 1273 2: mfmsr r7 1285 1274 li r8,(MSR_IS | MSR_DS) ··· 1303 1290 isync 1304 1291 1305 1292 cmpwi r4,0 1306 - bne 3f 1293 + cmpwi cr1,r6,0 1294 + cror eq,4*cr1+eq,eq 1295 + bne 3f /* offset != 0 && is_boot_cpu */ 1307 1296 mtlr r0 1308 1297 blr 1309 1298
+2 -2
arch/powerpc/mm/fsl_booke_mmu.c
··· 231 231 232 232 i = switch_to_as1(); 233 233 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); 234 - restore_to_as0(i, 0, 0); 234 + restore_to_as0(i, 0, 0, 1); 235 235 236 236 pr_info("Memory CAM mapping: "); 237 237 for (i = 0; i < tlbcam_index - 1; i++) ··· 302 302 else 303 303 map_mem_in_cams_addr(start, PAGE_OFFSET + offset, 304 304 0x4000000, CONFIG_LOWMEM_CAM_NUM); 305 - restore_to_as0(n, offset, __va(dt_ptr)); 305 + restore_to_as0(n, offset, __va(dt_ptr), 1); 306 306 /* We should never reach here */ 307 307 panic("Relocation error"); 308 308 }
+1 -1
arch/powerpc/mm/mmu_decl.h
··· 149 149 extern unsigned long mmu_mapin_ram(unsigned long top); 150 150 extern void adjust_total_lowmem(void); 151 151 extern int switch_to_as1(void); 152 - extern void restore_to_as0(int esel, int offset, void *dt_ptr); 152 + extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu); 153 153 #endif 154 154 extern void loadcam_entry(unsigned int index); 155 155
+3 -1
arch/powerpc/mm/tlb_nohash_low.S
··· 402 402 * Load TLBCAM[index] entry in to the L2 CAM MMU 403 403 */ 404 404 _GLOBAL(loadcam_entry) 405 - LOAD_REG_ADDR(r4, TLBCAM) 405 + mflr r5 406 + LOAD_REG_ADDR_PIC(r4, TLBCAM) 407 + mtlr r5 406 408 mulli r5,r3,TLBCAM_SIZE 407 409 add r3,r5,r4 408 410 lwz r4,TLBCAM_MAS0(r3)