Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/fsl_booke: make sure PAGE_OFFSET map to memstart_addr for relocatable kernel

This is always true for a non-relocatable kernel. Otherwise the kernel
would get stuck. But for a relocatable kernel, it is a little more
complicated. When booting a relocatable kernel, we just align the
kernel start address to 64M and map the PAGE_OFFSET from there. The
relocation will be based on this virtual address. But if this address
is not the same as the memstart_addr, we will have to change the
mapping of PAGE_OFFSET to the real memstart_addr and do the relocation
again.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
[scottwood@freescale.com: make offset long and non-negative in simple case]
Signed-off-by: Scott Wood <scottwood@freescale.com>

authored by

Kevin Hao and committed by
Scott Wood
7d2471f9 813125d8

+106 -12
+69 -5
arch/powerpc/kernel/head_fsl_booke.S
··· 81 81 mr r23,r3 82 82 mr r25,r4 83 83 84 + bl 0f 85 + 0: mflr r8 86 + addis r3,r8,(is_second_reloc - 0b)@ha 87 + lwz r19,(is_second_reloc - 0b)@l(r3) 88 + 89 + /* Check if this is the second relocation. */ 90 + cmpwi r19,1 91 + bne 1f 92 + 93 + /* 94 + * For the second relocation, we already get the real memstart_addr 95 + * from device tree. So we will map PAGE_OFFSET to memstart_addr, 96 + * then the virtual address of start kernel should be: 97 + * PAGE_OFFSET + (kernstart_addr - memstart_addr) 98 + * Since the offset between kernstart_addr and memstart_addr should 99 + * never be beyond 1G, so we can just use the lower 32bit of them 100 + * for the calculation. 101 + */ 102 + lis r3,PAGE_OFFSET@h 103 + 104 + addis r4,r8,(kernstart_addr - 0b)@ha 105 + addi r4,r4,(kernstart_addr - 0b)@l 106 + lwz r5,4(r4) 107 + 108 + addis r6,r8,(memstart_addr - 0b)@ha 109 + addi r6,r6,(memstart_addr - 0b)@l 110 + lwz r7,4(r6) 111 + 112 + subf r5,r7,r5 113 + add r3,r3,r5 114 + b 2f 115 + 116 + 1: 84 117 /* 85 118 * We have the runtime (virutal) address of our base. 86 119 * We calculate our shift of offset from a 64M page. 
··· 127 94 subf r3,r5,r6 /* r3 = r6 - r5 */ 128 95 add r3,r4,r3 /* Required Virtual Address */ 129 96 130 - bl relocate 97 + 2: bl relocate 98 + 99 + /* 100 + * For the second relocation, we already set the right tlb entries 101 + * for the kernel space, so skip the code in fsl_booke_entry_mapping.S 102 + */ 103 + cmpwi r19,1 104 + beq set_ivor 131 105 #endif 132 106 133 107 /* We try to not make any assumptions about how the boot loader ··· 162 122 #include "fsl_booke_entry_mapping.S" 163 123 #undef ENTRY_MAPPING_BOOT_SETUP 164 124 125 + set_ivor: 165 126 /* Establish the interrupt vector offsets */ 166 127 SET_IVOR(0, CriticalInput); 167 128 SET_IVOR(1, MachineCheck); ··· 248 207 bl early_init 249 208 250 209 #ifdef CONFIG_RELOCATABLE 210 + mr r3,r30 211 + mr r4,r31 251 212 #ifdef CONFIG_PHYS_64BIT 252 - mr r3,r23 253 - mr r4,r25 213 + mr r5,r23 214 + mr r6,r25 254 215 #else 255 - mr r3,r25 216 + mr r5,r25 256 217 #endif 257 218 bl relocate_init 258 219 #endif ··· 1250 1207 /* 1251 1208 * Restore to the address space 0 and also invalidate the tlb entry created 1252 1209 * by switch_to_as1. 1210 + * r3 - the tlb entry which should be invalidated 1211 + * r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0) 1212 + * r5 - device tree virtual address. If r4 is 0, r5 is ignored. 1253 1213 */ 1254 1214 _GLOBAL(restore_to_as0) 1255 1215 mflr r0 ··· 1261 1215 0: mflr r9 1262 1216 addi r9,r9,1f - 0b 1263 1217 1264 - mfmsr r7 1218 + /* 1219 + * We may map the PAGE_OFFSET in AS0 to a different physical address, 1220 + * so we need calculate the right jump and device tree address based 1221 + * on the offset passed by r4. 
1222 + */ 1223 + add r9,r9,r4 1224 + add r5,r5,r4 1225 + 1226 + 2: mfmsr r7 1265 1227 li r8,(MSR_IS | MSR_DS) 1266 1228 andc r7,r7,r8 1267 1229 ··· 1288 1234 mtspr SPRN_MAS1,r9 1289 1235 tlbwe 1290 1236 isync 1237 + 1238 + cmpwi r4,0 1239 + bne 3f 1291 1240 mtlr r0 1292 1241 blr 1242 + 1243 + /* 1244 + * The PAGE_OFFSET will map to a different physical address, 1245 + * jump to _start to do another relocation again. 1246 + */ 1247 + 3: mr r3,r5 1248 + bl _start 1293 1249 1294 1250 /* 1295 1251 * We put a few things here that have to be page-aligned. This stuff
+36 -6
arch/powerpc/mm/fsl_booke_mmu.c
··· 231 231 232 232 i = switch_to_as1(); 233 233 __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM); 234 - restore_to_as0(i); 234 + restore_to_as0(i, 0, 0); 235 235 236 236 pr_info("Memory CAM mapping: "); 237 237 for (i = 0; i < tlbcam_index - 1; i++) ··· 252 252 } 253 253 254 254 #ifdef CONFIG_RELOCATABLE 255 - notrace void __init relocate_init(phys_addr_t start) 255 + int __initdata is_second_reloc; 256 + notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start) 256 257 { 257 258 unsigned long base = KERNELBASE; 258 259 260 + kernstart_addr = start; 261 + if (is_second_reloc) { 262 + virt_phys_offset = PAGE_OFFSET - memstart_addr; 263 + return; 264 + } 265 + 259 266 /* 260 267 * Relocatable kernel support based on processing of dynamic 261 - * relocation entries. 262 - * Compute the virt_phys_offset : 268 + * relocation entries. Before we get the real memstart_addr, 269 + * We will compute the virt_phys_offset like this: 263 270 * virt_phys_offset = stext.run - kernstart_addr 264 271 * 265 - * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff) 272 + * stext.run = (KERNELBASE & ~0x3ffffff) + 273 + * (kernstart_addr & 0x3ffffff) 266 274 * When we relocate, we have : 267 275 * 268 276 * (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff) ··· 280 272 * (kernstart_addr & ~0x3ffffff) 281 273 * 282 274 */ 283 - kernstart_addr = start; 284 275 start &= ~0x3ffffff; 285 276 base &= ~0x3ffffff; 286 277 virt_phys_offset = base - start; 278 + early_get_first_memblock_info(__va(dt_ptr), NULL); 279 + /* 280 + * We now get the memstart_addr, then we should check if this 281 + * address is the same as what the PAGE_OFFSET map to now. If 282 + * not we have to change the map of PAGE_OFFSET to memstart_addr 283 + * and do a second relocation. 
284 + */ 285 + if (start != memstart_addr) { 286 + int n; 287 + long offset = start - memstart_addr; 288 + 289 + is_second_reloc = 1; 290 + n = switch_to_as1(); 291 + /* map a 64M area for the second relocation */ 292 + if (memstart_addr > start) 293 + map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM); 294 + else 295 + map_mem_in_cams_addr(start, PAGE_OFFSET + offset, 296 + 0x4000000, CONFIG_LOWMEM_CAM_NUM); 297 + restore_to_as0(n, offset, __va(dt_ptr)); 298 + /* We should never reach here */ 299 + panic("Relocation error"); 300 + } 287 301 } 288 302 #endif 289 303 #endif
+1 -1
arch/powerpc/mm/mmu_decl.h
··· 149 149 extern unsigned long mmu_mapin_ram(unsigned long top); 150 150 extern void adjust_total_lowmem(void); 151 151 extern int switch_to_as1(void); 152 - extern void restore_to_as0(int esel); 152 + extern void restore_to_as0(int esel, int offset, void *dt_ptr); 153 153 #endif 154 154 extern void loadcam_entry(unsigned int index); 155 155