Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/32: Split kexec low level code out of misc_32.S

Almost half of misc_32.S is dedicated to kexec.
That's the relocation function for kexec.

Drop it into a dedicated kexec_relocate_32.S

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/e235973a1198195763afd3b6baffa548a83f4611.1572351221.git.christophe.leroy@c-s.fr

Authored by Christophe Leroy and committed by Michael Ellerman
9f7bd920 8795a739

+501 -491
+1
arch/powerpc/kernel/Makefile
··· 82 82 obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o 83 83 ifdef CONFIG_PPC32 84 84 obj-$(CONFIG_E500) += idle_e500.o 85 + obj-$(CONFIG_KEXEC_CORE) += kexec_relocate_32.o 85 86 endif 86 87 obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o 87 88 obj-$(CONFIG_TAU) += tau_6xx.o
+500
arch/powerpc/kernel/kexec_relocate_32.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0-or-later */ 2 + /* 3 + * This file contains kexec low-level functions. 4 + * 5 + * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> 6 + * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz 7 + * PPC44x port. Copyright (C) 2011, IBM Corporation 8 + * Author: Suzuki Poulose <suzuki@in.ibm.com> 9 + */ 10 + 11 + #include <asm/reg.h> 12 + #include <asm/page.h> 13 + #include <asm/mmu.h> 14 + #include <asm/ppc_asm.h> 15 + #include <asm/kexec.h> 16 + 17 + .text 18 + 19 + /* 20 + * Must be relocatable PIC code callable as a C function. 21 + */ 22 + .globl relocate_new_kernel 23 + relocate_new_kernel: 24 + /* r3 = page_list */ 25 + /* r4 = reboot_code_buffer */ 26 + /* r5 = start_address */ 27 + 28 + #ifdef CONFIG_FSL_BOOKE 29 + 30 + mr r29, r3 31 + mr r30, r4 32 + mr r31, r5 33 + 34 + #define ENTRY_MAPPING_KEXEC_SETUP 35 + #include "fsl_booke_entry_mapping.S" 36 + #undef ENTRY_MAPPING_KEXEC_SETUP 37 + 38 + mr r3, r29 39 + mr r4, r30 40 + mr r5, r31 41 + 42 + li r0, 0 43 + #elif defined(CONFIG_44x) 44 + 45 + /* Save our parameters */ 46 + mr r29, r3 47 + mr r30, r4 48 + mr r31, r5 49 + 50 + #ifdef CONFIG_PPC_47x 51 + /* Check for 47x cores */ 52 + mfspr r3,SPRN_PVR 53 + srwi r3,r3,16 54 + cmplwi cr0,r3,PVR_476FPE@h 55 + beq setup_map_47x 56 + cmplwi cr0,r3,PVR_476@h 57 + beq setup_map_47x 58 + cmplwi cr0,r3,PVR_476_ISS@h 59 + beq setup_map_47x 60 + #endif /* CONFIG_PPC_47x */ 61 + 62 + /* 63 + * Code for setting up 1:1 mapping for PPC440x for KEXEC 64 + * 65 + * We cannot switch off the MMU on PPC44x. 66 + * So we: 67 + * 1) Invalidate all the mappings except the one we are running from. 68 + * 2) Create a tmp mapping for our code in the other address space(TS) and 69 + * jump to it. Invalidate the entry we started in. 70 + * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS. 71 + * 4) Jump to the 1:1 mapping in original TS. 72 + * 5) Invalidate the tmp mapping. 
73 + * 74 + * - Based on the kexec support code for FSL BookE 75 + * 76 + */ 77 + 78 + /* 79 + * Load the PID with kernel PID (0). 80 + * Also load our MSR_IS and TID to MMUCR for TLB search. 81 + */ 82 + li r3, 0 83 + mtspr SPRN_PID, r3 84 + mfmsr r4 85 + andi. r4,r4,MSR_IS@l 86 + beq wmmucr 87 + oris r3,r3,PPC44x_MMUCR_STS@h 88 + wmmucr: 89 + mtspr SPRN_MMUCR,r3 90 + sync 91 + 92 + /* 93 + * Invalidate all the TLB entries except the current entry 94 + * where we are running from 95 + */ 96 + bl 0f /* Find our address */ 97 + 0: mflr r5 /* Make it accessible */ 98 + tlbsx r23,0,r5 /* Find entry we are in */ 99 + li r4,0 /* Start at TLB entry 0 */ 100 + li r3,0 /* Set PAGEID inval value */ 101 + 1: cmpw r23,r4 /* Is this our entry? */ 102 + beq skip /* If so, skip the inval */ 103 + tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ 104 + skip: 105 + addi r4,r4,1 /* Increment */ 106 + cmpwi r4,64 /* Are we done? */ 107 + bne 1b /* If not, repeat */ 108 + isync 109 + 110 + /* Create a temp mapping and jump to it */ 111 + andi. 
r6, r23, 1 /* Find the index to use */ 112 + addi r24, r6, 1 /* r24 will contain 1 or 2 */ 113 + 114 + mfmsr r9 /* get the MSR */ 115 + rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */ 116 + xori r7, r5, 1 /* Use the other address space */ 117 + 118 + /* Read the current mapping entries */ 119 + tlbre r3, r23, PPC44x_TLB_PAGEID 120 + tlbre r4, r23, PPC44x_TLB_XLAT 121 + tlbre r5, r23, PPC44x_TLB_ATTRIB 122 + 123 + /* Save our current XLAT entry */ 124 + mr r25, r4 125 + 126 + /* Extract the TLB PageSize */ 127 + li r10, 1 /* r10 will hold PageSize */ 128 + rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */ 129 + 130 + /* XXX: As of now we use 256M, 4K pages */ 131 + cmpwi r11, PPC44x_TLB_256M 132 + bne tlb_4k 133 + rotlwi r10, r10, 28 /* r10 = 256M */ 134 + b write_out 135 + tlb_4k: 136 + cmpwi r11, PPC44x_TLB_4K 137 + bne default 138 + rotlwi r10, r10, 12 /* r10 = 4K */ 139 + b write_out 140 + default: 141 + rotlwi r10, r10, 10 /* r10 = 1K */ 142 + 143 + write_out: 144 + /* 145 + * Write out the tmp 1:1 mapping for this code in other address space 146 + * Fixup EPN = RPN , TS=other address space 147 + */ 148 + insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */ 149 + 150 + /* Write out the tmp mapping entries */ 151 + tlbwe r3, r24, PPC44x_TLB_PAGEID 152 + tlbwe r4, r24, PPC44x_TLB_XLAT 153 + tlbwe r5, r24, PPC44x_TLB_ATTRIB 154 + 155 + subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */ 156 + not r10, r11 /* Mask for PageNum */ 157 + 158 + /* Switch to other address space in MSR */ 159 + insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ 160 + 161 + bl 1f 162 + 1: mflr r8 163 + addi r8, r8, (2f-1b) /* Find the target offset */ 164 + 165 + /* Jump to the tmp mapping */ 166 + mtspr SPRN_SRR0, r8 167 + mtspr SPRN_SRR1, r9 168 + rfi 169 + 170 + 2: 171 + /* Invalidate the entry we were executing from */ 172 + li r3, 0 173 + tlbwe r3, r23, PPC44x_TLB_PAGEID 174 + 175 + /* attribute fields. 
rwx for SUPERVISOR mode */ 176 + li r5, 0 177 + ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) 178 + 179 + /* Create 1:1 mapping in 256M pages */ 180 + xori r7, r7, 1 /* Revert back to Original TS */ 181 + 182 + li r8, 0 /* PageNumber */ 183 + li r6, 3 /* TLB Index, start at 3 */ 184 + 185 + next_tlb: 186 + rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */ 187 + mr r4, r3 /* RPN = EPN */ 188 + ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */ 189 + insrwi r3, r7, 1, 23 /* Set TS from r7 */ 190 + 191 + tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */ 192 + tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */ 193 + tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */ 194 + 195 + addi r8, r8, 1 /* Increment PN */ 196 + addi r6, r6, 1 /* Increment TLB Index */ 197 + cmpwi r8, 8 /* Are we done ? */ 198 + bne next_tlb 199 + isync 200 + 201 + /* Jump to the new mapping 1:1 */ 202 + li r9,0 203 + insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ 204 + 205 + bl 1f 206 + 1: mflr r8 207 + and r8, r8, r11 /* Get our offset within page */ 208 + addi r8, r8, (2f-1b) 209 + 210 + and r5, r25, r10 /* Get our target PageNum */ 211 + or r8, r8, r5 /* Target jump address */ 212 + 213 + mtspr SPRN_SRR0, r8 214 + mtspr SPRN_SRR1, r9 215 + rfi 216 + 2: 217 + /* Invalidate the tmp entry we used */ 218 + li r3, 0 219 + tlbwe r3, r24, PPC44x_TLB_PAGEID 220 + sync 221 + b ppc44x_map_done 222 + 223 + #ifdef CONFIG_PPC_47x 224 + 225 + /* 1:1 mapping for 47x */ 226 + 227 + setup_map_47x: 228 + 229 + /* 230 + * Load the kernel pid (0) to PID and also to MMUCR[TID]. 231 + * Also set the MSR IS->MMUCR STS 232 + */ 233 + li r3, 0 234 + mtspr SPRN_PID, r3 /* Set PID */ 235 + mfmsr r4 /* Get MSR */ 236 + andi. r4, r4, MSR_IS@l /* TS=1? 
*/ 237 + beq 1f /* If not, leave STS=0 */ 238 + oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */ 239 + 1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */ 240 + sync 241 + 242 + /* Find the entry we are running from */ 243 + bl 2f 244 + 2: mflr r23 245 + tlbsx r23, 0, r23 246 + tlbre r24, r23, 0 /* TLB Word 0 */ 247 + tlbre r25, r23, 1 /* TLB Word 1 */ 248 + tlbre r26, r23, 2 /* TLB Word 2 */ 249 + 250 + 251 + /* 252 + * Invalidates all the tlb entries by writing to 256 RPNs(r4) 253 + * of 4k page size in all 4 ways (0-3 in r3). 254 + * This would invalidate the entire UTLB including the one we are 255 + * running from. However the shadow TLB entries would help us 256 + * to continue the execution, until we flush them (rfi/isync). 257 + */ 258 + addis r3, 0, 0x8000 /* specify the way */ 259 + addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */ 260 + addi r5, 0, 0 261 + b clear_utlb_entry 262 + 263 + /* Align the loop to speed things up. from head_44x.S */ 264 + .align 6 265 + 266 + clear_utlb_entry: 267 + 268 + tlbwe r4, r3, 0 269 + tlbwe r5, r3, 1 270 + tlbwe r5, r3, 2 271 + addis r3, r3, 0x2000 /* Increment the way */ 272 + cmpwi r3, 0 273 + bne clear_utlb_entry 274 + addis r3, 0, 0x8000 275 + addis r4, r4, 0x100 /* Increment the EPN */ 276 + cmpwi r4, 0 277 + bne clear_utlb_entry 278 + 279 + /* Create the entries in the other address space */ 280 + mfmsr r5 281 + rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */ 282 + xori r7, r7, 1 /* r7 = !TS */ 283 + 284 + insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */ 285 + 286 + /* 287 + * write out the TLB entries for the tmp mapping 288 + * Use way '0' so that we could easily invalidate it later. 
289 + */ 290 + lis r3, 0x8000 /* Way '0' */ 291 + 292 + tlbwe r24, r3, 0 293 + tlbwe r25, r3, 1 294 + tlbwe r26, r3, 2 295 + 296 + /* Update the msr to the new TS */ 297 + insrwi r5, r7, 1, 26 298 + 299 + bl 1f 300 + 1: mflr r6 301 + addi r6, r6, (2f-1b) 302 + 303 + mtspr SPRN_SRR0, r6 304 + mtspr SPRN_SRR1, r5 305 + rfi 306 + 307 + /* 308 + * Now we are in the tmp address space. 309 + * Create a 1:1 mapping for 0-2GiB in the original TS. 310 + */ 311 + 2: 312 + li r3, 0 313 + li r4, 0 /* TLB Word 0 */ 314 + li r5, 0 /* TLB Word 1 */ 315 + li r6, 0 316 + ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */ 317 + 318 + li r8, 0 /* PageIndex */ 319 + 320 + xori r7, r7, 1 /* revert back to original TS */ 321 + 322 + write_utlb: 323 + rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */ 324 + /* ERPN = 0 as we don't use memory above 2G */ 325 + 326 + mr r4, r5 /* EPN = RPN */ 327 + ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M) 328 + insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */ 329 + 330 + tlbwe r4, r3, 0 /* Write out the entries */ 331 + tlbwe r5, r3, 1 332 + tlbwe r6, r3, 2 333 + addi r8, r8, 1 334 + cmpwi r8, 8 /* Have we completed ? */ 335 + bne write_utlb 336 + 337 + /* make sure we complete the TLB write up */ 338 + isync 339 + 340 + /* 341 + * Prepare to jump to the 1:1 mapping. 342 + * 1) Extract page size of the tmp mapping 343 + * DSIZ = TLB_Word0[22:27] 344 + * 2) Calculate the physical address of the address 345 + * to jump to. 
346 + */ 347 + rlwinm r10, r24, 0, 22, 27 348 + 349 + cmpwi r10, PPC47x_TLB0_4K 350 + bne 0f 351 + li r10, 0x1000 /* r10 = 4k */ 352 + bl 1f 353 + 354 + 0: 355 + /* Defaults to 256M */ 356 + lis r10, 0x1000 357 + 358 + bl 1f 359 + 1: mflr r4 360 + addi r4, r4, (2f-1b) /* virtual address of 2f */ 361 + 362 + subi r11, r10, 1 /* offsetmask = Pagesize - 1 */ 363 + not r10, r11 /* Pagemask = ~(offsetmask) */ 364 + 365 + and r5, r25, r10 /* Physical page */ 366 + and r6, r4, r11 /* offset within the current page */ 367 + 368 + or r5, r5, r6 /* Physical address for 2f */ 369 + 370 + /* Switch the TS in MSR to the original one */ 371 + mfmsr r8 372 + insrwi r8, r7, 1, 26 373 + 374 + mtspr SPRN_SRR1, r8 375 + mtspr SPRN_SRR0, r5 376 + rfi 377 + 378 + 2: 379 + /* Invalidate the tmp mapping */ 380 + lis r3, 0x8000 /* Way '0' */ 381 + 382 + clrrwi r24, r24, 12 /* Clear the valid bit */ 383 + tlbwe r24, r3, 0 384 + tlbwe r25, r3, 1 385 + tlbwe r26, r3, 2 386 + 387 + /* Make sure we complete the TLB write and flush the shadow TLB */ 388 + isync 389 + 390 + #endif 391 + 392 + ppc44x_map_done: 393 + 394 + 395 + /* Restore the parameters */ 396 + mr r3, r29 397 + mr r4, r30 398 + mr r5, r31 399 + 400 + li r0, 0 401 + #else 402 + li r0, 0 403 + 404 + /* 405 + * Set Machine Status Register to a known status, 406 + * switch the MMU off and jump to 1: in a single step. 407 + */ 408 + 409 + mr r8, r0 410 + ori r8, r8, MSR_RI|MSR_ME 411 + mtspr SPRN_SRR1, r8 412 + addi r8, r4, 1f - relocate_new_kernel 413 + mtspr SPRN_SRR0, r8 414 + sync 415 + rfi 416 + 417 + 1: 418 + #endif 419 + /* from this point address translation is turned off */ 420 + /* and interrupts are disabled */ 421 + 422 + /* set a new stack at the bottom of our page... 
*/ 423 + /* (not really needed now) */ 424 + addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */ 425 + stw r0, 0(r1) 426 + 427 + /* Do the copies */ 428 + li r6, 0 /* checksum */ 429 + mr r0, r3 430 + b 1f 431 + 432 + 0: /* top, read another word for the indirection page */ 433 + lwzu r0, 4(r3) 434 + 435 + 1: 436 + /* is it a destination page? (r8) */ 437 + rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ 438 + beq 2f 439 + 440 + rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ 441 + b 0b 442 + 443 + 2: /* is it an indirection page? (r3) */ 444 + rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ 445 + beq 2f 446 + 447 + rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ 448 + subi r3, r3, 4 449 + b 0b 450 + 451 + 2: /* are we done? */ 452 + rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ 453 + beq 2f 454 + b 3f 455 + 456 + 2: /* is it a source page? (r9) */ 457 + rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ 458 + beq 0b 459 + 460 + rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ 461 + 462 + li r7, PAGE_SIZE / 4 463 + mtctr r7 464 + subi r9, r9, 4 465 + subi r8, r8, 4 466 + 9: 467 + lwzu r0, 4(r9) /* do the copy */ 468 + xor r6, r6, r0 469 + stwu r0, 4(r8) 470 + dcbst 0, r8 471 + sync 472 + icbi 0, r8 473 + bdnz 9b 474 + 475 + addi r9, r9, 4 476 + addi r8, r8, 4 477 + b 0b 478 + 479 + 3: 480 + 481 + /* To be certain of avoiding problems with self-modifying code 482 + * execute a serializing instruction here. 483 + */ 484 + isync 485 + sync 486 + 487 + mfspr r3, SPRN_PIR /* current core we are running on */ 488 + mr r4, r5 /* load physical address of chunk called */ 489 + 490 + /* jump to the entry point, usually the setup routine */ 491 + mtlr r5 492 + blrl 493 + 494 + 1: b 1b 495 + 496 + relocate_new_kernel_end: 497 + 498 + .globl relocate_new_kernel_size 499 + relocate_new_kernel_size: 500 + .long relocate_new_kernel_end - relocate_new_kernel
-491
arch/powerpc/kernel/misc_32.S
··· 6 6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 7 7 * and Paul Mackerras. 8 8 * 9 - * kexec bits: 10 - * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> 11 - * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz 12 - * PPC44x port. Copyright (C) 2011, IBM Corporation 13 - * Author: Suzuki Poulose <suzuki@in.ibm.com> 14 9 */ 15 10 16 11 #include <linux/sys.h> ··· 20 25 #include <asm/thread_info.h> 21 26 #include <asm/asm-offsets.h> 22 27 #include <asm/processor.h> 23 - #include <asm/kexec.h> 24 28 #include <asm/bug.h> 25 29 #include <asm/ptrace.h> 26 30 #include <asm/export.h> ··· 488 494 */ 489 495 _GLOBAL(__main) 490 496 blr 491 - 492 - #ifdef CONFIG_KEXEC_CORE 493 - /* 494 - * Must be relocatable PIC code callable as a C function. 495 - */ 496 - .globl relocate_new_kernel 497 - relocate_new_kernel: 498 - /* r3 = page_list */ 499 - /* r4 = reboot_code_buffer */ 500 - /* r5 = start_address */ 501 - 502 - #ifdef CONFIG_FSL_BOOKE 503 - 504 - mr r29, r3 505 - mr r30, r4 506 - mr r31, r5 507 - 508 - #define ENTRY_MAPPING_KEXEC_SETUP 509 - #include "fsl_booke_entry_mapping.S" 510 - #undef ENTRY_MAPPING_KEXEC_SETUP 511 - 512 - mr r3, r29 513 - mr r4, r30 514 - mr r5, r31 515 - 516 - li r0, 0 517 - #elif defined(CONFIG_44x) 518 - 519 - /* Save our parameters */ 520 - mr r29, r3 521 - mr r30, r4 522 - mr r31, r5 523 - 524 - #ifdef CONFIG_PPC_47x 525 - /* Check for 47x cores */ 526 - mfspr r3,SPRN_PVR 527 - srwi r3,r3,16 528 - cmplwi cr0,r3,PVR_476FPE@h 529 - beq setup_map_47x 530 - cmplwi cr0,r3,PVR_476@h 531 - beq setup_map_47x 532 - cmplwi cr0,r3,PVR_476_ISS@h 533 - beq setup_map_47x 534 - #endif /* CONFIG_PPC_47x */ 535 - 536 - /* 537 - * Code for setting up 1:1 mapping for PPC440x for KEXEC 538 - * 539 - * We cannot switch off the MMU on PPC44x. 540 - * So we: 541 - * 1) Invalidate all the mappings except the one we are running from. 542 - * 2) Create a tmp mapping for our code in the other address space(TS) and 543 - * jump to it. 
Invalidate the entry we started in. 544 - * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS. 545 - * 4) Jump to the 1:1 mapping in original TS. 546 - * 5) Invalidate the tmp mapping. 547 - * 548 - * - Based on the kexec support code for FSL BookE 549 - * 550 - */ 551 - 552 - /* 553 - * Load the PID with kernel PID (0). 554 - * Also load our MSR_IS and TID to MMUCR for TLB search. 555 - */ 556 - li r3, 0 557 - mtspr SPRN_PID, r3 558 - mfmsr r4 559 - andi. r4,r4,MSR_IS@l 560 - beq wmmucr 561 - oris r3,r3,PPC44x_MMUCR_STS@h 562 - wmmucr: 563 - mtspr SPRN_MMUCR,r3 564 - sync 565 - 566 - /* 567 - * Invalidate all the TLB entries except the current entry 568 - * where we are running from 569 - */ 570 - bl 0f /* Find our address */ 571 - 0: mflr r5 /* Make it accessible */ 572 - tlbsx r23,0,r5 /* Find entry we are in */ 573 - li r4,0 /* Start at TLB entry 0 */ 574 - li r3,0 /* Set PAGEID inval value */ 575 - 1: cmpw r23,r4 /* Is this our entry? */ 576 - beq skip /* If so, skip the inval */ 577 - tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */ 578 - skip: 579 - addi r4,r4,1 /* Increment */ 580 - cmpwi r4,64 /* Are we done? */ 581 - bne 1b /* If not, repeat */ 582 - isync 583 - 584 - /* Create a temp mapping and jump to it */ 585 - andi. 
r6, r23, 1 /* Find the index to use */ 586 - addi r24, r6, 1 /* r24 will contain 1 or 2 */ 587 - 588 - mfmsr r9 /* get the MSR */ 589 - rlwinm r5, r9, 27, 31, 31 /* Extract the MSR[IS] */ 590 - xori r7, r5, 1 /* Use the other address space */ 591 - 592 - /* Read the current mapping entries */ 593 - tlbre r3, r23, PPC44x_TLB_PAGEID 594 - tlbre r4, r23, PPC44x_TLB_XLAT 595 - tlbre r5, r23, PPC44x_TLB_ATTRIB 596 - 597 - /* Save our current XLAT entry */ 598 - mr r25, r4 599 - 600 - /* Extract the TLB PageSize */ 601 - li r10, 1 /* r10 will hold PageSize */ 602 - rlwinm r11, r3, 0, 24, 27 /* bits 24-27 */ 603 - 604 - /* XXX: As of now we use 256M, 4K pages */ 605 - cmpwi r11, PPC44x_TLB_256M 606 - bne tlb_4k 607 - rotlwi r10, r10, 28 /* r10 = 256M */ 608 - b write_out 609 - tlb_4k: 610 - cmpwi r11, PPC44x_TLB_4K 611 - bne default 612 - rotlwi r10, r10, 12 /* r10 = 4K */ 613 - b write_out 614 - default: 615 - rotlwi r10, r10, 10 /* r10 = 1K */ 616 - 617 - write_out: 618 - /* 619 - * Write out the tmp 1:1 mapping for this code in other address space 620 - * Fixup EPN = RPN , TS=other address space 621 - */ 622 - insrwi r3, r7, 1, 23 /* Bit 23 is TS for PAGEID field */ 623 - 624 - /* Write out the tmp mapping entries */ 625 - tlbwe r3, r24, PPC44x_TLB_PAGEID 626 - tlbwe r4, r24, PPC44x_TLB_XLAT 627 - tlbwe r5, r24, PPC44x_TLB_ATTRIB 628 - 629 - subi r11, r10, 1 /* PageOffset Mask = PageSize - 1 */ 630 - not r10, r11 /* Mask for PageNum */ 631 - 632 - /* Switch to other address space in MSR */ 633 - insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ 634 - 635 - bl 1f 636 - 1: mflr r8 637 - addi r8, r8, (2f-1b) /* Find the target offset */ 638 - 639 - /* Jump to the tmp mapping */ 640 - mtspr SPRN_SRR0, r8 641 - mtspr SPRN_SRR1, r9 642 - rfi 643 - 644 - 2: 645 - /* Invalidate the entry we were executing from */ 646 - li r3, 0 647 - tlbwe r3, r23, PPC44x_TLB_PAGEID 648 - 649 - /* attribute fields. 
rwx for SUPERVISOR mode */ 650 - li r5, 0 651 - ori r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G) 652 - 653 - /* Create 1:1 mapping in 256M pages */ 654 - xori r7, r7, 1 /* Revert back to Original TS */ 655 - 656 - li r8, 0 /* PageNumber */ 657 - li r6, 3 /* TLB Index, start at 3 */ 658 - 659 - next_tlb: 660 - rotlwi r3, r8, 28 /* Create EPN (bits 0-3) */ 661 - mr r4, r3 /* RPN = EPN */ 662 - ori r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */ 663 - insrwi r3, r7, 1, 23 /* Set TS from r7 */ 664 - 665 - tlbwe r3, r6, PPC44x_TLB_PAGEID /* PageID field : EPN, V, SIZE */ 666 - tlbwe r4, r6, PPC44x_TLB_XLAT /* Address translation : RPN */ 667 - tlbwe r5, r6, PPC44x_TLB_ATTRIB /* Attributes */ 668 - 669 - addi r8, r8, 1 /* Increment PN */ 670 - addi r6, r6, 1 /* Increment TLB Index */ 671 - cmpwi r8, 8 /* Are we done ? */ 672 - bne next_tlb 673 - isync 674 - 675 - /* Jump to the new mapping 1:1 */ 676 - li r9,0 677 - insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */ 678 - 679 - bl 1f 680 - 1: mflr r8 681 - and r8, r8, r11 /* Get our offset within page */ 682 - addi r8, r8, (2f-1b) 683 - 684 - and r5, r25, r10 /* Get our target PageNum */ 685 - or r8, r8, r5 /* Target jump address */ 686 - 687 - mtspr SPRN_SRR0, r8 688 - mtspr SPRN_SRR1, r9 689 - rfi 690 - 2: 691 - /* Invalidate the tmp entry we used */ 692 - li r3, 0 693 - tlbwe r3, r24, PPC44x_TLB_PAGEID 694 - sync 695 - b ppc44x_map_done 696 - 697 - #ifdef CONFIG_PPC_47x 698 - 699 - /* 1:1 mapping for 47x */ 700 - 701 - setup_map_47x: 702 - 703 - /* 704 - * Load the kernel pid (0) to PID and also to MMUCR[TID]. 705 - * Also set the MSR IS->MMUCR STS 706 - */ 707 - li r3, 0 708 - mtspr SPRN_PID, r3 /* Set PID */ 709 - mfmsr r4 /* Get MSR */ 710 - andi. r4, r4, MSR_IS@l /* TS=1? 
*/ 711 - beq 1f /* If not, leave STS=0 */ 712 - oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */ 713 - 1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */ 714 - sync 715 - 716 - /* Find the entry we are running from */ 717 - bl 2f 718 - 2: mflr r23 719 - tlbsx r23, 0, r23 720 - tlbre r24, r23, 0 /* TLB Word 0 */ 721 - tlbre r25, r23, 1 /* TLB Word 1 */ 722 - tlbre r26, r23, 2 /* TLB Word 2 */ 723 - 724 - 725 - /* 726 - * Invalidates all the tlb entries by writing to 256 RPNs(r4) 727 - * of 4k page size in all 4 ways (0-3 in r3). 728 - * This would invalidate the entire UTLB including the one we are 729 - * running from. However the shadow TLB entries would help us 730 - * to continue the execution, until we flush them (rfi/isync). 731 - */ 732 - addis r3, 0, 0x8000 /* specify the way */ 733 - addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */ 734 - addi r5, 0, 0 735 - b clear_utlb_entry 736 - 737 - /* Align the loop to speed things up. from head_44x.S */ 738 - .align 6 739 - 740 - clear_utlb_entry: 741 - 742 - tlbwe r4, r3, 0 743 - tlbwe r5, r3, 1 744 - tlbwe r5, r3, 2 745 - addis r3, r3, 0x2000 /* Increment the way */ 746 - cmpwi r3, 0 747 - bne clear_utlb_entry 748 - addis r3, 0, 0x8000 749 - addis r4, r4, 0x100 /* Increment the EPN */ 750 - cmpwi r4, 0 751 - bne clear_utlb_entry 752 - 753 - /* Create the entries in the other address space */ 754 - mfmsr r5 755 - rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */ 756 - xori r7, r7, 1 /* r7 = !TS */ 757 - 758 - insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */ 759 - 760 - /* 761 - * write out the TLB entries for the tmp mapping 762 - * Use way '0' so that we could easily invalidate it later. 
763 - */ 764 - lis r3, 0x8000 /* Way '0' */ 765 - 766 - tlbwe r24, r3, 0 767 - tlbwe r25, r3, 1 768 - tlbwe r26, r3, 2 769 - 770 - /* Update the msr to the new TS */ 771 - insrwi r5, r7, 1, 26 772 - 773 - bl 1f 774 - 1: mflr r6 775 - addi r6, r6, (2f-1b) 776 - 777 - mtspr SPRN_SRR0, r6 778 - mtspr SPRN_SRR1, r5 779 - rfi 780 - 781 - /* 782 - * Now we are in the tmp address space. 783 - * Create a 1:1 mapping for 0-2GiB in the original TS. 784 - */ 785 - 2: 786 - li r3, 0 787 - li r4, 0 /* TLB Word 0 */ 788 - li r5, 0 /* TLB Word 1 */ 789 - li r6, 0 790 - ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */ 791 - 792 - li r8, 0 /* PageIndex */ 793 - 794 - xori r7, r7, 1 /* revert back to original TS */ 795 - 796 - write_utlb: 797 - rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */ 798 - /* ERPN = 0 as we don't use memory above 2G */ 799 - 800 - mr r4, r5 /* EPN = RPN */ 801 - ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M) 802 - insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */ 803 - 804 - tlbwe r4, r3, 0 /* Write out the entries */ 805 - tlbwe r5, r3, 1 806 - tlbwe r6, r3, 2 807 - addi r8, r8, 1 808 - cmpwi r8, 8 /* Have we completed ? */ 809 - bne write_utlb 810 - 811 - /* make sure we complete the TLB write up */ 812 - isync 813 - 814 - /* 815 - * Prepare to jump to the 1:1 mapping. 816 - * 1) Extract page size of the tmp mapping 817 - * DSIZ = TLB_Word0[22:27] 818 - * 2) Calculate the physical address of the address 819 - * to jump to. 
820 - */ 821 - rlwinm r10, r24, 0, 22, 27 822 - 823 - cmpwi r10, PPC47x_TLB0_4K 824 - bne 0f 825 - li r10, 0x1000 /* r10 = 4k */ 826 - bl 1f 827 - 828 - 0: 829 - /* Defaults to 256M */ 830 - lis r10, 0x1000 831 - 832 - bl 1f 833 - 1: mflr r4 834 - addi r4, r4, (2f-1b) /* virtual address of 2f */ 835 - 836 - subi r11, r10, 1 /* offsetmask = Pagesize - 1 */ 837 - not r10, r11 /* Pagemask = ~(offsetmask) */ 838 - 839 - and r5, r25, r10 /* Physical page */ 840 - and r6, r4, r11 /* offset within the current page */ 841 - 842 - or r5, r5, r6 /* Physical address for 2f */ 843 - 844 - /* Switch the TS in MSR to the original one */ 845 - mfmsr r8 846 - insrwi r8, r7, 1, 26 847 - 848 - mtspr SPRN_SRR1, r8 849 - mtspr SPRN_SRR0, r5 850 - rfi 851 - 852 - 2: 853 - /* Invalidate the tmp mapping */ 854 - lis r3, 0x8000 /* Way '0' */ 855 - 856 - clrrwi r24, r24, 12 /* Clear the valid bit */ 857 - tlbwe r24, r3, 0 858 - tlbwe r25, r3, 1 859 - tlbwe r26, r3, 2 860 - 861 - /* Make sure we complete the TLB write and flush the shadow TLB */ 862 - isync 863 - 864 - #endif 865 - 866 - ppc44x_map_done: 867 - 868 - 869 - /* Restore the parameters */ 870 - mr r3, r29 871 - mr r4, r30 872 - mr r5, r31 873 - 874 - li r0, 0 875 - #else 876 - li r0, 0 877 - 878 - /* 879 - * Set Machine Status Register to a known status, 880 - * switch the MMU off and jump to 1: in a single step. 881 - */ 882 - 883 - mr r8, r0 884 - ori r8, r8, MSR_RI|MSR_ME 885 - mtspr SPRN_SRR1, r8 886 - addi r8, r4, 1f - relocate_new_kernel 887 - mtspr SPRN_SRR0, r8 888 - sync 889 - rfi 890 - 891 - 1: 892 - #endif 893 - /* from this point address translation is turned off */ 894 - /* and interrupts are disabled */ 895 - 896 - /* set a new stack at the bottom of our page... 
*/ 897 - /* (not really needed now) */ 898 - addi r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */ 899 - stw r0, 0(r1) 900 - 901 - /* Do the copies */ 902 - li r6, 0 /* checksum */ 903 - mr r0, r3 904 - b 1f 905 - 906 - 0: /* top, read another word for the indirection page */ 907 - lwzu r0, 4(r3) 908 - 909 - 1: 910 - /* is it a destination page? (r8) */ 911 - rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ 912 - beq 2f 913 - 914 - rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ 915 - b 0b 916 - 917 - 2: /* is it an indirection page? (r3) */ 918 - rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ 919 - beq 2f 920 - 921 - rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ 922 - subi r3, r3, 4 923 - b 0b 924 - 925 - 2: /* are we done? */ 926 - rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ 927 - beq 2f 928 - b 3f 929 - 930 - 2: /* is it a source page? (r9) */ 931 - rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ 932 - beq 0b 933 - 934 - rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ 935 - 936 - li r7, PAGE_SIZE / 4 937 - mtctr r7 938 - subi r9, r9, 4 939 - subi r8, r8, 4 940 - 9: 941 - lwzu r0, 4(r9) /* do the copy */ 942 - xor r6, r6, r0 943 - stwu r0, 4(r8) 944 - dcbst 0, r8 945 - sync 946 - icbi 0, r8 947 - bdnz 9b 948 - 949 - addi r9, r9, 4 950 - addi r8, r8, 4 951 - b 0b 952 - 953 - 3: 954 - 955 - /* To be certain of avoiding problems with self-modifying code 956 - * execute a serializing instruction here. 957 - */ 958 - isync 959 - sync 960 - 961 - mfspr r3, SPRN_PIR /* current core we are running on */ 962 - mr r4, r5 /* load physical address of chunk called */ 963 - 964 - /* jump to the entry point, usually the setup routine */ 965 - mtlr r5 966 - blrl 967 - 968 - 1: b 1b 969 - 970 - relocate_new_kernel_end: 971 - 972 - .globl relocate_new_kernel_size 973 - relocate_new_kernel_size: 974 - .long relocate_new_kernel_end - relocate_new_kernel 975 - #endif