Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] ppc32: Support 36-bit physical addressing on e500

To add support for 36-bit physical addressing on e500 the following changes
have been made. The changes are generalized to support any physical address
size larger than 32-bits:

* Allow FSL Book-E parts to use a 64-bit PTE; it has 44 bits of pfn and 20 bits
of flags.

* Introduced a new CPU feature (CPU_FTR_BIG_PHYS) to allow runtime updating of
the hardware register (SPRN_MAS7) that holds the upper 32 bits of the
physical address that will be written into the TLB. This is useful since
not all e500 cores support 36-bit physical addressing.

* Currently have a pass-through implementation of fixup_bigphys_addr.

* Moved _PAGE_DIRTY in the 64-bit PTE case to free room for three additional
storage attributes that may exist in future FSL Book-E cores and updated
fault handler to copy these bits into the hardware TLBs.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Kumar Gala; committed by Linus Torvalds.
f50b153b b464fce5

+122 -62
+11 -5
arch/ppc/Kconfig
··· 98 98 99 99 config PTE_64BIT 100 100 bool 101 - depends on 44x 102 - default y 101 + depends on 44x || E500 102 + default y if 44x 103 + default y if E500 && PHYS_64BIT 103 104 104 105 config PHYS_64BIT 105 - bool 106 - depends on 44x 107 - default y 106 + bool 'Large physical address support' if E500 107 + depends on 44x || E500 108 + default y if 44x 109 + ---help--- 110 + This option enables kernel support for larger than 32-bit physical 111 + addresses. This features is not be available on all e500 cores. 112 + 113 + If in doubt, say N here. 108 114 109 115 config ALTIVEC 110 116 bool "AltiVec Support"
+73 -40
arch/ppc/kernel/head_fsl_booke.S
··· 347 347 mtspr SPRN_SRR1,r3 348 348 rfi /* change context and jump to start_kernel */ 349 349 350 + /* Macros to hide the PTE size differences 351 + * 352 + * FIND_PTE -- walks the page tables given EA & pgdir pointer 353 + * r10 -- EA of fault 354 + * r11 -- PGDIR pointer 355 + * r12 -- free 356 + * label 2: is the bailout case 357 + * 358 + * if we find the pte (fall through): 359 + * r11 is low pte word 360 + * r12 is pointer to the pte 361 + */ 362 + #ifdef CONFIG_PTE_64BIT 363 + #define PTE_FLAGS_OFFSET 4 364 + #define FIND_PTE \ 365 + rlwinm r12, r10, 13, 19, 29; /* Compute pgdir/pmd offset */ \ 366 + lwzx r11, r12, r11; /* Get pgd/pmd entry */ \ 367 + rlwinm. r12, r11, 0, 0, 20; /* Extract pt base address */ \ 368 + beq 2f; /* Bail if no table */ \ 369 + rlwimi r12, r10, 23, 20, 28; /* Compute pte address */ \ 370 + lwz r11, 4(r12); /* Get pte entry */ 371 + #else 372 + #define PTE_FLAGS_OFFSET 0 373 + #define FIND_PTE \ 374 + rlwimi r11, r10, 12, 20, 29; /* Create L1 (pgdir/pmd) address */ \ 375 + lwz r11, 0(r11); /* Get L1 entry */ \ 376 + rlwinm. r12, r11, 0, 0, 19; /* Extract L2 (pte) base address */ \ 377 + beq 2f; /* Bail if no table */ \ 378 + rlwimi r12, r10, 22, 20, 29; /* Compute PTE address */ \ 379 + lwz r11, 0(r12); /* Get Linux PTE */ 380 + #endif 381 + 350 382 /* 351 383 * Interrupt vector entry code 352 384 * ··· 437 405 mfspr r11,SPRN_SPRG3 438 406 lwz r11,PGDIR(r11) 439 407 4: 440 - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ 441 - lwz r11, 0(r11) /* Get L1 entry */ 442 - rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ 443 - beq 2f /* Bail if no table */ 444 - 445 - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ 446 - lwz r11, 0(r12) /* Get Linux PTE */ 408 + FIND_PTE 447 409 448 410 /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ 449 411 andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE ··· 446 420 447 421 /* Update 'changed'. 
*/ 448 422 ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE 449 - stw r11, 0(r12) /* Update Linux page table */ 423 + stw r11, PTE_FLAGS_OFFSET(r12) /* Update Linux page table */ 450 424 451 425 /* MAS2 not updated as the entry does exist in the tlb, this 452 426 fault taken to detect state transition (eg: COW -> DIRTY) 453 427 */ 454 - lis r12, MAS3_RPN@h 455 - ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l 456 - and r11, r11, r12 428 + andi. r11, r11, _PAGE_HWEXEC 457 429 rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ 458 430 ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ 459 431 ··· 463 439 /* find the TLB index that caused the fault. It has to be here. */ 464 440 tlbsx 0, r10 465 441 466 - mtspr SPRN_MAS3,r11 442 + /* only update the perm bits, assume the RPN is fine */ 443 + mfspr r12, SPRN_MAS3 444 + rlwimi r12, r11, 0, 20, 31 445 + mtspr SPRN_MAS3,r12 467 446 tlbwe 468 447 469 448 /* Done...restore registers and get out of here. */ ··· 557 530 lwz r11,PGDIR(r11) 558 531 559 532 4: 560 - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ 561 - lwz r11, 0(r11) /* Get L1 entry */ 562 - rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ 563 - beq 2f /* Bail if no table */ 533 + FIND_PTE 534 + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 535 + beq 2f /* Bail if not present */ 564 536 565 - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ 566 - lwz r11, 0(r12) /* Get Linux PTE */ 567 - andi. r13, r11, _PAGE_PRESENT 568 - beq 2f 569 - 537 + #ifdef CONFIG_PTE_64BIT 538 + lwz r13, 0(r12) 539 + #endif 570 540 ori r11, r11, _PAGE_ACCESSED 571 - stw r11, 0(r12) 541 + stw r11, PTE_FLAGS_OFFSET(r12) 572 542 573 543 /* Jump to common tlb load */ 574 544 b finish_tlb_load ··· 618 594 lwz r11,PGDIR(r11) 619 595 620 596 4: 621 - rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ 622 - lwz r11, 0(r11) /* Get L1 entry */ 623 - rlwinm. 
r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ 624 - beq 2f /* Bail if no table */ 597 + FIND_PTE 598 + andi. r13, r11, _PAGE_PRESENT /* Is the page present? */ 599 + beq 2f /* Bail if not present */ 625 600 626 - rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ 627 - lwz r11, 0(r12) /* Get Linux PTE */ 628 - andi. r13, r11, _PAGE_PRESENT 629 - beq 2f 630 - 601 + #ifdef CONFIG_PTE_64BIT 602 + lwz r13, 0(r12) 603 + #endif 631 604 ori r11, r11, _PAGE_ACCESSED 632 - stw r11, 0(r12) 605 + stw r11, PTE_FLAGS_OFFSET(r12) 633 606 634 607 /* Jump to common TLB load point */ 635 608 b finish_tlb_load ··· 711 690 */ 712 691 713 692 mfspr r12, SPRN_MAS2 693 + #ifdef CONFIG_PTE_64BIT 694 + rlwimi r12, r11, 26, 24, 31 /* extract ...WIMGE from pte */ 695 + #else 714 696 rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ 697 + #endif 715 698 mtspr SPRN_MAS2, r12 716 699 717 700 bge 5, 1f 718 701 719 - /* addr > TASK_SIZE */ 720 - li r10, (MAS3_UX | MAS3_UW | MAS3_UR) 721 - andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) 722 - andi. r12, r11, _PAGE_USER /* Test for _PAGE_USER */ 723 - iseleq r12, 0, r10 724 - and r10, r12, r13 725 - srwi r12, r10, 1 702 + /* is user addr */ 703 + andi. r12, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) 704 + andi. 
r10, r11, _PAGE_USER /* Test for _PAGE_USER */ 705 + srwi r10, r12, 1 726 706 or r12, r12, r10 /* Copy user perms into supervisor */ 707 + iseleq r12, 0, r12 727 708 b 2f 728 709 729 - /* addr <= TASK_SIZE */ 710 + /* is kernel addr */ 730 711 1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ 731 712 ori r12, r12, (MAS3_SX | MAS3_SR) 732 713 714 + #ifdef CONFIG_PTE_64BIT 715 + 2: rlwimi r12, r13, 24, 0, 7 /* grab RPN[32:39] */ 716 + rlwimi r12, r11, 24, 8, 19 /* grab RPN[40:51] */ 717 + mtspr SPRN_MAS3, r12 718 + BEGIN_FTR_SECTION 719 + srwi r10, r13, 8 /* grab RPN[8:31] */ 720 + mtspr SPRN_MAS7, r10 721 + END_FTR_SECTION_IFSET(CPU_FTR_BIG_PHYS) 722 + #else 733 723 2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ 734 724 mtspr SPRN_MAS3, r11 725 + #endif 735 726 tlbwe 736 727 737 728 /* Done...restore registers and get out of here. */
+8
arch/ppc/syslib/ppc85xx_common.c
··· 31 31 } 32 32 33 33 EXPORT_SYMBOL(get_ccsrbar); 34 + 35 + /* For now this is a pass through */ 36 + phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size) 37 + { 38 + return addr; 39 + }; 40 + EXPORT_SYMBOL(fixup_bigphys_addr); 41 +
+2 -1
include/asm-ppc/cputable.h
··· 86 86 #define CPU_FTR_DUAL_PLL_750FX 0x00004000 87 87 #define CPU_FTR_NO_DPM 0x00008000 88 88 #define CPU_FTR_HAS_HIGH_BATS 0x00010000 89 - #define CPU_FTR_NEED_COHERENT 0x00020000 89 + #define CPU_FTR_NEED_COHERENT 0x00020000 90 90 #define CPU_FTR_NO_BTIC 0x00040000 91 + #define CPU_FTR_BIG_PHYS 0x00080000 91 92 92 93 #ifdef __ASSEMBLY__ 93 94
+27 -16
include/asm-ppc/pgtable.h
··· 225 225 /* ERPN in a PTE never gets cleared, ignore it */ 226 226 #define _PTE_NONE_MASK 0xffffffff00000000ULL 227 227 228 - #elif defined(CONFIG_E500) 229 - 228 + #elif defined(CONFIG_FSL_BOOKE) 230 229 /* 231 230 MMU Assist Register 3: 232 231 ··· 239 240 entries use the top 29 bits. 240 241 */ 241 242 242 - /* Definitions for e500 core */ 243 - #define _PAGE_PRESENT 0x001 /* S: PTE contains a translation */ 244 - #define _PAGE_USER 0x002 /* S: User page (maps to UR) */ 245 - #define _PAGE_FILE 0x002 /* S: when !present: nonlinear file mapping */ 246 - #define _PAGE_ACCESSED 0x004 /* S: Page referenced */ 247 - #define _PAGE_HWWRITE 0x008 /* H: Dirty & RW, set in exception */ 248 - #define _PAGE_RW 0x010 /* S: Write permission */ 249 - #define _PAGE_HWEXEC 0x020 /* H: UX permission */ 243 + /* Definitions for FSL Book-E Cores */ 244 + #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ 245 + #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ 246 + #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ 247 + #define _PAGE_ACCESSED 0x00004 /* S: Page referenced */ 248 + #define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */ 249 + #define _PAGE_RW 0x00010 /* S: Write permission */ 250 + #define _PAGE_HWEXEC 0x00020 /* H: UX permission */ 250 251 251 - #define _PAGE_ENDIAN 0x040 /* H: E bit */ 252 - #define _PAGE_GUARDED 0x080 /* H: G bit */ 253 - #define _PAGE_COHERENT 0x100 /* H: M bit */ 254 - #define _PAGE_NO_CACHE 0x200 /* H: I bit */ 255 - #define _PAGE_WRITETHRU 0x400 /* H: W bit */ 256 - #define _PAGE_DIRTY 0x800 /* S: Page dirty */ 252 + #define _PAGE_ENDIAN 0x00040 /* H: E bit */ 253 + #define _PAGE_GUARDED 0x00080 /* H: G bit */ 254 + #define _PAGE_COHERENT 0x00100 /* H: M bit */ 255 + #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ 256 + #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ 257 + 258 + #ifdef CONFIG_PTE_64BIT 259 + #define _PAGE_DIRTY 0x08000 /* S: Page dirty */ 260 + 261 + /* ERPN in a PTE 
never gets cleared, ignore it */ 262 + #define _PTE_NONE_MASK 0xffffffffffff0000ULL 263 + #else 264 + #define _PAGE_DIRTY 0x00800 /* S: Page dirty */ 265 + #endif 257 266 258 267 #define _PMD_PRESENT 0 259 268 #define _PMD_PRESENT_MASK (PAGE_MASK) ··· 440 433 441 434 /* in some case we want to additionaly adjust where the pfn is in the pte to 442 435 * allow room for more flags */ 436 + #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) 437 + #define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8) 438 + #else 443 439 #define PFN_SHIFT_OFFSET (PAGE_SHIFT) 440 + #endif 444 441 445 442 #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) 446 443 #define pte_page(x) pfn_to_page(pte_pfn(x))
+1
include/asm-ppc/reg_booke.h
··· 172 172 #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ 173 173 #define SPRN_MAS5 0x275 /* MMU Assist Register 5 */ 174 174 #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ 175 + #define SPRN_MAS7 0x3b0 /* MMU Assist Register 7 */ 175 176 #define SPRN_PID1 0x279 /* Process ID Register 1 */ 176 177 #define SPRN_PID2 0x27A /* Process ID Register 2 */ 177 178 #define SPRN_TLB0CFG 0x2B0 /* TLB 0 Config Register */