Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARCv2: MMUv4: TLB programming Model changes

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

+114 -5
+5
arch/arc/Kconfig
··· 267 267 prompt "MMU Version" 268 268 default ARC_MMU_V3 if ARC_CPU_770 269 269 default ARC_MMU_V2 if ARC_CPU_750D 270 + default ARC_MMU_V4 if ARC_CPU_HS 270 271 271 272 config ARC_MMU_V1 272 273 bool "MMU v1" ··· 287 286 Introduced with ARC700 4.10: New Features 288 287 Variable Page size (1k-16k), var JTLB size 128 x (2 or 4) 289 288 Shared Address Spaces (SASID) 289 + 290 + config ARC_MMU_V4 291 + bool "MMU v4" 292 + depends on ISA_ARCV2 290 293 291 294 endchoice 292 295
+1 -1
arch/arc/include/asm/arcregs.h
··· 326 326 */ 327 327 328 328 struct cpuinfo_arc_mmu { 329 - unsigned int ver:4, pg_sz_k:8, pad:8, u_dtlb:6, u_itlb:6; 329 + unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6; 330 330 unsigned int num_tlb:16, sets:12, ways:4; 331 331 }; 332 332
+23 -1
arch/arc/include/asm/mmu.h
··· 15 15 #define CONFIG_ARC_MMU_VER 2 16 16 #elif defined(CONFIG_ARC_MMU_V3) 17 17 #define CONFIG_ARC_MMU_VER 3 18 + #elif defined(CONFIG_ARC_MMU_V4) 19 + #define CONFIG_ARC_MMU_VER 4 18 20 #endif 19 21 20 22 /* MMU Management regs */ 21 23 #define ARC_REG_MMU_BCR 0x06f 24 + #if (CONFIG_ARC_MMU_VER < 4) 22 25 #define ARC_REG_TLBPD0 0x405 23 26 #define ARC_REG_TLBPD1 0x406 24 27 #define ARC_REG_TLBINDEX 0x407 25 28 #define ARC_REG_TLBCOMMAND 0x408 26 29 #define ARC_REG_PID 0x409 27 30 #define ARC_REG_SCRATCH_DATA0 0x418 31 + #else 32 + #define ARC_REG_TLBPD0 0x460 33 + #define ARC_REG_TLBPD1 0x461 34 + #define ARC_REG_TLBINDEX 0x464 35 + #define ARC_REG_TLBCOMMAND 0x465 36 + #define ARC_REG_PID 0x468 37 + #define ARC_REG_SCRATCH_DATA0 0x46c 38 + #endif 28 39 29 40 /* Bits in MMU PID register */ 30 - #define MMU_ENABLE (1 << 31) /* Enable MMU for process */ 41 + #define __TLB_ENABLE (1 << 31) 42 + #define __PROG_ENABLE (1 << 30) 43 + #define MMU_ENABLE (__TLB_ENABLE | __PROG_ENABLE) 31 44 32 45 /* Error code if probe fails */ 33 46 #define TLB_LKUP_ERR 0x80000000 34 47 48 + #if (CONFIG_ARC_MMU_VER < 4) 35 49 #define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001) 50 + #else 51 + #define TLB_DUP_ERR (TLB_LKUP_ERR | 0x40000000) 52 + #endif 36 53 37 54 /* TLB Commands */ 38 55 #define TLBWrite 0x1 ··· 60 43 #if (CONFIG_ARC_MMU_VER >= 2) 61 44 #define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */ 62 45 #define TLBIVUTLB 0x6 /* explicitly inv uTLBs */ 46 + #endif 47 + 48 + #if (CONFIG_ARC_MMU_VER >= 4) 49 + #define TLBInsertEntry 0x7 50 + #define TLBDeleteEntry 0x8 63 51 #endif 64 52 65 53 #ifndef __ASSEMBLY__
+10
arch/arc/include/asm/pgtable.h
··· 72 72 #define _PAGE_READ (1<<3) /* Page has user read perm (H) */ 73 73 #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ 74 74 #define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */ 75 + 76 + #if (CONFIG_ARC_MMU_VER >= 4) 77 + #define _PAGE_WTHRU (1<<7) /* Page cache mode write-thru (H) */ 78 + #endif 79 + 75 80 #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ 76 81 #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ 82 + 83 + #if (CONFIG_ARC_MMU_VER >= 4) 84 + #define _PAGE_SZ (1<<10) /* Page Size indicator (H) */ 85 + #endif 86 + 77 87 #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr 78 88 usable for shared TLB entries (H) */ 79 89 #endif
+51 -3
arch/arc/mm/tlb.c
··· 113 113 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); 114 114 } 115 115 116 + #if (CONFIG_ARC_MMU_VER < 4) 117 + 116 118 static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid) 117 119 { 118 120 unsigned int idx; ··· 211 209 */ 212 210 write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); 213 211 } 212 + 213 + #else /* CONFIG_ARC_MMU_VER >= 4) */ 214 + 215 + static void utlb_invalidate(void) 216 + { 217 + /* No need since uTLB is always in sync with JTLB */ 218 + } 219 + 220 + static void tlb_entry_erase(unsigned int vaddr_n_asid) 221 + { 222 + write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT); 223 + write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry); 224 + } 225 + 226 + static void tlb_entry_insert(unsigned int pd0, unsigned int pd1) 227 + { 228 + write_aux_reg(ARC_REG_TLBPD0, pd0); 229 + write_aux_reg(ARC_REG_TLBPD1, pd1); 230 + write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry); 231 + } 232 + 233 + #endif 214 234 215 235 /* 216 236 * Un-conditionally (without lookup) erase the entire MMU contents ··· 606 582 #endif 607 583 } *mmu3; 608 584 585 + struct bcr_mmu_4 { 586 + #ifdef CONFIG_CPU_BIG_ENDIAN 587 + unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1, 588 + n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3; 589 + #else 590 + /* DTLB ITLB JES JE JA */ 591 + unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2, 592 + pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8; 593 + #endif 594 + } *mmu4; 595 + 609 596 tmp = read_aux_reg(ARC_REG_MMU_BCR); 610 597 mmu->ver = (tmp >> 24); 611 598 ··· 627 592 mmu->ways = 1 << mmu2->ways; 628 593 mmu->u_dtlb = mmu2->u_dtlb; 629 594 mmu->u_itlb = mmu2->u_itlb; 630 - } else { 595 + } else if (mmu->ver == 3) { 631 596 mmu3 = (struct bcr_mmu_3 *)&tmp; 632 597 mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1); 633 598 mmu->sets = 1 << mmu3->sets; 634 599 mmu->ways = 1 << mmu3->ways; 635 600 mmu->u_dtlb = mmu3->u_dtlb; 636 601 mmu->u_itlb = mmu3->u_itlb; 602 + } else { 603 + mmu4 = (struct bcr_mmu_4 *)&tmp; 604 + 
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1); 605 + mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11); 606 + mmu->sets = 64 << mmu4->n_entry; 607 + mmu->ways = mmu4->n_ways * 2; 608 + mmu->u_dtlb = mmu4->u_dtlb * 4; 609 + mmu->u_itlb = mmu4->u_itlb * 4; 637 610 } 638 611 639 612 mmu->num_tlb = mmu->sets * mmu->ways; ··· 651 608 { 652 609 int n = 0; 653 610 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; 611 + char super_pg[64] = ""; 612 + 613 + if (p_mmu->s_pg_sz_m) 614 + scnprintf(super_pg, 64, "%dM Super Page%s, ", 615 + p_mmu->s_pg_sz_m, " (not used)"); 654 616 655 617 n += scnprintf(buf + n, len - n, 656 - "MMU [v%x]\t: %dk PAGE, JTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n", 657 - p_mmu->ver, p_mmu->pg_sz_k, 618 + "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n", 619 + p_mmu->ver, p_mmu->pg_sz_k, super_pg, 658 620 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, 659 621 p_mmu->u_dtlb, p_mmu->u_itlb, 660 622 IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
+24
arch/arc/mm/tlbex.S
··· 44 44 #include <asm/processor.h> 45 45 #include <asm/tlb-mmu1.h> 46 46 47 + #ifdef CONFIG_ISA_ARCOMPACT 47 48 ;----------------------------------------------------------------- 48 49 ; ARC700 Exception Handling doesn't auto-switch stack and it only provides 49 50 ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0" ··· 121 120 ld_s r0, [r0] 122 121 #endif 123 122 .endm 123 + 124 + #else /* ARCv2 */ 125 + 126 + .macro TLBMISS_FREEUP_REGS 127 + PUSH r0 128 + PUSH r1 129 + PUSH r2 130 + PUSH r3 131 + .endm 132 + 133 + .macro TLBMISS_RESTORE_REGS 134 + POP r3 135 + POP r2 136 + POP r1 137 + POP r0 138 + .endm 139 + 140 + #endif 124 141 125 142 ;============================================================================ 126 143 ; Troubleshooting Stuff ··· 258 239 ; Commit the TLB entry into MMU 259 240 260 241 .macro COMMIT_ENTRY_TO_MMU 242 + #if (CONFIG_ARC_MMU_VER < 4) 261 243 262 244 /* Get free TLB slot: Set = computed from vaddr, way = random */ 263 245 sr TLBGetIndex, [ARC_REG_TLBCOMMAND] ··· 268 248 sr TLBWriteNI, [ARC_REG_TLBCOMMAND] 269 249 #else 270 250 sr TLBWrite, [ARC_REG_TLBCOMMAND] 251 + #endif 252 + 253 + #else 254 + sr TLBInsertEntry, [ARC_REG_TLBCOMMAND] 271 255 #endif 272 256 .endm 273 257