Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARC: MMUv4 preps/2 - Reshuffle PTE bits

With the previous commit freeing up PTE bits, reassign them so as to:

- Match the bits to their H/w counterparts where possible
(e.g. MMUv2 GLOBAL/PRESENT; this avoids a shift in create_tlb())
- Avoid holes in _PAGE_xxx definitions

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>

+11 -25
+8 -14
arch/arc/include/asm/pgtable.h
··· 60 60 #define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ 61 61 #define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ 62 62 #define _PAGE_READ (1<<5) /* Page has user read perm (H) */ 63 - #define _PAGE_GLOBAL (1<<9) /* Page is global (H) */ 64 - #define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */ 65 - #define _PAGE_FILE (1<<10) /* page cache/ swap (S) */ 66 - #define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */ 63 + #define _PAGE_MODIFIED (1<<6) /* Page modified (dirty) (S) */ 64 + #define _PAGE_FILE (1<<7) /* page cache/ swap (S) */ 65 + #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ 66 + #define _PAGE_PRESENT (1<<10) /* TLB entry is valid (H) */ 67 67 68 68 #else /* MMU v3 onwards */ 69 69 70 - /* PD1 */ 71 70 #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ 72 71 #define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ 73 72 #define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ 74 73 #define _PAGE_READ (1<<3) /* Page has user read perm (H) */ 75 - #define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */ 76 - 77 - /* PD0 */ 74 + #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ 75 + #define _PAGE_MODIFIED (1<<5) /* Page modified (dirty) (S) */ 76 + #define _PAGE_FILE (1<<6) /* page cache/ swap (S) */ 78 77 #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ 79 78 #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ 80 - #define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr 79 + #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr 81 80 usable for shared TLB entries (H) */ 82 - 83 - #define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */ 84 - #define _PAGE_FILE (1<<12) /* page cache/ swap (S) */ 85 - 86 - #define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */ 87 81 #endif 88 82 89 83 /* vmalloc permissions */
+3 -8
arch/arc/mm/tlb.c
··· 342 342 { 343 343 unsigned long flags; 344 344 unsigned int idx, asid_or_sasid, rwx; 345 - unsigned long pd0_flags; 346 345 347 346 /* 348 347 * create_tlb() assumes that current->mm == vma->mm, since ··· 380 381 /* update this PTE credentials */ 381 382 pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED); 382 383 383 - /* Create HW TLB entry Flags (in PD0) from PTE Flags */ 384 - #if (CONFIG_ARC_MMU_VER <= 2) 385 - pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1); 386 - #else 387 - pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0)); 388 - #endif 384 + /* Create HW TLB(PD0,PD1) from PTE */ 389 385 390 386 /* ASID for this task */ 391 387 asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff; 392 388 393 - write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid); 389 + write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid | 390 + (pte_val(*ptep) & PTE_BITS_IN_PD0)); 394 391 395 392 /* 396 393 * ARC MMU provides fully orthogonal access bits for K/U mode,
-3
arch/arc/mm/tlbex.S
··· 229 229 sr r3, [ARC_REG_TLBPD1] ; these go in PD1 230 230 231 231 and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb 232 - #if (CONFIG_ARC_MMU_VER <= 2) /* Neednot be done with v3 onwards */ 233 - lsr r2, r2 ; shift PTE flags to match layout in PD0 234 - #endif 235 232 236 233 lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid 237 234