arm64: mm: Introduce PTE_WRITE

We have the following means for encoding writable or dirty ptes:

                         PTE_DIRTY   PTE_RDONLY
!pte_dirty && !pte_write     0           1
!pte_dirty &&  pte_write     0           1
 pte_dirty && !pte_write     1           1
 pte_dirty &&  pte_write     1           0

So we can't distinguish between writable clean ptes and read only
ptes. This can cause problems with ptes being incorrectly flagged as
read only when they are writable but not dirty.

This patch introduces a new software bit PTE_WRITE which allows us to
correctly identify writable ptes. PTE_RDONLY is now only clear for
valid ptes where a page is both writable and dirty.

Signed-off-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Steve Capper; committed by Catalin Marinas (commit hashes: c2c93e5b, 44b6dfc5)

+25 -23
arch/arm64/include/asm/pgtable.h
··· 28 #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ 29 #define PTE_DIRTY (_AT(pteval_t, 1) << 55) 30 #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 31 - /* bit 57 for PMD_SECT_SPLITTING */ 32 #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ 33 34 /* ··· 67 68 #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) 69 70 - #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) 71 - #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 72 - #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) 73 - #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) 74 - #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) 75 - #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) 76 - #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) 77 - #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) 78 - #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) 79 80 #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) 81 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) ··· 83 #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) 84 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) 85 86 - #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_RDONLY | PTE_PXN | PTE_UXN) 87 - #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 88 - #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 89 - #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) 90 - #define 
__PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) 91 - #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) 92 - #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) 93 94 #endif /* __ASSEMBLY__ */ 95 ··· 140 #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) 141 #define pte_young(pte) (pte_val(pte) & PTE_AF) 142 #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) 143 - #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) 144 #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) 145 146 #define pte_valid_user(pte) \ ··· 148 149 static inline pte_t pte_wrprotect(pte_t pte) 150 { 151 - pte_val(pte) |= PTE_RDONLY; 152 return pte; 153 } 154 155 static inline pte_t pte_mkwrite(pte_t pte) 156 { 157 - pte_val(pte) &= ~PTE_RDONLY; 158 return pte; 159 } 160 ··· 201 if (pte_valid_user(pte)) { 202 if (pte_exec(pte)) 203 __sync_icache_dcache(pte, addr); 204 - if (!pte_dirty(pte)) 205 - pte = pte_wrprotect(pte); 206 } 207 208 set_pte(ptep, pte); ··· 378 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 379 { 380 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 381 - PTE_PROT_NONE | PTE_VALID; 382 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 383 return pte; 384 }
··· 28 #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ 29 #define PTE_DIRTY (_AT(pteval_t, 1) << 55) 30 #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) 31 + #define PTE_WRITE (_AT(pteval_t, 1) << 57) 32 #define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */ 33 34 /* ··· 67 68 #define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) 69 70 + #define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN) 71 + #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) 72 + #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) 73 + #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 74 + #define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) 75 + #define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 76 + #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) 77 + #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) 78 + #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE) 79 80 #define PAGE_HYP _MOD_PROT(pgprot_default, PTE_HYP) 81 #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) ··· 83 #define PAGE_S2 __pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) 84 #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN) 85 86 + #define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN) 87 + #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) 88 + #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) 89 + #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 90 + #define __PAGE_COPY_EXEC 
__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 91 + #define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) 92 + #define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) 93 94 #endif /* __ASSEMBLY__ */ 95 ··· 140 #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) 141 #define pte_young(pte) (pte_val(pte) & PTE_AF) 142 #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) 143 + #define pte_write(pte) (pte_val(pte) & PTE_WRITE) 144 #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) 145 146 #define pte_valid_user(pte) \ ··· 148 149 static inline pte_t pte_wrprotect(pte_t pte) 150 { 151 + pte_val(pte) &= ~PTE_WRITE; 152 return pte; 153 } 154 155 static inline pte_t pte_mkwrite(pte_t pte) 156 { 157 + pte_val(pte) |= PTE_WRITE; 158 return pte; 159 } 160 ··· 201 if (pte_valid_user(pte)) { 202 if (pte_exec(pte)) 203 __sync_icache_dcache(pte, addr); 204 + if (pte_dirty(pte) && pte_write(pte)) 205 + pte_val(pte) &= ~PTE_RDONLY; 206 + else 207 + pte_val(pte) |= PTE_RDONLY; 208 } 209 210 set_pte(ptep, pte); ··· 376 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 377 { 378 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 379 + PTE_PROT_NONE | PTE_VALID | PTE_WRITE; 380 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 381 return pte; 382 }