Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Support execute-only on all powerpc

Introduce PAGE_EXECONLY_X macro which provides exec-only rights.
The _X may be seen as redundant with the EXECONLY but it helps
keep consistency, all macros having the EXEC right have _X.

And put it next to PAGE_NONE as PAGE_EXECONLY_X is
somehow PAGE_NONE + EXEC just like all other SOMETHING_X are
just SOMETHING + EXEC.

On book3s/64 PAGE_EXECONLY becomes PAGE_READONLY_X.

On book3s/64, as PAGE_EXECONLY is only valid for Radix, add
VM_READ flag in vm_get_page_prot() for non-Radix.

And update access_error() so that a non exec fault on a VM_EXEC only
mapping is always invalid, even when the underlying layer doesn't
always generate a fault for that.

For 8xx, set PAGE_EXECONLY_X as _PAGE_NA | _PAGE_EXEC.
For others, set it as just _PAGE_EXEC.

With that change, 8xx, e500 and 44x fully honor execute-only
protection.

On 40x, this is a partial implementation of execute-only. The
implementation won't be complete because once a TLB has been loaded
via the Instruction TLB miss handler, it will be possible to read
the page. But at least it can't be read unless it is executed first.

On 603 MMU, TLB misses are handled by SW and there are separate
DTLB and ITLB. Execute-only is therefore now supported by not loading
DTLB when read access is not permitted.

On hash (604) MMU it is more tricky because hash table is common to
load/store and execute. Nevertheless it is still possible to check
whether _PAGE_READ is set before loading hash table for a load/store
access. At least it can't be read unless it is executed first.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/4283ea9cbef9ff2fbee468904800e1962bc8fc18.1695659959.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
b1fba034 163a72fa

+18 -17
+1 -1
arch/powerpc/include/asm/book3s/32/pgtable.h
··· 425 425 { 426 426 /* 427 427 * A read-only access is controlled by _PAGE_READ bit. 428 - * We have _PAGE_READ set for WRITE and EXECUTE 428 + * We have _PAGE_READ set for WRITE 429 429 */ 430 430 if (!pte_present(pte) || !pte_read(pte)) 431 431 return false;
+1 -3
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 18 18 #define _PAGE_WRITE 0x00002 /* write access allowed */ 19 19 #define _PAGE_READ 0x00004 /* read access allowed */ 20 20 #define _PAGE_NA _PAGE_PRIVILEGED 21 + #define _PAGE_NAX _PAGE_EXEC 21 22 #define _PAGE_RO _PAGE_READ 22 23 #define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC) 23 24 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) ··· 141 140 #define _PAGE_BASE (_PAGE_BASE_NC) 142 141 143 142 #include <asm/pgtable-masks.h> 144 - 145 - /* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */ 146 - #define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC) 147 143 148 144 /* Permission masks used for kernel mappings */ 149 145 #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+1
arch/powerpc/include/asm/nohash/32/pte-8xx.h
··· 48 48 49 49 #define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */ 50 50 51 + #define _PAGE_NAX (_PAGE_NA | _PAGE_EXEC) 51 52 #define _PAGE_ROX (_PAGE_RO | _PAGE_EXEC) 52 53 #define _PAGE_RW 0 53 54 #define _PAGE_RWX _PAGE_EXEC
+1 -1
arch/powerpc/include/asm/nohash/pgtable.h
··· 202 202 { 203 203 /* 204 204 * A read-only access is controlled by _PAGE_READ bit. 205 - * We have _PAGE_READ set for WRITE and EXECUTE 205 + * We have _PAGE_READ set for WRITE 206 206 */ 207 207 if (!pte_present(pte) || !pte_read(pte)) 208 208 return false;
+1
arch/powerpc/include/asm/nohash/pte-e500.h
··· 57 57 #define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) 58 58 59 59 #define _PAGE_NA 0 60 + #define _PAGE_NAX _PAGE_BAP_UX 60 61 #define _PAGE_RO _PAGE_READ 61 62 #define _PAGE_ROX (_PAGE_READ | _PAGE_BAP_UX) 62 63 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
+2
arch/powerpc/include/asm/pgtable-masks.h
··· 4 4 5 5 #ifndef _PAGE_NA 6 6 #define _PAGE_NA 0 7 + #define _PAGE_NAX _PAGE_EXEC 7 8 #define _PAGE_RO _PAGE_READ 8 9 #define _PAGE_ROX (_PAGE_READ | _PAGE_EXEC) 9 10 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) ··· 21 20 22 21 /* Permission masks used to generate the __P and __S table */ 23 22 #define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA) 23 + #define PAGE_EXECONLY_X __pgprot(_PAGE_BASE | _PAGE_NAX) 24 24 #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW) 25 25 #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RWX) 26 26 #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
+4 -6
arch/powerpc/mm/book3s64/pgtable.c
··· 635 635 unsigned long prot; 636 636 637 637 /* Radix supports execute-only, but protection_map maps X -> RX */ 638 - if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) { 639 - prot = pgprot_val(PAGE_EXECONLY); 640 - } else { 641 - prot = pgprot_val(protection_map[vm_flags & 642 - (VM_ACCESS_FLAGS | VM_SHARED)]); 643 - } 638 + if (!radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) 639 + vm_flags |= VM_READ; 640 + 641 + prot = pgprot_val(protection_map[vm_flags & (VM_ACCESS_FLAGS | VM_SHARED)]); 644 642 645 643 if (vm_flags & VM_SAO) 646 644 prot |= _PAGE_SAO;
+5 -4
arch/powerpc/mm/fault.c
··· 266 266 } 267 267 268 268 /* 269 - * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as 270 - * defined in protection_map[]. Read faults can only be caused by 271 - * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix. 269 + * VM_READ, VM_WRITE and VM_EXEC may imply read permissions, as 270 + * defined in protection_map[]. In that case Read faults can only be 271 + * caused by a PROT_NONE mapping. However a non exec access on a 272 + * VM_EXEC only mapping is invalid anyway, so report it as such. 272 273 */ 273 274 if (unlikely(!vma_is_accessible(vma))) 274 275 return true; 275 276 276 - if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))) 277 + if ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC) 277 278 return true; 278 279 279 280 /*
+2 -2
arch/powerpc/mm/pgtable.c
··· 492 492 [VM_READ] = PAGE_READONLY, 493 493 [VM_WRITE] = PAGE_COPY, 494 494 [VM_WRITE | VM_READ] = PAGE_COPY, 495 - [VM_EXEC] = PAGE_READONLY_X, 495 + [VM_EXEC] = PAGE_EXECONLY_X, 496 496 [VM_EXEC | VM_READ] = PAGE_READONLY_X, 497 497 [VM_EXEC | VM_WRITE] = PAGE_COPY_X, 498 498 [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X, ··· 500 500 [VM_SHARED | VM_READ] = PAGE_READONLY, 501 501 [VM_SHARED | VM_WRITE] = PAGE_SHARED, 502 502 [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED, 503 - [VM_SHARED | VM_EXEC] = PAGE_READONLY_X, 503 + [VM_SHARED | VM_EXEC] = PAGE_EXECONLY_X, 504 504 [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X, 505 505 [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X, 506 506 [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X