Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "powerpc/64s: Remove PROT_SAO support"

This reverts commit 5c9fa16e8abd342ce04dc830c1ebb2a03abf6c05.

Since PROT_SAO can still be useful for certain classes of software,
reintroduce it. Concerns about guest migration for LPARs using SAO
will be addressed next.

Signed-off-by: Shawn Anastasio <shawn@anastas.io>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200821185558.35561-2-shawn@anastas.io

Authored by Shawn Anastasio; committed by Michael Ellerman.
12564485 64ef8f2c

+90 -17
+3 -5
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 20 20 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) 21 21 #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) 22 22 #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ 23 - 24 - #define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ 25 - /* No bits set is normal cacheable memory */ 26 - /* 0x00010 unused, is SAO bit on radix POWER9 */ 23 + #define _PAGE_SAO 0x00010 /* Strong access order */ 27 24 #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ 28 25 #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ 29 - 30 26 #define _PAGE_DIRTY 0x00080 /* C: page changed */ 31 27 #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ 32 28 /* ··· 823 827 return radix__set_pte_at(mm, addr, ptep, pte, percpu); 824 828 return hash__set_pte_at(mm, addr, ptep, pte, percpu); 825 829 } 830 + 831 + #define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) 826 832 827 833 #define pgprot_noncached pgprot_noncached 828 834 static inline pgprot_t pgprot_noncached(pgprot_t prot)
+5 -5
arch/powerpc/include/asm/cputable.h
··· 196 196 #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) 197 197 #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) 198 198 #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) 199 - // Free LONG_ASM_CONST(0x0000000008000000) 199 + #define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) 200 200 #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) 201 201 #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) 202 202 #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) ··· 441 441 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 442 442 CPU_FTR_COHERENT_ICACHE | \ 443 443 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 444 - CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ 444 + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 445 445 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 446 446 CPU_FTR_CFAR | CPU_FTR_HVMODE | \ 447 447 CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) ··· 450 450 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 451 451 CPU_FTR_COHERENT_ICACHE | \ 452 452 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 453 - CPU_FTR_DSCR | \ 453 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 454 454 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 455 455 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 456 456 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ ··· 461 461 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 462 462 CPU_FTR_COHERENT_ICACHE | \ 463 463 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 464 - CPU_FTR_DSCR | \ 464 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 465 465 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 466 466 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 467 467 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ ··· 479 479 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 480 480 CPU_FTR_COHERENT_ICACHE | \ 481 481 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 482 - CPU_FTR_DSCR | \ 482 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 483 483 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 484 484 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 485 485 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+22 -4
arch/powerpc/include/asm/mman.h
··· 13 13 #include <linux/pkeys.h> 14 14 #include <asm/cpu_has_feature.h> 15 15 16 - #ifdef CONFIG_PPC_MEM_KEYS 17 16 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, 18 17 unsigned long pkey) 19 18 { 20 - return pkey_to_vmflag_bits(pkey); 19 + #ifdef CONFIG_PPC_MEM_KEYS 20 + return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); 21 + #else 22 + return ((prot & PROT_SAO) ? VM_SAO : 0); 23 + #endif 21 24 } 22 25 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) 23 26 24 27 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) 25 28 { 26 - return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); 29 + #ifdef CONFIG_PPC_MEM_KEYS 30 + return (vm_flags & VM_SAO) ? 31 + __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : 32 + __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); 33 + #else 34 + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); 35 + #endif 27 36 } 28 37 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) 29 - #endif 38 + 39 + static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) 40 + { 41 + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) 42 + return false; 43 + if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO)) 44 + return false; 45 + return true; 46 + } 47 + #define arch_validate_prot arch_validate_prot 30 48 31 49 #endif /* CONFIG_PPC64 */ 32 50 #endif /* _ASM_POWERPC_MMAN_H */
+2
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 82 82 */ 83 83 #include <asm/nohash/pte-book3e.h> 84 84 85 + #define _PAGE_SAO 0 86 + 85 87 #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) 86 88 87 89 /*
+1 -1
arch/powerpc/include/uapi/asm/mman.h
··· 11 11 #include <asm-generic/mman-common.h> 12 12 13 13 14 - #define PROT_SAO 0x10 /* Unsupported since v5.9 */ 14 + #define PROT_SAO 0x10 /* Strong Access Ordering */ 15 15 16 16 #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 17 17 #define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+1 -1
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 653 653 {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, 654 654 {"processor-utilization-of-resources-register", feat_enable_purr, 0}, 655 655 {"no-execute", feat_enable, 0}, 656 - /* strong-access-ordering is unused */ 656 + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, 657 657 {"cache-inhibited-large-page", feat_enable_large_ci, 0}, 658 658 {"coprocessor-icswx", feat_enable, 0}, 659 659 {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
+2
arch/powerpc/mm/book3s64/hash_utils.c
··· 232 232 rflags |= HPTE_R_I; 233 233 else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) 234 234 rflags |= (HPTE_R_I | HPTE_R_G); 235 + else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) 236 + rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); 235 237 else 236 238 /* 237 239 * Add memory coherence if cache inhibited is not set
+2
include/linux/mm.h
··· 321 321 322 322 #if defined(CONFIG_X86) 323 323 # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ 324 + #elif defined(CONFIG_PPC) 325 + # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 324 326 #elif defined(CONFIG_PARISC) 325 327 # define VM_GROWSUP VM_ARCH_1 326 328 #elif defined(CONFIG_IA64)
+2
include/trace/events/mmflags.h
··· 114 114 115 115 #if defined(CONFIG_X86) 116 116 #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } 117 + #elif defined(CONFIG_PPC) 118 + #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } 117 119 #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) 118 120 #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } 119 121 #elif !defined(CONFIG_MMU)
+4
mm/ksm.c
··· 2453 2453 if (vma_is_dax(vma)) 2454 2454 return 0; 2455 2455 2456 + #ifdef VM_SAO 2457 + if (*vm_flags & VM_SAO) 2458 + return 0; 2459 + #endif 2456 2460 #ifdef VM_SPARC_ADI 2457 2461 if (*vm_flags & VM_SPARC_ADI) 2458 2462 return 0;
+1
tools/testing/selftests/powerpc/mm/.gitignore
··· 2 2 hugetlb_vs_thp_test 3 3 subpage_prot 4 4 tempfile 5 + prot_sao 5 6 segv_errors 6 7 wild_bctr 7 8 large_vm_fork_separation
+3 -1
tools/testing/selftests/powerpc/mm/Makefile
··· 2 2 noarg: 3 3 $(MAKE) -C ../ 4 4 5 - TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ 5 + TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ 6 6 large_vm_fork_separation bad_accesses pkey_exec_prot \ 7 7 pkey_siginfo stack_expansion_signal stack_expansion_ldst 8 8 ··· 13 13 include ../../lib.mk 14 14 15 15 $(TEST_GEN_PROGS): ../harness.c ../utils.c 16 + 17 + $(OUTPUT)/prot_sao: ../utils.c 16 18 17 19 $(OUTPUT)/wild_bctr: CFLAGS += -m64 18 20 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+42
tools/testing/selftests/powerpc/mm/prot_sao.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright 2016, Michael Ellerman, IBM Corp. 4 + */ 5 + 6 + #include <stdio.h> 7 + #include <stdlib.h> 8 + #include <string.h> 9 + #include <sys/mman.h> 10 + 11 + #include <asm/cputable.h> 12 + 13 + #include "utils.h" 14 + 15 + #define SIZE (64 * 1024) 16 + 17 + int test_prot_sao(void) 18 + { 19 + char *p; 20 + 21 + /* 2.06 or later should support SAO */ 22 + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); 23 + 24 + /* 25 + * Ensure we can ask for PROT_SAO. 26 + * We can't really verify that it does the right thing, but at least we 27 + * confirm the kernel will accept it. 28 + */ 29 + p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, 30 + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 31 + FAIL_IF(p == MAP_FAILED); 32 + 33 + /* Write to the mapping, to at least cause a fault */ 34 + memset(p, 0xaa, SIZE); 35 + 36 + return 0; 37 + } 38 + 39 + int main(void) 40 + { 41 + return test_harness(test_prot_sao, "prot-sao"); 42 + }