Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'fixes' into next

Bring in our fixes branch for this cycle which avoids some small
conflicts with upcoming commits.

+181 -65
+1 -1
Documentation/features/debug/debug-vm-pgtable/arch-support.txt
··· 22 22 | nios2: | TODO | 23 23 | openrisc: | TODO | 24 24 | parisc: | TODO | 25 - | powerpc: | ok | 25 + | powerpc: | TODO | 26 26 | riscv: | ok | 27 27 | s390: | ok | 28 28 | sh: | TODO |
+3 -1
Documentation/powerpc/syscall64-abi.rst
··· 49 49 Register preservation rules match the ELF ABI calling sequence with the 50 50 following differences: 51 51 52 - =========== ============= ======================================== 53 52 --- For the sc instruction, differences with the ELF ABI --- 53 + =========== ============= ======================================== 54 54 r0 Volatile (System call number.) 55 55 r3 Volatile (Parameter 1, and return value.) 56 56 r4-r8 Volatile (Parameters 2-6.) 57 57 cr0 Volatile (cr0.SO is the return error condition.) 58 58 cr1, cr5-7 Nonvolatile 59 59 lr Nonvolatile 60 + =========== ============= ======================================== 60 61 61 62 --- For the scv 0 instruction, differences with the ELF ABI --- 63 + =========== ============= ======================================== 62 64 r0 Volatile (System call number.) 63 65 r3 Volatile (Parameter 1, and return value.) 64 66 r4-r8 Volatile (Parameters 2-6.)
+12 -1
arch/powerpc/Kconfig
··· 116 116 # 117 117 select ARCH_32BIT_OFF_T if PPC32 118 118 select ARCH_HAS_DEBUG_VIRTUAL 119 - select ARCH_HAS_DEBUG_VM_PGTABLE 120 119 select ARCH_HAS_DEVMEM_IS_ALLOWED 121 120 select ARCH_HAS_ELF_RANDOMIZE 122 121 select ARCH_HAS_FORTIFY_SOURCE ··· 856 857 This option adds support for system call to allow user programs 857 858 to set access permissions (read/write, readonly, or no access) 858 859 on the 4k subpages of each 64k page. 860 + 861 + If unsure, say N here. 862 + 863 + config PPC_PROT_SAO_LPAR 864 + bool "Support PROT_SAO mappings in LPARs" 865 + depends on PPC_BOOK3S_64 866 + help 867 + This option adds support for PROT_SAO mappings from userspace 868 + inside LPARs on supported CPUs. 869 + 870 + This may cause issues when performing guest migration from 871 + a CPU that supports SAO to one that does not. 859 872 860 873 If unsure, say N here. 861 874
+5 -5
arch/powerpc/include/asm/book3s/64/mmu.h
··· 239 239 240 240 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base, 241 241 phys_addr_t first_memblock_size); 242 - extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, 243 - phys_addr_t first_memblock_size); 244 242 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, 245 243 phys_addr_t first_memblock_size) 246 244 { 247 - if (early_radix_enabled()) 248 - return radix__setup_initial_memory_limit(first_memblock_base, 249 - first_memblock_size); 245 + /* 246 + * Hash has more strict restrictions. At this point we don't 247 + * know which translations we will pick. Hence go with hash 248 + * restrictions. 249 + */ 250 250 return hash__setup_initial_memory_limit(first_memblock_base, 251 251 first_memblock_size); 252 252 }
+3 -5
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 20 20 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) 21 21 #define _PAGE_RWX (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) 22 22 #define _PAGE_PRIVILEGED 0x00008 /* kernel access only */ 23 - 24 - #define _PAGE_CACHE_CTL 0x00030 /* Bits for the folowing cache modes */ 25 - /* No bits set is normal cacheable memory */ 26 - /* 0x00010 unused, is SAO bit on radix POWER9 */ 23 + #define _PAGE_SAO 0x00010 /* Strong access order */ 27 24 #define _PAGE_NON_IDEMPOTENT 0x00020 /* non idempotent memory */ 28 25 #define _PAGE_TOLERANT 0x00030 /* tolerant memory, cache inhibited */ 29 - 30 26 #define _PAGE_DIRTY 0x00080 /* C: page changed */ 31 27 #define _PAGE_ACCESSED 0x00100 /* R: page referenced */ 32 28 /* ··· 823 827 return radix__set_pte_at(mm, addr, ptep, pte, percpu); 824 828 return hash__set_pte_at(mm, addr, ptep, pte, percpu); 825 829 } 830 + 831 + #define _PAGE_CACHE_CTL (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT) 826 832 827 833 #define pgprot_noncached pgprot_noncached 828 834 static inline pgprot_t pgprot_noncached(pgprot_t prot)
+5 -5
arch/powerpc/include/asm/cputable.h
··· 196 196 #define CPU_FTR_SPURR LONG_ASM_CONST(0x0000000001000000) 197 197 #define CPU_FTR_DSCR LONG_ASM_CONST(0x0000000002000000) 198 198 #define CPU_FTR_VSX LONG_ASM_CONST(0x0000000004000000) 199 - // Free LONG_ASM_CONST(0x0000000008000000) 199 + #define CPU_FTR_SAO LONG_ASM_CONST(0x0000000008000000) 200 200 #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0000000010000000) 201 201 #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0000000020000000) 202 202 #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0000000040000000) ··· 441 441 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 442 442 CPU_FTR_COHERENT_ICACHE | \ 443 443 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 444 - CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \ 444 + CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ 445 445 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 446 446 CPU_FTR_CFAR | CPU_FTR_HVMODE | \ 447 447 CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX ) ··· 450 450 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 451 451 CPU_FTR_COHERENT_ICACHE | \ 452 452 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 453 - CPU_FTR_DSCR | \ 453 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 454 454 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 455 455 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 456 456 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ ··· 461 461 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 462 462 CPU_FTR_COHERENT_ICACHE | \ 463 463 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 464 - CPU_FTR_DSCR | \ 464 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 465 465 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 466 466 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 467 467 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \ ··· 479 479 CPU_FTR_MMCRA | CPU_FTR_SMT | \ 480 480 CPU_FTR_COHERENT_ICACHE | \ 481 481 CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ 482 - CPU_FTR_DSCR | \ 482 + CPU_FTR_DSCR | CPU_FTR_SAO | \ 483 483 CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 484 484 CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \ 485 485 CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+27 -4
arch/powerpc/include/asm/mman.h
··· 13 13 #include <linux/pkeys.h> 14 14 #include <asm/cpu_has_feature.h> 15 15 16 - #ifdef CONFIG_PPC_MEM_KEYS 17 16 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, 18 17 unsigned long pkey) 19 18 { 20 - return pkey_to_vmflag_bits(pkey); 19 + #ifdef CONFIG_PPC_MEM_KEYS 20 + return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey)); 21 + #else 22 + return ((prot & PROT_SAO) ? VM_SAO : 0); 23 + #endif 21 24 } 22 25 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) 23 26 24 27 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) 25 28 { 26 - return __pgprot(vmflag_to_pte_pkey_bits(vm_flags)); 29 + #ifdef CONFIG_PPC_MEM_KEYS 30 + return (vm_flags & VM_SAO) ? 31 + __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) : 32 + __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags)); 33 + #else 34 + return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0); 35 + #endif 27 36 } 28 37 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) 29 - #endif 38 + 39 + static inline bool arch_validate_prot(unsigned long prot, unsigned long addr) 40 + { 41 + if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO)) 42 + return false; 43 + if (prot & PROT_SAO) { 44 + if (!cpu_has_feature(CPU_FTR_SAO)) 45 + return false; 46 + if (firmware_has_feature(FW_FEATURE_LPAR) && 47 + !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR)) 48 + return false; 49 + } 50 + return true; 51 + } 52 + #define arch_validate_prot arch_validate_prot 30 53 31 54 #endif /* CONFIG_PPC64 */ 32 55 #endif /* _ASM_POWERPC_MMAN_H */
+2
arch/powerpc/include/asm/nohash/64/pgtable.h
··· 82 82 */ 83 83 #include <asm/nohash/pte-book3e.h> 84 84 85 + #define _PAGE_SAO 0 86 + 85 87 #define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) 86 88 87 89 /*
+1 -1
arch/powerpc/include/uapi/asm/mman.h
··· 11 11 #include <asm-generic/mman-common.h> 12 12 13 13 14 - #define PROT_SAO 0x10 /* Unsupported since v5.9 */ 14 + #define PROT_SAO 0x10 /* Strong Access Ordering */ 15 15 16 16 #define MAP_RENAME MAP_ANONYMOUS /* In SunOS terminology */ 17 17 #define MAP_NORESERVE 0x40 /* don't reserve swap pages */
+2 -1
arch/powerpc/kernel/dma-iommu.c
··· 120 120 if (!tbl) 121 121 return 0; 122 122 123 - mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1); 123 + mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) + 124 + tbl->it_page_shift - 1); 124 125 mask += mask - 1; 125 126 126 127 return mask;
+1 -1
arch/powerpc/kernel/dt_cpu_ftrs.c
··· 653 653 {"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL}, 654 654 {"processor-utilization-of-resources-register", feat_enable_purr, 0}, 655 655 {"no-execute", feat_enable, 0}, 656 - /* strong-access-ordering is unused */ 656 + {"strong-access-ordering", feat_enable, CPU_FTR_SAO}, 657 657 {"cache-inhibited-large-page", feat_enable_large_ci, 0}, 658 658 {"coprocessor-icswx", feat_enable, 0}, 659 659 {"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},
+4
arch/powerpc/kernel/entry_64.S
··· 113 113 ld r11,exception_marker@toc(r2) 114 114 std r11,-16(r10) /* "regshere" marker */ 115 115 116 + BEGIN_FTR_SECTION 117 + HMT_MEDIUM 118 + END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) 119 + 116 120 /* 117 121 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which 118 122 * would clobber syscall parameters. Also we always enter with IRQs
+9 -3
arch/powerpc/kernel/process.c
··· 548 548 * are live for the user thread). 549 549 */ 550 550 if ((!(msr & MSR_FP)) && should_restore_fp()) 551 - new_msr |= MSR_FP | current->thread.fpexc_mode; 551 + new_msr |= MSR_FP; 552 552 553 553 if ((!(msr & MSR_VEC)) && should_restore_altivec()) 554 554 new_msr |= MSR_VEC; ··· 559 559 } 560 560 561 561 if (new_msr) { 562 + unsigned long fpexc_mode = 0; 563 + 562 564 msr_check_and_set(new_msr); 563 565 564 - if (new_msr & MSR_FP) 566 + if (new_msr & MSR_FP) { 565 567 do_restore_fp(); 568 + 569 + // This also covers VSX, because VSX implies FP 570 + fpexc_mode = current->thread.fpexc_mode; 571 + } 566 572 567 573 if (new_msr & MSR_VEC) 568 574 do_restore_altivec(); ··· 572 578 573 579 msr_check_and_clear(new_msr); 574 580 575 - regs->msr |= new_msr; 581 + regs->msr |= new_msr | fpexc_mode; 576 582 577 583 } 578 584 }
+1 -1
arch/powerpc/kernel/vdso32/Makefile
··· 50 50 51 51 # actual build commands 52 52 quiet_cmd_vdso32ld = VDSO32L $@ 53 - cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) 53 + cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) 54 54 quiet_cmd_vdso32as = VDSO32A $@ 55 55 cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $< 56 56
-1
arch/powerpc/kernel/vdso32/vdso32.lds.S
··· 111 111 *(.note.GNU-stack) 112 112 *(.data .data.* .gnu.linkonce.d.* .sdata*) 113 113 *(.bss .sbss .dynbss .dynsbss) 114 - *(.glink .iplt .plt .rela*) 115 114 } 116 115 } 117 116
+1 -1
arch/powerpc/kernel/vdso64/Makefile
··· 34 34 35 35 # actual build commands 36 36 quiet_cmd_vdso64ld = VDSO64L $@ 37 - cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) 37 + cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) 38 38 39 39 # install commands for the unstripped file 40 40 quiet_cmd_vdso_install = INSTALL $@
+1 -2
arch/powerpc/kernel/vdso64/vdso64.lds.S
··· 30 30 . = ALIGN(16); 31 31 .text : { 32 32 *(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*) 33 - *(.sfpr) 33 + *(.sfpr .glink) 34 34 } :text 35 35 PROVIDE(__etext = .); 36 36 PROVIDE(_etext = .); ··· 111 111 *(.branch_lt) 112 112 *(.data .data.* .gnu.linkonce.d.* .sdata*) 113 113 *(.bss .sbss .dynbss .dynsbss) 114 - *(.glink .iplt .plt .rela*) 115 114 } 116 115 } 117 116
+2
arch/powerpc/mm/book3s64/hash_utils.c
··· 232 232 rflags |= HPTE_R_I; 233 233 else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) 234 234 rflags |= (HPTE_R_I | HPTE_R_G); 235 + else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) 236 + rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); 235 237 else 236 238 /* 237 239 * Add memory coherence if cache inhibited is not set
-15
arch/powerpc/mm/book3s64/radix_pgtable.c
··· 734 734 } 735 735 } 736 736 737 - void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, 738 - phys_addr_t first_memblock_size) 739 - { 740 - /* 741 - * We don't currently support the first MEMBLOCK not mapping 0 742 - * physical on those processors 743 - */ 744 - BUG_ON(first_memblock_base != 0); 745 - 746 - /* 747 - * Radix mode is not limited by RMA / VRMA addressing. 748 - */ 749 - ppc64_rma_size = ULONG_MAX; 750 - } 751 - 752 737 #ifdef CONFIG_MEMORY_HOTPLUG 753 738 static void free_pte_table(pte_t *pte_start, pmd_t *pmd) 754 739 {
+9 -2
arch/powerpc/mm/init_64.c
··· 452 452 if (!(mfmsr() & MSR_HV)) 453 453 early_check_vec5(); 454 454 455 - if (early_radix_enabled()) 455 + if (early_radix_enabled()) { 456 456 radix__early_init_devtree(); 457 - else 457 + /* 458 + * We have finalized the translation we are going to use by now. 459 + * Radix mode is not limited by RMA / VRMA addressing. 460 + * Hence don't limit memblock allocations. 461 + */ 462 + ppc64_rma_size = ULONG_MAX; 463 + memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); 464 + } else 458 465 hash__early_init_devtree(); 459 466 } 460 467 #endif /* CONFIG_PPC_BOOK3S_64 */
+14 -5
arch/powerpc/perf/core-book3s.c
··· 1557 1557 ret = 0; 1558 1558 out: 1559 1559 if (has_branch_stack(event)) { 1560 - power_pmu_bhrb_enable(event); 1561 - cpuhw->bhrb_filter = ppmu->bhrb_filter_map( 1562 - event->attr.branch_sample_type); 1560 + u64 bhrb_filter = -1; 1561 + 1562 + if (ppmu->bhrb_filter_map) 1563 + bhrb_filter = ppmu->bhrb_filter_map( 1564 + event->attr.branch_sample_type); 1565 + 1566 + if (bhrb_filter != -1) { 1567 + cpuhw->bhrb_filter = bhrb_filter; 1568 + power_pmu_bhrb_enable(event); 1569 + } 1563 1570 } 1564 1571 1565 1572 perf_pmu_enable(event->pmu); ··· 1881 1888 int n; 1882 1889 int err; 1883 1890 struct cpu_hw_events *cpuhw; 1884 - u64 bhrb_filter; 1885 1891 1886 1892 if (!ppmu) 1887 1893 return -ENOENT; ··· 1987 1993 err = power_check_constraints(cpuhw, events, cflags, n + 1); 1988 1994 1989 1995 if (has_branch_stack(event)) { 1990 - bhrb_filter = ppmu->bhrb_filter_map( 1996 + u64 bhrb_filter = -1; 1997 + 1998 + if (ppmu->bhrb_filter_map) 1999 + bhrb_filter = ppmu->bhrb_filter_map( 1991 2000 event->attr.branch_sample_type); 1992 2001 1993 2002 if (bhrb_filter == -1) {
+2 -2
arch/powerpc/perf/imc-pmu.c
··· 1289 1289 header->misc = 0; 1290 1290 1291 1291 if (cpu_has_feature(CPU_FTR_ARCH_31)) { 1292 - switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) { 1292 + switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) { 1293 1293 case 0:/* when MSR HV and PR not set in the trace-record */ 1294 1294 header->misc |= PERF_RECORD_MISC_GUEST_KERNEL; 1295 1295 break; ··· 1297 1297 header->misc |= PERF_RECORD_MISC_GUEST_USER; 1298 1298 break; 1299 1299 case 2: /* MSR HV is 1 and PR is 0 */ 1300 - header->misc |= PERF_RECORD_MISC_HYPERVISOR; 1300 + header->misc |= PERF_RECORD_MISC_KERNEL; 1301 1301 break; 1302 1302 case 3: /* MSR HV is 1 and PR is 1 */ 1303 1303 header->misc |= PERF_RECORD_MISC_USER;
+1 -1
arch/powerpc/platforms/Kconfig.cputype
··· 36 36 select PPC_HAVE_PMU_SUPPORT 37 37 select PPC_HAVE_KUEP 38 38 select PPC_HAVE_KUAP 39 - select HAVE_ARCH_VMAP_STACK 39 + select HAVE_ARCH_VMAP_STACK if !ADB_PMU 40 40 41 41 config PPC_BOOK3S_601 42 42 bool "PowerPC 601"
+1 -1
arch/powerpc/platforms/powernv/idle.c
··· 1223 1223 return; 1224 1224 } 1225 1225 1226 - if (pvr_version_is(PVR_POWER9)) 1226 + if (cpu_has_feature(CPU_FTR_ARCH_300)) 1227 1227 pnv_power9_idle_init(); 1228 1228 1229 1229 for (i = 0; i < nr_pnv_idle_states; i++)
+1 -1
arch/powerpc/platforms/pseries/papr_scm.c
··· 822 822 kfree(stats); 823 823 return rc ? rc : seq_buf_used(&s); 824 824 } 825 - DEVICE_ATTR_RO(perf_stats); 825 + DEVICE_ATTR_ADMIN_RO(perf_stats); 826 826 827 827 static ssize_t flags_show(struct device *dev, 828 828 struct device_attribute *attr, char *buf)
+11 -4
drivers/cpuidle/cpuidle-pseries.c
··· 361 361 for (i = 0; i < nr_xcede_records; i++) { 362 362 struct xcede_latency_record *record = &payload->records[i]; 363 363 u64 latency_tb = be64_to_cpu(record->latency_ticks); 364 - u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC; 364 + u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC); 365 + 366 + if (latency_us == 0) 367 + pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i); 365 368 366 369 if (latency_us < min_latency_us) 367 370 min_latency_us = latency_us; ··· 378 381 * Perform the fix-up. 379 382 */ 380 383 if (min_latency_us < dedicated_states[1].exit_latency) { 381 - u64 cede0_latency = min_latency_us - 1; 384 + /* 385 + * We set a minimum of 1us wakeup latency for cede0 to 386 + * distinguish it from snooze 387 + */ 388 + u64 cede0_latency = 1; 382 389 383 - if (cede0_latency <= 0) 384 - cede0_latency = min_latency_us; 390 + if (min_latency_us > cede0_latency) 391 + cede0_latency = min_latency_us - 1; 385 392 386 393 dedicated_states[1].exit_latency = cede0_latency; 387 394 dedicated_states[1].target_residency = 10 * (cede0_latency);
+2
drivers/video/fbdev/controlfb.c
··· 49 49 #include <linux/cuda.h> 50 50 #ifdef CONFIG_PPC_PMAC 51 51 #include <asm/prom.h> 52 + #endif 53 + #ifdef CONFIG_BOOTX_TEXT 52 54 #include <asm/btext.h> 53 55 #endif 54 56
+2
include/linux/mm.h
··· 321 321 322 322 #if defined(CONFIG_X86) 323 323 # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ 324 + #elif defined(CONFIG_PPC) 325 + # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ 324 326 #elif defined(CONFIG_PARISC) 325 327 # define VM_GROWSUP VM_ARCH_1 326 328 #elif defined(CONFIG_IA64)
+2
include/trace/events/mmflags.h
··· 114 114 115 115 #if defined(CONFIG_X86) 116 116 #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" } 117 + #elif defined(CONFIG_PPC) 118 + #define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" } 117 119 #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64) 118 120 #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" } 119 121 #elif !defined(CONFIG_MMU)
+4
mm/ksm.c
··· 2453 2453 if (vma_is_dax(vma)) 2454 2454 return 0; 2455 2455 2456 + #ifdef VM_SAO 2457 + if (*vm_flags & VM_SAO) 2458 + return 0; 2459 + #endif 2456 2460 #ifdef VM_SPARC_ADI 2457 2461 if (*vm_flags & VM_SPARC_ADI) 2458 2462 return 0;
+1
tools/testing/selftests/powerpc/mm/.gitignore
··· 2 2 hugetlb_vs_thp_test 3 3 subpage_prot 4 4 tempfile 5 + prot_sao 5 6 segv_errors 6 7 wild_bctr 7 8 large_vm_fork_separation
+3 -1
tools/testing/selftests/powerpc/mm/Makefile
··· 2 2 noarg: 3 3 $(MAKE) -C ../ 4 4 5 - TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \ 5 + TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ 6 6 large_vm_fork_separation bad_accesses pkey_exec_prot \ 7 7 pkey_siginfo stack_expansion_signal stack_expansion_ldst 8 8 ··· 13 13 include ../../lib.mk 14 14 15 15 $(TEST_GEN_PROGS): ../harness.c ../utils.c 16 + 17 + $(OUTPUT)/prot_sao: ../utils.c 16 18 17 19 $(OUTPUT)/wild_bctr: CFLAGS += -m64 18 20 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+48
tools/testing/selftests/powerpc/mm/prot_sao.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright 2016, Michael Ellerman, IBM Corp. 4 + */ 5 + 6 + #include <stdio.h> 7 + #include <stdlib.h> 8 + #include <string.h> 9 + #include <sys/mman.h> 10 + #include <unistd.h> 11 + 12 + #include <asm/cputable.h> 13 + 14 + #include "utils.h" 15 + 16 + #define SIZE (64 * 1024) 17 + 18 + int test_prot_sao(void) 19 + { 20 + char *p; 21 + 22 + /* 23 + * SAO was introduced in 2.06 and removed in 3.1. It's disabled in 24 + * guests/LPARs by default, so also skip if we are running in a guest. 25 + */ 26 + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) || 27 + have_hwcap2(PPC_FEATURE2_ARCH_3_1) || 28 + access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0); 29 + 30 + /* 31 + * Ensure we can ask for PROT_SAO. 32 + * We can't really verify that it does the right thing, but at least we 33 + * confirm the kernel will accept it. 34 + */ 35 + p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, 36 + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); 37 + FAIL_IF(p == MAP_FAILED); 38 + 39 + /* Write to the mapping, to at least cause a fault */ 40 + memset(p, 0xaa, SIZE); 41 + 42 + return 0; 43 + } 44 + 45 + int main(void) 46 + { 47 + return test_harness(test_prot_sao, "prot-sao"); 48 + }