Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Borislav Petkov:
"Lots of cleanups and preparation. Highlights:

- futex: Cleanup and remove runtime futex_cmpxchg detection

- rtmutex: Some fixes for the PREEMPT_RT locking infrastructure

- kcsan: Share owner_on_cpu() between mutex, rtmutex and rwsem and
annotate the racy owner->on_cpu access *once*.

- atomic64: Dead-Code-Elimination"

[ Description above by Peter Zijlstra ]

* tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/atomic: atomic64: Remove unusable atomic ops
futex: Fix additional regressions
locking: Allow to include asm/spinlock_types.h from linux/spinlock_types_raw.h
x86/mm: Include spinlock_t definition in pgtable.
locking: Mark racy reads of owner->on_cpu
locking: Make owner_on_cpu() into <linux/sched.h>
lockdep/selftests: Adapt ww-tests for PREEMPT_RT
lockdep/selftests: Skip the softirq related tests on PREEMPT_RT
lockdep/selftests: Unbalanced migrate_disable() & rcu_read_lock().
lockdep/selftests: Avoid using local_lock_{acquire|release}().
lockdep: Remove softirq accounting on PREEMPT_RT.
locking/rtmutex: Add rt_mutex_lock_nest_lock() and rt_mutex_lock_killable().
locking/rtmutex: Squash self-deadlock check for ww_rt_mutex.
locking: Remove rt_rwlock_is_contended().
sched: Trigger warning if ->migration_disabled counter underflows.
futex: Fix sparc32/m68k/nds32 build regression
futex: Remove futex_cmpxchg detection
futex: Ensure futex_atomic_cmpxchg_inatomic() is present
kernel/locking: Use a pointer in ww_mutex_trylock().

+240 -225
+1 -1
arch/alpha/include/asm/spinlock_types.h
··· 2 2 #ifndef _ALPHA_SPINLOCK_TYPES_H 3 3 #define _ALPHA_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/arc/Kconfig
··· 32 32 select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4 33 33 select HAVE_DEBUG_STACKOVERFLOW 34 34 select HAVE_DEBUG_KMEMLEAK 35 - select HAVE_FUTEX_CMPXCHG if FUTEX 36 35 select HAVE_IOREMAP_PROT 37 36 select HAVE_KERNEL_GZIP 38 37 select HAVE_KERNEL_LZMA
-1
arch/arm/Kconfig
··· 93 93 select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL 94 94 select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG 95 95 select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG) 96 - select HAVE_FUTEX_CMPXCHG if FUTEX 97 96 select HAVE_GCC_PLUGINS 98 97 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7) 99 98 select HAVE_IRQ_TIME_ACCOUNTING
+1 -1
arch/arm/include/asm/spinlock_types.h
··· 2 2 #ifndef __ASM_SPINLOCK_TYPES_H 3 3 #define __ASM_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/arm64/Kconfig
··· 196 196 select HAVE_REGS_AND_STACK_ACCESS_API 197 197 select HAVE_POSIX_CPU_TIMERS_TASK_WORK 198 198 select HAVE_FUNCTION_ARG_ACCESS_API 199 - select HAVE_FUTEX_CMPXCHG if FUTEX 200 199 select MMU_GATHER_RCU_TABLE_FREE 201 200 select HAVE_RSEQ 202 201 select HAVE_STACKPROTECTOR
+1 -1
arch/arm64/include/asm/spinlock_types.h
··· 5 5 #ifndef __ASM_SPINLOCK_TYPES_H 6 6 #define __ASM_SPINLOCK_TYPES_H 7 7 8 - #if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) 8 + #if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H) 9 9 # error "please don't include this file directly" 10 10 #endif 11 11
-1
arch/csky/Kconfig
··· 52 52 select HAVE_FUNCTION_TRACER 53 53 select HAVE_FUNCTION_GRAPH_TRACER 54 54 select HAVE_FUNCTION_ERROR_INJECTION 55 - select HAVE_FUTEX_CMPXCHG if FUTEX && SMP 56 55 select HAVE_FTRACE_MCOUNT_RECORD 57 56 select HAVE_KERNEL_GZIP 58 57 select HAVE_KERNEL_LZO
+1 -1
arch/csky/include/asm/spinlock_types.h
··· 3 3 #ifndef __ASM_CSKY_SPINLOCK_TYPES_H 4 4 #define __ASM_CSKY_SPINLOCK_TYPES_H 5 5 6 - #ifndef __LINUX_SPINLOCK_TYPES_H 6 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 7 7 # error "please don't include this file directly" 8 8 #endif 9 9
+1 -1
arch/hexagon/include/asm/spinlock_types.h
··· 8 8 #ifndef _ASM_SPINLOCK_TYPES_H 9 9 #define _ASM_SPINLOCK_TYPES_H 10 10 11 - #ifndef __LINUX_SPINLOCK_TYPES_H 11 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 12 12 # error "please don't include this file directly" 13 13 #endif 14 14
+1 -1
arch/ia64/include/asm/spinlock_types.h
··· 2 2 #ifndef _ASM_IA64_SPINLOCK_TYPES_H 3 3 #define _ASM_IA64_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/m68k/Kconfig
··· 21 21 select HAVE_ASM_MODVERSIONS 22 22 select HAVE_DEBUG_BUGVERBOSE 23 23 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED 24 - select HAVE_FUTEX_CMPXCHG if MMU && FUTEX 25 24 select HAVE_MOD_ARCH_SPECIFIC 26 25 select HAVE_UID16 27 26 select MMU_GATHER_NO_RANGE if MMU
+17 -10
arch/mips/include/asm/futex.h
··· 19 19 #include <asm/sync.h> 20 20 #include <asm/war.h> 21 21 22 - #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ 22 + #define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser 23 + #define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic 24 + #include <asm-generic/futex.h> 25 + 26 + #define __futex_atomic_op(op, insn, ret, oldval, uaddr, oparg) \ 23 27 { \ 24 28 if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { \ 25 29 __asm__ __volatile__( \ ··· 84 80 : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ 85 81 "i" (-EFAULT) \ 86 82 : "memory"); \ 87 - } else \ 88 - ret = -ENOSYS; \ 83 + } else { \ 84 + /* fallback for non-SMP */ \ 85 + ret = futex_atomic_op_inuser_local(op, oparg, oval, uaddr); \ 86 + } \ 89 87 } 90 88 91 89 static inline int ··· 100 94 101 95 switch (op) { 102 96 case FUTEX_OP_SET: 103 - __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg); 97 + __futex_atomic_op(op, "move $1, %z5", ret, oldval, uaddr, oparg); 104 98 break; 105 99 106 100 case FUTEX_OP_ADD: 107 - __futex_atomic_op("addu $1, %1, %z5", 101 + __futex_atomic_op(op, "addu $1, %1, %z5", 108 102 ret, oldval, uaddr, oparg); 109 103 break; 110 104 case FUTEX_OP_OR: 111 - __futex_atomic_op("or $1, %1, %z5", 105 + __futex_atomic_op(op, "or $1, %1, %z5", 112 106 ret, oldval, uaddr, oparg); 113 107 break; 114 108 case FUTEX_OP_ANDN: 115 - __futex_atomic_op("and $1, %1, %z5", 109 + __futex_atomic_op(op, "and $1, %1, %z5", 116 110 ret, oldval, uaddr, ~oparg); 117 111 break; 118 112 case FUTEX_OP_XOR: 119 - __futex_atomic_op("xor $1, %1, %z5", 113 + __futex_atomic_op(op, "xor $1, %1, %z5", 120 114 ret, oldval, uaddr, oparg); 121 115 break; 122 116 default: ··· 199 193 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 200 194 "i" (-EFAULT) 201 195 : "memory"); 202 - } else 203 - return -ENOSYS; 196 + } else { 197 + return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval); 198 + } 204 199 205 200 *uval = val; 206 201 
return ret;
+1 -1
arch/powerpc/include/asm/simple_spinlock_types.h
··· 2 2 #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H 3 3 #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
+1 -1
arch/powerpc/include/asm/spinlock_types.h
··· 2 2 #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H 3 3 #define _ASM_POWERPC_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/riscv/Kconfig
··· 83 83 select HAVE_DMA_CONTIGUOUS if MMU 84 84 select HAVE_EBPF_JIT if MMU 85 85 select HAVE_FUNCTION_ERROR_INJECTION 86 - select HAVE_FUTEX_CMPXCHG if FUTEX 87 86 select HAVE_GCC_PLUGINS 88 87 select HAVE_GENERIC_VDSO if MMU && 64BIT 89 88 select HAVE_IRQ_TIME_ACCOUNTING
+1 -1
arch/riscv/include/asm/spinlock_types.h
··· 6 6 #ifndef _ASM_RISCV_SPINLOCK_TYPES_H 7 7 #define _ASM_RISCV_SPINLOCK_TYPES_H 8 8 9 - #ifndef __LINUX_SPINLOCK_TYPES_H 9 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 10 10 # error "please don't include this file directly" 11 11 #endif 12 12
-1
arch/s390/Kconfig
··· 165 165 select HAVE_FUNCTION_ERROR_INJECTION 166 166 select HAVE_FUNCTION_GRAPH_TRACER 167 167 select HAVE_FUNCTION_TRACER 168 - select HAVE_FUTEX_CMPXCHG if FUTEX 169 168 select HAVE_GCC_PLUGINS 170 169 select HAVE_GENERIC_VDSO 171 170 select HAVE_IOREMAP_PROT if PCI
+1 -1
arch/s390/include/asm/spinlock_types.h
··· 2 2 #ifndef __ASM_SPINLOCK_TYPES_H 3 3 #define __ASM_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/sh/Kconfig
··· 34 34 select HAVE_FAST_GUP if MMU 35 35 select HAVE_FUNCTION_GRAPH_TRACER 36 36 select HAVE_FUNCTION_TRACER 37 - select HAVE_FUTEX_CMPXCHG if FUTEX 38 37 select HAVE_FTRACE_MCOUNT_RECORD 39 38 select HAVE_HW_BREAKPOINT 40 39 select HAVE_IOREMAP_PROT if MMU && !X2TLB
+1 -1
arch/sh/include/asm/spinlock_types.h
··· 2 2 #ifndef __ASM_SH_SPINLOCK_TYPES_H 3 3 #define __ASM_SH_SPINLOCK_TYPES_H 4 4 5 - #ifndef __LINUX_SPINLOCK_TYPES_H 5 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
-1
arch/um/Kconfig
··· 14 14 select HAVE_ARCH_SECCOMP_FILTER 15 15 select HAVE_ASM_MODVERSIONS 16 16 select HAVE_UID16 17 - select HAVE_FUTEX_CMPXCHG if FUTEX 18 17 select HAVE_DEBUG_KMEMLEAK 19 18 select HAVE_DEBUG_BUGVERBOSE 20 19 select NO_DMA if !UML_DMA_EMULATION
-1
arch/um/kernel/skas/uaccess.c
··· 323 323 * 0 - On success 324 324 * -EFAULT - User access resulted in a page fault 325 325 * -EAGAIN - Atomic operation was unable to complete due to contention 326 - * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) 327 326 */ 328 327 329 328 int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-1
arch/xtensa/Kconfig
··· 31 31 select HAVE_DMA_CONTIGUOUS 32 32 select HAVE_EXIT_THREAD 33 33 select HAVE_FUNCTION_TRACER 34 - select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX 35 34 select HAVE_HW_BREAKPOINT if PERF_EVENTS 36 35 select HAVE_IRQ_TIME_ACCOUNTING 37 36 select HAVE_PCI
+6 -2
arch/xtensa/include/asm/futex.h
··· 16 16 #include <linux/uaccess.h> 17 17 #include <linux/errno.h> 18 18 19 + #define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser 20 + #define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic 21 + #include <asm-generic/futex.h> 22 + 19 23 #if XCHAL_HAVE_EXCLUSIVE 20 24 #define __futex_atomic_op(insn, ret, old, uaddr, arg) \ 21 25 __asm__ __volatile( \ ··· 109 105 110 106 return ret; 111 107 #else 112 - return -ENOSYS; 108 + return futex_atomic_op_inuser_local(op, oparg, oval, uaddr); 113 109 #endif 114 110 } 115 111 ··· 160 156 161 157 return ret; 162 158 #else 163 - return -ENOSYS; 159 + return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval); 164 160 #endif 165 161 } 166 162
+1 -1
arch/xtensa/include/asm/spinlock_types.h
··· 2 2 #ifndef __ASM_SPINLOCK_TYPES_H 3 3 #define __ASM_SPINLOCK_TYPES_H 4 4 5 - #if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) 5 + #if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H) 6 6 # error "please don't include this file directly" 7 7 #endif 8 8
+11 -20
include/asm-generic/futex.h
··· 6 6 #include <linux/uaccess.h> 7 7 #include <asm/errno.h> 8 8 9 + #ifndef futex_atomic_cmpxchg_inatomic 9 10 #ifndef CONFIG_SMP 10 11 /* 11 12 * The following implementation only for uniprocessor machines. 12 13 * It relies on preempt_disable() ensuring mutual exclusion. 13 14 * 14 15 */ 16 + #define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \ 17 + futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval) 18 + #define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \ 19 + futex_atomic_op_inuser_local(op, oparg, oval, uaddr) 20 + #endif /* CONFIG_SMP */ 21 + #endif 15 22 16 23 /** 17 - * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant 24 + * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant 18 25 * argument and comparison of the previous 19 26 * futex value with another constant. 20 27 * ··· 35 28 * -ENOSYS - Operation not supported 36 29 */ 37 30 static inline int 38 - arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) 31 + futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr) 39 32 { 40 33 int oldval, ret; 41 34 u32 tmp; ··· 82 75 } 83 76 84 77 /** 85 - * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the 78 + * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the 86 79 * uaddr with newval if the current value is 87 80 * oldval. 
88 81 * @uval: pointer to store content of @uaddr ··· 94 87 * 0 - On success 95 88 * -EFAULT - User access resulted in a page fault 96 89 * -EAGAIN - Atomic operation was unable to complete due to contention 97 - * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG) 98 90 */ 99 91 static inline int 100 - futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 92 + futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr, 101 93 u32 oldval, u32 newval) 102 94 { 103 95 u32 val; ··· 118 112 return 0; 119 113 } 120 114 121 - #else 122 - static inline int 123 - arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr) 124 - { 125 - return -ENOSYS; 126 - } 127 - 128 - static inline int 129 - futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 130 - u32 oldval, u32 newval) 131 - { 132 - return -ENOSYS; 133 - } 134 - 135 - #endif /* CONFIG_SMP */ 136 115 #endif
+15 -8
include/linux/irqflags.h
··· 71 71 do { \ 72 72 __this_cpu_dec(hardirq_context); \ 73 73 } while (0) 74 - # define lockdep_softirq_enter() \ 75 - do { \ 76 - current->softirq_context++; \ 77 - } while (0) 78 - # define lockdep_softirq_exit() \ 79 - do { \ 80 - current->softirq_context--; \ 81 - } while (0) 82 74 83 75 # define lockdep_hrtimer_enter(__hrtimer) \ 84 76 ({ \ ··· 130 138 # define lockdep_posixtimer_exit() do { } while (0) 131 139 # define lockdep_irq_work_enter(__work) do { } while (0) 132 140 # define lockdep_irq_work_exit(__work) do { } while (0) 141 + #endif 142 + 143 + #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT) 144 + # define lockdep_softirq_enter() \ 145 + do { \ 146 + current->softirq_context++; \ 147 + } while (0) 148 + # define lockdep_softirq_exit() \ 149 + do { \ 150 + current->softirq_context--; \ 151 + } while (0) 152 + 153 + #else 154 + # define lockdep_softirq_enter() do { } while (0) 155 + # define lockdep_softirq_exit() do { } while (0) 133 156 #endif 134 157 135 158 #if defined(CONFIG_IRQSOFF_TRACER) || \
+1 -1
include/linux/ratelimit_types.h
··· 4 4 5 5 #include <linux/bits.h> 6 6 #include <linux/param.h> 7 - #include <linux/spinlock_types.h> 7 + #include <linux/spinlock_types_raw.h> 8 8 9 9 #define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) 10 10 #define DEFAULT_RATELIMIT_BURST 10
+9
include/linux/rtmutex.h
··· 99 99 100 100 #ifdef CONFIG_DEBUG_LOCK_ALLOC 101 101 extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); 102 + extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock); 102 103 #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0) 104 + #define rt_mutex_lock_nest_lock(lock, nest_lock) \ 105 + do { \ 106 + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ 107 + _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 108 + } while (0) 109 + 103 110 #else 104 111 extern void rt_mutex_lock(struct rt_mutex *lock); 105 112 #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock) 113 + #define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock) 106 114 #endif 107 115 108 116 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); 117 + extern int rt_mutex_lock_killable(struct rt_mutex *lock); 109 118 extern int rt_mutex_trylock(struct rt_mutex *lock); 110 119 111 120 extern void rt_mutex_unlock(struct rt_mutex *lock);
+9
include/linux/sched.h
··· 2178 2178 #endif 2179 2179 2180 2180 #ifdef CONFIG_SMP 2181 + static inline bool owner_on_cpu(struct task_struct *owner) 2182 + { 2183 + /* 2184 + * As lock holder preemption issue, we both skip spinning if 2185 + * task is not on cpu or its cpu is preempted 2186 + */ 2187 + return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); 2188 + } 2189 + 2181 2190 /* Returns effective CPU energy utilization, as seen by the scheduler */ 2182 2191 unsigned long sched_cpu_util(int cpu, unsigned long max); 2183 2192 #endif /* CONFIG_SMP */
+1 -1
include/linux/spinlock_types_up.h
··· 1 1 #ifndef __LINUX_SPINLOCK_TYPES_UP_H 2 2 #define __LINUX_SPINLOCK_TYPES_UP_H 3 3 4 - #ifndef __LINUX_SPINLOCK_TYPES_H 4 + #ifndef __LINUX_SPINLOCK_TYPES_RAW_H 5 5 # error "please don't include this file directly" 6 6 #endif 7 7
+1 -8
init/Kconfig
··· 1579 1579 1580 1580 config FUTEX 1581 1581 bool "Enable futex support" if EXPERT 1582 + depends on !(SPARC32 && SMP) 1582 1583 default y 1583 1584 imply RT_MUTEXES 1584 1585 help ··· 1591 1590 bool 1592 1591 depends on FUTEX && RT_MUTEXES 1593 1592 default y 1594 - 1595 - config HAVE_FUTEX_CMPXCHG 1596 - bool 1597 - depends on FUTEX 1598 - help 1599 - Architectures should select this if futex_atomic_cmpxchg_inatomic() 1600 - is implemented and always working. This removes a couple of runtime 1601 - checks. 1602 1593 1603 1594 config EPOLL 1604 1595 bool "Enable eventpoll support" if EXPERT
-35
kernel/futex/core.c
··· 41 41 #include "futex.h" 42 42 #include "../locking/rtmutex_common.h" 43 43 44 - #ifndef CONFIG_HAVE_FUTEX_CMPXCHG 45 - int __read_mostly futex_cmpxchg_enabled; 46 - #endif 47 - 48 - 49 44 /* 50 45 * The base of the bucket array and its size are always used together 51 46 * (after initialization only in futex_hash()), so ensure that they ··· 771 776 unsigned long futex_offset; 772 777 int rc; 773 778 774 - if (!futex_cmpxchg_enabled) 775 - return; 776 - 777 779 /* 778 780 * Fetch the list head (which was registered earlier, via 779 781 * sys_set_robust_list()): ··· 866 874 compat_long_t futex_offset; 867 875 int rc; 868 876 869 - if (!futex_cmpxchg_enabled) 870 - return; 871 - 872 877 /* 873 878 * Fetch the list head (which was registered earlier, via 874 879 * sys_set_robust_list()): ··· 939 950 struct futex_hash_bucket *hb; 940 951 union futex_key key = FUTEX_KEY_INIT; 941 952 942 - if (!futex_cmpxchg_enabled) 943 - return; 944 953 /* 945 954 * We are a ZOMBIE and nobody can enqueue itself on 946 955 * pi_state_list anymore, but we have to be careful ··· 1112 1125 futex_cleanup_end(tsk, FUTEX_STATE_DEAD); 1113 1126 } 1114 1127 1115 - static void __init futex_detect_cmpxchg(void) 1116 - { 1117 - #ifndef CONFIG_HAVE_FUTEX_CMPXCHG 1118 - u32 curval; 1119 - 1120 - /* 1121 - * This will fail and we want it. Some arch implementations do 1122 - * runtime detection of the futex_atomic_cmpxchg_inatomic() 1123 - * functionality. We want to know that before we call in any 1124 - * of the complex code paths. Also we want to prevent 1125 - * registration of robust lists in that case. NULL is 1126 - * guaranteed to fault and we get -EFAULT on functional 1127 - * implementation, the non-functional ones will return 1128 - * -ENOSYS. 
1129 - */ 1130 - if (futex_cmpxchg_value_locked(&curval, NULL, 0, 0) == -EFAULT) 1131 - futex_cmpxchg_enabled = 1; 1132 - #endif 1133 - } 1134 - 1135 1128 static int __init futex_init(void) 1136 1129 { 1137 1130 unsigned int futex_shift; ··· 1129 1162 &futex_shift, NULL, 1130 1163 futex_hashsize, futex_hashsize); 1131 1164 futex_hashsize = 1UL << futex_shift; 1132 - 1133 - futex_detect_cmpxchg(); 1134 1165 1135 1166 for (i = 0; i < futex_hashsize; i++) { 1136 1167 atomic_set(&futex_queues[i].waiters, 0);
-6
kernel/futex/futex.h
··· 27 27 #define FLAGS_CLOCKRT 0x02 28 28 #define FLAGS_HAS_TIMEOUT 0x04 29 29 30 - #ifdef CONFIG_HAVE_FUTEX_CMPXCHG 31 - #define futex_cmpxchg_enabled 1 32 - #else 33 - extern int __read_mostly futex_cmpxchg_enabled; 34 - #endif 35 - 36 30 #ifdef CONFIG_FAIL_FUTEX 37 31 extern bool should_fail_futex(bool fshared); 38 32 #else
-22
kernel/futex/syscalls.c
··· 29 29 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, 30 30 size_t, len) 31 31 { 32 - if (!futex_cmpxchg_enabled) 33 - return -ENOSYS; 34 32 /* 35 33 * The kernel knows only one size for now: 36 34 */ ··· 53 55 struct robust_list_head __user *head; 54 56 unsigned long ret; 55 57 struct task_struct *p; 56 - 57 - if (!futex_cmpxchg_enabled) 58 - return -ENOSYS; 59 58 60 59 rcu_read_lock(); 61 60 ··· 95 100 flags |= FLAGS_CLOCKRT; 96 101 if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && 97 102 cmd != FUTEX_LOCK_PI2) 98 - return -ENOSYS; 99 - } 100 - 101 - switch (cmd) { 102 - case FUTEX_LOCK_PI: 103 - case FUTEX_LOCK_PI2: 104 - case FUTEX_UNLOCK_PI: 105 - case FUTEX_TRYLOCK_PI: 106 - case FUTEX_WAIT_REQUEUE_PI: 107 - case FUTEX_CMP_REQUEUE_PI: 108 - if (!futex_cmpxchg_enabled) 109 103 return -ENOSYS; 110 104 } 111 105 ··· 307 323 struct compat_robust_list_head __user *, head, 308 324 compat_size_t, len) 309 325 { 310 - if (!futex_cmpxchg_enabled) 311 - return -ENOSYS; 312 - 313 326 if (unlikely(len != sizeof(*head))) 314 327 return -EINVAL; 315 328 ··· 322 341 struct compat_robust_list_head __user *head; 323 342 unsigned long ret; 324 343 struct task_struct *p; 325 - 326 - if (!futex_cmpxchg_enabled) 327 - return -ENOSYS; 328 344 329 345 rcu_read_lock(); 330 346
+2
kernel/locking/lockdep.c
··· 5485 5485 } 5486 5486 } 5487 5487 5488 + #ifndef CONFIG_PREEMPT_RT 5488 5489 /* 5489 5490 * We dont accurately track softirq state in e.g. 5490 5491 * hardirq contexts (such as on 4KSTACKS), so only ··· 5500 5499 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 5501 5500 } 5502 5501 } 5502 + #endif 5503 5503 5504 5504 if (!debug_locks) 5505 5505 print_irqtrace_events(current);
+2 -9
kernel/locking/mutex.c
··· 367 367 /* 368 368 * Use vcpu_is_preempted to detect lock holder preemption issue. 369 369 */ 370 - if (!owner->on_cpu || need_resched() || 371 - vcpu_is_preempted(task_cpu(owner))) { 370 + if (!owner_on_cpu(owner) || need_resched()) { 372 371 ret = false; 373 372 break; 374 373 } ··· 402 403 * structure won't go away during the spinning period. 403 404 */ 404 405 owner = __mutex_owner(lock); 405 - 406 - /* 407 - * As lock holder preemption issue, we both skip spinning if task is not 408 - * on cpu or its cpu is preempted 409 - */ 410 - 411 406 if (owner) 412 - retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); 407 + retval = owner_on_cpu(owner); 413 408 414 409 /* 415 410 * If lock->owner is not set, the mutex has been released. Return true
+6 -4
kernel/locking/rtmutex.c
··· 1103 1103 * the other will detect the deadlock and return -EDEADLOCK, 1104 1104 * which is wrong, as the other waiter is not in a deadlock 1105 1105 * situation. 1106 + * 1107 + * Except for ww_mutex, in that case the chain walk must already deal 1108 + * with spurious cycles, see the comments at [3] and [6]. 1106 1109 */ 1107 - if (owner == task) 1110 + if (owner == task && !(build_ww_mutex() && ww_ctx)) 1108 1111 return -EDEADLK; 1109 1112 1110 1113 raw_spin_lock(&task->pi_lock); ··· 1382 1379 * for CONFIG_PREEMPT_RCU=y) 1383 1380 * - the VCPU on which owner runs is preempted 1384 1381 */ 1385 - if (!owner->on_cpu || need_resched() || 1386 - !rt_mutex_waiter_is_top_waiter(lock, waiter) || 1387 - vcpu_is_preempted(task_cpu(owner))) { 1382 + if (!owner_on_cpu(owner) || need_resched() || 1383 + !rt_mutex_waiter_is_top_waiter(lock, waiter)) { 1388 1384 res = false; 1389 1385 break; 1390 1386 }
+26 -4
kernel/locking/rtmutex_api.c
··· 21 21 */ 22 22 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock, 23 23 unsigned int state, 24 + struct lockdep_map *nest_lock, 24 25 unsigned int subclass) 25 26 { 26 27 int ret; 27 28 28 29 might_sleep(); 29 - mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 30 + mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_); 30 31 ret = __rt_mutex_lock(&lock->rtmutex, state); 31 32 if (ret) 32 33 mutex_release(&lock->dep_map, _RET_IP_); ··· 49 48 */ 50 49 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass) 51 50 { 52 - __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass); 51 + __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass); 53 52 } 54 53 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested); 54 + 55 + void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock) 56 + { 57 + __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0); 58 + } 59 + EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock); 55 60 56 61 #else /* !CONFIG_DEBUG_LOCK_ALLOC */ 57 62 ··· 68 61 */ 69 62 void __sched rt_mutex_lock(struct rt_mutex *lock) 70 63 { 71 - __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); 64 + __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0); 72 65 } 73 66 EXPORT_SYMBOL_GPL(rt_mutex_lock); 74 67 #endif ··· 84 77 */ 85 78 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) 86 79 { 87 - return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0); 80 + return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0); 88 81 } 89 82 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); 83 + 84 + /** 85 + * rt_mutex_lock_killable - lock a rt_mutex killable 86 + * 87 + * @lock: the rt_mutex to be locked 88 + * 89 + * Returns: 90 + * 0 on success 91 + * -EINTR when interrupted by a signal 92 + */ 93 + int __sched rt_mutex_lock_killable(struct rt_mutex *lock) 94 + { 95 + return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0); 96 + } 97 + 
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); 90 98 91 99 /** 92 100 * rt_mutex_trylock - try to lock a rt_mutex
-9
kernel/locking/rwsem.c
··· 658 658 return false; 659 659 } 660 660 661 - static inline bool owner_on_cpu(struct task_struct *owner) 662 - { 663 - /* 664 - * As lock holder preemption issue, we both skip spinning if 665 - * task is not on cpu or its cpu is preempted 666 - */ 667 - return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); 668 - } 669 - 670 661 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 671 662 { 672 663 struct task_struct *owner;
-6
kernel/locking/spinlock_rt.c
··· 257 257 } 258 258 EXPORT_SYMBOL(rt_write_unlock); 259 259 260 - int __sched rt_rwlock_is_contended(rwlock_t *rwlock) 261 - { 262 - return rw_base_is_contended(&rwlock->rwbase); 263 - } 264 - EXPORT_SYMBOL(rt_rwlock_is_contended); 265 - 266 260 #ifdef CONFIG_DEBUG_LOCK_ALLOC 267 261 void __rt_rwlock_init(rwlock_t *rwlock, const char *name, 268 262 struct lock_class_key *key)
+1 -1
kernel/locking/ww_rt_mutex.c
··· 26 26 27 27 if (__rt_mutex_trylock(&rtm->rtmutex)) { 28 28 ww_mutex_set_context_fastpath(lock, ww_ctx); 29 - mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_); 29 + mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_); 30 30 return 1; 31 31 } 32 32
+3
kernel/sched/core.c
··· 2184 2184 return; 2185 2185 } 2186 2186 2187 + if (WARN_ON_ONCE(!p->migration_disabled)) 2188 + return; 2189 + 2187 2190 /* 2188 2191 * Ensure stop_task runs either before or after this, and that 2189 2192 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
-2
lib/atomic64.c
··· 118 118 #undef ATOMIC64_OPS 119 119 #define ATOMIC64_OPS(op, c_op) \ 120 120 ATOMIC64_OP(op, c_op) \ 121 - ATOMIC64_OP_RETURN(op, c_op) \ 122 121 ATOMIC64_FETCH_OP(op, c_op) 123 122 124 123 ATOMIC64_OPS(and, &=) ··· 126 127 127 128 #undef ATOMIC64_OPS 128 129 #undef ATOMIC64_FETCH_OP 129 - #undef ATOMIC64_OP_RETURN 130 130 #undef ATOMIC64_OP 131 131 132 132 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
+118 -54
lib/locking-selftest.c
··· 26 26 #include <linux/rtmutex.h> 27 27 #include <linux/local_lock.h> 28 28 29 + #ifdef CONFIG_PREEMPT_RT 30 + # define NON_RT(...) 31 + #else 32 + # define NON_RT(...) __VA_ARGS__ 33 + #endif 34 + 29 35 /* 30 36 * Change this to 1 if you want to see the failure printouts: 31 37 */ ··· 145 139 146 140 #endif 147 141 148 - static local_lock_t local_A = INIT_LOCAL_LOCK(local_A); 142 + static DEFINE_PER_CPU(local_lock_t, local_A); 149 143 150 144 /* 151 145 * non-inlined runtime initializers, to let separate locks share ··· 718 712 719 713 #undef E 720 714 715 + #ifdef CONFIG_PREEMPT_RT 716 + # define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); } 717 + #else 718 + # define RT_PREPARE_DBL_UNLOCK() 719 + #endif 721 720 /* 722 721 * Double unlock: 723 722 */ 724 723 #define E() \ 725 724 \ 726 725 LOCK(A); \ 726 + RT_PREPARE_DBL_UNLOCK(); \ 727 727 UNLOCK(A); \ 728 728 UNLOCK(A); /* fail */ 729 729 ··· 814 802 #include "locking-selftest-wlock-hardirq.h" 815 803 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock) 816 804 805 + #ifndef CONFIG_PREEMPT_RT 817 806 #include "locking-selftest-spin-softirq.h" 818 807 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin) 819 808 ··· 823 810 824 811 #include "locking-selftest-wlock-softirq.h" 825 812 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) 813 + #endif 826 814 827 815 #undef E1 828 816 #undef E2 829 817 818 + #ifndef CONFIG_PREEMPT_RT 830 819 /* 831 820 * Enabling hardirqs with a softirq-safe lock held: 832 821 */ ··· 861 846 #undef E1 862 847 #undef E2 863 848 849 + #endif 850 + 864 851 /* 865 852 * Enabling irqs with an irq-safe lock held: 866 853 */ ··· 892 875 #include "locking-selftest-wlock-hardirq.h" 893 876 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock) 894 877 878 + #ifndef CONFIG_PREEMPT_RT 895 879 #include "locking-selftest-spin-softirq.h" 896 880 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin) 897 881 ··· 901 883 902 884 #include "locking-selftest-wlock-softirq.h" 903 885 
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) 886 + #endif 904 887 905 888 #undef E1 906 889 #undef E2 ··· 940 921 #include "locking-selftest-wlock-hardirq.h" 941 922 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock) 942 923 924 + #ifndef CONFIG_PREEMPT_RT 943 925 #include "locking-selftest-spin-softirq.h" 944 926 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin) 945 927 ··· 949 929 950 930 #include "locking-selftest-wlock-softirq.h" 951 931 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) 932 + #endif 952 933 953 934 #undef E1 954 935 #undef E2 ··· 990 969 #include "locking-selftest-wlock-hardirq.h" 991 970 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock) 992 971 972 + #ifndef CONFIG_PREEMPT_RT 993 973 #include "locking-selftest-spin-softirq.h" 994 974 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin) 995 975 ··· 999 977 1000 978 #include "locking-selftest-wlock-softirq.h" 1001 979 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) 980 + #endif 1002 981 1003 982 #undef E1 1004 983 #undef E2 ··· 1054 1031 #include "locking-selftest-wlock-hardirq.h" 1055 1032 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock) 1056 1033 1034 + #ifndef CONFIG_PREEMPT_RT 1057 1035 #include "locking-selftest-spin-softirq.h" 1058 1036 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin) 1059 1037 ··· 1063 1039 1064 1040 #include "locking-selftest-wlock-softirq.h" 1065 1041 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) 1042 + #endif 1066 1043 1067 1044 #undef E1 1068 1045 #undef E2 ··· 1231 1206 #include "locking-selftest-wlock.h" 1232 1207 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock) 1233 1208 1209 + #ifndef CONFIG_PREEMPT_RT 1234 1210 #include "locking-selftest-softirq.h" 1235 1211 #include "locking-selftest-rlock.h" 1236 1212 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock) 1237 1213 1238 1214 #include "locking-selftest-wlock.h" 1239 1215 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock) 1216 + #endif 1240 
1217 1241 1218 #undef E1 1242 1219 #undef E2 ··· 1279 1252 #include "locking-selftest-wlock.h" 1280 1253 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock) 1281 1254 1255 + #ifndef CONFIG_PREEMPT_RT 1282 1256 #include "locking-selftest-softirq.h" 1283 1257 #include "locking-selftest-rlock.h" 1284 1258 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock) 1285 1259 1286 1260 #include "locking-selftest-wlock.h" 1287 1261 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock) 1262 + #endif 1288 1263 1289 1264 #undef E1 1290 1265 #undef E2 ··· 1335 1306 #include "locking-selftest-wlock.h" 1336 1307 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock) 1337 1308 1309 + #ifndef CONFIG_PREEMPT_RT 1338 1310 #include "locking-selftest-softirq.h" 1339 1311 #include "locking-selftest-rlock.h" 1340 1312 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock) 1341 1313 1342 1314 #include "locking-selftest-wlock.h" 1343 1315 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock) 1316 + #endif 1344 1317 1345 1318 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1346 1319 # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map) ··· 1351 1320 # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map) 1352 1321 # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map) 1353 1322 # define I_WW(x) lockdep_reset_lock(&x.dep_map) 1354 - # define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map) 1323 + # define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map)) 1355 1324 #ifdef CONFIG_RT_MUTEXES 1356 1325 # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map) 1357 1326 #endif ··· 1411 1380 init_shared_classes(); 1412 1381 raw_spin_lock_init(&raw_lock_A); 1413 1382 raw_spin_lock_init(&raw_lock_B); 1414 - local_lock_init(&local_A); 1383 + local_lock_init(this_cpu_ptr(&local_A)); 1415 1384 1416 1385 ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep); 1417 1386 memset(&t, 0, 
sizeof(t)); memset(&t2, 0, sizeof(t2)); ··· 1429 1398 1430 1399 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask) 1431 1400 { 1432 - unsigned long saved_preempt_count = preempt_count(); 1401 + int saved_preempt_count = preempt_count(); 1402 + #ifdef CONFIG_PREEMPT_RT 1403 + #ifdef CONFIG_SMP 1404 + int saved_mgd_count = current->migration_disabled; 1405 + #endif 1406 + int saved_rcu_count = current->rcu_read_lock_nesting; 1407 + #endif 1433 1408 1434 1409 WARN_ON(irqs_disabled()); 1435 1410 ··· 1469 1432 * count, so restore it: 1470 1433 */ 1471 1434 preempt_count_set(saved_preempt_count); 1435 + 1436 + #ifdef CONFIG_PREEMPT_RT 1437 + #ifdef CONFIG_SMP 1438 + while (current->migration_disabled > saved_mgd_count) 1439 + migrate_enable(); 1440 + #endif 1441 + 1442 + while (current->rcu_read_lock_nesting > saved_rcu_count) 1443 + rcu_read_unlock(); 1444 + WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count); 1445 + #endif 1446 + 1472 1447 #ifdef CONFIG_TRACE_IRQFLAGS 1473 1448 if (softirq_count()) 1474 1449 current->softirqs_enabled = 0; ··· 1548 1499 1549 1500 #define DO_TESTCASE_2x2RW(desc, name, nr) \ 1550 1501 DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \ 1551 - DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \ 1502 + NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \ 1552 1503 1553 1504 #define DO_TESTCASE_6x2x2RW(desc, name) \ 1554 1505 DO_TESTCASE_2x2RW(desc, name, 123); \ ··· 1596 1547 1597 1548 #define DO_TESTCASE_2I(desc, name, nr) \ 1598 1549 DO_TESTCASE_1("hard-"desc, name##_hard, nr); \ 1599 - DO_TESTCASE_1("soft-"desc, name##_soft, nr); 1550 + NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr)); 1600 1551 1601 1552 #define DO_TESTCASE_2IB(desc, name, nr) \ 1602 1553 DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \ 1603 - DO_TESTCASE_1B("soft-"desc, name##_soft, nr); 1554 + NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr)); 1604 1555 1605 1556 #define DO_TESTCASE_6I(desc, name, nr) \ 1606 1557 
DO_TESTCASE_3("hard-"desc, name##_hard, nr); \ 1607 - DO_TESTCASE_3("soft-"desc, name##_soft, nr); 1558 + NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr)); 1608 1559 1609 1560 #define DO_TESTCASE_6IRW(desc, name, nr) \ 1610 1561 DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \ 1611 - DO_TESTCASE_3RW("soft-"desc, name##_soft, nr); 1562 + NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr)); 1612 1563 1613 1564 #define DO_TESTCASE_2x3(desc, name) \ 1614 1565 DO_TESTCASE_3(desc, name, 12); \ ··· 1700 1651 #endif 1701 1652 } 1702 1653 1654 + #ifdef CONFIG_PREEMPT_RT 1655 + #define ww_mutex_base_lock(b) rt_mutex_lock(b) 1656 + #define ww_mutex_base_trylock(b) rt_mutex_trylock(b) 1657 + #define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2) 1658 + #define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b) 1659 + #define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b) 1660 + #define ww_mutex_base_unlock(b) rt_mutex_unlock(b) 1661 + #else 1662 + #define ww_mutex_base_lock(b) mutex_lock(b) 1663 + #define ww_mutex_base_trylock(b) mutex_trylock(b) 1664 + #define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2) 1665 + #define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b) 1666 + #define ww_mutex_base_lock_killable(b) mutex_lock_killable(b) 1667 + #define ww_mutex_base_unlock(b) mutex_unlock(b) 1668 + #endif 1669 + 1703 1670 static void ww_test_normal(void) 1704 1671 { 1705 1672 int ret; ··· 1730 1665 1731 1666 /* mutex_lock (and indirectly, mutex_lock_nested) */ 1732 1667 o.ctx = (void *)~0UL; 1733 - mutex_lock(&o.base); 1734 - mutex_unlock(&o.base); 1668 + ww_mutex_base_lock(&o.base); 1669 + ww_mutex_base_unlock(&o.base); 1735 1670 WARN_ON(o.ctx != (void *)~0UL); 1736 1671 1737 1672 /* mutex_lock_interruptible (and *_nested) */ 1738 1673 o.ctx = (void *)~0UL; 1739 - ret = mutex_lock_interruptible(&o.base); 1674 + ret = ww_mutex_base_lock_interruptible(&o.base); 1740 1675 if (!ret) 1741 - 
mutex_unlock(&o.base); 1676 + ww_mutex_base_unlock(&o.base); 1742 1677 else 1743 1678 WARN_ON(1); 1744 1679 WARN_ON(o.ctx != (void *)~0UL); 1745 1680 1746 1681 /* mutex_lock_killable (and *_nested) */ 1747 1682 o.ctx = (void *)~0UL; 1748 - ret = mutex_lock_killable(&o.base); 1683 + ret = ww_mutex_base_lock_killable(&o.base); 1749 1684 if (!ret) 1750 - mutex_unlock(&o.base); 1685 + ww_mutex_base_unlock(&o.base); 1751 1686 else 1752 1687 WARN_ON(1); 1753 1688 WARN_ON(o.ctx != (void *)~0UL); 1754 1689 1755 1690 /* trylock, succeeding */ 1756 1691 o.ctx = (void *)~0UL; 1757 - ret = mutex_trylock(&o.base); 1692 + ret = ww_mutex_base_trylock(&o.base); 1758 1693 WARN_ON(!ret); 1759 1694 if (ret) 1760 - mutex_unlock(&o.base); 1695 + ww_mutex_base_unlock(&o.base); 1761 1696 else 1762 1697 WARN_ON(1); 1763 1698 WARN_ON(o.ctx != (void *)~0UL); 1764 1699 1765 1700 /* trylock, failing */ 1766 1701 o.ctx = (void *)~0UL; 1767 - mutex_lock(&o.base); 1768 - ret = mutex_trylock(&o.base); 1702 + ww_mutex_base_lock(&o.base); 1703 + ret = ww_mutex_base_trylock(&o.base); 1769 1704 WARN_ON(ret); 1770 - mutex_unlock(&o.base); 1705 + ww_mutex_base_unlock(&o.base); 1771 1706 WARN_ON(o.ctx != (void *)~0UL); 1772 1707 1773 1708 /* nest_lock */ 1774 1709 o.ctx = (void *)~0UL; 1775 - mutex_lock_nest_lock(&o.base, &t); 1776 - mutex_unlock(&o.base); 1710 + ww_mutex_base_lock_nest_lock(&o.base, &t); 1711 + ww_mutex_base_unlock(&o.base); 1777 1712 WARN_ON(o.ctx != (void *)~0UL); 1778 1713 } 1779 1714 ··· 1786 1721 static void ww_test_diff_class(void) 1787 1722 { 1788 1723 WWAI(&t); 1789 - #ifdef CONFIG_DEBUG_MUTEXES 1724 + #ifdef DEBUG_WW_MUTEXES 1790 1725 t.ww_class = NULL; 1791 1726 #endif 1792 1727 WWL(&o, &t); ··· 1850 1785 { 1851 1786 int ret; 1852 1787 1853 - mutex_lock(&o2.base); 1788 + ww_mutex_base_lock(&o2.base); 1854 1789 o2.ctx = &t2; 1855 1790 mutex_release(&o2.base.dep_map, _THIS_IP_); 1856 1791 ··· 1866 1801 1867 1802 o2.ctx = NULL; 1868 1803 mutex_acquire(&o2.base.dep_map, 0, 1, 
_THIS_IP_); 1869 - mutex_unlock(&o2.base); 1804 + ww_mutex_base_unlock(&o2.base); 1870 1805 WWU(&o); 1871 1806 1872 1807 WWL(&o2, &t); ··· 1876 1811 { 1877 1812 int ret; 1878 1813 1879 - mutex_lock(&o2.base); 1814 + ww_mutex_base_lock(&o2.base); 1880 1815 mutex_release(&o2.base.dep_map, _THIS_IP_); 1881 1816 o2.ctx = &t2; 1882 1817 ··· 1892 1827 1893 1828 o2.ctx = NULL; 1894 1829 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); 1895 - mutex_unlock(&o2.base); 1830 + ww_mutex_base_unlock(&o2.base); 1896 1831 WWU(&o); 1897 1832 1898 1833 ww_mutex_lock_slow(&o2, &t); ··· 1902 1837 { 1903 1838 int ret; 1904 1839 1905 - mutex_lock(&o2.base); 1840 + ww_mutex_base_lock(&o2.base); 1906 1841 o2.ctx = &t2; 1907 1842 mutex_release(&o2.base.dep_map, _THIS_IP_); 1908 1843 ··· 1918 1853 1919 1854 o2.ctx = NULL; 1920 1855 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); 1921 - mutex_unlock(&o2.base); 1856 + ww_mutex_base_unlock(&o2.base); 1922 1857 1923 1858 WWL(&o2, &t); 1924 1859 } ··· 1927 1862 { 1928 1863 int ret; 1929 1864 1930 - mutex_lock(&o2.base); 1865 + ww_mutex_base_lock(&o2.base); 1931 1866 mutex_release(&o2.base.dep_map, _THIS_IP_); 1932 1867 o2.ctx = &t2; 1933 1868 ··· 1943 1878 1944 1879 o2.ctx = NULL; 1945 1880 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_); 1946 - mutex_unlock(&o2.base); 1881 + ww_mutex_base_unlock(&o2.base); 1947 1882 1948 1883 ww_mutex_lock_slow(&o2, &t); 1949 1884 } ··· 1952 1887 { 1953 1888 int ret; 1954 1889 1955 - mutex_lock(&o2.base); 1890 + ww_mutex_base_lock(&o2.base); 1956 1891 mutex_release(&o2.base.dep_map, _THIS_IP_); 1957 1892 o2.ctx = &t2; 1958 1893 ··· 1973 1908 { 1974 1909 int ret; 1975 1910 1976 - mutex_lock(&o2.base); 1911 + ww_mutex_base_lock(&o2.base); 1977 1912 mutex_release(&o2.base.dep_map, _THIS_IP_); 1978 1913 o2.ctx = &t2; 1979 1914 ··· 1994 1929 { 1995 1930 int ret; 1996 1931 1997 - mutex_lock(&o2.base); 1932 + ww_mutex_base_lock(&o2.base); 1998 1933 mutex_release(&o2.base.dep_map, _THIS_IP_); 1999 1934 o2.ctx = 
&t2; 2000 1935 2001 - mutex_lock(&o3.base); 1936 + ww_mutex_base_lock(&o3.base); 2002 1937 mutex_release(&o3.base.dep_map, _THIS_IP_); 2003 1938 o3.ctx = &t2; 2004 1939 ··· 2020 1955 { 2021 1956 int ret; 2022 1957 2023 - mutex_lock(&o2.base); 1958 + ww_mutex_base_lock(&o2.base); 2024 1959 mutex_release(&o2.base.dep_map, _THIS_IP_); 2025 1960 o2.ctx = &t2; 2026 1961 2027 - mutex_lock(&o3.base); 1962 + ww_mutex_base_lock(&o3.base); 2028 1963 mutex_release(&o3.base.dep_map, _THIS_IP_); 2029 1964 o3.ctx = &t2; 2030 1965 ··· 2045 1980 { 2046 1981 int ret; 2047 1982 2048 - mutex_lock(&o2.base); 1983 + ww_mutex_base_lock(&o2.base); 2049 1984 mutex_release(&o2.base.dep_map, _THIS_IP_); 2050 1985 o2.ctx = &t2; 2051 1986 ··· 2070 2005 { 2071 2006 int ret; 2072 2007 2073 - mutex_lock(&o2.base); 2008 + ww_mutex_base_lock(&o2.base); 2074 2009 mutex_release(&o2.base.dep_map, _THIS_IP_); 2075 2010 o2.ctx = &t2; 2076 2011 ··· 2711 2646 2712 2647 static void local_lock_2(void) 2713 2648 { 2714 - local_lock_acquire(&local_A); /* IRQ-ON */ 2715 - local_lock_release(&local_A); 2649 + local_lock(&local_A); /* IRQ-ON */ 2650 + local_unlock(&local_A); 2716 2651 2717 2652 HARDIRQ_ENTER(); 2718 2653 spin_lock(&lock_A); /* IN-IRQ */ ··· 2721 2656 2722 2657 HARDIRQ_DISABLE(); 2723 2658 spin_lock(&lock_A); 2724 - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ 2725 - local_lock_release(&local_A); 2659 + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */ 2660 + local_unlock(&local_A); 2726 2661 spin_unlock(&lock_A); 2727 2662 HARDIRQ_ENABLE(); 2728 2663 } 2729 2664 2730 2665 static void local_lock_3A(void) 2731 2666 { 2732 - local_lock_acquire(&local_A); /* IRQ-ON */ 2667 + local_lock(&local_A); /* IRQ-ON */ 2733 2668 spin_lock(&lock_B); /* IRQ-ON */ 2734 2669 spin_unlock(&lock_B); 2735 - local_lock_release(&local_A); 2670 + local_unlock(&local_A); 2736 2671 2737 2672 HARDIRQ_ENTER(); 2738 2673 spin_lock(&lock_A); /* IN-IRQ */ ··· 2741 2676 2742 2677 
HARDIRQ_DISABLE(); 2743 2678 spin_lock(&lock_A); 2744 - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ 2745 - local_lock_release(&local_A); 2679 + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ 2680 + local_unlock(&local_A); 2746 2681 spin_unlock(&lock_A); 2747 2682 HARDIRQ_ENABLE(); 2748 2683 } 2749 2684 2750 2685 static void local_lock_3B(void) 2751 2686 { 2752 - local_lock_acquire(&local_A); /* IRQ-ON */ 2687 + local_lock(&local_A); /* IRQ-ON */ 2753 2688 spin_lock(&lock_B); /* IRQ-ON */ 2754 2689 spin_unlock(&lock_B); 2755 - local_lock_release(&local_A); 2690 + local_unlock(&local_A); 2756 2691 2757 2692 HARDIRQ_ENTER(); 2758 2693 spin_lock(&lock_A); /* IN-IRQ */ ··· 2761 2696 2762 2697 HARDIRQ_DISABLE(); 2763 2698 spin_lock(&lock_A); 2764 - local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ 2765 - local_lock_release(&local_A); 2699 + local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */ 2700 + local_unlock(&local_A); 2766 2701 spin_unlock(&lock_A); 2767 2702 HARDIRQ_ENABLE(); 2768 2703 ··· 2877 2812 printk("------------------------\n"); 2878 2813 printk("| Locking API testsuite:\n"); 2879 2814 printk("----------------------------------------------------------------------------\n"); 2880 - printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n"); 2815 + printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n"); 2881 2816 printk(" --------------------------------------------------------------------------\n"); 2882 2817 2883 2818 init_shared_classes(); ··· 2950 2885 DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1); 2951 2886 2952 2887 printk(" --------------------------------------------------------------------------\n"); 2953 - 2954 2888 /* 2955 2889 * irq-context testcases: 2956 2890 */ 2957 2891 DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1); 2958 - 
DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A); 2892 + NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A)); 2959 2893 DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B); 2960 2894 DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3); 2961 2895 DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);