Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()

These functions check should_resched() before unlocking a spinlock or
re-enabling bottom halves. At that point preempt_count is always non-zero,
so should_resched() always returns false; as a result cond_resched_lock()
only worked when spin_needbreak was set.

This patch adds argument "preempt_offset" to should_resched().

It also introduces preempt_count offset constants for the relevant contexts:

PREEMPT_DISABLE_OFFSET - offset after preempt_disable()
PREEMPT_LOCK_OFFSET - offset after spin_lock()
SOFTIRQ_DISABLE_OFFSET - offset after local_bh_disable()
SOFTIRQ_LOCK_OFFSET - offset after spin_lock_bh()

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Konstantin Khlebnikov and committed by
Ingo Molnar
fe32d3cd c56dadf3

+22 -18
+2 -2
arch/x86/include/asm/preempt.h
··· 90 90 /* 91 91 * Returns true when we need to resched and can (barring IRQ state). 92 92 */ 93 - static __always_inline bool should_resched(void) 93 + static __always_inline bool should_resched(int preempt_offset) 94 94 { 95 - return unlikely(!raw_cpu_read_4(__preempt_count)); 95 + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); 96 96 } 97 97 98 98 #ifdef CONFIG_PREEMPT
+3 -2
include/asm-generic/preempt.h
··· 71 71 /* 72 72 * Returns true when we need to resched and can (barring IRQ state). 73 73 */ 74 - static __always_inline bool should_resched(void) 74 + static __always_inline bool should_resched(int preempt_offset) 75 75 { 76 - return unlikely(!preempt_count() && tif_need_resched()); 76 + return unlikely(preempt_count() == preempt_offset && 77 + tif_need_resched()); 77 78 } 78 79 79 80 #ifdef CONFIG_PREEMPT
+14 -5
include/linux/preempt.h
··· 84 84 */ 85 85 #define in_nmi() (preempt_count() & NMI_MASK) 86 86 87 + /* 88 + * The preempt_count offset after preempt_disable(); 89 + */ 87 90 #if defined(CONFIG_PREEMPT_COUNT) 88 - # define PREEMPT_DISABLE_OFFSET 1 91 + # define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET 89 92 #else 90 - # define PREEMPT_DISABLE_OFFSET 0 93 + # define PREEMPT_DISABLE_OFFSET 0 91 94 #endif 95 + 96 + /* 97 + * The preempt_count offset after spin_lock() 98 + */ 99 + #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET 92 100 93 101 /* 94 102 * The preempt_count offset needed for things like: ··· 111 103 * 112 104 * Work as expected. 113 105 */ 114 - #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET) 106 + #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET) 115 107 116 108 /* 117 109 * Are we running in atomic context? WARNING: this macro cannot ··· 132 124 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 133 125 extern void preempt_count_add(int val); 134 126 extern void preempt_count_sub(int val); 135 - #define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) 127 + #define preempt_count_dec_and_test() \ 128 + ({ preempt_count_sub(1); should_resched(0); }) 136 129 #else 137 130 #define preempt_count_add(val) __preempt_count_add(val) 138 131 #define preempt_count_sub(val) __preempt_count_sub(val) ··· 193 184 194 185 #define preempt_check_resched() \ 195 186 do { \ 196 - if (should_resched()) \ 187 + if (should_resched(0)) \ 197 188 __preempt_schedule(); \ 198 189 } while (0) 199 190
-6
include/linux/sched.h
··· 2891 2891 2892 2892 extern int __cond_resched_lock(spinlock_t *lock); 2893 2893 2894 - #ifdef CONFIG_PREEMPT_COUNT 2895 - #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET 2896 - #else 2897 - #define PREEMPT_LOCK_OFFSET 0 2898 - #endif 2899 - 2900 2894 #define cond_resched_lock(lock) ({ \ 2901 2895 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ 2902 2896 __cond_resched_lock(lock); \
+3 -3
kernel/sched/core.c
··· 4496 4496 4497 4497 int __sched _cond_resched(void) 4498 4498 { 4499 - if (should_resched()) { 4499 + if (should_resched(0)) { 4500 4500 preempt_schedule_common(); 4501 4501 return 1; 4502 4502 } ··· 4514 4514 */ 4515 4515 int __cond_resched_lock(spinlock_t *lock) 4516 4516 { 4517 - int resched = should_resched(); 4517 + int resched = should_resched(PREEMPT_LOCK_OFFSET); 4518 4518 int ret = 0; 4519 4519 4520 4520 lockdep_assert_held(lock); ··· 4536 4536 { 4537 4537 BUG_ON(!in_softirq()); 4538 4538 4539 - if (should_resched()) { 4539 + if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { 4540 4540 local_bh_enable(); 4541 4541 preempt_schedule_common(); 4542 4542 local_bh_disable();