Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: Isolate preempt counting in its own config option

Create a new CONFIG_PREEMPT_COUNT that handles the inc/dec
of preempt count offset independently. So that the offset
can be updated by preempt_disable() and preempt_enable()
even without the need for CONFIG_PREEMPT being set.

This prepares to make CONFIG_DEBUG_SPINLOCK_SLEEP work
with !CONFIG_PREEMPT where it currently doesn't detect
code that sleeps inside explicitly preemption-disabled
sections.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>

+33 -22
+1 -1
include/linux/bit_spinlock.h
··· 88 88 { 89 89 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 90 90 return test_bit(bitnum, addr); 91 - #elif defined CONFIG_PREEMPT 91 + #elif defined CONFIG_PREEMPT_COUNT 92 92 return preempt_count(); 93 93 #else 94 94 return 1;
+2 -2
include/linux/hardirq.h
··· 93 93 */ 94 94 #define in_nmi() (preempt_count() & NMI_MASK) 95 95 96 - #if defined(CONFIG_PREEMPT) 96 + #if defined(CONFIG_PREEMPT_COUNT) 97 97 # define PREEMPT_CHECK_OFFSET 1 98 98 #else 99 99 # define PREEMPT_CHECK_OFFSET 0 ··· 115 115 #define in_atomic_preempt_off() \ 116 116 ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) 117 117 118 - #ifdef CONFIG_PREEMPT 118 + #ifdef CONFIG_PREEMPT_COUNT 119 119 # define preemptible() (preempt_count() == 0 && !irqs_disabled()) 120 120 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) 121 121 #else
+2 -2
include/linux/pagemap.h
··· 134 134 VM_BUG_ON(in_interrupt()); 135 135 136 136 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) 137 - # ifdef CONFIG_PREEMPT 137 + # ifdef CONFIG_PREEMPT_COUNT 138 138 VM_BUG_ON(!in_atomic()); 139 139 # endif 140 140 /* ··· 172 172 VM_BUG_ON(in_interrupt()); 173 173 174 174 #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) 175 - # ifdef CONFIG_PREEMPT 175 + # ifdef CONFIG_PREEMPT_COUNT 176 176 VM_BUG_ON(!in_atomic()); 177 177 # endif 178 178 VM_BUG_ON(page_count(page) == 0);
+17 -9
include/linux/preempt.h
··· 27 27 28 28 asmlinkage void preempt_schedule(void); 29 29 30 + #define preempt_check_resched() \ 31 + do { \ 32 + if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ 33 + preempt_schedule(); \ 34 + } while (0) 35 + 36 + #else /* !CONFIG_PREEMPT */ 37 + 38 + #define preempt_check_resched() do { } while (0) 39 + 40 + #endif /* CONFIG_PREEMPT */ 41 + 42 + 43 + #ifdef CONFIG_PREEMPT_COUNT 44 + 30 45 #define preempt_disable() \ 31 46 do { \ 32 47 inc_preempt_count(); \ ··· 52 37 do { \ 53 38 barrier(); \ 54 39 dec_preempt_count(); \ 55 - } while (0) 56 - 57 - #define preempt_check_resched() \ 58 - do { \ 59 - if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \ 60 - preempt_schedule(); \ 61 40 } while (0) 62 41 63 42 #define preempt_enable() \ ··· 89 80 preempt_check_resched(); \ 90 81 } while (0) 91 82 92 - #else 83 + #else /* !CONFIG_PREEMPT_COUNT */ 93 84 94 85 #define preempt_disable() do { } while (0) 95 86 #define preempt_enable_no_resched() do { } while (0) 96 87 #define preempt_enable() do { } while (0) 97 - #define preempt_check_resched() do { } while (0) 98 88 99 89 #define preempt_disable_notrace() do { } while (0) 100 90 #define preempt_enable_no_resched_notrace() do { } while (0) 101 91 #define preempt_enable_notrace() do { } while (0) 102 92 103 - #endif 93 + #endif /* CONFIG_PREEMPT_COUNT */ 104 94 105 95 #ifdef CONFIG_PREEMPT_NOTIFIERS 106 96
+6 -6
include/linux/rcupdate.h
··· 239 239 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot 240 240 * and while lockdep is disabled. 241 241 */ 242 - #ifdef CONFIG_PREEMPT 242 + #ifdef CONFIG_PREEMPT_COUNT 243 243 static inline int rcu_read_lock_sched_held(void) 244 244 { 245 245 int lockdep_opinion = 0; ··· 250 250 lockdep_opinion = lock_is_held(&rcu_sched_lock_map); 251 251 return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); 252 252 } 253 - #else /* #ifdef CONFIG_PREEMPT */ 253 + #else /* #ifdef CONFIG_PREEMPT_COUNT */ 254 254 static inline int rcu_read_lock_sched_held(void) 255 255 { 256 256 return 1; 257 257 } 258 - #endif /* #else #ifdef CONFIG_PREEMPT */ 258 + #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ 259 259 260 260 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 261 261 ··· 276 276 return 1; 277 277 } 278 278 279 - #ifdef CONFIG_PREEMPT 279 + #ifdef CONFIG_PREEMPT_COUNT 280 280 static inline int rcu_read_lock_sched_held(void) 281 281 { 282 282 return preempt_count() != 0 || irqs_disabled(); 283 283 } 284 - #else /* #ifdef CONFIG_PREEMPT */ 284 + #else /* #ifdef CONFIG_PREEMPT_COUNT */ 285 285 static inline int rcu_read_lock_sched_held(void) 286 286 { 287 287 return 1; 288 288 } 289 - #endif /* #else #ifdef CONFIG_PREEMPT */ 289 + #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ 290 290 291 291 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ 292 292
+1 -1
include/linux/sched.h
··· 2502 2502 2503 2503 extern int __cond_resched_lock(spinlock_t *lock); 2504 2504 2505 - #ifdef CONFIG_PREEMPT 2505 + #ifdef CONFIG_PREEMPT_COUNT 2506 2506 #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET 2507 2507 #else 2508 2508 #define PREEMPT_LOCK_OFFSET 0
+3
kernel/Kconfig.preempt
··· 35 35 36 36 config PREEMPT 37 37 bool "Preemptible Kernel (Low-Latency Desktop)" 38 + select PREEMPT_COUNT 38 39 help 39 40 This option reduces the latency of the kernel by making 40 41 all kernel code (that is not executing in a critical section) ··· 53 52 54 53 endchoice 55 54 55 + config PREEMPT_COUNT 56 + bool
+1 -1
kernel/sched.c
··· 2843 2843 #if defined(CONFIG_SMP) 2844 2844 p->on_cpu = 0; 2845 2845 #endif 2846 - #ifdef CONFIG_PREEMPT 2846 + #ifdef CONFIG_PREEMPT_COUNT 2847 2847 /* Want to start with kernel preemption disabled. */ 2848 2848 task_thread_info(p)->preempt_count = 1; 2849 2849 #endif