Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: Introduce preempt_count accessor functions

Replace the single preempt_count() 'function' that's an lvalue with
two proper functions:

preempt_count() - returns the preempt_count value as an rvalue
preempt_count_set() - allows setting the preempt_count value

Also provide preempt_count_ptr() as a convenience wrapper to implement
all modifying operations.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-orxrbycjozopqfhb4dxdkdvb@git.kernel.org
[ Fixed build failure. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Peter Zijlstra and committed by
Ingo Molnar
4a2b4b22 ea811747

+30 -18
+19 -6
include/linux/preempt.h
··· 10 10 #include <linux/linkage.h> 11 11 #include <linux/list.h> 12 12 13 + static __always_inline int preempt_count(void) 14 + { 15 + return current_thread_info()->preempt_count; 16 + } 17 + 18 + static __always_inline int *preempt_count_ptr(void) 19 + { 20 + return &current_thread_info()->preempt_count; 21 + } 22 + 23 + static __always_inline void preempt_count_set(int pc) 24 + { 25 + *preempt_count_ptr() = pc; 26 + } 27 + 13 28 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) 14 29 extern void add_preempt_count(int val); 15 30 extern void sub_preempt_count(int val); 16 31 #else 17 - # define add_preempt_count(val) do { preempt_count() += (val); } while (0) 18 - # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0) 32 + # define add_preempt_count(val) do { *preempt_count_ptr() += (val); } while (0) 33 + # define sub_preempt_count(val) do { *preempt_count_ptr() -= (val); } while (0) 19 34 #endif 20 35 21 36 #define inc_preempt_count() add_preempt_count(1) 22 37 #define dec_preempt_count() sub_preempt_count(1) 23 - 24 - #define preempt_count() (current_thread_info()->preempt_count) 25 38 26 39 #ifdef CONFIG_PREEMPT 27 40 ··· 94 81 95 82 /* For debugging and tracer internals only! */ 96 83 #define add_preempt_count_notrace(val) \ 97 - do { preempt_count() += (val); } while (0) 84 + do { *preempt_count_ptr() += (val); } while (0) 98 85 #define sub_preempt_count_notrace(val) \ 99 - do { preempt_count() -= (val); } while (0) 86 + do { *preempt_count_ptr() -= (val); } while (0) 100 87 #define inc_preempt_count_notrace() add_preempt_count_notrace(1) 101 88 #define dec_preempt_count_notrace() sub_preempt_count_notrace(1) 102 89
+1 -1
init/main.c
··· 692 692 693 693 if (preempt_count() != count) { 694 694 sprintf(msgbuf, "preemption imbalance "); 695 - preempt_count() = count; 695 + preempt_count_set(count); 696 696 } 697 697 if (irqs_disabled()) { 698 698 strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
+2 -2
kernel/sched/core.c
··· 2219 2219 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) 2220 2220 return; 2221 2221 #endif 2222 - preempt_count() += val; 2222 + add_preempt_count_notrace(val); 2223 2223 #ifdef CONFIG_DEBUG_PREEMPT 2224 2224 /* 2225 2225 * Spinlock count overflowing soon? ··· 2250 2250 2251 2251 if (preempt_count() == val) 2252 2252 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); 2253 - preempt_count() -= val; 2253 + sub_preempt_count_notrace(val); 2254 2254 } 2255 2255 EXPORT_SYMBOL(sub_preempt_count); 2256 2256
+2 -2
kernel/softirq.c
··· 106 106 * We must manually increment preempt_count here and manually 107 107 * call the trace_preempt_off later. 108 108 */ 109 - preempt_count() += cnt; 109 + add_preempt_count_notrace(cnt); 110 110 /* 111 111 * Were softirqs turned off above: 112 112 */ ··· 256 256 " exited with %08x?\n", vec_nr, 257 257 softirq_to_name[vec_nr], h->action, 258 258 prev_count, preempt_count()); 259 - preempt_count() = prev_count; 259 + preempt_count_set(prev_count); 260 260 } 261 261 262 262 rcu_bh_qs(cpu);
+4 -4
kernel/timer.c
··· 1092 1092 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long), 1093 1093 unsigned long data) 1094 1094 { 1095 - int preempt_count = preempt_count(); 1095 + int count = preempt_count(); 1096 1096 1097 1097 #ifdef CONFIG_LOCKDEP 1098 1098 /* ··· 1119 1119 1120 1120 lock_map_release(&lockdep_map); 1121 1121 1122 - if (preempt_count != preempt_count()) { 1122 + if (count != preempt_count()) { 1123 1123 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", 1124 - fn, preempt_count, preempt_count()); 1124 + fn, count, preempt_count()); 1125 1125 /* 1126 1126 * Restore the preempt count. That gives us a decent 1127 1127 * chance to survive and extract information. If the 1128 1128 * callback kept a lock held, bad luck, but not worse 1129 1129 * than the BUG() we had. 1130 1130 */ 1131 - preempt_count() = preempt_count; 1131 + preempt_count_set(count); 1132 1132 } 1133 1133 } 1134 1134
+1 -1
lib/locking-selftest.c
··· 1002 1002 * Some tests (e.g. double-unlock) might corrupt the preemption 1003 1003 * count, so restore it: 1004 1004 */ 1005 - preempt_count() = saved_preempt_count; 1005 + preempt_count_set(saved_preempt_count); 1006 1006 #ifdef CONFIG_TRACE_IRQFLAGS 1007 1007 if (softirq_count()) 1008 1008 current->softirqs_enabled = 0;
+1 -2
lib/smp_processor_id.c
··· 9 9 10 10 notrace unsigned int debug_smp_processor_id(void) 11 11 { 12 - unsigned long preempt_count = preempt_count(); 13 12 int this_cpu = raw_smp_processor_id(); 14 13 15 - if (likely(preempt_count)) 14 + if (likely(preempt_count())) 16 15 goto out; 17 16 18 17 if (irqs_disabled())