Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

percpu: add preemption checks to __this_cpu ops

We define a check function in order to avoid trouble with the include
files. Then the higher level __this_cpu macros are modified to invoke
the preemption check.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Lameter and committed by Linus Torvalds
188a8140 293b6a4c

+43 -14
+29 -10
include/linux/percpu.h
··· 173 173 174 174 extern void __bad_size_call_parameter(void); 175 175 176 + #ifdef CONFIG_DEBUG_PREEMPT 177 + extern void __this_cpu_preempt_check(const char *op); 178 + #else 179 + static inline void __this_cpu_preempt_check(const char *op) { } 180 + #endif 181 + 176 182 #define __pcpu_size_call_return(stem, variable) \ 177 183 ({ typeof(variable) pscr_ret__; \ 178 184 __verify_pcpu_ptr(&(variable)); \ ··· 731 725 732 726 /* 733 727 * Generic percpu operations for context that are safe from preemption/interrupts. 734 - * Checks will be added here soon. 735 728 */ 736 729 #ifndef __this_cpu_read 737 - # define __this_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) 730 + # define __this_cpu_read(pcp) \ 731 + (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) 738 732 #endif 739 733 740 734 #ifndef __this_cpu_write 741 - # define __this_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) 735 + # define __this_cpu_write(pcp, val) \ 736 + do { __this_cpu_preempt_check("write"); \ 737 + __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ 738 + } while (0) 742 739 #endif 743 740 744 741 #ifndef __this_cpu_add 745 - # define __this_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) 742 + # define __this_cpu_add(pcp, val) \ 743 + do { __this_cpu_preempt_check("add"); \ 744 + __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ 745 + } while (0) 746 746 #endif 747 747 748 748 #ifndef __this_cpu_sub ··· 764 752 #endif 765 753 766 754 #ifndef __this_cpu_and 767 - # define __this_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) 755 + # define __this_cpu_and(pcp, val) \ 756 + do { __this_cpu_preempt_check("and"); \ 757 + __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ 758 + } while (0) 759 + 768 760 #endif 769 761 770 762 #ifndef __this_cpu_or 771 - # define __this_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) 763 + # define __this_cpu_or(pcp, val) \ 764 + do { __this_cpu_preempt_check("or"); \ 765 + __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ 766 + } while (0) 772 767 #endif 773 768 774 769 #ifndef __this_cpu_add_return 775 770 # define __this_cpu_add_return(pcp, val) \ 776 - __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) 771 + (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) 777 772 #endif 778 773 779 774 #define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) ··· 789 770 790 771 #ifndef __this_cpu_xchg 791 772 # define __this_cpu_xchg(pcp, nval) \ 792 - __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) 773 + (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) 793 774 #endif 794 775 795 776 #ifndef __this_cpu_cmpxchg 796 777 # define __this_cpu_cmpxchg(pcp, oval, nval) \ 797 - __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) 778 + (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) 798 779 #endif 799 780 800 781 #ifndef __this_cpu_cmpxchg_double 801 782 # define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ 802 - __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) 783 + (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) 803 784 #endif 804 785 805 786 #endif /* __LINUX_PERCPU_H */
+14 -4
lib/smp_processor_id.c
··· 7 7 #include <linux/kallsyms.h> 8 8 #include <linux/sched.h> 9 9 10 - notrace unsigned int debug_smp_processor_id(void) 10 + notrace static unsigned int check_preemption_disabled(const char *what1, 11 + const char *what2) 11 12 { 12 13 int this_cpu = raw_smp_processor_id(); 13 14 ··· 39 38 if (!printk_ratelimit()) 40 39 goto out_enable; 41 40 42 - printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] " 43 - "code: %s/%d\n", 44 - preempt_count() - 1, current->comm, current->pid); 41 + printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", 42 + what1, what2, preempt_count() - 1, current->comm, current->pid); 43 + 45 44 print_symbol("caller is %s\n", (long)__builtin_return_address(0)); 46 45 dump_stack(); 47 46 ··· 51 50 return this_cpu; 52 51 } 53 52 53 + notrace unsigned int debug_smp_processor_id(void) 54 + { 55 + return check_preemption_disabled("smp_processor_id", ""); 56 + } 54 57 EXPORT_SYMBOL(debug_smp_processor_id); 55 58 59 + notrace void __this_cpu_preempt_check(const char *op) 60 + { 61 + check_preemption_disabled("__this_cpu_", op); 62 + } 63 + EXPORT_SYMBOL(__this_cpu_preempt_check);