Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390: don't trace preemption in percpu macros

Since commit a21ee6055c30 ("lockdep: Change hardirq{s_enabled,_context}
to per-cpu variables") the lockdep code itself uses percpu variables. This
leads to recursions because the percpu macros are calling preempt_enable()
which might call trace_preempt_on().

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>

Authored by Sven Schnelle and committed by Vasily Gorbik
1196f12a d012a719

+14 -14
+14 -14
arch/s390/include/asm/percpu.h
··· 29 29 typedef typeof(pcp) pcp_op_T__; \ 30 30 pcp_op_T__ old__, new__, prev__; \ 31 31 pcp_op_T__ *ptr__; \ 32 - preempt_disable(); \ 32 + preempt_disable_notrace(); \ 33 33 ptr__ = raw_cpu_ptr(&(pcp)); \ 34 34 prev__ = *ptr__; \ 35 35 do { \ ··· 37 37 new__ = old__ op (val); \ 38 38 prev__ = cmpxchg(ptr__, old__, new__); \ 39 39 } while (prev__ != old__); \ 40 - preempt_enable(); \ 40 + preempt_enable_notrace(); \ 41 41 new__; \ 42 42 }) 43 43 ··· 68 68 typedef typeof(pcp) pcp_op_T__; \ 69 69 pcp_op_T__ val__ = (val); \ 70 70 pcp_op_T__ old__, *ptr__; \ 71 - preempt_disable(); \ 71 + preempt_disable_notrace(); \ 72 72 ptr__ = raw_cpu_ptr(&(pcp)); \ 73 73 if (__builtin_constant_p(val__) && \ 74 74 ((szcast)val__ > -129) && ((szcast)val__ < 128)) { \ ··· 84 84 : [val__] "d" (val__) \ 85 85 : "cc"); \ 86 86 } \ 87 - preempt_enable(); \ 87 + preempt_enable_notrace(); \ 88 88 } 89 89 90 90 #define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int) ··· 95 95 typedef typeof(pcp) pcp_op_T__; \ 96 96 pcp_op_T__ val__ = (val); \ 97 97 pcp_op_T__ old__, *ptr__; \ 98 - preempt_disable(); \ 98 + preempt_disable_notrace(); \ 99 99 ptr__ = raw_cpu_ptr(&(pcp)); \ 100 100 asm volatile( \ 101 101 op " %[old__],%[val__],%[ptr__]\n" \ 102 102 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 103 103 : [val__] "d" (val__) \ 104 104 : "cc"); \ 105 - preempt_enable(); \ 105 + preempt_enable_notrace(); \ 106 106 old__ + val__; \ 107 107 }) 108 108 ··· 114 114 typedef typeof(pcp) pcp_op_T__; \ 115 115 pcp_op_T__ val__ = (val); \ 116 116 pcp_op_T__ old__, *ptr__; \ 117 - preempt_disable(); \ 117 + preempt_disable_notrace(); \ 118 118 ptr__ = raw_cpu_ptr(&(pcp)); \ 119 119 asm volatile( \ 120 120 op " %[old__],%[val__],%[ptr__]\n" \ 121 121 : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \ 122 122 : [val__] "d" (val__) \ 123 123 : "cc"); \ 124 - preempt_enable(); \ 124 + preempt_enable_notrace(); \ 125 125 } 126 126 127 127 #define this_cpu_and_4(pcp, val) 
arch_this_cpu_to_op(pcp, val, "lan") ··· 136 136 typedef typeof(pcp) pcp_op_T__; \ 137 137 pcp_op_T__ ret__; \ 138 138 pcp_op_T__ *ptr__; \ 139 - preempt_disable(); \ 139 + preempt_disable_notrace(); \ 140 140 ptr__ = raw_cpu_ptr(&(pcp)); \ 141 141 ret__ = cmpxchg(ptr__, oval, nval); \ 142 - preempt_enable(); \ 142 + preempt_enable_notrace(); \ 143 143 ret__; \ 144 144 }) 145 145 ··· 152 152 ({ \ 153 153 typeof(pcp) *ptr__; \ 154 154 typeof(pcp) ret__; \ 155 - preempt_disable(); \ 155 + preempt_disable_notrace(); \ 156 156 ptr__ = raw_cpu_ptr(&(pcp)); \ 157 157 ret__ = xchg(ptr__, nval); \ 158 - preempt_enable(); \ 158 + preempt_enable_notrace(); \ 159 159 ret__; \ 160 160 }) 161 161 ··· 171 171 typeof(pcp1) *p1__; \ 172 172 typeof(pcp2) *p2__; \ 173 173 int ret__; \ 174 - preempt_disable(); \ 174 + preempt_disable_notrace(); \ 175 175 p1__ = raw_cpu_ptr(&(pcp1)); \ 176 176 p2__ = raw_cpu_ptr(&(pcp2)); \ 177 177 ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \ 178 - preempt_enable(); \ 178 + preempt_enable_notrace(); \ 179 179 ret__; \ 180 180 }) 181 181