Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
"Two fixes from lockdep coverage of seqlocks, which fix deadlocks on
lockdep-enabled ARM systems"

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched_clock: Disable seqlock lockdep usage in sched_clock()
seqlock: Use raw_ prefix instead of _no_lockdep
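
Background for both patches: lockdep instrumentation was recently added to the seqcount/seqlock read and write paths, but the lockdep machinery itself eventually calls into sched_clock(), so an instrumented seqcount read there recurses and deadlocks. The raw_ variants introduced below skip that instrumentation. As a rough, self-contained sketch of the reader pattern these patches rely on (my_seq, my_value and my_read are illustrative names, not part of the patches):

	#include <linux/seqlock.h>

	static seqcount_t my_seq;	/* set up with seqcount_init(&my_seq) */
	static u64 my_value;

	static u64 my_read(void)
	{
		unsigned seq;
		u64 val;

		do {
			/* raw_: no lockdep hook, so this stays safe even in
			 * code that lockdep itself ends up calling, such as
			 * sched_clock() */
			seq = raw_read_seqcount_begin(&my_seq);
			val = my_value;
		} while (read_seqcount_retry(&my_seq, seq));

		return val;
	}

The matching writer side of this pattern appears in the seqlock.h diff below.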

Changed files (+26 -15):
arch/x86/vdso/vclock_gettime.c (+4 -4)
···
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ns = gtod->wall_time_snsec;
···
 
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
 		ns = gtod->monotonic_time_snsec;
···
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
···
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+		seq = raw_read_seqcount_begin(&gtod->seq);
 		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
include/linux/seqlock.h (+19 -8)
···
 }
 
 /**
- * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
  * @s: pointer to seqcount_t
  * Returns: count to be passed to read_seqcount_retry
  *
- * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * raw_read_seqcount_begin opens a read critical section of the given
  * seqcount, but without any lockdep checking. Validity of the critical
  * section is tested by checking read_seqcount_retry function.
  */
-static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = __read_seqcount_begin(s);
 	smp_rmb();
···
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
 	seqcount_lockdep_reader_access(s);
-	return read_seqcount_begin_no_lockdep(s);
+	return raw_read_seqcount_begin(s);
 }
 
 /**
···
 }
 
 
+
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	s->sequence++;
+	smp_wmb();
+}
+
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	smp_wmb();
+	s->sequence++;
+}
+
 /*
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
 static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
-	s->sequence++;
-	smp_wmb();
+	raw_write_seqcount_begin(s);
 	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
 }
···
 static inline void write_seqcount_end(seqcount_t *s)
 {
 	seqcount_release(&s->dep_map, 1, _RET_IP_);
-	smp_wmb();
-	s->sequence++;
+	raw_write_seqcount_end(s);
 }
 
 /**
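
The new raw_write_seqcount_begin()/raw_write_seqcount_end() helpers bump the sequence and order the stores, but leave both serialization and lockdep annotation to the caller. A hedged sketch of a writer that supplies its own exclusion, continuing the illustrative my_seq/my_value names from the earlier sketch (my_lock and my_update are likewise illustrative):

	static DEFINE_RAW_SPINLOCK(my_lock);

	static void my_update(u64 new_val)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_lock, flags);	/* caller-provided exclusion */
		raw_write_seqcount_begin(&my_seq);	/* sequence++; smp_wmb() */
		my_value = new_val;
		raw_write_seqcount_end(&my_seq);	/* smp_wmb(); sequence++ */
		raw_spin_unlock_irqrestore(&my_lock, flags);
	}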
kernel/time/sched_clock.c (+3 -3)
···
 		return cd.epoch_ns;
 
 	do {
-		seq = read_seqcount_begin(&cd.seq);
+		seq = raw_read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
 		epoch_ns = cd.epoch_ns;
 	} while (read_seqcount_retry(&cd.seq, seq));
···
 			  cd.mult, cd.shift);
 
 	raw_local_irq_save(flags);
-	write_seqcount_begin(&cd.seq);
+	raw_write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
 	cd.epoch_cyc = cyc;
-	raw_write_seqcount_end(&cd.seq);
+	raw_write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
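
Note on the sched_clock.c hunks: here the caller-provided exclusion is simply raw_local_irq_save(), which the diff suggests is sufficient because this update path is effectively the only writer and runs with interrupts disabled on the local CPU. Using the raw_ read and write helpers keeps lockdep instrumentation out of sched_clock(), which the lockdep machinery itself can end up calling.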