Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  oprofile: warn on freeing event buffer too early
  oprofile: fix race condition in event_buffer free
  lockdep: Use cpu_clock() for lockstat

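The two oprofile commits close a race between freeing the event buffer and readers still sleeping in read() of the buffer file. The following is a minimal sketch of that pattern only (simplified names, not the driver's actual code): teardown frees the buffer while holding the same mutex the readers take, and a reader re-checks the pointer after acquiring the mutex so it backs out with -EINTR instead of touching freed memory.

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static DEFINE_MUTEX(buf_mutex);
static unsigned long *buf;		/* protected by buf_mutex */

static void buf_free(void)
{
	mutex_lock(&buf_mutex);
	vfree(buf);
	buf = NULL;			/* readers now see NULL, not a stale pointer */
	mutex_unlock(&buf_mutex);
}

static ssize_t buf_read(char __user *to, size_t count)
{
	ssize_t ret = 0;

	mutex_lock(&buf_mutex);
	if (!buf) {			/* buffer was freed while we slept */
		ret = -EINTR;
		goto out;
	}
	/* ... copy data to userspace under the mutex ... */
out:
	mutex_unlock(&buf_mutex);
	return ret;
}
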
+38 -17
+26 -9
drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
@@ -80,7 +69,6 @@
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -90,21 +80,22 @@
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
 
 
@@ -177,6 +166,12 @@
 		return -EAGAIN;
 
 	mutex_lock(&buffer_mutex);
+
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
 
 	atomic_set(&buffer_ready, 0);
 
+12 -8
kernel/lockdep.c
@@ -142,6 +142,11 @@
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
@@ -163,7 +158,7 @@
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -239,12 +234,12 @@
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
@@ -2797,7 +2792,7 @@
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3327,7 +3322,7 @@
 	if (hlock->instance != lock)
 		return;
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3350,8 +3345,7 @@
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3378,7 +3374,7 @@
 
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}