Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  oprofile: warn on freeing event buffer too early
  oprofile: fix race condition in event_buffer free
  lockdep: Use cpu_clock() for lockstat

+38 -17
+26 -9
drivers/oprofile/event_buffer.c
···
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
···
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
···
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
 
 
···
 		return -EAGAIN;
 
 	mutex_lock(&buffer_mutex);
+
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
 
 	atomic_set(&buffer_ready, 0);
 
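
Taken together, the two oprofile commits converge on one rule: the path that frees the buffer takes the same mutex as everyone who touches the buffer, and any code that could run late re-checks the pointer under that lock before dereferencing it. Below is a minimal userspace sketch of that pattern, using a pthread mutex in place of buffer_mutex; the names ring_buf, ring_lock, ring_add and friends are hypothetical, not the kernel's API, and unlike add_event_entry() the writer here takes the lock itself instead of relying on its caller to hold it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of the oprofile fix: the buffer is freed and
 * NULLed under the same lock the writers take, so a late write sees
 * NULL instead of a dangling pointer. */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *ring_buf;
static size_t ring_size, ring_pos;

static int ring_alloc(size_t size)
{
	pthread_mutex_lock(&ring_lock);
	ring_pos = 0;                      /* reset position on (re)alloc */
	ring_buf = calloc(size, sizeof(*ring_buf));
	ring_size = ring_buf ? size : 0;
	pthread_mutex_unlock(&ring_lock);
	return ring_buf ? 0 : -1;
}

static void ring_free(void)
{
	pthread_mutex_lock(&ring_lock);    /* exclude in-flight writers */
	free(ring_buf);
	ring_buf = NULL;                   /* late writers can detect the free */
	ring_pos = 0;
	pthread_mutex_unlock(&ring_lock);
}

static void ring_add(unsigned long value)
{
	pthread_mutex_lock(&ring_lock);
	if (!ring_buf)                     /* buffer already torn down */
		fprintf(stderr, "add after free suppressed\n");
	else if (ring_pos < ring_size)
		ring_buf[ring_pos++] = value;
	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	if (ring_alloc(16))
		return 1;
	ring_add(1);
	ring_free();
	ring_add(2);                       /* safely rejected, no use-after-free */
	return 0;
}

The WARN_ON_ONCE() in the real patch plays the role of the fprintf() above: it flags a writer that slipped in after teardown without letting the bug become a use-after-free.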
+12 -8
kernel/lockdep.c
···
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
···
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
···
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
···
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
···
 	if (hlock->instance != lock)
 		return;
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
···
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
···
 
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}
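
The lockdep change is a clock swap made cheap by indirection: every lockstat timestamp is funneled through lockstat_clock(), which reads the per-CPU cpu_clock() instead of raw sched_clock(), and the hold/wait deltas become unsigned u64 since they are never negative. A userspace sketch of the same wrapper idea follows; stat_clock() and CLOCK_MONOTONIC here are stand-ins for illustration, not what the kernel uses.

#define _POSIX_C_SOURCE 199309L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical analogue of lockstat_clock(): every caller goes through
 * this one wrapper, so changing the underlying clock source later is a
 * one-line edit rather than a tree-wide search for raw clock calls. */
static uint64_t stat_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
	uint64_t start = stat_clock();
	/* ... critical section being timed ... */
	uint64_t hold = stat_clock() - start;  /* unsigned, like the u64 holdtime */

	printf("held for %llu ns\n", (unsigned long long)hold);
	return 0;
}

Because every caller goes through the wrapper, any future change of clock source stays a one-line edit, which is what keeps patches like this one small.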