Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
futex: Handle futex value corruption gracefully
futex: Handle user space corruption gracefully
futex_lock_pi() key refcnt fix
softlockup: Add sched_clock_tick() to avoid kernel warning on kgdb resume

4 files changed, 49 insertions(+), 6 deletions(-)

include/linux/sched.h (+4)
···
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
···
 {
 }
 static inline void touch_softlockup_watchdog(void)
+{
+}
+static inline void touch_softlockup_watchdog_sync(void)
 {
 }
 static inline void touch_all_softlockup_watchdogs(void)
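For callers, the difference between the two helpers matters only when a CPU has been stopped long enough for its per-CPU scheduler clock to go stale. A minimal sketch of the intended use, assuming a debugger-style resume path (the function name here is hypothetical):

#include <linux/sched.h>	/* touch_softlockup_watchdog{,_sync}() */

/* Hypothetical resume path that kept this CPU frozen for a long time. */
static void my_debugger_resume(void)
{
	/*
	 * Plain touch_softlockup_watchdog() only zeroes the per-CPU
	 * touch timestamp; the watchdog would then re-arm it from a
	 * stale sched_clock(). The _sync variant additionally asks
	 * the watchdog to run sched_clock_tick() before re-arming.
	 */
	touch_softlockup_watchdog_sync();
}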
kernel/futex.c (+27 -3)
···
 		return -EINVAL;
 
 	WARN_ON(!atomic_read(&pi_state->refcount));
-	WARN_ON(pid && pi_state->owner &&
-		pi_state->owner->pid != pid);
+
+	/*
+	 * When pi_state->owner is NULL then the owner died
+	 * and another waiter is on the fly. pi_state->owner
+	 * is fixed up by the task which acquires
+	 * pi_state->rt_mutex.
+	 *
+	 * We do not check for pid == 0 which can happen when
+	 * the owner died and robust_list_exit() cleared the
+	 * TID.
+	 */
+	if (pid && pi_state->owner) {
+		/*
+		 * Bail out if user space manipulated the
+		 * futex value.
+		 */
+		if (pid != task_pid_vnr(pi_state->owner))
+			return -EINVAL;
+	}
 
 	atomic_inc(&pi_state->refcount);
 	*ps = pi_state;
···
 	u32 curval, newval;
 
 	if (!pi_state)
 		return -EINVAL;
+
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
+		return -EINVAL;
 
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
···
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
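For context on what "user space fiddled with the futex value" means: for PI futexes the kernel keeps the owner's TID in the futex word itself, so a stray store from the application desynchronizes the word from the kernel's pi_state. A hypothetical user-space fragment of the kind these checks now catch (the exact error returned depends on which path trips, but the kernel now bails out instead of hitting a WARN_ON or corrupting its own state):

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static uint32_t futex_word;	/* PI futex: kernel stores the owner TID here */

int main(void)
{
	/* Take the PI futex; on success the word holds our TID. */
	syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL, NULL, 0);

	/* Buggy application code scribbles over the futex word... */
	memset(&futex_word, 0x55, sizeof(futex_word));

	/*
	 * ...so the TID in the word no longer matches pi_state->owner.
	 * With these fixes the subsequent PI futex operations fail with
	 * an error instead of corrupting kernel state.
	 */
	syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	return 0;
}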
kernel/kgdb.c (+3 -3)
···
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
···
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
···
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
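Why the plain touch is insufficient at these resume sites is easiest to see as a timeline; the figures below are invented for illustration, but the mechanism follows from the softlockup.c change in this merge:

/*
 * Illustrative timeline, not verbatim kernel behavior:
 *
 *   t=100s  kgdb stops all CPUs; the per-CPU sched clock stops advancing
 *   t=500s  resume: touch_softlockup_watchdog() sets touch_ts = 0;
 *           the next watchdog tick re-arms touch_ts from the *stale*
 *           clock, so touch_ts = 100
 *           a later tick sees now = 500; 500 - 100 > softlockup_thresh
 *           -> bogus "soft lockup" warning
 *
 * touch_softlockup_watchdog_sync() additionally forces sched_clock_tick()
 * before the watchdog re-arms, so touch_ts = 500 and no warning fires.
 */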
kernel/softlockup.c (+15)
···
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
···
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
···
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
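Distilled, the mechanism is a per-CPU handshake: the toucher raises a flag and zeroes the timestamp; the watchdog tick consumes the flag and refreshes the scheduler clock before re-arming. A minimal, self-contained C model of the same handshake (plain variables standing in for the per-CPU data, single-threaded for clarity; all names and figures here are stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-CPU variables in kernel/softlockup.c. */
static bool touch_sync;
static unsigned long touch_ts;
static unsigned long clock_s = 100;	/* stand-in sched clock, in seconds */

/* Models sched_clock_tick(): resynchronize the clock after a long stop. */
static void model_sched_clock_tick(void)
{
	clock_s = 500;
}

/* Models touch_softlockup_watchdog_sync(). */
static void model_touch_sync(void)
{
	touch_sync = true;
	touch_ts = 0;
}

/* Models the touch_ts == 0 branch of softlockup_tick(). */
static void model_watchdog_tick(void)
{
	if (touch_ts == 0) {
		if (touch_sync) {
			/* Consume the flag; refresh the clock first. */
			touch_sync = false;
			model_sched_clock_tick();
		}
		touch_ts = clock_s;	/* __touch_softlockup_watchdog() */
		return;
	}
	/* Otherwise: compare clock_s - touch_ts against the threshold. */
}

int main(void)
{
	model_touch_sync();
	model_watchdog_tick();
	printf("re-armed at %lus\n", touch_ts);	/* 500, not the stale 100 */
	return 0;
}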