Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
futex: Handle futex value corruption gracefully
futex: Handle user space corruption gracefully
futex_lock_pi() key refcnt fix
softlockup: Add sched_clock_tick() to avoid kernel warning on kgdb resume

+49 -6
+4
include/linux/sched.h
···
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 				    void __user *buffer,
···
 {
 }
 static inline void touch_softlockup_watchdog(void)
+{
+}
+static inline void touch_softlockup_watchdog_sync(void)
 {
 }
 static inline void touch_all_softlockup_watchdogs(void)
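Note on the header change: because the !CONFIG_DETECT_SOFTLOCKUP branch gains an empty inline stub, callers never need an #ifdef of their own. A minimal sketch of a hypothetical caller (the function name below is illustrative, not from this merge):

#include <linux/sched.h>

/* Hypothetical caller: compiles with or without CONFIG_DETECT_SOFTLOCKUP. */
static void resume_after_long_stop(void)
{
	/* Empty inline stub when the config option is off; no #ifdef needed. */
	touch_softlockup_watchdog_sync();
}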
+27 -3
kernel/futex.c
···
 		return -EINVAL;
 
 	WARN_ON(!atomic_read(&pi_state->refcount));
-	WARN_ON(pid && pi_state->owner &&
-		pi_state->owner->pid != pid);
+
+	/*
+	 * When pi_state->owner is NULL then the owner died
+	 * and another waiter is on the fly. pi_state->owner
+	 * is fixed up by the task which acquires
+	 * pi_state->rt_mutex.
+	 *
+	 * We do not check for pid == 0 which can happen when
+	 * the owner died and robust_list_exit() cleared the
+	 * TID.
+	 */
+	if (pid && pi_state->owner) {
+		/*
+		 * Bail out if user space manipulated the
+		 * futex value.
+		 */
+		if (pid != task_pid_vnr(pi_state->owner))
+			return -EINVAL;
+	}
 
 	atomic_inc(&pi_state->refcount);
 	*ps = pi_state;
···
 	u32 curval, newval;
 
 	if (!pi_state)
+		return -EINVAL;
+
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
 		return -EINVAL;
 
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
···
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
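For illustration, a hedged user-space sketch (not part of these patches; the thread structure, sleep-based synchronization, and corruption pattern are assumptions) of the scenario the new checks catch: the futex word is overwritten with a TID that no longer matches the kernel's pi_state owner, and FUTEX_UNLOCK_PI is expected to fail with EINVAL instead of operating on inconsistent state. Build with gcc -pthread:

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t futex_word;	/* PI futex word: value is the owner's TID */

static long sys_futex(uint32_t *uaddr, int op)
{
	return syscall(SYS_futex, uaddr, op, 0, NULL, NULL, 0);
}

static void *waiter_fn(void *arg)
{
	/* Block in the kernel; this forces a pi_state to be created. */
	sys_futex(&futex_word, FUTEX_LOCK_PI);
	return NULL;
}

static void *corrupter_fn(void *arg)
{
	uint32_t tid = syscall(SYS_gettid);

	/*
	 * Corrupt the futex word: claim ownership in user space while the
	 * kernel's pi_state still records the main thread as owner.
	 */
	__atomic_store_n(&futex_word, FUTEX_WAITERS | tid, __ATOMIC_SEQ_CST);

	if (sys_futex(&futex_word, FUTEX_UNLOCK_PI) < 0)
		printf("FUTEX_UNLOCK_PI: %s\n", strerror(errno)); /* EINVAL */
	return NULL;
}

int main(void)
{
	pthread_t waiter, corrupter;

	/* Lock the PI futex in user space: store our own TID. */
	__atomic_store_n(&futex_word, (uint32_t)syscall(SYS_gettid),
			 __ATOMIC_SEQ_CST);

	pthread_create(&waiter, NULL, waiter_fn, NULL);
	sleep(1);	/* crude: let the waiter block inside FUTEX_LOCK_PI */

	pthread_create(&corrupter, NULL, corrupter_fn, NULL);
	pthread_join(corrupter, NULL);
	_exit(0);	/* the waiter thread stays blocked; just exit */
}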
+3 -3
kernel/kgdb.c
···
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
···
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
 
···
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
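The kgdb hunks are a mechanical substitution at three resume points; sketched below (illustrative function name, not actual kgdb code) is the pattern they all follow when a CPU leaves the debugger after a potentially long stop:

#include <linux/clocksource.h>
#include <linux/irqflags.h>
#include <linux/sched.h>

/* Hypothetical resume helper showing the shared pattern of the hunks. */
static void debugger_resume_cpu(unsigned long flags)
{
	/*
	 * The CPU may have been halted far longer than softlockup_thresh.
	 * The _sync variant also forces the next softlockup_tick() to run
	 * sched_clock_tick() before taking a fresh timestamp.
	 */
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	local_irq_restore(flags);
}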
+15
kernel/softlockup.c
···
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
···
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
···
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
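To make the protocol concrete, a small user-space model (purely illustrative; the struct and helpers are assumptions, not kernel API): the sync touch raises a per-CPU flag that tells the next tick to resynchronize its clock before restamping, so time spent stopped in the debugger never reaches the threshold check:

#include <stdbool.h>
#include <stdio.h>

struct watchdog {
	unsigned long touch_ts;	/* 0 == "was touched, take a new stamp" */
	bool touch_sync;	/* resync the clock before restamping */
	unsigned long clock;	/* models this CPU's sched_clock() view */
};

/* Models touch_softlockup_watchdog_sync(): mark + request a resync. */
static void touch_watchdog_sync(struct watchdog *wd)
{
	wd->touch_sync = true;
	wd->touch_ts = 0;
}

/* Models the softlockup_tick() path for a touched watchdog. */
static void watchdog_tick(struct watchdog *wd, unsigned long real_time)
{
	if (wd->touch_ts == 0) {
		if (wd->touch_sync) {
			/* sched_clock_tick(): catch the clock up first. */
			wd->touch_sync = false;
			wd->clock = real_time;
		}
		wd->touch_ts = wd->clock;	/* __touch_..._watchdog() */
		return;
	}
	/* ... threshold check against wd->clock would go here ... */
}

int main(void)
{
	struct watchdog wd = { .clock = 100 };

	/* CPU was stopped in the debugger; real time ran on to 500. */
	touch_watchdog_sync(&wd);
	watchdog_tick(&wd, 500);
	printf("restamped at %lu (clock resynced, no false positive)\n",
	       wd.touch_ts);
	return 0;
}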