Merge branch 'futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  futex: Fix kernel-doc notation & typos
  futex: Add lock context annotations
  futex: Mark restart_block.futex.uaddr[2] __user
  futex: Change 3rd arg of fetch_robust_entry() to unsigned int*
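
The common thread in the last three patches is sparse-annotation hygiene: __user marks a pointer as living in the user address space, so it must not be dereferenced directly, and __acquires()/__releases() declare the lock context a function enters or leaves with. As a rough sketch of what these annotations expand to under the checker (paraphrasing the definitions in include/linux/compiler.h, not a verbatim copy):

/*
 * Sketch of the sparse annotations used by these patches. In a
 * normal compile they vanish; under sparse (__CHECKER__) they become
 * address-space and lock-context attributes.
 */
#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))	/* exits holding x */
# define __releases(x)	__attribute__((context(x, 1, 0)))	/* enters holding x */
#else
# define __user
# define __acquires(x)
# define __releases(x)
#endif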

3 files changed, 24 insertions(+), 17 deletions(-)
include/linux/thread_info.h (+2 -2)
@@ -23,12 +23,12 @@
 	};
 	/* For futex_wait and futex_wait_requeue_pi */
 	struct {
-		u32 *uaddr;
+		u32 __user *uaddr;
 		u32 val;
 		u32 flags;
 		u32 bitset;
 		u64 time;
-		u32 *uaddr2;
+		u32 __user *uaddr2;
 	} futex;
 	/* For nanosleep */
 	struct {
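
With uaddr and uaddr2 carrying the __user annotation, the (u32 *) and (u32 __user *) casts in kernel/futex.c below become unnecessary, and sparse can now flag any direct dereference of these fields. A minimal illustration of what the annotation enforces (a hypothetical helper, not part of the patch):

/* Hypothetical helper: fetch the current futex word. Dereferencing
 * uaddr directly (*uaddr) would draw a sparse warning now that the
 * pointer is tagged __user; the access must go through the
 * user-copy API instead. */
static int read_futex_word(u32 __user *uaddr, u32 *val)
{
	if (get_user(*val, uaddr))
		return -EFAULT;	/* user page not readable */
	return 0;
}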
kernel/futex.c (+21 -14)
@@ -91,6 +91,7 @@
 
 /**
  * struct futex_q - The hashed futex queue entry, one per waiting task
+ * @list:		priority-sorted list of tasks waiting on this futex
  * @task:		the task waiting on the futex
  * @lock_ptr:		the hash bucket lock
  * @key:		the key the futex is hashed on
@@ -105,7 +104,7 @@
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
- * The order of wakup is always to make the first condition true, then
+ * The order of wakeup is always to make the first condition true, then
  * the second.
  *
  * PI futexes are typically woken before they are removed from the hash list via
@@ -296,7 +295,7 @@
  * Slow path to fixup the fault we just took in the atomic write
  * access to @uaddr.
  *
- * We have no generic implementation of a non destructive write to the
+ * We have no generic implementation of a non-destructive write to the
  * user address. We know that we faulted in the atomic pagefault
  * disabled section so we can as well avoid the #PF overhead by
  * calling get_user_pages() right away.
@@ -516,7 +515,7 @@
 		 */
 		pi_state = this->pi_state;
 		/*
-		 * Userspace might have messed up non PI and PI futexes
+		 * Userspace might have messed up non-PI and PI futexes
 		 */
 		if (unlikely(!pi_state))
 			return -EINVAL;
@@ -737,8 +736,8 @@
 
 	/*
 	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
-	 * a non futex wake up happens on another CPU then the task
-	 * might exit and p would dereference a non existing task
+	 * a non-futex wake up happens on another CPU then the task
+	 * might exit and p would dereference a non-existing task
 	 * struct. Prevent this by holding a reference on p across the
 	 * wake up.
 	 */
@@ -1132,11 +1131,13 @@
 
 /**
  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
- * uaddr1:	source futex user address
- * uaddr2:	target futex user address
- * nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
- * nr_requeue:	number of waiters to requeue (0-INT_MAX)
- * requeue_pi:	if we are attempting to requeue from a non-pi futex to a
+ * @uaddr1:	source futex user address
+ * @fshared:	0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+ * @uaddr2:	target futex user address
+ * @nr_wake:	number of waiters to wake (must be 1 for requeue_pi)
+ * @nr_requeue:	number of waiters to requeue (0-INT_MAX)
+ * @cmpval:	@uaddr1 expected value (or %NULL)
+ * @requeue_pi:	if we are attempting to requeue from a non-pi futex to a
  *		pi futex (pi to pi requeue is not supported)
  *
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
@@ -1363,6 +1360,7 @@
 
 /* The key must be already stored in q->key. */
 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
+	__acquires(&hb->lock)
 {
 	struct futex_hash_bucket *hb;
 
@@ -1376,6 +1372,7 @@
 
 static inline void
 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
 {
 	spin_unlock(&hb->lock);
 }
@@ -1394,6 +1389,7 @@
  * an example).
  */
 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+	__releases(&hb->lock)
 {
 	int prio;
 
@@ -1475,6 +1469,7 @@
  * and dropped here.
  */
 static void unqueue_me_pi(struct futex_q *q)
+	__releases(q->lock_ptr)
 {
 	WARN_ON(plist_node_empty(&q->list));
 	plist_del(&q->list, &q->list.plist);
@@ -1848,7 +1841,7 @@
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = futex_wait_restart;
-	restart->futex.uaddr = (u32 *)uaddr;
+	restart->futex.uaddr = uaddr;
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
@@ -1872,6 +1865,6 @@
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
+	u32 __user *uaddr = restart->futex.uaddr;
 	int fshared = 0;
 	ktime_t t, *tp = NULL;
@@ -2466,7 +2459,7 @@
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
 				     struct robust_list __user * __user *head,
-				     int *pi)
+				     unsigned int *pi)
 {
 	unsigned long uentry;
 
@@ -2655,7 +2648,7 @@
 	 * of the complex code paths. Also we want to prevent
 	 * registration of robust lists in that case. NULL is
 	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non functional ones will return
+	 * implementation, the non-functional ones will return
 	 * -ENOSYS.
 	 */
 	curval = cmpxchg_futex_value_locked(NULL, 0, 0);
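
The lock-context annotations above record that queue_lock() returns with the hash-bucket lock held, while queue_unlock(), queue_me() and unqueue_me_pi() drop a lock their caller acquired, letting a sparse build (make C=1, which passes -C to sparse) warn about unbalanced paths. A simplified stand-in pair, not the futex code itself, showing the shape of the contract:

/* Simplified stand-in for an annotated lock/unlock helper pair.
 * sparse models __acquires/__releases as +1/-1 context changes on
 * the named lock and reports a context imbalance when a code path
 * does not pair them up. */
static void bucket_lock(spinlock_t *lock)
	__acquires(lock)
{
	spin_lock(lock);	/* return with the lock held */
}

static void bucket_unlock(spinlock_t *lock)
	__releases(lock)
{
	spin_unlock(lock);	/* caller arrived with the lock held */
}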
kernel/futex_compat.c (+1 -1)
@@ -19,7 +19,7 @@
  */
 static inline int
 fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
-		   compat_uptr_t __user *head, int *pi)
+		   compat_uptr_t __user *head, unsigned int *pi)
 {
 	if (get_user(*uentry, head))
 		return -EFAULT;
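
Both fetch_robust_entry() variants unpack a user-space robust-list pointer whose low bit doubles as the PI flag, and their callers keep that flag in an unsigned int; taking unsigned int *pi makes the argument types match instead of punning through int *. Roughly, the unpacking pattern looks like this (a sketch following the native version in kernel/futex.c):

/* Sketch of the robust-list entry fetch: bit 0 of the value read
 * from user space carries the PI flag, the remaining bits are the
 * entry address. */
static inline int fetch_robust_entry_sketch(struct robust_list __user **entry,
					    struct robust_list __user * __user *head,
					    unsigned int *pi)
{
	unsigned long uentry;

	if (get_user(uentry, (unsigned long __user *)head))
		return -EFAULT;

	*entry = (void __user *)(uentry & ~1UL);	/* strip the flag bit */
	*pi = uentry & 1;

	return 0;
}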