robust futex thread exit race

Calling handle_futex_death in exit_robust_list for the different robust
mutexes of a thread basically frees the mutex. Another thread might grab
the lock immediately, which updates the next pointer of the mutex.
fetch_robust_entry over the next pointer might therefore branch into the
robust mutex list of a different thread. This can cause two problems:
1) some mutexes held by the dead thread are not freed and 2) some
mutexes held by a different thread are freed.

The next pointer needs to be read before calling handle_futex_death.
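
To make the required ordering concrete, here is a minimal user-space sketch
(illustration only, not part of the patch). The names (struct entry,
release_entry, walk_buggy, walk_fixed) are invented stand-ins for the
robust-list entries and handle_futex_death(); the real kernel loop also has
to go through fetch_robust_entry() for every user-space access.

  #include <stdio.h>

  struct entry {
  	struct entry *next;		/* like robust_list.next */
  	int owner_tid;
  };

  /*
   * Stand-in for handle_futex_death(): once this returns, another thread
   * may acquire the lock and relink the entry into its own robust list,
   * so entry->next can change under us.
   */
  static void release_entry(struct entry *e)
  {
  	printf("releasing lock held by tid %d\n", e->owner_tid);
  	e->owner_tid = 0;
  }

  /* Old, racy shape: ->next is read only after the entry was given away. */
  static void walk_buggy(struct entry *head)
  {
  	struct entry *cur = head->next;

  	while (cur != head) {
  		release_entry(cur);
  		cur = cur->next;	/* may now point into another thread's list */
  	}
  }

  /* Fixed shape, matching the patch: save ->next first, then release. */
  static void walk_fixed(struct entry *head)
  {
  	struct entry *cur = head->next, *next;

  	while (cur != head) {
  		next = cur->next;	/* fetched before the entry can be reused */
  		release_entry(cur);
  		cur = next;
  	}
  }

  int main(void)
  {
  	struct entry head = { .next = &head };
  	struct entry a = { .next = &head, .owner_tid = 42 };

  	head.next = &a;			/* one-entry circular list */
  	walk_fixed(&head);
  	(void)walk_buggy;		/* broken ordering, kept only for contrast */
  	return 0;
  }

This is what the hunks below do with next_entry/next_pi (and next_uentry in
the compat path): fetch the successor before handle_futex_death() and only
then advance, so the walk never follows a pointer that another thread may
already have rewritten.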

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Martin Schwidefsky, committed by Linus Torvalds (9f96cb1e 8792f961)

+34 -20 total

kernel/futex.c (+16 -10)
···
 void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
 	unsigned long futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
···
 	if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
 		return;
 
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset,
-				   curr, pip);
-
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != &head->list) {
+		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * don't process it twice:
···
 		if (handle_futex_death((void __user *)entry + futex_offset,
 					curr, pi))
 			return;
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&entry, &entry->next, &pi))
+		if (rc)
 			return;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
···
 
 		cond_resched();
 	}
+
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
kernel/futex_compat.c (+18 -10)
···
 void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
-	struct robust_list __user *entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
-	compat_uptr_t uentry, upending;
+	struct robust_list __user *entry, *next_entry, *pending;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
+	int rc;
 
 	/*
 	 * Fetch the list head (which was registered earlier, via
···
 	if (fetch_robust_entry(&upending, &pending,
 			       &head->list_op_pending, &pip))
 		return;
-	if (pending)
-		handle_futex_death((void __user *)pending + futex_offset, curr, pip);
 
+	next_entry = NULL;	/* avoid warning with gcc */
 	while (entry != (struct robust_list __user *) &head->list) {
+		/*
+		 * Fetch the next entry in the list before calling
+		 * handle_futex_death:
+		 */
+		rc = fetch_robust_entry(&next_uentry, &next_entry,
+			(compat_uptr_t __user *)&entry->next, &next_pi);
 		/*
 		 * A pending lock might already be on the list, so
 		 * dont process it twice:
···
 				curr, pi))
 			return;
 
-		/*
-		 * Fetch the next entry in the list:
-		 */
-		if (fetch_robust_entry(&uentry, &entry,
-			(compat_uptr_t __user *)&entry->next, &pi))
+		if (rc)
 			return;
+		uentry = next_uentry;
+		entry = next_entry;
+		pi = next_pi;
 		/*
 		 * Avoid excessively long or circular lists:
 		 */
···
 
 		cond_resched();
 	}
+	if (pending)
+		handle_futex_death((void __user *)pending + futex_offset,
+				   curr, pip);
 }
 
 asmlinkage long