[PATCH] Fix semundo lock leakage

semundo->lock can leak if semundo->refcount goes from 2 to 1 while
another thread has it locked. This causes major problems for PREEMPT
kernels.

The simplest fix for now is to undo the single-thread optimization.

This bug was found via relentless testing by Dominik Karall.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Ingo Molnar and committed by Linus Torvalds (commit 00a5dfdb, parent ba025082).

3 insertions(+), 7 deletions(-)
ipc/sem.c
@@ -895,7 +895,7 @@
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_lock(&undo_list->lock);
 }
 
@@ -915,7 +915,7 @@
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_unlock(&undo_list->lock);
 }
 
@@ -943,9 +943,7 @@
 		if (undo_list == NULL)
 			return -ENOMEM;
 		memset(undo_list, 0, size);
-		/* don't initialize unodhd->lock here.  It's done
-		 * in copy_semundo() instead.
-		 */
+		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
 		current->sysvsem.undo_list = undo_list;
 	}
@@ -1229,8 +1231,6 @@
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		if (atomic_read(&undo_list->refcnt) == 1)
-			spin_lock_init(&undo_list->lock);
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else