android: binder: Don't get mm from task

Use the binder_alloc struct's mm_struct rather than taking a
reference to the mm_struct through get_task_mm(), to avoid a
potential deadlock between the lru lock, the task lock, and the
dentry lock: a thread can hold the task lock and the dentry lock
while trying to acquire the lru lock.

Acked-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Sherry Yang <sherryy@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Sherry Yang; committed by Greg Kroah-Hartman (commits a0c2baaf, 9d35593b).

+9 -14
+9 -13
drivers/android/binder_alloc.c
··· 215 } 216 } 217 218 - if (!vma && need_mm) 219 - mm = get_task_mm(alloc->tsk); 220 221 if (mm) { 222 down_write(&mm->mmap_sem); 223 vma = alloc->vma; 224 - if (vma && mm != alloc->vma_vm_mm) { 225 - pr_err("%d: vma mm and task mm mismatch\n", 226 - alloc->pid); 227 - vma = NULL; 228 - } 229 } 230 231 if (!vma && need_mm) { ··· 715 barrier(); 716 alloc->vma = vma; 717 alloc->vma_vm_mm = vma->vm_mm; 718 719 return 0; 720 ··· 791 vfree(alloc->buffer); 792 } 793 mutex_unlock(&alloc->mutex); 794 795 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, 796 "%s: %d buffers %d, pages %d\n", ··· 887 void binder_alloc_vma_close(struct binder_alloc *alloc) 888 { 889 WRITE_ONCE(alloc->vma, NULL); 890 - WRITE_ONCE(alloc->vma_vm_mm, NULL); 891 } 892 893 /** ··· 923 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 924 vma = alloc->vma; 925 if (vma) { 926 - mm = get_task_mm(alloc->tsk); 927 - if (!mm) 928 - goto err_get_task_mm_failed; 929 if (!down_write_trylock(&mm->mmap_sem)) 930 goto err_down_write_mmap_sem_failed; 931 } ··· 960 961 err_down_write_mmap_sem_failed: 962 mmput_async(mm); 963 - err_get_task_mm_failed: 964 err_page_already_freed: 965 mutex_unlock(&alloc->mutex); 966 err_get_alloc_mutex_failed: ··· 999 */ 1000 void binder_alloc_init(struct binder_alloc *alloc) 1001 { 1002 - alloc->tsk = current->group_leader; 1003 alloc->pid = current->group_leader->pid; 1004 mutex_init(&alloc->mutex); 1005 INIT_LIST_HEAD(&alloc->buffers);
··· 215 } 216 } 217 218 + if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) 219 + mm = alloc->vma_vm_mm; 220 221 if (mm) { 222 down_write(&mm->mmap_sem); 223 vma = alloc->vma; 224 } 225 226 if (!vma && need_mm) { ··· 720 barrier(); 721 alloc->vma = vma; 722 alloc->vma_vm_mm = vma->vm_mm; 723 + mmgrab(alloc->vma_vm_mm); 724 725 return 0; 726 ··· 795 vfree(alloc->buffer); 796 } 797 mutex_unlock(&alloc->mutex); 798 + if (alloc->vma_vm_mm) 799 + mmdrop(alloc->vma_vm_mm); 800 801 binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, 802 "%s: %d buffers %d, pages %d\n", ··· 889 void binder_alloc_vma_close(struct binder_alloc *alloc) 890 { 891 WRITE_ONCE(alloc->vma, NULL); 892 } 893 894 /** ··· 926 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 927 vma = alloc->vma; 928 if (vma) { 929 + if (!mmget_not_zero(alloc->vma_vm_mm)) 930 + goto err_mmget; 931 + mm = alloc->vma_vm_mm; 932 if (!down_write_trylock(&mm->mmap_sem)) 933 goto err_down_write_mmap_sem_failed; 934 } ··· 963 964 err_down_write_mmap_sem_failed: 965 mmput_async(mm); 966 + err_mmget: 967 err_page_already_freed: 968 mutex_unlock(&alloc->mutex); 969 err_get_alloc_mutex_failed: ··· 1002 */ 1003 void binder_alloc_init(struct binder_alloc *alloc) 1004 { 1005 alloc->pid = current->group_leader->pid; 1006 mutex_init(&alloc->mutex); 1007 INIT_LIST_HEAD(&alloc->buffers);
-1
drivers/android/binder_alloc.h
··· 100 */ 101 struct binder_alloc { 102 struct mutex mutex; 103 - struct task_struct *tsk; 104 struct vm_area_struct *vma; 105 struct mm_struct *vma_vm_mm; 106 void *buffer;
··· 100 */ 101 struct binder_alloc { 102 struct mutex mutex; 103 struct vm_area_struct *vma; 104 struct mm_struct *vma_vm_mm; 105 void *buffer;