Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Fix race in process_vm_rw_core

This fixes the race in process_vm_rw_core found by Oleg (see

http://article.gmane.org/gmane.linux.kernel/1235667/

for details).

This has been updated since I last sent it as the creation of the new
mm_access() function did almost exactly the same thing as parts of the
previous version of this patch did.

In order to use mm_access() even when /proc isn't enabled, we move it to
kernel/fork.c where other related process mm access functions already
are.

Signed-off-by: Chris Yeoh <yeohc@au1.ibm.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christopher Yeoh and committed by Linus Torvalds
8cdb878d 24b36da3

+35 -34
-20
fs/proc/base.c
··· 198 198 return result; 199 199 } 200 200 201 - static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 202 - { 203 - struct mm_struct *mm; 204 - int err; 205 - 206 - err = mutex_lock_killable(&task->signal->cred_guard_mutex); 207 - if (err) 208 - return ERR_PTR(err); 209 - 210 - mm = get_task_mm(task); 211 - if (mm && mm != current->mm && 212 - !ptrace_may_access(task, mode)) { 213 - mmput(mm); 214 - mm = ERR_PTR(-EACCES); 215 - } 216 - mutex_unlock(&task->signal->cred_guard_mutex); 217 - 218 - return mm; 219 - } 220 - 221 201 struct mm_struct *mm_for_maps(struct task_struct *task) 222 202 { 223 203 return mm_access(task, PTRACE_MODE_READ);
+6
include/linux/sched.h
··· 2259 2259 extern void mmput(struct mm_struct *); 2260 2260 /* Grab a reference to a task's mm, if it is not already going away */ 2261 2261 extern struct mm_struct *get_task_mm(struct task_struct *task); 2262 + /* 2263 + * Grab a reference to a task's mm, if it is not already going away 2264 + * and ptrace_may_access with the mode parameter passed to it 2265 + * succeeds. 2266 + */ 2267 + extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); 2262 2268 /* Remove the current tasks stale references to the old mm_struct */ 2263 2269 extern void mm_release(struct task_struct *, struct mm_struct *); 2264 2270 /* Allocate a new mm structure and copy contents from tsk->mm */
+20
kernel/fork.c
··· 647 647 } 648 648 EXPORT_SYMBOL_GPL(get_task_mm); 649 649 650 + struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) 651 + { 652 + struct mm_struct *mm; 653 + int err; 654 + 655 + err = mutex_lock_killable(&task->signal->cred_guard_mutex); 656 + if (err) 657 + return ERR_PTR(err); 658 + 659 + mm = get_task_mm(task); 660 + if (mm && mm != current->mm && 661 + !ptrace_may_access(task, mode)) { 662 + mmput(mm); 663 + mm = ERR_PTR(-EACCES); 664 + } 665 + mutex_unlock(&task->signal->cred_guard_mutex); 666 + 667 + return mm; 668 + } 669 + 650 670 /* Please note the differences between mmput and mm_release. 651 671 * mmput is called whenever we stop holding onto a mm_struct, 652 672 * error success whatever.
+9 -14
mm/process_vm_access.c
··· 298 298 goto free_proc_pages; 299 299 } 300 300 301 - task_lock(task); 302 - if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) { 303 - task_unlock(task); 304 - rc = -EPERM; 301 + mm = mm_access(task, PTRACE_MODE_ATTACH); 302 + if (!mm || IS_ERR(mm)) { 303 + rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; 304 + /* 305 + * Explicitly map EACCES to EPERM as EPERM is a more a 306 + * appropriate error code for process_vw_readv/writev 307 + */ 308 + if (rc == -EACCES) 309 + rc = -EPERM; 305 310 goto put_task_struct; 306 311 } 307 - mm = task->mm; 308 - 309 - if (!mm || (task->flags & PF_KTHREAD)) { 310 - task_unlock(task); 311 - rc = -EINVAL; 312 - goto put_task_struct; 313 - } 314 - 315 - atomic_inc(&mm->mm_users); 316 - task_unlock(task); 317 312 318 313 for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) { 319 314 rc = process_vm_rw_single_vec(