inotify: fix lock ordering wrt do_page_fault's mmap_sem

Fix inotify lock order reversal with mmap_sem due to holding locks over
copy_to_user.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reported-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Tested-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Nick Piggin and committed by Linus Torvalds 16dbc6c9 08650869

+21 -7
+20 -7
fs/inotify_user.c
···
 323  	}
 324
 325  	/*
 326 -	 * remove_kevent - cleans up and ultimately frees the given kevent
 327  	 *
 328  	 * Caller must hold dev->ev_mutex.
 329  	 */
···
 334
 335  		dev->event_count--;
 336  		dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
 337
 338  		kfree(kevent->name);
 339  		kmem_cache_free(event_cachep, kevent);
 340  	}
···
 356  		struct inotify_kernel_event *kevent;
 357  		kevent = inotify_dev_get_event(dev);
 358  		remove_kevent(dev, kevent);
 359  	}
 360  }
 361
···
 440  	dev = file->private_data;
 441
 442  	while (1) {
 443 -		int events;
 444
 445  		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 446
 447  		mutex_lock(&dev->ev_mutex);
 448 -		events = !list_empty(&dev->events);
 449 -		mutex_unlock(&dev->ev_mutex);
 450 -		if (events) {
 451  			ret = 0;
 452  			break;
 453  		}
 454
 455  		if (file->f_flags & O_NONBLOCK) {
 456  			ret = -EAGAIN;
···
 467  	if (ret)
 468  		return ret;
 469
 470 -	mutex_lock(&dev->ev_mutex);
 471  	while (1) {
 472  		struct inotify_kernel_event *kevent;
 473
···
 485  			}
 486  			break;
 487  		}
 488
 489  		if (copy_to_user(buf, &kevent->event, event_size)) {
 490  			ret = -EFAULT;
···
 509  			count -= kevent->event.len;
 510  		}
 511
 512 -		remove_kevent(dev, kevent);
 513  	}
 514  	mutex_unlock(&dev->ev_mutex);
 515
···
 323  	}
 324
 325  	/*
 326 +	 * remove_kevent - cleans up the given kevent
 327  	 *
 328  	 * Caller must hold dev->ev_mutex.
 329  	 */
···
 334
 335  		dev->event_count--;
 336  		dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
 337 +	}
 338
 339 +	/*
 340 +	 * free_kevent - frees the given kevent.
 341 +	 */
 342 +	static void free_kevent(struct inotify_kernel_event *kevent)
 343 +	{
 344  		kfree(kevent->name);
 345  		kmem_cache_free(event_cachep, kevent);
 346  	}
···
 350  		struct inotify_kernel_event *kevent;
 351  		kevent = inotify_dev_get_event(dev);
 352  		remove_kevent(dev, kevent);
 353 +		free_kevent(kevent);
 354  	}
 355  }
 356
···
 433  	dev = file->private_data;
 434
 435  	while (1) {
 436
 437  		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 438
 439  		mutex_lock(&dev->ev_mutex);
 440 +		if (!list_empty(&dev->events)) {
 441  			ret = 0;
 442  			break;
 443  		}
 444 +		mutex_unlock(&dev->ev_mutex);
 445
 446  		if (file->f_flags & O_NONBLOCK) {
 447  			ret = -EAGAIN;
···
 462  	if (ret)
 463  		return ret;
 464
 465  	while (1) {
 466  		struct inotify_kernel_event *kevent;
 467
···
 481  			}
 482  			break;
 483  		}
 484 +		remove_kevent(dev, kevent);
 485 +
 486 +		/*
 487 +		 * Must perform the copy_to_user outside the mutex in order
 488 +		 * to avoid a lock order reversal with mmap_sem.
 489 +		 */
 490 +		mutex_unlock(&dev->ev_mutex);
 491
 492  		if (copy_to_user(buf, &kevent->event, event_size)) {
 493  			ret = -EFAULT;
···
 498  			count -= kevent->event.len;
 499  		}
 500
 501 +		free_kevent(kevent);
 502 +
 503 +		mutex_lock(&dev->ev_mutex);
 504  	}
 505  	mutex_unlock(&dev->ev_mutex);
 506
+1
include/asm-x86/uaccess_64.h
···
 7  #include <linux/compiler.h>
 8  #include <linux/errno.h>
 9  #include <linux/prefetch.h>
 10  #include <asm/page.h>
 11
 12  /*
···
 7  #include <linux/compiler.h>
 8  #include <linux/errno.h>
 9  #include <linux/prefetch.h>
 10 + #include <linux/lockdep.h>
 11  #include <asm/page.h>
 12
 13  /*