inotify: fix lock ordering wrt do_page_fault's mmap_sem

Fix inotify lock order reversal with mmap_sem due to holding locks over
copy_to_user.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reported-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Tested-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Nick Piggin and committed by Linus Torvalds 16dbc6c9 08650869

+21 -7
+20 -7
fs/inotify_user.c
···
 323  323   }
 324  324
 325  325   /*
 326       - * remove_kevent - cleans up and ultimately frees the given kevent
      326  + * remove_kevent - cleans up the given kevent
 327  327   *
 328  328   * Caller must hold dev->ev_mutex.
 329  329   */
···
 334  334
 335  335   	dev->event_count--;
 336  336   	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
      337  +}
 337  338
      339  +/*
      340  + * free_kevent - frees the given kevent.
      341  + */
      342  +static void free_kevent(struct inotify_kernel_event *kevent)
      343  +{
 338  344   	kfree(kevent->name);
 339  345   	kmem_cache_free(event_cachep, kevent);
 340  346   }
···
 356  350   		struct inotify_kernel_event *kevent;
 357  351   		kevent = inotify_dev_get_event(dev);
 358  352   		remove_kevent(dev, kevent);
      353  +		free_kevent(kevent);
 359  354   	}
 360  355   }
···
 440  433   	dev = file->private_data;
 441  434
 442  435   	while (1) {
 443       -		int events;
 444  436
 445  437   		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
 446  438
 447  439   		mutex_lock(&dev->ev_mutex);
 448       -		events = !list_empty(&dev->events);
 449       -		mutex_unlock(&dev->ev_mutex);
 450       -		if (events) {
      440  +		if (!list_empty(&dev->events)) {
 451  441   			ret = 0;
 452  442   			break;
 453  443   		}
      444  +		mutex_unlock(&dev->ev_mutex);
 454  445
 455  446   		if (file->f_flags & O_NONBLOCK) {
 456  447   			ret = -EAGAIN;
···
 467  462   	if (ret)
 468  463   		return ret;
 469  464
 470       -	mutex_lock(&dev->ev_mutex);
 471  465   	while (1) {
 472  466   		struct inotify_kernel_event *kevent;
 473  467
···
 485  481   			}
 486  482   			break;
 487  483   		}
      484  +		remove_kevent(dev, kevent);
      485  +
      486  +		/*
      487  +		 * Must perform the copy_to_user outside the mutex in order
      488  +		 * to avoid a lock order reversal with mmap_sem.
      489  +		 */
      490  +		mutex_unlock(&dev->ev_mutex);
 488  491
 489  492   		if (copy_to_user(buf, &kevent->event, event_size)) {
 490  493   			ret = -EFAULT;
···
 509  498   		count -= kevent->event.len;
 510  499   	}
 511  500
 512       -		remove_kevent(dev, kevent);
      501  +		free_kevent(kevent);
      502  +
      503  +		mutex_lock(&dev->ev_mutex);
 513  504   	}
 514  505   	mutex_unlock(&dev->ev_mutex);
 515  506
+1
include/asm-x86/uaccess_64.h
···
   7    7   #include <linux/compiler.h>
   8    8   #include <linux/errno.h>
   9    9   #include <linux/prefetch.h>
       10  +#include <linux/lockdep.h>
  10   11   #include <asm/page.h>
  11   12
  12   13   /*