Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Input: use guard notation in input core

Switch input core to use "guard" notation when acquiring spinlocks and
mutexes to simplify the code and ensure that locks are automatically
released when control leaves the critical section.

Link: https://lore.kernel.org/r/20241107071538.195340-9-dmitry.torokhov@gmail.com
Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>

+131 -208
drivers/input/input.c
··· 115 115 116 116 lockdep_assert_held(&dev->event_lock); 117 117 118 - rcu_read_lock(); 118 + scoped_guard(rcu) { 119 + handle = rcu_dereference(dev->grab); 120 + if (handle) { 121 + count = handle->handle_events(handle, vals, count); 122 + break; 123 + } 119 124 120 - handle = rcu_dereference(dev->grab); 121 - if (handle) { 122 - count = handle->handle_events(handle, vals, count); 123 - } else { 124 - list_for_each_entry_rcu(handle, &dev->h_list, d_node) 125 + list_for_each_entry_rcu(handle, &dev->h_list, d_node) { 125 126 if (handle->open) { 126 127 count = handle->handle_events(handle, vals, 127 128 count); 128 129 if (!count) 129 130 break; 130 131 } 132 + } 131 133 } 132 - 133 - rcu_read_unlock(); 134 134 135 135 /* trigger auto repeat for key events */ 136 136 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { ··· 390 390 void input_event(struct input_dev *dev, 391 391 unsigned int type, unsigned int code, int value) 392 392 { 393 - unsigned long flags; 394 - 395 393 if (is_event_supported(type, dev->evbit, EV_MAX)) { 396 - 397 - spin_lock_irqsave(&dev->event_lock, flags); 394 + guard(spinlock_irqsave)(&dev->event_lock); 398 395 input_handle_event(dev, type, code, value); 399 - spin_unlock_irqrestore(&dev->event_lock, flags); 400 396 } 401 397 } 402 398 EXPORT_SYMBOL(input_event); ··· 413 417 { 414 418 struct input_dev *dev = handle->dev; 415 419 struct input_handle *grab; 416 - unsigned long flags; 417 420 418 421 if (is_event_supported(type, dev->evbit, EV_MAX)) { 419 - spin_lock_irqsave(&dev->event_lock, flags); 422 + guard(spinlock_irqsave)(&dev->event_lock); 423 + guard(rcu)(); 420 424 421 - rcu_read_lock(); 422 425 grab = rcu_dereference(dev->grab); 423 426 if (!grab || grab == handle) 424 427 input_handle_event(dev, type, code, value); 425 - rcu_read_unlock(); 426 428 427 - spin_unlock_irqrestore(&dev->event_lock, flags); 428 429 } 429 430 } 430 431 EXPORT_SYMBOL(input_inject_event); ··· 519 526 int input_grab_device(struct 
input_handle *handle) 520 527 { 521 528 struct input_dev *dev = handle->dev; 522 - int retval; 523 529 524 - retval = mutex_lock_interruptible(&dev->mutex); 525 - if (retval) 526 - return retval; 530 + scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) { 531 + if (dev->grab) 532 + return -EBUSY; 527 533 528 - if (dev->grab) { 529 - retval = -EBUSY; 530 - goto out; 534 + rcu_assign_pointer(dev->grab, handle); 531 535 } 532 536 533 - rcu_assign_pointer(dev->grab, handle); 534 - 535 - out: 536 - mutex_unlock(&dev->mutex); 537 - return retval; 537 + return 0; 538 538 } 539 539 EXPORT_SYMBOL(input_grab_device); 540 540 ··· 562 576 { 563 577 struct input_dev *dev = handle->dev; 564 578 565 - mutex_lock(&dev->mutex); 579 + guard(mutex)(&dev->mutex); 566 580 __input_release_device(handle); 567 - mutex_unlock(&dev->mutex); 568 581 } 569 582 EXPORT_SYMBOL(input_release_device); 570 583 ··· 577 592 int input_open_device(struct input_handle *handle) 578 593 { 579 594 struct input_dev *dev = handle->dev; 580 - int retval; 595 + int error; 581 596 582 - retval = mutex_lock_interruptible(&dev->mutex); 583 - if (retval) 584 - return retval; 597 + scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) { 598 + if (dev->going_away) 599 + return -ENODEV; 585 600 586 - if (dev->going_away) { 587 - retval = -ENODEV; 588 - goto out; 589 - } 601 + handle->open++; 590 602 591 - handle->open++; 603 + if (handle->handler->passive_observer) 604 + return 0; 592 605 593 - if (handle->handler->passive_observer) 594 - goto out; 595 - 596 - if (dev->users++ || dev->inhibited) { 597 - /* 598 - * Device is already opened and/or inhibited, 599 - * so we can exit immediately and report success. 
600 - */ 601 - goto out; 602 - } 603 - 604 - if (dev->open) { 605 - retval = dev->open(dev); 606 - if (retval) { 607 - dev->users--; 608 - handle->open--; 606 + if (dev->users++ || dev->inhibited) { 609 607 /* 610 - * Make sure we are not delivering any more events 611 - * through this handle 608 + * Device is already opened and/or inhibited, 609 + * so we can exit immediately and report success. 612 610 */ 613 - synchronize_rcu(); 614 - goto out; 611 + return 0; 615 612 } 613 + 614 + if (dev->open) { 615 + error = dev->open(dev); 616 + if (error) { 617 + dev->users--; 618 + handle->open--; 619 + /* 620 + * Make sure we are not delivering any more 621 + * events through this handle. 622 + */ 623 + synchronize_rcu(); 624 + return error; 625 + } 626 + } 627 + 628 + if (dev->poller) 629 + input_dev_poller_start(dev->poller); 616 630 } 617 631 618 - if (dev->poller) 619 - input_dev_poller_start(dev->poller); 620 - 621 - out: 622 - mutex_unlock(&dev->mutex); 623 - return retval; 632 + return 0; 624 633 } 625 634 EXPORT_SYMBOL(input_open_device); 626 635 627 636 int input_flush_device(struct input_handle *handle, struct file *file) 628 637 { 629 638 struct input_dev *dev = handle->dev; 630 - int retval; 631 639 632 - retval = mutex_lock_interruptible(&dev->mutex); 633 - if (retval) 634 - return retval; 640 + scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) { 641 + if (dev->flush) 642 + return dev->flush(dev, file); 643 + } 635 644 636 - if (dev->flush) 637 - retval = dev->flush(dev, file); 638 - 639 - mutex_unlock(&dev->mutex); 640 - return retval; 645 + return 0; 641 646 } 642 647 EXPORT_SYMBOL(input_flush_device); 643 648 ··· 642 667 { 643 668 struct input_dev *dev = handle->dev; 644 669 645 - mutex_lock(&dev->mutex); 670 + guard(mutex)(&dev->mutex); 646 671 647 672 __input_release_device(handle); 648 673 ··· 663 688 */ 664 689 synchronize_rcu(); 665 690 } 666 - 667 - mutex_unlock(&dev->mutex); 668 691 } 669 692 EXPORT_SYMBOL(input_close_device); 670 693 ··· 
699 726 * not to protect access to dev->going_away but rather to ensure 700 727 * that there are no threads in the middle of input_open_device() 701 728 */ 702 - mutex_lock(&dev->mutex); 703 - dev->going_away = true; 704 - mutex_unlock(&dev->mutex); 729 + scoped_guard(mutex, &dev->mutex) 730 + dev->going_away = true; 705 731 706 - spin_lock_irq(&dev->event_lock); 732 + guard(spinlock_irq)(&dev->event_lock); 707 733 708 734 /* 709 735 * Simulate keyup events for all pressed keys so that handlers ··· 715 743 716 744 list_for_each_entry(handle, &dev->h_list, d_node) 717 745 handle->open = 0; 718 - 719 - spin_unlock_irq(&dev->event_lock); 720 746 } 721 747 722 748 /** ··· 871 901 */ 872 902 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 873 903 { 874 - unsigned long flags; 875 - int retval; 904 + guard(spinlock_irqsave)(&dev->event_lock); 876 905 877 - spin_lock_irqsave(&dev->event_lock, flags); 878 - retval = dev->getkeycode(dev, ke); 879 - spin_unlock_irqrestore(&dev->event_lock, flags); 880 - 881 - return retval; 906 + return dev->getkeycode(dev, ke); 882 907 } 883 908 EXPORT_SYMBOL(input_get_keycode); 884 909 ··· 888 923 int input_set_keycode(struct input_dev *dev, 889 924 const struct input_keymap_entry *ke) 890 925 { 891 - unsigned long flags; 892 926 unsigned int old_keycode; 893 - int retval; 927 + int error; 894 928 895 929 if (ke->keycode > KEY_MAX) 896 930 return -EINVAL; 897 931 898 - spin_lock_irqsave(&dev->event_lock, flags); 932 + guard(spinlock_irqsave)(&dev->event_lock); 899 933 900 - retval = dev->setkeycode(dev, ke, &old_keycode); 901 - if (retval) 902 - goto out; 934 + error = dev->setkeycode(dev, ke, &old_keycode); 935 + if (error) 936 + return error; 903 937 904 938 /* Make sure KEY_RESERVED did not get enabled. 
*/ 905 939 __clear_bit(KEY_RESERVED, dev->keybit); ··· 926 962 EV_SYN, SYN_REPORT, 1); 927 963 } 928 964 929 - out: 930 - spin_unlock_irqrestore(&dev->event_lock, flags); 931 - 932 - return retval; 965 + return 0; 933 966 } 934 967 EXPORT_SYMBOL(input_set_keycode); 935 968 ··· 1760 1799 */ 1761 1800 void input_reset_device(struct input_dev *dev) 1762 1801 { 1763 - unsigned long flags; 1764 - 1765 - mutex_lock(&dev->mutex); 1766 - spin_lock_irqsave(&dev->event_lock, flags); 1802 + guard(mutex)(&dev->mutex); 1803 + guard(spinlock_irqsave)(&dev->event_lock); 1767 1804 1768 1805 input_dev_toggle(dev, true); 1769 1806 if (input_dev_release_keys(dev)) 1770 1807 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1771 - 1772 - spin_unlock_irqrestore(&dev->event_lock, flags); 1773 - mutex_unlock(&dev->mutex); 1774 1808 } 1775 1809 EXPORT_SYMBOL(input_reset_device); 1776 1810 1777 1811 static int input_inhibit_device(struct input_dev *dev) 1778 1812 { 1779 - mutex_lock(&dev->mutex); 1813 + guard(mutex)(&dev->mutex); 1780 1814 1781 1815 if (dev->inhibited) 1782 - goto out; 1816 + return 0; 1783 1817 1784 1818 if (dev->users) { 1785 1819 if (dev->close) ··· 1783 1827 input_dev_poller_stop(dev->poller); 1784 1828 } 1785 1829 1786 - spin_lock_irq(&dev->event_lock); 1787 - input_mt_release_slots(dev); 1788 - input_dev_release_keys(dev); 1789 - input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1790 - input_dev_toggle(dev, false); 1791 - spin_unlock_irq(&dev->event_lock); 1830 + scoped_guard(spinlock_irq, &dev->event_lock) { 1831 + input_mt_release_slots(dev); 1832 + input_dev_release_keys(dev); 1833 + input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1834 + input_dev_toggle(dev, false); 1835 + } 1792 1836 1793 1837 dev->inhibited = true; 1794 1838 1795 - out: 1796 - mutex_unlock(&dev->mutex); 1797 1839 return 0; 1798 1840 } 1799 1841 1800 1842 static int input_uninhibit_device(struct input_dev *dev) 1801 1843 { 1802 - int ret = 0; 1844 + int error; 1803 1845 1804 - 
mutex_lock(&dev->mutex); 1846 + guard(mutex)(&dev->mutex); 1805 1847 1806 1848 if (!dev->inhibited) 1807 - goto out; 1849 + return 0; 1808 1850 1809 1851 if (dev->users) { 1810 1852 if (dev->open) { 1811 - ret = dev->open(dev); 1812 - if (ret) 1813 - goto out; 1853 + error = dev->open(dev); 1854 + if (error) 1855 + return error; 1814 1856 } 1815 1857 if (dev->poller) 1816 1858 input_dev_poller_start(dev->poller); 1817 1859 } 1818 1860 1819 1861 dev->inhibited = false; 1820 - spin_lock_irq(&dev->event_lock); 1821 - input_dev_toggle(dev, true); 1822 - spin_unlock_irq(&dev->event_lock); 1823 1862 1824 - out: 1825 - mutex_unlock(&dev->mutex); 1826 - return ret; 1863 + scoped_guard(spinlock_irq, &dev->event_lock) 1864 + input_dev_toggle(dev, true); 1865 + 1866 + return 0; 1827 1867 } 1828 1868 1829 1869 static int input_dev_suspend(struct device *dev) 1830 1870 { 1831 1871 struct input_dev *input_dev = to_input_dev(dev); 1832 1872 1833 - spin_lock_irq(&input_dev->event_lock); 1873 + guard(spinlock_irq)(&input_dev->event_lock); 1834 1874 1835 1875 /* 1836 1876 * Keys that are pressed now are unlikely to be ··· 1837 1885 1838 1886 /* Turn off LEDs and sounds, if any are active. */ 1839 1887 input_dev_toggle(input_dev, false); 1840 - 1841 - spin_unlock_irq(&input_dev->event_lock); 1842 1888 1843 1889 return 0; 1844 1890 } ··· 1845 1895 { 1846 1896 struct input_dev *input_dev = to_input_dev(dev); 1847 1897 1848 - spin_lock_irq(&input_dev->event_lock); 1898 + guard(spinlock_irq)(&input_dev->event_lock); 1849 1899 1850 1900 /* Restore state of LEDs and sounds, if any were active. 
*/ 1851 1901 input_dev_toggle(input_dev, true); 1852 - 1853 - spin_unlock_irq(&input_dev->event_lock); 1854 1902 1855 1903 return 0; 1856 1904 } ··· 1857 1909 { 1858 1910 struct input_dev *input_dev = to_input_dev(dev); 1859 1911 1860 - spin_lock_irq(&input_dev->event_lock); 1912 + guard(spinlock_irq)(&input_dev->event_lock); 1861 1913 1862 1914 /* 1863 1915 * Keys that are pressed now are unlikely to be ··· 1866 1918 if (input_dev_release_keys(input_dev)) 1867 1919 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1); 1868 1920 1869 - spin_unlock_irq(&input_dev->event_lock); 1870 - 1871 1921 return 0; 1872 1922 } 1873 1923 ··· 1873 1927 { 1874 1928 struct input_dev *input_dev = to_input_dev(dev); 1875 1929 1876 - spin_lock_irq(&input_dev->event_lock); 1930 + guard(spinlock_irq)(&input_dev->event_lock); 1877 1931 1878 1932 /* Turn off LEDs and sounds, if any are active. */ 1879 1933 input_dev_toggle(input_dev, false); 1880 - 1881 - spin_unlock_irq(&input_dev->event_lock); 1882 1934 1883 1935 return 0; 1884 1936 } ··· 2218 2274 2219 2275 input_disconnect_device(dev); 2220 2276 2221 - mutex_lock(&input_mutex); 2277 + scoped_guard(mutex, &input_mutex) { 2278 + list_for_each_entry_safe(handle, next, &dev->h_list, d_node) 2279 + handle->handler->disconnect(handle); 2280 + WARN_ON(!list_empty(&dev->h_list)); 2222 2281 2223 - list_for_each_entry_safe(handle, next, &dev->h_list, d_node) 2224 - handle->handler->disconnect(handle); 2225 - WARN_ON(!list_empty(&dev->h_list)); 2282 + del_timer_sync(&dev->timer); 2283 + list_del_init(&dev->node); 2226 2284 2227 - del_timer_sync(&dev->timer); 2228 - list_del_init(&dev->node); 2229 - 2230 - input_wakeup_procfs_readers(); 2231 - 2232 - mutex_unlock(&input_mutex); 2285 + input_wakeup_procfs_readers(); 2286 + } 2233 2287 2234 2288 device_del(&dev->dev); 2235 2289 } ··· 2250 2308 static void input_repeat_key(struct timer_list *t) 2251 2309 { 2252 2310 struct input_dev *dev = from_timer(dev, t, timer); 2253 - unsigned long flags; 2254 
2311 2255 - spin_lock_irqsave(&dev->event_lock, flags); 2312 + guard(spinlock_irqsave)(&dev->event_lock); 2256 2313 2257 2314 if (!dev->inhibited && 2258 2315 test_bit(dev->repeat_key, dev->key) && ··· 2265 2324 mod_timer(&dev->timer, jiffies + 2266 2325 msecs_to_jiffies(dev->rep[REP_PERIOD])); 2267 2326 } 2268 - 2269 - spin_unlock_irqrestore(&dev->event_lock, flags); 2270 2327 } 2271 2328 2272 2329 /** ··· 2309 2370 if (!vals) 2310 2371 return -ENOMEM; 2311 2372 2312 - spin_lock_irq(&dev->event_lock); 2313 - dev->max_vals = max_vals; 2314 - swap(dev->vals, vals); 2315 - spin_unlock_irq(&dev->event_lock); 2373 + scoped_guard(spinlock_irq, &dev->event_lock) { 2374 + dev->max_vals = max_vals; 2375 + swap(dev->vals, vals); 2376 + } 2316 2377 2317 2378 /* Because of swap() above, this frees the old vals memory */ 2318 2379 kfree(vals); ··· 2404 2465 path ? path : "N/A"); 2405 2466 kfree(path); 2406 2467 2407 - error = mutex_lock_interruptible(&input_mutex); 2408 - if (error) 2409 - goto err_device_del; 2468 + error = -EINTR; 2469 + scoped_cond_guard(mutex_intr, goto err_device_del, &input_mutex) { 2470 + list_add_tail(&dev->node, &input_dev_list); 2410 2471 2411 - list_add_tail(&dev->node, &input_dev_list); 2472 + list_for_each_entry(handler, &input_handler_list, node) 2473 + input_attach_handler(dev, handler); 2412 2474 2413 - list_for_each_entry(handler, &input_handler_list, node) 2414 - input_attach_handler(dev, handler); 2415 - 2416 - input_wakeup_procfs_readers(); 2417 - 2418 - mutex_unlock(&input_mutex); 2475 + input_wakeup_procfs_readers(); 2476 + } 2419 2477 2420 2478 if (dev->devres_managed) { 2421 2479 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n", ··· 2492 2556 if (error) 2493 2557 return error; 2494 2558 2495 - INIT_LIST_HEAD(&handler->h_list); 2559 + scoped_cond_guard(mutex_intr, return -EINTR, &input_mutex) { 2560 + INIT_LIST_HEAD(&handler->h_list); 2496 2561 2497 - error = mutex_lock_interruptible(&input_mutex); 2498 - if (error) 2499 - 
return error; 2562 + list_add_tail(&handler->node, &input_handler_list); 2500 2563 2501 - list_add_tail(&handler->node, &input_handler_list); 2564 + list_for_each_entry(dev, &input_dev_list, node) 2565 + input_attach_handler(dev, handler); 2502 2566 2503 - list_for_each_entry(dev, &input_dev_list, node) 2504 - input_attach_handler(dev, handler); 2567 + input_wakeup_procfs_readers(); 2568 + } 2505 2569 2506 - input_wakeup_procfs_readers(); 2507 - 2508 - mutex_unlock(&input_mutex); 2509 2570 return 0; 2510 2571 } 2511 2572 EXPORT_SYMBOL(input_register_handler); ··· 2518 2585 { 2519 2586 struct input_handle *handle, *next; 2520 2587 2521 - mutex_lock(&input_mutex); 2588 + guard(mutex)(&input_mutex); 2522 2589 2523 2590 list_for_each_entry_safe(handle, next, &handler->h_list, h_node) 2524 2591 handler->disconnect(handle); ··· 2527 2594 list_del_init(&handler->node); 2528 2595 2529 2596 input_wakeup_procfs_readers(); 2530 - 2531 - mutex_unlock(&input_mutex); 2532 2597 } 2533 2598 EXPORT_SYMBOL(input_unregister_handler); 2534 2599 ··· 2546 2615 int (*fn)(struct input_handle *, void *)) 2547 2616 { 2548 2617 struct input_handle *handle; 2549 - int retval = 0; 2618 + int retval; 2550 2619 2551 - rcu_read_lock(); 2620 + guard(rcu)(); 2552 2621 2553 2622 list_for_each_entry_rcu(handle, &handler->h_list, h_node) { 2554 2623 retval = fn(handle, data); 2555 2624 if (retval) 2556 - break; 2625 + return retval; 2557 2626 } 2558 2627 2559 - rcu_read_unlock(); 2560 - 2561 - return retval; 2628 + return 0; 2562 2629 } 2563 2630 EXPORT_SYMBOL(input_handler_for_each_handle); 2564 2631 ··· 2644 2715 { 2645 2716 struct input_handler *handler = handle->handler; 2646 2717 struct input_dev *dev = handle->dev; 2647 - int error; 2648 2718 2649 2719 input_handle_setup_event_handler(handle); 2650 2720 /* 2651 2721 * We take dev->mutex here to prevent race with 2652 2722 * input_release_device(). 
2653 2723 */ 2654 - error = mutex_lock_interruptible(&dev->mutex); 2655 - if (error) 2656 - return error; 2657 - 2658 - /* 2659 - * Filters go to the head of the list, normal handlers 2660 - * to the tail. 2661 - */ 2662 - if (handler->filter) 2663 - list_add_rcu(&handle->d_node, &dev->h_list); 2664 - else 2665 - list_add_tail_rcu(&handle->d_node, &dev->h_list); 2666 - 2667 - mutex_unlock(&dev->mutex); 2724 + scoped_cond_guard(mutex_intr, return -EINTR, &dev->mutex) { 2725 + /* 2726 + * Filters go to the head of the list, normal handlers 2727 + * to the tail. 2728 + */ 2729 + if (handler->filter) 2730 + list_add_rcu(&handle->d_node, &dev->h_list); 2731 + else 2732 + list_add_tail_rcu(&handle->d_node, &dev->h_list); 2733 + } 2668 2734 2669 2735 /* 2670 2736 * Since we are supposed to be called from ->connect() ··· 2695 2771 /* 2696 2772 * Take dev->mutex to prevent race with input_release_device(). 2697 2773 */ 2698 - mutex_lock(&dev->mutex); 2699 - list_del_rcu(&handle->d_node); 2700 - mutex_unlock(&dev->mutex); 2774 + scoped_guard(mutex, &dev->mutex) 2775 + list_del_rcu(&handle->d_node); 2701 2776 2702 2777 synchronize_rcu(); 2703 2778 }