Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

binder: fix sparse warnings on locking context

Add __acquire()/__release() annotations to fix warnings
in sparse context checking

There is one case where the warning was due to a lack of
a "default:" case in a switch statement where a lock was
being released in each of the cases, so the default
case was added.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Todd Kjos; committed by Greg Kroah-Hartman
324fa64c 1dbfe7f2

+43 -1
+42 -1
drivers/android/binder.c
··· 660 660 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) 661 661 static void 662 662 _binder_proc_lock(struct binder_proc *proc, int line) 663 + __acquires(&proc->outer_lock) 663 664 { 664 665 binder_debug(BINDER_DEBUG_SPINLOCKS, 665 666 "%s: line=%d\n", __func__, line); ··· 676 675 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__) 677 676 static void 678 677 _binder_proc_unlock(struct binder_proc *proc, int line) 678 + __releases(&proc->outer_lock) 679 679 { 680 680 binder_debug(BINDER_DEBUG_SPINLOCKS, 681 681 "%s: line=%d\n", __func__, line); ··· 692 690 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) 693 691 static void 694 692 _binder_inner_proc_lock(struct binder_proc *proc, int line) 693 + __acquires(&proc->inner_lock) 695 694 { 696 695 binder_debug(BINDER_DEBUG_SPINLOCKS, 697 696 "%s: line=%d\n", __func__, line); ··· 708 705 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) 709 706 static void 710 707 _binder_inner_proc_unlock(struct binder_proc *proc, int line) 708 + __releases(&proc->inner_lock) 711 709 { 712 710 binder_debug(BINDER_DEBUG_SPINLOCKS, 713 711 "%s: line=%d\n", __func__, line); ··· 724 720 #define binder_node_lock(node) _binder_node_lock(node, __LINE__) 725 721 static void 726 722 _binder_node_lock(struct binder_node *node, int line) 723 + __acquires(&node->lock) 727 724 { 728 725 binder_debug(BINDER_DEBUG_SPINLOCKS, 729 726 "%s: line=%d\n", __func__, line); ··· 740 735 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) 741 736 static void 742 737 _binder_node_unlock(struct binder_node *node, int line) 738 + __releases(&node->lock) 743 739 { 744 740 binder_debug(BINDER_DEBUG_SPINLOCKS, 745 741 "%s: line=%d\n", __func__, line); ··· 757 751 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) 758 752 static void 759 753 _binder_node_inner_lock(struct binder_node *node, int line) 754 + 
__acquires(&node->lock) __acquires(&node->proc->inner_lock) 760 755 { 761 756 binder_debug(BINDER_DEBUG_SPINLOCKS, 762 757 "%s: line=%d\n", __func__, line); 763 758 spin_lock(&node->lock); 764 759 if (node->proc) 765 760 binder_inner_proc_lock(node->proc); 761 + else 762 + /* annotation for sparse */ 763 + __acquire(&node->proc->inner_lock); 766 764 } 767 765 768 766 /** ··· 778 768 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) 779 769 static void 780 770 _binder_node_inner_unlock(struct binder_node *node, int line) 771 + __releases(&node->lock) __releases(&node->proc->inner_lock) 781 772 { 782 773 struct binder_proc *proc = node->proc; 783 774 ··· 786 775 "%s: line=%d\n", __func__, line); 787 776 if (proc) 788 777 binder_inner_proc_unlock(proc); 778 + else 779 + /* annotation for sparse */ 780 + __release(&node->proc->inner_lock); 789 781 spin_unlock(&node->lock); 790 782 } 791 783 ··· 1398 1384 binder_node_inner_lock(node); 1399 1385 if (!node->proc) 1400 1386 spin_lock(&binder_dead_nodes_lock); 1387 + else 1388 + __acquire(&binder_dead_nodes_lock); 1401 1389 node->tmp_refs--; 1402 1390 BUG_ON(node->tmp_refs < 0); 1403 1391 if (!node->proc) 1404 1392 spin_unlock(&binder_dead_nodes_lock); 1393 + else 1394 + __release(&binder_dead_nodes_lock); 1405 1395 /* 1406 1396 * Call binder_dec_node() to check if all refcounts are 0 1407 1397 * and cleanup is needed. 
Calling with strong=0 and internal=1 ··· 1908 1890 */ 1909 1891 static struct binder_thread *binder_get_txn_from_and_acq_inner( 1910 1892 struct binder_transaction *t) 1893 + __acquires(&t->from->proc->inner_lock) 1911 1894 { 1912 1895 struct binder_thread *from; 1913 1896 1914 1897 from = binder_get_txn_from(t); 1915 - if (!from) 1898 + if (!from) { 1899 + __acquire(&from->proc->inner_lock); 1916 1900 return NULL; 1901 + } 1917 1902 binder_inner_proc_lock(from->proc); 1918 1903 if (t->from) { 1919 1904 BUG_ON(from != t->from); 1920 1905 return from; 1921 1906 } 1922 1907 binder_inner_proc_unlock(from->proc); 1908 + __acquire(&from->proc->inner_lock); 1923 1909 binder_thread_dec_tmpref(from); 1924 1910 return NULL; 1925 1911 } ··· 1995 1973 binder_thread_dec_tmpref(target_thread); 1996 1974 binder_free_transaction(t); 1997 1975 return; 1976 + } else { 1977 + __release(&target_thread->proc->inner_lock); 1998 1978 } 1999 1979 next = t->from_parent; 2000 1980 ··· 2418 2394 fp->cookie = node->cookie; 2419 2395 if (node->proc) 2420 2396 binder_inner_proc_lock(node->proc); 2397 + else 2398 + __acquire(&node->proc->inner_lock); 2421 2399 binder_inc_node_nilocked(node, 2422 2400 fp->hdr.type == BINDER_TYPE_BINDER, 2423 2401 0, NULL); 2424 2402 if (node->proc) 2425 2403 binder_inner_proc_unlock(node->proc); 2404 + else 2405 + __release(&node->proc->inner_lock); 2426 2406 trace_binder_transaction_ref_to_node(t, node, &src_rdata); 2427 2407 binder_debug(BINDER_DEBUG_TRANSACTION, 2428 2408 " ref %d desc %d -> node %d u%016llx\n", ··· 2790 2762 binder_set_nice(in_reply_to->saved_priority); 2791 2763 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); 2792 2764 if (target_thread == NULL) { 2765 + /* annotation for sparse */ 2766 + __release(&target_thread->proc->inner_lock); 2793 2767 return_error = BR_DEAD_REPLY; 2794 2768 return_error_line = __LINE__; 2795 2769 goto err_dead_binder; ··· 4191 4161 if (cmd == BR_DEAD_BINDER) 4192 4162 goto done; /* DEAD_BINDER 
notifications can cause transactions */ 4193 4163 } break; 4164 + default: 4165 + binder_inner_proc_unlock(proc); 4166 + pr_err("%d:%d: bad work type %d\n", 4167 + proc->pid, thread->pid, w->type); 4168 + break; 4194 4169 } 4195 4170 4196 4171 if (!t) ··· 4499 4464 spin_lock(&t->lock); 4500 4465 if (t->to_thread == thread) 4501 4466 send_reply = t; 4467 + } else { 4468 + __acquire(&t->lock); 4502 4469 } 4503 4470 thread->is_dead = true; 4504 4471 ··· 4529 4492 spin_unlock(&last_t->lock); 4530 4493 if (t) 4531 4494 spin_lock(&t->lock); 4495 + else 4496 + __acquire(&t->lock); 4532 4497 } 4498 + /* annotation for sparse, lock not acquired in last iteration above */ 4499 + __release(&t->lock); 4533 4500 4534 4501 /* 4535 4502 * If this thread used poll, make sure we remove the waitqueue
+1
drivers/android/binder_alloc.c
··· 943 943 struct list_lru_one *lru, 944 944 spinlock_t *lock, 945 945 void *cb_arg) 946 + __must_hold(lock) 946 947 { 947 948 struct mm_struct *mm = NULL; 948 949 struct binder_lru_page *page = container_of(item,