Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rcu: treewide: Do not use rcu_read_lock_held when calling rcu_dereference_check

Since commit ca5ecddf ("rcu: define __rcu address space modifier for sparse"),
rcu_dereference_check() automatically includes rcu_read_lock_held() as part of
its condition, so callers no longer need to specify it explicitly.

Signed-off-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>

authored by

Michal Hocko and committed by
Jiri Kosina
d8bf4ca9 eb032b98

+6 -27
-1
include/linux/cgroup.h
··· 539 539 */ 540 540 #define task_subsys_state_check(task, subsys_id, __c) \ 541 541 rcu_dereference_check(task->cgroups->subsys[subsys_id], \ 542 - rcu_read_lock_held() || \ 543 542 lockdep_is_held(&task->alloc_lock) || \ 544 543 cgroup_lock_is_held() || (__c)) 545 544
-1
include/linux/cred.h
··· 284 284 ({ \ 285 285 const struct task_struct *__t = (task); \ 286 286 rcu_dereference_check(__t->real_cred, \ 287 - rcu_read_lock_held() || \ 288 287 task_is_dead(__t)); \ 289 288 }) 290 289
-1
include/linux/fdtable.h
··· 60 60 61 61 #define rcu_dereference_check_fdtable(files, fdtfd) \ 62 62 (rcu_dereference_check((fdtfd), \ 63 - rcu_read_lock_held() || \ 64 63 lockdep_is_held(&(files)->file_lock) || \ 65 64 atomic_read(&(files)->count) == 1 || \ 66 65 rcu_my_thread_group_empty()))
+1 -2
include/linux/rtnetlink.h
··· 758 758 * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference() 759 759 */ 760 760 #define rcu_dereference_rtnl(p) \ 761 - rcu_dereference_check(p, rcu_read_lock_held() || \ 762 - lockdep_rtnl_is_held()) 761 + rcu_dereference_check(p, lockdep_rtnl_is_held()) 763 762 764 763 /** 765 764 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
+1 -2
include/net/sock.h
··· 1301 1301 static inline struct dst_entry * 1302 1302 __sk_dst_get(struct sock *sk) 1303 1303 { 1304 - return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() || 1305 - sock_owned_by_user(sk) || 1304 + return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) || 1306 1305 lockdep_is_held(&sk->sk_lock.slock)); 1307 1306 } 1308 1307
+2 -6
kernel/cgroup.c
··· 1697 1697 { 1698 1698 char *start; 1699 1699 struct dentry *dentry = rcu_dereference_check(cgrp->dentry, 1700 - rcu_read_lock_held() || 1701 1700 cgroup_lock_is_held()); 1702 1701 1703 1702 if (!dentry || cgrp == dummytop) { ··· 1722 1723 break; 1723 1724 1724 1725 dentry = rcu_dereference_check(cgrp->dentry, 1725 - rcu_read_lock_held() || 1726 1726 cgroup_lock_is_held()); 1727 1727 if (!cgrp->parent) 1728 1728 continue; ··· 4811 4813 * on this or this is under rcu_read_lock(). Once css->id is allocated, 4812 4814 * it's unchanged until freed. 4813 4815 */ 4814 - cssid = rcu_dereference_check(css->id, 4815 - rcu_read_lock_held() || atomic_read(&css->refcnt)); 4816 + cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt)); 4816 4817 4817 4818 if (cssid) 4818 4819 return cssid->id; ··· 4823 4826 { 4824 4827 struct css_id *cssid; 4825 4828 4826 - cssid = rcu_dereference_check(css->id, 4827 - rcu_read_lock_held() || atomic_read(&css->refcnt)); 4829 + cssid = rcu_dereference_check(css->id, atomic_read(&css->refcnt)); 4828 4830 4829 4831 if (cssid) 4830 4832 return cssid->depth;
-1
kernel/exit.c
··· 85 85 struct tty_struct *uninitialized_var(tty); 86 86 87 87 sighand = rcu_dereference_check(tsk->sighand, 88 - rcu_read_lock_held() || 89 88 lockdep_tasklist_lock_is_held()); 90 89 spin_lock(&sighand->siglock); 91 90
-1
kernel/pid.c
··· 405 405 if (pid) { 406 406 struct hlist_node *first; 407 407 first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]), 408 - rcu_read_lock_held() || 409 408 lockdep_tasklist_lock_is_held()); 410 409 if (first) 411 410 result = hlist_entry(first, struct task_struct, pids[(type)].node);
-2
kernel/rcutorture.c
··· 941 941 idx = cur_ops->readlock(); 942 942 completed = cur_ops->completed(); 943 943 p = rcu_dereference_check(rcu_torture_current, 944 - rcu_read_lock_held() || 945 944 rcu_read_lock_bh_held() || 946 945 rcu_read_lock_sched_held() || 947 946 srcu_read_lock_held(&srcu_ctl)); ··· 1001 1002 idx = cur_ops->readlock(); 1002 1003 completed = cur_ops->completed(); 1003 1004 p = rcu_dereference_check(rcu_torture_current, 1004 - rcu_read_lock_held() || 1005 1005 rcu_read_lock_bh_held() || 1006 1006 rcu_read_lock_sched_held() || 1007 1007 srcu_read_lock_held(&srcu_ctl));
-1
kernel/sched.c
··· 581 581 582 582 #define rcu_dereference_check_sched_domain(p) \ 583 583 rcu_dereference_check((p), \ 584 - rcu_read_lock_held() || \ 585 584 lockdep_is_held(&sched_domains_mutex)) 586 585 587 586 /*
-4
net/mac80211/sta_info.c
··· 97 97 struct sta_info *sta; 98 98 99 99 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 100 - rcu_read_lock_held() || 101 100 lockdep_is_held(&local->sta_lock) || 102 101 lockdep_is_held(&local->sta_mtx)); 103 102 while (sta) { ··· 104 105 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 105 106 break; 106 107 sta = rcu_dereference_check(sta->hnext, 107 - rcu_read_lock_held() || 108 108 lockdep_is_held(&local->sta_lock) || 109 109 lockdep_is_held(&local->sta_mtx)); 110 110 } ··· 121 123 struct sta_info *sta; 122 124 123 125 sta = rcu_dereference_check(local->sta_hash[STA_HASH(addr)], 124 - rcu_read_lock_held() || 125 126 lockdep_is_held(&local->sta_lock) || 126 127 lockdep_is_held(&local->sta_mtx)); 127 128 while (sta) { ··· 129 132 memcmp(sta->sta.addr, addr, ETH_ALEN) == 0) 130 133 break; 131 134 sta = rcu_dereference_check(sta->hnext, 132 - rcu_read_lock_held() || 133 135 lockdep_is_held(&local->sta_lock) || 134 136 lockdep_is_held(&local->sta_mtx)); 135 137 }
+1 -2
net/netlabel/netlabel_domainhash.c
··· 55 55 * should be okay */ 56 56 static DEFINE_SPINLOCK(netlbl_domhsh_lock); 57 57 #define netlbl_domhsh_rcu_deref(p) \ 58 - rcu_dereference_check(p, rcu_read_lock_held() || \ 59 - lockdep_is_held(&netlbl_domhsh_lock)) 58 + rcu_dereference_check(p, lockdep_is_held(&netlbl_domhsh_lock)) 60 59 static struct netlbl_domhsh_tbl *netlbl_domhsh = NULL; 61 60 static struct netlbl_dom_map *netlbl_domhsh_def = NULL; 62 61
+1 -2
net/netlabel/netlabel_unlabeled.c
··· 116 116 * hash table should be okay */ 117 117 static DEFINE_SPINLOCK(netlbl_unlhsh_lock); 118 118 #define netlbl_unlhsh_rcu_deref(p) \ 119 - rcu_dereference_check(p, rcu_read_lock_held() || \ 120 - lockdep_is_held(&netlbl_unlhsh_lock)) 119 + rcu_dereference_check(p, lockdep_is_held(&netlbl_unlhsh_lock)) 121 120 static struct netlbl_unlhsh_tbl *netlbl_unlhsh = NULL; 122 121 static struct netlbl_unlhsh_iface *netlbl_unlhsh_def = NULL; 123 122
-1
security/keys/keyring.c
··· 155 155 } 156 156 157 157 klist = rcu_dereference_check(keyring->payload.subscriptions, 158 - rcu_read_lock_held() || 159 158 atomic_read(&keyring->usage) == 0); 160 159 if (klist) { 161 160 for (loop = klist->nkeys - 1; loop >= 0; loop--)