Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking: Remove ACCESS_ONCE() usage

With the new standardized functions, we can replace all
ACCESS_ONCE() calls across the relevant locking code — this
includes lockref and seqlock as well, while at it.

ACCESS_ONCE() does not work reliably on non-scalar types.
For example gcc 4.6 and 4.7 might remove the volatile tag
for such accesses during the SRA (scalar replacement of
aggregates) step:

https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145

Update to the new calls regardless of whether the access is a
scalar type; this is cleaner than having three alternatives.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1424662301.6539.18.camel@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Davidlohr Bueso and committed by
Ingo Molnar
4d3199e4 2ae79026

+23 -23
+3 -3
include/linux/seqlock.h
··· 108 108 unsigned ret; 109 109 110 110 repeat: 111 - ret = ACCESS_ONCE(s->sequence); 111 + ret = READ_ONCE(s->sequence); 112 112 if (unlikely(ret & 1)) { 113 113 cpu_relax(); 114 114 goto repeat; ··· 127 127 */ 128 128 static inline unsigned raw_read_seqcount(const seqcount_t *s) 129 129 { 130 - unsigned ret = ACCESS_ONCE(s->sequence); 130 + unsigned ret = READ_ONCE(s->sequence); 131 131 smp_rmb(); 132 132 return ret; 133 133 } ··· 179 179 */ 180 180 static inline unsigned raw_seqcount_begin(const seqcount_t *s) 181 181 { 182 - unsigned ret = ACCESS_ONCE(s->sequence); 182 + unsigned ret = READ_ONCE(s->sequence); 183 183 smp_rmb(); 184 184 return ret & ~1; 185 185 }
+3 -3
kernel/locking/mcs_spinlock.h
··· 78 78 */ 79 79 return; 80 80 } 81 - ACCESS_ONCE(prev->next) = node; 81 + WRITE_ONCE(prev->next, node); 82 82 83 83 /* Wait until the lock holder passes the lock down. */ 84 84 arch_mcs_spin_lock_contended(&node->locked); ··· 91 91 static inline 92 92 void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node) 93 93 { 94 - struct mcs_spinlock *next = ACCESS_ONCE(node->next); 94 + struct mcs_spinlock *next = READ_ONCE(node->next); 95 95 96 96 if (likely(!next)) { 97 97 /* ··· 100 100 if (likely(cmpxchg(lock, node, NULL) == node)) 101 101 return; 102 102 /* Wait until the next pointer is set */ 103 - while (!(next = ACCESS_ONCE(node->next))) 103 + while (!(next = READ_ONCE(node->next))) 104 104 cpu_relax_lowlatency(); 105 105 } 106 106
+4 -4
kernel/locking/mutex.c
··· 266 266 return 0; 267 267 268 268 rcu_read_lock(); 269 - owner = ACCESS_ONCE(lock->owner); 269 + owner = READ_ONCE(lock->owner); 270 270 if (owner) 271 271 retval = owner->on_cpu; 272 272 rcu_read_unlock(); ··· 340 340 * As such, when deadlock detection needs to be 341 341 * performed the optimistic spinning cannot be done. 342 342 */ 343 - if (ACCESS_ONCE(ww->ctx)) 343 + if (READ_ONCE(ww->ctx)) 344 344 break; 345 345 } 346 346 ··· 348 348 * If there's an owner, wait for it to either 349 349 * release the lock or go to sleep. 350 350 */ 351 - owner = ACCESS_ONCE(lock->owner); 351 + owner = READ_ONCE(lock->owner); 352 352 if (owner && !mutex_spin_on_owner(lock, owner)) 353 353 break; 354 354 ··· 487 487 __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) 488 488 { 489 489 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 490 - struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); 490 + struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); 491 491 492 492 if (!hold_ctx) 493 493 return 0;
+7 -7
kernel/locking/osq_lock.c
··· 98 98 99 99 prev = decode_cpu(old); 100 100 node->prev = prev; 101 - ACCESS_ONCE(prev->next) = node; 101 + WRITE_ONCE(prev->next, node); 102 102 103 103 /* 104 104 * Normally @prev is untouchable after the above store; because at that ··· 109 109 * cmpxchg in an attempt to undo our queueing. 110 110 */ 111 111 112 - while (!ACCESS_ONCE(node->locked)) { 112 + while (!READ_ONCE(node->locked)) { 113 113 /* 114 114 * If we need to reschedule bail... so we can block. 115 115 */ ··· 148 148 * Or we race against a concurrent unqueue()'s step-B, in which 149 149 * case its step-C will write us a new @node->prev pointer. 150 150 */ 151 - prev = ACCESS_ONCE(node->prev); 151 + prev = READ_ONCE(node->prev); 152 152 } 153 153 154 154 /* ··· 170 170 * it will wait in Step-A. 171 171 */ 172 172 173 - ACCESS_ONCE(next->prev) = prev; 174 - ACCESS_ONCE(prev->next) = next; 173 + WRITE_ONCE(next->prev, prev); 174 + WRITE_ONCE(prev->next, next); 175 175 176 176 return false; 177 177 } ··· 193 193 node = this_cpu_ptr(&osq_node); 194 194 next = xchg(&node->next, NULL); 195 195 if (next) { 196 - ACCESS_ONCE(next->locked) = 1; 196 + WRITE_ONCE(next->locked, 1); 197 197 return; 198 198 } 199 199 200 200 next = osq_wait_next(lock, node, NULL); 201 201 if (next) 202 - ACCESS_ONCE(next->locked) = 1; 202 + WRITE_ONCE(next->locked, 1); 203 203 }
+5 -5
kernel/locking/rwsem-xadd.c
··· 279 279 */ 280 280 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem) 281 281 { 282 - long old, count = ACCESS_ONCE(sem->count); 282 + long old, count = READ_ONCE(sem->count); 283 283 284 284 while (true) { 285 285 if (!(count == 0 || count == RWSEM_WAITING_BIAS)) ··· 304 304 return false; 305 305 306 306 rcu_read_lock(); 307 - owner = ACCESS_ONCE(sem->owner); 307 + owner = READ_ONCE(sem->owner); 308 308 if (!owner) { 309 - long count = ACCESS_ONCE(sem->count); 309 + long count = READ_ONCE(sem->count); 310 310 /* 311 311 * If sem->owner is not set, yet we have just recently entered the 312 312 * slowpath with the lock being active, then there is a possibility ··· 385 385 goto done; 386 386 387 387 while (true) { 388 - owner = ACCESS_ONCE(sem->owner); 388 + owner = READ_ONCE(sem->owner); 389 389 if (owner && !rwsem_spin_on_owner(sem, owner)) 390 390 break; 391 391 ··· 459 459 460 460 /* we're now waiting on the lock, but no longer actively locking */ 461 461 if (waiting) { 462 - count = ACCESS_ONCE(sem->count); 462 + count = READ_ONCE(sem->count); 463 463 464 464 /* 465 465 * If there were already threads queued before us and there are
+1 -1
lib/lockref.c
··· 18 18 #define CMPXCHG_LOOP(CODE, SUCCESS) do { \ 19 19 struct lockref old; \ 20 20 BUILD_BUG_ON(sizeof(old) != 8); \ 21 - old.lock_count = ACCESS_ONCE(lockref->lock_count); \ 21 + old.lock_count = READ_ONCE(lockref->lock_count); \ 22 22 while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \ 23 23 struct lockref new = old, prev = old; \ 24 24 CODE \