Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.9 · 119 lines · 3.5 kB
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>

#include "spinlock_common.h"

/*
 * Read the spinlock value without allocating in our cache and without
 * causing an invalidation to another cpu with a copy of the cacheline.
 * This is important when we are spinning waiting for the lock.
 */
static inline u32 arch_spin_read_noalloc(void *lock)
{
	return atomic_cmpxchg((atomic_t *)lock, -1, -1);
}

/*
 * Wait until the high bits (current) match my ticket.
 * If we notice the overflow bit set on entry, we clear it.
 */
void arch_spin_lock_slow(arch_spinlock_t *lock, u32 my_ticket)
{
	if (unlikely(my_ticket & __ARCH_SPIN_NEXT_OVERFLOW)) {
		__insn_fetchand4(&lock->lock, ~__ARCH_SPIN_NEXT_OVERFLOW);
		my_ticket &= ~__ARCH_SPIN_NEXT_OVERFLOW;
	}

	for (;;) {
		u32 val = arch_spin_read_noalloc(lock);
		u32 delta = my_ticket - arch_spin_current(val);
		if (delta == 0)
			return;
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
	}
}
EXPORT_SYMBOL(arch_spin_lock_slow);

/*
 * Check the lock to see if it is plausible, and try to get it with cmpxchg().
 */
int arch_spin_trylock(arch_spinlock_t *lock)
{
	u32 val = arch_spin_read_noalloc(lock);
	if (unlikely(arch_spin_current(val) != arch_spin_next(val)))
		return 0;
	return cmpxchg(&lock->lock, val, (val + 1) & ~__ARCH_SPIN_NEXT_OVERFLOW)
		== val;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	u32 val = READ_ONCE(lock->lock);
	u32 curr = arch_spin_current(val);

	/* Return immediately if unlocked. */
	if (arch_spin_next(val) == curr)
		return;

	/* Wait until the current locker has released the lock. */
	do {
		delay_backoff(iterations++);
	} while (arch_spin_current(READ_ONCE(lock->lock)) == curr);

	/*
	 * The TILE architecture doesn't do read speculation; therefore
	 * a control dependency guarantees a LOAD->{LOAD,STORE} order.
	 */
	barrier();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * If the read lock fails due to a writer, we retry periodically
 * until the value is positive and we write our incremented reader count.
 */
void __read_lock_failed(arch_rwlock_t *rw)
{
	u32 val;
	int iterations = 0;
	do {
		delay_backoff(iterations++);
		val = __insn_fetchaddgez4(&rw->lock, 1);
	} while (unlikely(arch_write_val_locked(val)));
}
EXPORT_SYMBOL(__read_lock_failed);

/*
 * If we failed because there were readers, clear the "writer" bit
 * so we don't block additional readers. Otherwise, there was another
 * writer anyway, so our "fetchor" made no difference. Then wait,
 * issuing periodic fetchor instructions, till we get the lock.
 */
void __write_lock_failed(arch_rwlock_t *rw, u32 val)
{
	int iterations = 0;
	do {
		if (!arch_write_val_locked(val))
			val = __insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
		delay_backoff(iterations++);
		val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
	} while (val != 0);
}
EXPORT_SYMBOL(__write_lock_failed);
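
The ticket-lock helpers used above (arch_spin_current(), arch_spin_next(), __ARCH_SPIN_NEXT_OVERFLOW) are defined in the arch's asm spinlock header, not in this file. The following user-space sketch models the lock-word layout they imply: a "next" ticket in the low bits, an overflow flag above it, and the "current" ticket in the high bits. The constants and names below are illustrative stand-ins assumed for the sketch, not quoted from the header.

/*
 * Illustrative user-space model of the ticket-lock word that
 * arch_spin_lock_slow() spins on.  NOT kernel code: the field layout
 * and constants are stand-ins assumed for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define SPIN_CURRENT_SHIFT 17       /* assumed: "current" ticket in high bits */
#define SPIN_NEXT_MASK     0x7fff   /* assumed: "next" ticket in low bits */
#define SPIN_NEXT_OVERFLOW 0x8000   /* assumed: overflow flag between them */

static uint32_t spin_current(uint32_t val) { return val >> SPIN_CURRENT_SHIFT; }
static uint32_t spin_next(uint32_t val)    { return val & SPIN_NEXT_MASK; }

int main(void)
{
	/* The holder is serving ticket 3; tickets up to 5 have been handed
	 * out, so two CPUs are still waiting. */
	uint32_t lock = (3u << SPIN_CURRENT_SHIFT) | 5u;
	uint32_t my_ticket = 5;

	printf("current = %u, next = %u\n", spin_current(lock), spin_next(lock));

	/* The lock is free exactly when next == current, which is the test
	 * arch_spin_trylock() and arch_spin_unlock_wait() both rely on. */
	printf("locked: %s\n",
	       spin_next(lock) == spin_current(lock) ? "no" : "yes");

	/* arch_spin_lock_slow() backs off in proportion to how far our
	 * ticket is from the one being served. */
	printf("tickets ahead of us: %u\n", my_ticket - spin_current(lock));
	return 0;
}

The delta-proportional relax() in arch_spin_lock_slow() follows from this layout: a waiter that is several tickets away cannot acquire the lock soon, so it spins longer between reads and generates less coherence traffic on the lock's cacheline.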
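Similarly, the rwlock word handled by __read_lock_failed() and __write_lock_failed() keeps a reader count in the low bits and a writer flag in a high bit; arch_write_val_locked() and __WRITE_LOCK_BIT also come from the companion header. Below is a minimal user-space model of the retry decision in __write_lock_failed(), with the writer-bit position assumed for illustration.

/*
 * Illustrative user-space model of the rwlock word used by
 * __read_lock_failed()/__write_lock_failed().  NOT kernel code: the
 * writer-bit position is an assumption made for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define WRITE_LOCK_BIT (1u << 31)   /* assumed stand-in for __WRITE_LOCK_BIT */

/* Stand-in for arch_write_val_locked(): does a writer hold the lock? */
static int write_val_locked(uint32_t val)
{
	return (val & WRITE_LOCK_BIT) != 0;
}

int main(void)
{
	uint32_t rw = 2;                 /* two readers currently hold the lock */

	/* A writer optimistically sets its bit (the kernel uses fetchor). */
	uint32_t old = rw;
	rw |= WRITE_LOCK_BIT;

	if (!write_val_locked(old) && (old & ~WRITE_LOCK_BIT) != 0) {
		/*
		 * This is the case __write_lock_failed() handles: no other
		 * writer, but readers are still counted, so clear our bit
		 * again (fetchand with ~__WRITE_LOCK_BIT), back off, and
		 * retry until a later fetchor finds the whole word at zero.
		 */
		rw &= ~WRITE_LOCK_BIT;
	}

	printf("readers = %u, writer pending = %d\n",
	       rw & ~WRITE_LOCK_BIT, write_val_locked(rw));
	return 0;
}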