/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

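/*
 * The lock word follows the __ldcw() convention: non-zero means the lock
 * is free, zero means it is held (ldcw atomically loads the word and
 * clears it to zero).
 */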
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return READ_ONCE(*a) == 0;
}

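/*
 * Spin until the lock is acquired: __ldcw() atomically reads and zeroes
 * the lock word, so a non-zero return means we now own the lock.  While
 * it reads back zero, spin on plain loads until the word becomes non-zero
 * before retrying the atomic operation.
 */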
static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			continue;
}

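/*
 * As arch_spin_lock(), but @flags carries the caller's saved interrupt
 * state: if interrupts were enabled there (PSW_SM_I set), briefly
 * re-enable and re-disable them while spinning so pending interrupts can
 * still be serviced during a long wait.
 */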
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					unsigned long flags)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			if (flags & PSW_SM_I) {
				local_irq_enable();
				local_irq_disable();
			}
}
#define arch_spin_lock_flags arch_spin_lock_flags

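/*
 * Release the lock by storing a non-zero value back into the lock word.
 */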
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	/* Release with ordered store. */
	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
}

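/*
 * Single acquisition attempt: a non-zero __ldcw() result means the lock
 * was free and is now ours; zero means someone else holds it.
 */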
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
	return __ldcw(a) != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by readers.
 *
 * The rwlock state itself is kept in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A counter of zero means a writer holds the lock exclusively:
	 * deny the reader.  Otherwise grant the lock to the first or
	 * subsequent reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If any reader holds the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; otherwise, if unlocked, grant it to the writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

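/* Reader slow path: retry the trylock until it succeeds. */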
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

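/* Writer slow path: retry the trylock until it succeeds. */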
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}

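/* Drop a reader's share by giving its count back to @counter. */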
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

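/* Release the writer by restoring @counter to its unlocked value. */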
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */