include/linux/lglock.h (+32, -4)
···
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>

 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)    name##_lock_init()
···
 #define DEFINE_LGLOCK(name) \
 \
+ DEFINE_SPINLOCK(name##_cpu_lock); \
+ cpumask_t name##_cpus __read_mostly; \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
  DEFINE_LGLOCK_LOCKDEP(name); \
  \
+ static int \
+ name##_lg_cpu_callback(struct notifier_block *nb, \
+                        unsigned long action, void *hcpu) \
+ { \
+     switch (action & ~CPU_TASKS_FROZEN) { \
+     case CPU_UP_PREPARE: \
+         spin_lock(&name##_cpu_lock); \
+         cpu_set((unsigned long)hcpu, name##_cpus); \
+         spin_unlock(&name##_cpu_lock); \
+         break; \
+     case CPU_UP_CANCELED: case CPU_DEAD: \
+         spin_lock(&name##_cpu_lock); \
+         cpu_clear((unsigned long)hcpu, name##_cpus); \
+         spin_unlock(&name##_cpu_lock); \
+     } \
+     return NOTIFY_OK; \
+ } \
+ static struct notifier_block name##_lg_cpu_notifier = { \
+     .notifier_call = name##_lg_cpu_callback, \
+ }; \
  void name##_lock_init(void) { \
      int i; \
      LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
···
          lock = &per_cpu(name##_lock, i); \
          *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
      } \
+     register_hotcpu_notifier(&name##_lg_cpu_notifier); \
+     get_online_cpus(); \
+     for_each_online_cpu(i) \
+         cpu_set(i, name##_cpus); \
+     put_online_cpus(); \
  } \
  EXPORT_SYMBOL(name##_lock_init); \
  \
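For a concrete lock name, say foo (an illustrative name, not one used by the patch), the lines added above expand to roughly the sketch below; whitespace is simplified and the unchanged per-CPU lock initialisation is elided. Registering the notifier before seeding the mask, and seeding it under get_online_cpus(), means a CPU coming up in between is caught by one path or the other, and CPU_UP_PREPARE fires before the incoming CPU can run any code that takes the read-side lock.

/* Sketch: roughly what DEFINE_LGLOCK(foo) now emits for the hotplug side. */
DEFINE_SPINLOCK(foo_cpu_lock);        /* serialises foo_cpus updates against writers */
cpumask_t foo_cpus __read_mostly;     /* CPUs whose per-CPU lock a writer must take */

static int foo_lg_cpu_callback(struct notifier_block *nb,
                               unsigned long action, void *hcpu)
{
    switch (action & ~CPU_TASKS_FROZEN) {
    case CPU_UP_PREPARE:
        /* CPU is about to come up: start covering it before it can run */
        spin_lock(&foo_cpu_lock);
        cpu_set((unsigned long)hcpu, foo_cpus);
        spin_unlock(&foo_cpu_lock);
        break;
    case CPU_UP_CANCELED: case CPU_DEAD:
        /* bring-up failed or the CPU went away: stop covering it */
        spin_lock(&foo_cpu_lock);
        cpu_clear((unsigned long)hcpu, foo_cpus);
        spin_unlock(&foo_cpu_lock);
    }
    return NOTIFY_OK;
}

static struct notifier_block foo_lg_cpu_notifier = {
    .notifier_call = foo_lg_cpu_callback,
};

void foo_lock_init(void)
{
    int i;
    /* ... per-CPU arch spinlocks initialised as before ... */
    register_hotcpu_notifier(&foo_lg_cpu_notifier);
    get_online_cpus();                /* hold hotplug off while seeding the mask */
    for_each_online_cpu(i)
        cpu_set(i, foo_cpus);
    put_online_cpus();
}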
···
  \
  void name##_global_lock_online(void) { \
      int i; \
-     preempt_disable(); \
+     spin_lock(&name##_cpu_lock); \
      rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
-     for_each_online_cpu(i) { \
+     for_each_cpu(i, &name##_cpus) { \
          arch_spinlock_t *lock; \
          lock = &per_cpu(name##_lock, i); \
          arch_spin_lock(lock); \
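The write-side rationale, sketched for the same illustrative foo: disabling preemption kept CPUs from going offline mid-walk, but it did not stop a CPU from coming online between the lock and unlock walks, so the two for_each_online_cpu() loops could cover different sets. Holding foo_cpu_lock from the global lock until the global unlock, and iterating the notifier-maintained foo_cpus mask, keeps both walks over the same stable set.

/* Sketch of the generated write-side lock for "foo" after this change. */
void foo_global_lock_online(void)
{
    int i;
    spin_lock(&foo_cpu_lock);      /* excludes foo_lg_cpu_callback(); released in
                                    * foo_global_unlock_online(), not here */
    rwlock_acquire(&foo_lock_dep_map, 0, 0, _RET_IP_);
    for_each_cpu(i, &foo_cpus) {   /* the tracked set, not cpu_online_mask */
        arch_spinlock_t *lock;
        lock = &per_cpu(foo_lock, i);
        arch_spin_lock(lock);
    }
}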
···
  void name##_global_unlock_online(void) { \
      int i; \
      rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
-     for_each_online_cpu(i) { \
+     for_each_cpu(i, &name##_cpus) { \
          arch_spinlock_t *lock; \
          lock = &per_cpu(name##_lock, i); \
          arch_spin_unlock(lock); \
      } \
-     preempt_enable(); \
+     spin_unlock(&name##_cpu_lock); \
  } \
  EXPORT_SYMBOL(name##_global_unlock_online); \
  \
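Nothing changes for callers of the lglock API. The usage sketch below follows the pattern of existing users of this header; DECLARE_LGLOCK and the name##_local_lock()/name##_local_unlock() helpers are assumed from the unchanged parts of lglock.h, and "my_lock" is an illustrative name.

/* Hypothetical caller; a sketch, not code from the patch. */
#include <linux/lglock.h>

DECLARE_LGLOCK(my_lock);
DEFINE_LGLOCK(my_lock);       /* now also emits the hotplug notifier shown above */

static void my_lock_example(void)
{
    my_lock_lock_init();              /* normally from init code; registers the notifier */

    my_lock_local_lock();             /* read side: this CPU's arch spinlock only */
    /* ... per-CPU fast path ... */
    my_lock_local_unlock();

    my_lock_global_lock_online();     /* write side: every CPU in my_lock_cpus */
    /* ... global slow path ... */
    my_lock_global_unlock_online();
}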