Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking, percpu_counter: Annotate ::lock as raw

The percpu_counter::lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Thomas Gleixner; committed by Ingo Molnar.
f032a450 ec484608

+10 -10
+1 -1
include/linux/percpu_counter.h
@@ -16,7 +16,7 @@
 #ifdef CONFIG_SMP
 
 struct percpu_counter {
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	s64 count;
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
+9 -9
lib/percpu_counter.c
@@ -59,13 +59,13 @@
 {
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	for_each_possible_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		*pcount = 0;
 	}
 	fbc->count = amount;
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -76,10 +76,10 @@
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
-		spin_lock(&fbc->lock);
+		raw_spin_lock(&fbc->lock);
 		fbc->count += count;
 		__this_cpu_write(*fbc->counters, 0);
-		spin_unlock(&fbc->lock);
+		raw_spin_unlock(&fbc->lock);
 	} else {
 		__this_cpu_write(*fbc->counters, count);
 	}
@@ -96,13 +96,13 @@
 	s64 ret;
 	int cpu;
 
-	spin_lock(&fbc->lock);
+	raw_spin_lock(&fbc->lock);
 	ret = fbc->count;
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
-	spin_unlock(&fbc->lock);
+	raw_spin_unlock(&fbc->lock);
 	return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
@@ -110,7 +110,7 @@
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
-	spin_lock_init(&fbc->lock);
+	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
 	fbc->counters = alloc_percpu(s32);
@@ -173,11 +173,11 @@
 		s32 *pcount;
 		unsigned long flags;
 
-		spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock_irqsave(&fbc->lock, flags);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
 	mutex_unlock(&percpu_counters_lock);
 #endif
··· 59 59 { 60 60 int cpu; 61 61 62 - spin_lock(&fbc->lock); 62 + raw_spin_lock(&fbc->lock); 63 63 for_each_possible_cpu(cpu) { 64 64 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 65 65 *pcount = 0; 66 66 } 67 67 fbc->count = amount; 68 - spin_unlock(&fbc->lock); 68 + raw_spin_unlock(&fbc->lock); 69 69 } 70 70 EXPORT_SYMBOL(percpu_counter_set); 71 71 ··· 76 76 preempt_disable(); 77 77 count = __this_cpu_read(*fbc->counters) + amount; 78 78 if (count >= batch || count <= -batch) { 79 - spin_lock(&fbc->lock); 79 + raw_spin_lock(&fbc->lock); 80 80 fbc->count += count; 81 81 __this_cpu_write(*fbc->counters, 0); 82 - spin_unlock(&fbc->lock); 82 + raw_spin_unlock(&fbc->lock); 83 83 } else { 84 84 __this_cpu_write(*fbc->counters, count); 85 85 } ··· 96 96 s64 ret; 97 97 int cpu; 98 98 99 - spin_lock(&fbc->lock); 99 + raw_spin_lock(&fbc->lock); 100 100 ret = fbc->count; 101 101 for_each_online_cpu(cpu) { 102 102 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 103 103 ret += *pcount; 104 104 } 105 - spin_unlock(&fbc->lock); 105 + raw_spin_unlock(&fbc->lock); 106 106 return ret; 107 107 } 108 108 EXPORT_SYMBOL(__percpu_counter_sum); ··· 110 110 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, 111 111 struct lock_class_key *key) 112 112 { 113 - spin_lock_init(&fbc->lock); 113 + raw_spin_lock_init(&fbc->lock); 114 114 lockdep_set_class(&fbc->lock, key); 115 115 fbc->count = amount; 116 116 fbc->counters = alloc_percpu(s32); ··· 173 173 s32 *pcount; 174 174 unsigned long flags; 175 175 176 - spin_lock_irqsave(&fbc->lock, flags); 176 + raw_spin_lock_irqsave(&fbc->lock, flags); 177 177 pcount = per_cpu_ptr(fbc->counters, cpu); 178 178 fbc->count += *pcount; 179 179 *pcount = 0; 180 - spin_unlock_irqrestore(&fbc->lock, flags); 180 + raw_spin_unlock_irqrestore(&fbc->lock, flags); 181 181 } 182 182 mutex_unlock(&percpu_counters_lock); 183 183 #endif