Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

locking, kprobes: Annotate the hash locks and kretprobe.lock as raw

The kprobe locks can be taken in atomic context and therefore
cannot be converted to sleeping locks on -rt; annotate them as raw.

In mainline this change merely documents the low-level nature of
these locks; there is no functional difference otherwise. Lockdep
and Sparse checking will work as usual.
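
To see why the raw annotation matters, here is a minimal sketch, not part of
this patch and with made-up names: on PREEMPT_RT a spinlock_t is converted to
a sleeping rtmutex-based lock, while a raw_spinlock_t keeps the classic
non-preemptible spinning behaviour, so code running in atomic context must
take the raw variant.

    #include <linux/spinlock.h>

    /* Hypothetical lock, for illustration only. */
    static DEFINE_RAW_SPINLOCK(example_raw_lock);

    static void example_atomic_path(void)
    {
    	unsigned long flags;

    	/* Safe on -rt: raw locks never sleep, even with PREEMPT_RT. */
    	raw_spin_lock_irqsave(&example_raw_lock, flags);
    	/* ... short, non-sleeping critical section ... */
    	raw_spin_unlock_irqrestore(&example_raw_lock, flags);
    }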

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Thomas Gleixner, committed by Ingo Molnar
commit ec484608 (parent 76bf6877)

2 files changed, 18 insertions(+), 18 deletions(-)

include/linux/kprobes.h: +1 -1
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -181,7 +181,7 @@
 	int nmissed;
 	size_t data_size;
 	struct hlist_head free_instances;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 struct kretprobe_instance {
kernel/kprobes.c: +17 -17
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -78,10 +78,10 @@
 static DEFINE_MUTEX(kprobe_mutex);
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 static struct {
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	raw_spinlock_t lock ____cacheline_aligned_in_smp;
 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
 
-static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
+static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
 {
 	return &(kretprobe_table_locks[hash].lock);
 }
@@ -1013,9 +1013,9 @@
 	hlist_del(&ri->hlist);
 	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		spin_lock(&rp->lock);
+		raw_spin_lock(&rp->lock);
 		hlist_add_head(&ri->hlist, &rp->free_instances);
-		spin_unlock(&rp->lock);
+		raw_spin_unlock(&rp->lock);
 	} else
 		/* Unregistering */
 		hlist_add_head(&ri->hlist, head);
@@ -1026,19 +1026,19 @@
 __acquires(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	*head = &kretprobe_inst_table[hash];
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_lock(unsigned long hash,
 	unsigned long *flags)
 __acquires(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_lock_irqsave(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_lock_irqsave(hlist_lock, *flags);
 }
 
 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
@@ -1046,18 +1046,18 @@
 __releases(hlist_lock)
 {
 	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
-	spinlock_t *hlist_lock;
+	raw_spinlock_t *hlist_lock;
 
 	hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 static void __kprobes kretprobe_table_unlock(unsigned long hash,
 	unsigned long *flags)
 __releases(hlist_lock)
 {
-	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
-	spin_unlock_irqrestore(hlist_lock, *flags);
+	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
+	raw_spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
 /*
@@ -1663,12 +1663,12 @@
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
 	hash = hash_ptr(current, KPROBE_HASH_BITS);
-	spin_lock_irqsave(&rp->lock, flags);
+	raw_spin_lock_irqsave(&rp->lock, flags);
 	if (!hlist_empty(&rp->free_instances)) {
 		ri = hlist_entry(rp->free_instances.first,
 				struct kretprobe_instance, hlist);
 		hlist_del(&ri->hlist);
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 
 		ri->rp = rp;
 		ri->task = current;
@@ -1685,7 +1685,7 @@
 		kretprobe_table_unlock(hash, &flags);
 	} else {
 		rp->nmissed++;
-		spin_unlock_irqrestore(&rp->lock, flags);
+		raw_spin_unlock_irqrestore(&rp->lock, flags);
 	}
 	return 0;
 }
@@ -1721,7 +1721,7 @@
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	spin_lock_init(&rp->lock);
+	raw_spin_lock_init(&rp->lock);
 	INIT_HLIST_HEAD(&rp->free_instances);
 	for (i = 0; i < rp->maxactive; i++) {
 		inst = kmalloc(sizeof(struct kretprobe_instance) +
@@ -1959,7 +1959,7 @@
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
-		spin_lock_init(&(kretprobe_table_locks[i].lock));
+		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
 	}
 
 	/*
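
For context, the hashed per-bucket locking the diff converts follows a
standard pattern: an array of cacheline-aligned locks indexed by a pointer
hash. Below is a standalone sketch with hypothetical names mirroring
kernel/kprobes.c (upstream uses KPROBE_HASH_BITS == 6):

    #include <linux/cache.h>
    #include <linux/hash.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    #define EXAMPLE_HASH_BITS	6
    #define EXAMPLE_TABLE_SIZE	(1 << EXAMPLE_HASH_BITS)

    /* One cacheline-aligned raw lock per hash bucket, as in the patch. */
    static struct {
    	raw_spinlock_t lock ____cacheline_aligned_in_smp;
    } example_table_locks[EXAMPLE_TABLE_SIZE];

    /* Hash the task pointer to select the bucket lock, mirroring
     * kretprobe_table_lock_ptr() in the diff above. */
    static raw_spinlock_t *example_lock_ptr(struct task_struct *tsk)
    {
    	return &example_table_locks[hash_ptr(tsk, EXAMPLE_HASH_BITS)].lock;
    }

Hashing to a bucket lock rather than taking one global lock keeps unrelated
tasks from contending on the same lock, and the cacheline alignment avoids
false sharing between adjacent buckets.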