Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
"The main changes in this cycle are initial preparatory bits of dynamic
lockdep keys support from Bart Van Assche.

There are also misc changes, a comment cleanup and a data structure
cleanup"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/fair: Clean up comment in nohz_idle_balance()
locking/lockdep: Stop using RCU primitives to access 'all_lock_classes'
locking/lockdep: Make concurrent lockdep_reset_lock() calls safe
locking/lockdep: Remove a superfluous INIT_LIST_HEAD() statement
locking/lockdep: Introduce lock_class_cache_is_registered()
locking/lockdep: Inline __lockdep_init_map()
locking/lockdep: Declare local symbols static
tools/lib/lockdep/tests: Test the lockdep_reset_lock() implementation
tools/lib/lockdep: Add dummy print_irqtrace_events() implementation
tools/lib/lockdep: Rename "trywlock" into "trywrlock"
tools/lib/lockdep/tests: Run lockdep tests a second time under Valgrind
tools/lib/lockdep/tests: Improve testing accuracy
tools/lib/lockdep/tests: Fix shellcheck warnings
tools/lib/lockdep/tests: Display compiler warning and error messages
locking/lockdep: Remove ::version from lock_class structure

+131 -53
-2
include/linux/lockdep.h
··· 97 97 * Generation counter, when doing certain classes of graph walking, 98 98 * to ensure that we check one node only once: 99 99 */ 100 - unsigned int version; 101 - 102 100 int name_version; 103 101 const char *name; 104 102
+43 -33
kernel/locking/lockdep.c
··· 138 138 * get freed - this significantly simplifies the debugging code. 139 139 */ 140 140 unsigned long nr_lock_classes; 141 + #ifndef CONFIG_DEBUG_LOCKDEP 142 + static 143 + #endif 141 144 struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; 142 145 143 146 static inline struct lock_class *hlock_class(struct held_lock *hlock) ··· 629 626 630 627 /* 631 628 * To make lock name printouts unique, we calculate a unique 632 - * class->name_version generation counter: 629 + * class->name_version generation counter. The caller must hold the graph 630 + * lock. 633 631 */ 634 632 static int count_matching_names(struct lock_class *new_class) 635 633 { ··· 640 636 if (!new_class->name) 641 637 return 0; 642 638 643 - list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { 639 + list_for_each_entry(class, &all_lock_classes, lock_entry) { 644 640 if (new_class->key - new_class->subclass == class->key) 645 641 return class->name_version; 646 642 if (class->name && !strcmp(class->name, new_class->name)) ··· 793 789 class->key = key; 794 790 class->name = lock->name; 795 791 class->subclass = subclass; 796 - INIT_LIST_HEAD(&class->lock_entry); 797 792 INIT_LIST_HEAD(&class->locks_before); 798 793 INIT_LIST_HEAD(&class->locks_after); 799 794 class->name_version = count_matching_names(class); ··· 804 801 /* 805 802 * Add it to the global list of classes: 806 803 */ 807 - list_add_tail_rcu(&class->lock_entry, &all_lock_classes); 804 + list_add_tail(&class->lock_entry, &all_lock_classes); 808 805 809 806 if (verbose(class)) { 810 807 graph_unlock(); ··· 3091 3088 /* 3092 3089 * Initialize a lock instance's lock-class mapping info: 3093 3090 */ 3094 - static void __lockdep_init_map(struct lockdep_map *lock, const char *name, 3091 + void lockdep_init_map(struct lockdep_map *lock, const char *name, 3095 3092 struct lock_class_key *key, int subclass) 3096 3093 { 3097 3094 int i; ··· 3146 3143 current->lockdep_recursion = 0; 3147 3144 raw_local_irq_restore(flags); 3148 3145 } 
3149 - } 3150 - 3151 - void lockdep_init_map(struct lockdep_map *lock, const char *name, 3152 - struct lock_class_key *key, int subclass) 3153 - { 3154 - __lockdep_init_map(lock, name, key, subclass); 3155 3146 } 3156 3147 EXPORT_SYMBOL_GPL(lockdep_init_map); 3157 3148 ··· 4123 4126 raw_local_irq_restore(flags); 4124 4127 } 4125 4128 4129 + /* 4130 + * Remove all references to a lock class. The caller must hold the graph lock. 4131 + */ 4126 4132 static void zap_class(struct lock_class *class) 4127 4133 { 4128 4134 int i; ··· 4142 4142 * Unhash the class and remove it from the all_lock_classes list: 4143 4143 */ 4144 4144 hlist_del_rcu(&class->hash_entry); 4145 - list_del_rcu(&class->lock_entry); 4145 + list_del(&class->lock_entry); 4146 4146 4147 4147 RCU_INIT_POINTER(class->key, NULL); 4148 4148 RCU_INIT_POINTER(class->name, NULL); ··· 4204 4204 */ 4205 4205 } 4206 4206 4207 - void lockdep_reset_lock(struct lockdep_map *lock) 4207 + /* 4208 + * Check whether any element of the @lock->class_cache[] array refers to a 4209 + * registered lock class. The caller must hold either the graph lock or the 4210 + * RCU read lock. 
4211 + */ 4212 + static bool lock_class_cache_is_registered(struct lockdep_map *lock) 4208 4213 { 4209 4214 struct lock_class *class; 4210 4215 struct hlist_head *head; 4211 - unsigned long flags; 4212 4216 int i, j; 4213 - int locked; 4217 + 4218 + for (i = 0; i < CLASSHASH_SIZE; i++) { 4219 + head = classhash_table + i; 4220 + hlist_for_each_entry_rcu(class, head, hash_entry) { 4221 + for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 4222 + if (lock->class_cache[j] == class) 4223 + return true; 4224 + } 4225 + } 4226 + return false; 4227 + } 4228 + 4229 + void lockdep_reset_lock(struct lockdep_map *lock) 4230 + { 4231 + struct lock_class *class; 4232 + unsigned long flags; 4233 + int j, locked; 4214 4234 4215 4235 raw_local_irq_save(flags); 4236 + locked = graph_lock(); 4216 4237 4217 4238 /* 4218 4239 * Remove all classes this lock might have: ··· 4250 4229 * Debug check: in the end all mapped classes should 4251 4230 * be gone. 4252 4231 */ 4253 - locked = graph_lock(); 4254 - for (i = 0; i < CLASSHASH_SIZE; i++) { 4255 - head = classhash_table + i; 4256 - hlist_for_each_entry_rcu(class, head, hash_entry) { 4257 - int match = 0; 4258 - 4259 - for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) 4260 - match |= class == lock->class_cache[j]; 4261 - 4262 - if (unlikely(match)) { 4263 - if (debug_locks_off_graph_unlock()) { 4264 - /* 4265 - * We all just reset everything, how did it match? 4266 - */ 4267 - WARN_ON(1); 4268 - } 4269 - goto out_restore; 4270 - } 4232 + if (unlikely(lock_class_cache_is_registered(lock))) { 4233 + if (debug_locks_off_graph_unlock()) { 4234 + /* 4235 + * We all just reset everything, how did it match? 4236 + */ 4237 + WARN_ON(1); 4271 4238 } 4239 + goto out_restore; 4272 4240 } 4273 4241 if (locked) 4274 4242 graph_unlock();
+1 -3
kernel/sched/fair.c
··· 9533 9533 return false; 9534 9534 } 9535 9535 9536 - /* 9537 - * barrier, pairs with nohz_balance_enter_idle(), ensures ... 9538 - */ 9536 + /* could be _relaxed() */ 9539 9537 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu)); 9540 9538 if (!(flags & NOHZ_KICK_MASK)) 9541 9539 return false;
+1
tools/lib/lockdep/include/liblockdep/common.h
··· 44 44 struct lockdep_map *nest_lock, unsigned long ip); 45 45 void lock_release(struct lockdep_map *lock, int nested, 46 46 unsigned long ip); 47 + void lockdep_reset_lock(struct lockdep_map *lock); 47 48 extern void debug_check_no_locks_freed(const void *from, unsigned long len); 48 49 49 50 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+1
tools/lib/lockdep/include/liblockdep/mutex.h
··· 54 54 55 55 static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock) 56 56 { 57 + lockdep_reset_lock(&lock->dep_map); 57 58 return pthread_mutex_destroy(&lock->mutex); 58 59 } 59 60
+3 -3
tools/lib/lockdep/include/liblockdep/rwlock.h
··· 60 60 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; 61 61 } 62 62 63 - static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) 63 + static inline int liblockdep_pthread_rwlock_trywrlock(liblockdep_pthread_rwlock_t *lock) 64 64 { 65 65 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); 66 - return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0; 66 + return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0; 67 67 } 68 68 69 69 static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock) ··· 79 79 #define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock 80 80 #define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock 81 81 #define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock 82 - #define pthread_rwlock_trywlock liblockdep_pthread_rwlock_trywlock 82 + #define pthread_rwlock_trywrlock liblockdep_pthread_rwlock_trywrlock 83 83 #define pthread_rwlock_destroy liblockdep_rwlock_destroy 84 84 85 85 #endif
+5
tools/lib/lockdep/lockdep.c
··· 15 15 abort(); 16 16 } 17 17 18 + void print_irqtrace_events(struct task_struct *curr) 19 + { 20 + abort(); 21 + } 22 + 18 23 static struct new_utsname *init_utsname(void) 19 24 { 20 25 static struct new_utsname n = (struct new_utsname) {
+27 -12
tools/lib/lockdep/run_tests.sh
··· 1 1 #! /bin/bash 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 4 - make &> /dev/null 4 + if ! make >/dev/null; then 5 + echo "Building liblockdep failed." 6 + echo "FAILED!" 7 + exit 1 8 + fi 5 9 6 - for i in `ls tests/*.c`; do 10 + find tests -name '*.c' | sort | while read -r i; do 7 11 testname=$(basename "$i" .c) 8 - gcc -o tests/$testname -pthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null 9 12 echo -ne "$testname... " 10 - if [ $(timeout 1 ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then 13 + if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP && 14 + timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then 11 15 echo "PASSED!" 12 16 else 13 17 echo "FAILED!" 14 18 fi 15 - if [ -f "tests/$testname" ]; then 16 - rm tests/$testname 17 - fi 19 + rm -f "tests/$testname" 18 20 done 19 21 20 - for i in `ls tests/*.c`; do 22 + find tests -name '*.c' | sort | while read -r i; do 21 23 testname=$(basename "$i" .c) 22 - gcc -o tests/$testname -pthread -Iinclude $i &> /dev/null 23 24 echo -ne "(PRELOAD) $testname... " 24 - if [ $(timeout 1 ./lockdep ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then 25 + if gcc -o "tests/$testname" -pthread -Iinclude "$i" && 26 + timeout 1 ./lockdep "tests/$testname" 2>&1 | 27 + "tests/${testname}.sh"; then 25 28 echo "PASSED!" 26 29 else 27 30 echo "FAILED!" 28 31 fi 29 - if [ -f "tests/$testname" ]; then 30 - rm tests/$testname 32 + rm -f "tests/$testname" 33 + done 34 + 35 + find tests -name '*.c' | sort | while read -r i; do 36 + testname=$(basename "$i" .c) 37 + echo -ne "(PRELOAD + Valgrind) $testname... " 38 + if gcc -o "tests/$testname" -pthread -Iinclude "$i" && 39 + { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } && 40 + "tests/${testname}.sh" < "tests/${testname}.vg.out" && 41 + ! 
grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then 42 + echo "PASSED!" 43 + else 44 + echo "FAILED!" 31 45 fi 46 + rm -f "tests/$testname" 32 47 done
+2
tools/lib/lockdep/tests/AA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible recursive locking detected'
+2
tools/lib/lockdep/tests/ABA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible recursive locking detected'
+3
tools/lib/lockdep/tests/ABBA.c
··· 11 11 12 12 LOCK_UNLOCK_2(a, b); 13 13 LOCK_UNLOCK_2(b, a); 14 + 15 + pthread_mutex_destroy(&b); 16 + pthread_mutex_destroy(&a); 14 17 }
+2
tools/lib/lockdep/tests/ABBA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+2
tools/lib/lockdep/tests/ABBA_2threads.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+4
tools/lib/lockdep/tests/ABBCCA.c
··· 13 13 LOCK_UNLOCK_2(a, b); 14 14 LOCK_UNLOCK_2(b, c); 15 15 LOCK_UNLOCK_2(c, a); 16 + 17 + pthread_mutex_destroy(&c); 18 + pthread_mutex_destroy(&b); 19 + pthread_mutex_destroy(&a); 16 20 }
+2
tools/lib/lockdep/tests/ABBCCA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+5
tools/lib/lockdep/tests/ABBCCDDA.c
··· 15 15 LOCK_UNLOCK_2(b, c); 16 16 LOCK_UNLOCK_2(c, d); 17 17 LOCK_UNLOCK_2(d, a); 18 + 19 + pthread_mutex_destroy(&d); 20 + pthread_mutex_destroy(&c); 21 + pthread_mutex_destroy(&b); 22 + pthread_mutex_destroy(&a); 18 23 }
+2
tools/lib/lockdep/tests/ABBCCDDA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+4
tools/lib/lockdep/tests/ABCABC.c
··· 13 13 LOCK_UNLOCK_2(a, b); 14 14 LOCK_UNLOCK_2(c, a); 15 15 LOCK_UNLOCK_2(b, c); 16 + 17 + pthread_mutex_destroy(&c); 18 + pthread_mutex_destroy(&b); 19 + pthread_mutex_destroy(&a); 16 20 }
+2
tools/lib/lockdep/tests/ABCABC.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+5
tools/lib/lockdep/tests/ABCDBCDA.c
··· 15 15 LOCK_UNLOCK_2(c, d); 16 16 LOCK_UNLOCK_2(b, c); 17 17 LOCK_UNLOCK_2(d, a); 18 + 19 + pthread_mutex_destroy(&d); 20 + pthread_mutex_destroy(&c); 21 + pthread_mutex_destroy(&b); 22 + pthread_mutex_destroy(&a); 18 23 }
+2
tools/lib/lockdep/tests/ABCDBCDA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+5
tools/lib/lockdep/tests/ABCDBDDA.c
··· 15 15 LOCK_UNLOCK_2(c, d); 16 16 LOCK_UNLOCK_2(b, d); 17 17 LOCK_UNLOCK_2(d, a); 18 + 19 + pthread_mutex_destroy(&d); 20 + pthread_mutex_destroy(&c); 21 + pthread_mutex_destroy(&b); 22 + pthread_mutex_destroy(&a); 18 23 }
+2
tools/lib/lockdep/tests/ABCDBDDA.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible circular locking dependency detected'
+2
tools/lib/lockdep/tests/WW.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: possible recursive locking detected'
+2
tools/lib/lockdep/tests/unlock_balance.c
··· 10 10 pthread_mutex_lock(&a); 11 11 pthread_mutex_unlock(&a); 12 12 pthread_mutex_unlock(&a); 13 + 14 + pthread_mutex_destroy(&a); 13 15 }
+2
tools/lib/lockdep/tests/unlock_balance.sh
··· 1 + #!/bin/bash 2 + grep -q 'WARNING: bad unlock balance detected'