kernel/lockdep.c

 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+        if (!hlock->class_idx) {
+                DEBUG_LOCKS_WARN_ON(1);
+                return NULL;
+        }
+        return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
···
         holdtime = sched_clock() - hlock->holdtime_stamp;

-        stats = get_lock_stats(hlock->class);
+        stats = get_lock_stats(hlock_class(hlock));
         if (hlock->read)
                 lock_time_inc(&stats->read_holdtime, holdtime);
         else
···
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;

+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+                                     unsigned int depth)
+{
+        if (!depth)
+                lockdep_dependency_gen_id++;
+        if (source->dep_gen_id == lockdep_dependency_gen_id)
+                return true;
+        source->dep_gen_id = lockdep_dependency_gen_id;
+        return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
···

 static void print_lock(struct held_lock *hlock)
 {
-        print_lock_name(hlock->class);
+        print_lock_name(hlock_class(hlock));
         printk(", at: ");
         print_ip_sym(hlock->acquire_ip);
 }
···
 static void print_lock_dependencies(struct lock_class *class, int depth)
 {
         struct lock_list *entry;
+
+        if (lockdep_dependency_visit(class, depth))
+                return;

         if (DEBUG_LOCKS_WARN_ON(depth >= 20))
                 return;
···
         if (debug_locks_silent)
                 return 0;

-        this.class = check_source->class;
+        this.class = hlock_class(check_source);
         if (!save_trace(&this.trace))
                 return 0;
···
                 return 0;
 }

+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+                                           unsigned int depth)
+{
+        struct lock_list *entry;
+        unsigned long ret = 1;
+
+        if (lockdep_dependency_visit(class, depth))
+                return 0;
+
+        /*
+         * Recurse this class's dependency list:
+         */
+        list_for_each_entry(entry, &class->locks_after, entry)
+                ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+        return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+        unsigned long ret, flags;
+
+        local_irq_save(flags);
+        __raw_spin_lock(&lockdep_lock);
+        ret = __lockdep_count_forward_deps(class, 0);
+        __raw_spin_unlock(&lockdep_lock);
+        local_irq_restore(flags);
+
+        return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+                                            unsigned int depth)
+{
+        struct lock_list *entry;
+        unsigned long ret = 1;
+
+        if (lockdep_dependency_visit(class, depth))
+                return 0;
+        /*
+         * Recurse this class's dependency list:
+         */
+        list_for_each_entry(entry, &class->locks_before, entry)
+                ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+        return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+        unsigned long ret, flags;
+
+        local_irq_save(flags);
+        __raw_spin_lock(&lockdep_lock);
+        ret = __lockdep_count_backward_deps(class, 0);
+        __raw_spin_unlock(&lockdep_lock);
+        local_irq_restore(flags);
+
+        return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
···
 check_noncircular(struct lock_class *source, unsigned int depth)
 {
         struct lock_list *entry;
+
+        if (lockdep_dependency_visit(source, depth))
+                return 1;

         debug_atomic_inc(&nr_cyclic_check_recursions);
         if (depth > max_recursion_depth)
···
          * Check this lock's dependency list:
          */
         list_for_each_entry(entry, &source->locks_after, entry) {
-                if (entry->class == check_target->class)
+                if (entry->class == hlock_class(check_target))
                         return print_circular_bug_header(entry, depth+1);
                 debug_atomic_inc(&nr_cyclic_checks);
                 if (!check_noncircular(entry->class, depth+1))
···
 {
         struct lock_list *entry;
         int ret;
+
+        if (lockdep_dependency_visit(source, depth))
+                return 1;

         if (depth > max_recursion_depth)
                 max_recursion_depth = depth;
···
         struct lock_list *entry;
         int ret;

+        if (lockdep_dependency_visit(source, depth))
+                return 1;
+
         if (!__raw_spin_is_locked(&lockdep_lock))
                 return DEBUG_LOCKS_WARN_ON(1);
···
         if (source->usage_mask & (1 << find_usage_bit)) {
                 backwards_match = source;
                 return 2;
+        }
+
+        if (!source && debug_locks_off_graph_unlock()) {
+                WARN_ON(1);
+                return 0;
         }

         /*
···
         printk("\nand this task is already holding:\n");
         print_lock(prev);
         printk("which would create a new lock dependency:\n");
-        print_lock_name(prev->class);
+        print_lock_name(hlock_class(prev));
         printk(" ->");
-        print_lock_name(next->class);
+        print_lock_name(hlock_class(next));
         printk("\n");

         printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
···

         find_usage_bit = bit_backwards;
         /* fills in <backwards_match> */
-        ret = find_usage_backwards(prev->class, 0);
+        ret = find_usage_backwards(hlock_class(prev), 0);
         if (!ret || ret == 1)
                 return ret;

         find_usage_bit = bit_forwards;
-        ret = find_usage_forwards(next->class, 0);
+        ret = find_usage_forwards(hlock_class(next), 0);
         if (!ret || ret == 1)
                 return ret;
         /* ret == 2 */
···
                struct lockdep_map *next_instance, int read)
 {
         struct held_lock *prev;
+        struct held_lock *nest = NULL;
         int i;

         for (i = 0; i < curr->lockdep_depth; i++) {
                 prev = curr->held_locks + i;
-                if (prev->class != next->class)
+
+                if (prev->instance == next->nest_lock)
+                        nest = prev;
+
+                if (hlock_class(prev) != hlock_class(next))
                         continue;
+
                 /*
                  * Allow read-after-read recursion of the same
                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
                  */
                 if ((read == 2) && prev->read)
                         return 2;
+
+                /*
+                 * We're holding the nest_lock, which serializes this lock's
+                 * nesting behaviour.
+                 */
+                if (nest)
+                        return 2;
+
                 return print_deadlock_bug(curr, prev, next);
         }
         return 1;
···
          */
         check_source = next;
         check_target = prev;
-        if (!(check_noncircular(next->class, 0)))
+        if (!(check_noncircular(hlock_class(next), 0)))
                 return print_circular_bug_tail();

         if (!check_prev_add_irq(curr, prev, next))
···
          * chains - the second one will be new, but L1 already has
          * L2 added to its dependency list, due to the first chain.)
          */
-        list_for_each_entry(entry, &prev->class->locks_after, entry) {
-                if (entry->class == next->class) {
+        list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+                if (entry->class == hlock_class(next)) {
                         if (distance == 1)
                                 entry->distance = 1;
                         return 2;
···
          * Ok, all validations passed, add the new lock
          * to the previous lock's dependency list:
          */
-        ret = add_lock_to_list(prev->class, next->class,
-                               &prev->class->locks_after, next->acquire_ip, distance);
+        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+                               &hlock_class(prev)->locks_after,
+                               next->acquire_ip, distance);

         if (!ret)
                 return 0;

-        ret = add_lock_to_list(next->class, prev->class,
-                               &next->class->locks_before, next->acquire_ip, distance);
+        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+                               &hlock_class(next)->locks_before,
+                               next->acquire_ip, distance);
         if (!ret)
                 return 0;

         /*
          * Debugging printouts:
          */
-        if (verbose(prev->class) || verbose(next->class)) {
+        if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
                 graph_unlock();
                 printk("\n new dependency: ");
-                print_lock_name(prev->class);
+                print_lock_name(hlock_class(prev));
                 printk(" => ");
-                print_lock_name(next->class);
+                print_lock_name(hlock_class(next));
                 printk("\n");
                 dump_stack();
                 return graph_lock();
···
                                      struct held_lock *hlock,
                                      u64 chain_key)
 {
-        struct lock_class *class = hlock->class;
+        struct lock_class *class = hlock_class(hlock);
         struct list_head *hash_head = chainhashentry(chain_key);
         struct lock_chain *chain;
         struct held_lock *hlock_curr, *hlock_next;
···
         if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
                 chain->base = cn;
                 for (j = 0; j < chain->depth - 1; j++, i++) {
-                        int lock_id = curr->held_locks[i].class - lock_classes;
+                        int lock_id = curr->held_locks[i].class_idx - 1;
                         chain_hlocks[chain->base + j] = lock_id;
                 }
                 chain_hlocks[chain->base + j] = class - lock_classes;
···
                 WARN_ON(1);
                 return;
         }
-        id = hlock->class - lock_classes;
+        id = hlock->class_idx - 1;
         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
                 return;
···
         print_lock(this);

         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-        print_stack_trace(this->class->usage_traces + prev_bit, 1);
+        print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

         print_irqtrace_events(curr);
         printk("\nother info that might help us debug this:\n");
···
 valid_state(struct task_struct *curr, struct held_lock *this,
             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-        if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+        if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
                 return print_usage_bug(curr, this, bad_bit, new_bit);
         return 1;
 }
···
         lockdep_print_held_locks(curr);

         printk("\nthe first lock's dependencies:\n");
-        print_lock_dependencies(this->class, 0);
+        print_lock_dependencies(hlock_class(this), 0);

         printk("\nthe second lock's dependencies:\n");
         print_lock_dependencies(other, 0);
···

         find_usage_bit = bit;
         /* fills in <forwards_match> */
-        ret = find_usage_forwards(this->class, 0);
+        ret = find_usage_forwards(hlock_class(this), 0);
         if (!ret || ret == 1)
                 return ret;
···

         find_usage_bit = bit;
         /* fills in <backwards_match> */
-        ret = find_usage_backwards(this->class, 0);
+        ret = find_usage_backwards(hlock_class(this), 0);
         if (!ret || ret == 1)
                 return ret;
···
                                LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
                         return 0;
 #endif
-                if (hardirq_verbose(this->class))
+                if (hardirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_USED_IN_SOFTIRQ:
···
                                LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
                         return 0;
 #endif
-                if (softirq_verbose(this->class))
+                if (softirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_USED_IN_HARDIRQ_READ:
···
                 if (!check_usage_forwards(curr, this,
                                           LOCK_ENABLED_HARDIRQS, "hard"))
                         return 0;
-                if (hardirq_verbose(this->class))
+                if (hardirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_USED_IN_SOFTIRQ_READ:
···
                 if (!check_usage_forwards(curr, this,
                                           LOCK_ENABLED_SOFTIRQS, "soft"))
                         return 0;
-                if (softirq_verbose(this->class))
+                if (softirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_ENABLED_HARDIRQS:
···
                                LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
                         return 0;
 #endif
-                if (hardirq_verbose(this->class))
+                if (hardirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_ENABLED_SOFTIRQS:
···
                                LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
                         return 0;
 #endif
-                if (softirq_verbose(this->class))
+                if (softirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_ENABLED_HARDIRQS_READ:
···
                                LOCK_USED_IN_HARDIRQ, "hard"))
                         return 0;
 #endif
-                if (hardirq_verbose(this->class))
+                if (hardirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         case LOCK_ENABLED_SOFTIRQS_READ:
···
                                LOCK_USED_IN_SOFTIRQ, "soft"))
                         return 0;
 #endif
-                if (softirq_verbose(this->class))
+                if (softirq_verbose(hlock_class(this)))
                         ret = 2;
                 break;
         default:
···
          * If already set then do not dirty the cacheline,
          * nor do any checks:
          */
-        if (likely(this->class->usage_mask & new_mask))
+        if (likely(hlock_class(this)->usage_mask & new_mask))
                 return 1;

         if (!graph_lock())
···
         /*
          * Make sure we didnt race:
          */
-        if (unlikely(this->class->usage_mask & new_mask)) {
+        if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
                 graph_unlock();
                 return 1;
         }

-        this->class->usage_mask |= new_mask;
+        hlock_class(this)->usage_mask |= new_mask;

-        if (!save_trace(this->class->usage_traces + new_bit))
+        if (!save_trace(hlock_class(this)->usage_traces + new_bit))
                 return 0;

         switch (new_bit) {
···
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                           int trylock, int read, int check, int hardirqs_off,
-                          unsigned long ip)
+                          struct lockdep_map *nest_lock, unsigned long ip)
 {
         struct task_struct *curr = current;
         struct lock_class *class = NULL;
···
                 return 0;

         hlock = curr->held_locks + depth;
-
-        hlock->class = class;
+        if (DEBUG_LOCKS_WARN_ON(!class))
+                return 0;
+        hlock->class_idx = class - lock_classes + 1;
         hlock->acquire_ip = ip;
         hlock->instance = lock;
+        hlock->nest_lock = nest_lock;
         hlock->trylock = trylock;
         hlock->read = read;
         hlock->check = check;
···
         return 1;
 }

+static int
+__lock_set_subclass(struct lockdep_map *lock,
+                    unsigned int subclass, unsigned long ip)
+{
+        struct task_struct *curr = current;
+        struct held_lock *hlock, *prev_hlock;
+        struct lock_class *class;
+        unsigned int depth;
+        int i;
+
+        depth = curr->lockdep_depth;
+        if (DEBUG_LOCKS_WARN_ON(!depth))
+                return 0;
+
+        prev_hlock = NULL;
+        for (i = depth-1; i >= 0; i--) {
+                hlock = curr->held_locks + i;
+                /*
+                 * We must not cross into another context:
+                 */
+                if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+                        break;
+                if (hlock->instance == lock)
+                        goto found_it;
+                prev_hlock = hlock;
+        }
+        return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+        class = register_lock_class(lock, subclass, 0);
+        hlock->class_idx = class - lock_classes + 1;
+
+        curr->lockdep_depth = i;
+        curr->curr_chain_key = hlock->prev_chain_key;
+
+        for (; i < depth; i++) {
+                hlock = curr->held_locks + i;
+                if (!__lock_acquire(hlock->instance,
+                        hlock_class(hlock)->subclass, hlock->trylock,
+                                hlock->read, hlock->check, hlock->hardirqs_off,
+                                hlock->nest_lock, hlock->acquire_ip))
+                        return 0;
+        }
+
+        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+                return 0;
+        return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
···
         for (i++; i < depth; i++) {
                 hlock = curr->held_locks + i;
                 if (!__lock_acquire(hlock->instance,
-                                hlock->class->subclass, hlock->trylock,
+                        hlock_class(hlock)->subclass, hlock->trylock,
                                 hlock->read, hlock->check, hlock->hardirqs_off,
-                                hlock->acquire_ip))
+                                hlock->nest_lock, hlock->acquire_ip))
                         return 0;
         }
···

 #ifdef CONFIG_DEBUG_LOCKDEP
         hlock->prev_chain_key = 0;
-        hlock->class = NULL;
+        hlock->class_idx = 0;
         hlock->acquire_ip = 0;
         hlock->irq_context = 0;
 #endif
···
 #endif
 }

+void
+lock_set_subclass(struct lockdep_map *lock,
+                  unsigned int subclass, unsigned long ip)
+{
+        unsigned long flags;
+
+        if (unlikely(current->lockdep_recursion))
+                return;
+
+        raw_local_irq_save(flags);
+        current->lockdep_recursion = 1;
+        check_flags(flags);
+        if (__lock_set_subclass(lock, subclass, ip))
+                check_chain_key(current);
+        current->lockdep_recursion = 0;
+        raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                  int trylock, int read, int check, unsigned long ip)
+                  int trylock, int read, int check,
+                  struct lockdep_map *nest_lock, unsigned long ip)
 {
         unsigned long flags;
-
-        if (unlikely(!lock_stat && !prove_locking))
-                return;

         if (unlikely(current->lockdep_recursion))
                 return;
···

         current->lockdep_recursion = 1;
         __lock_acquire(lock, subclass, trylock, read, check,
-                       irqs_disabled_flags(flags), ip);
+                       irqs_disabled_flags(flags), nest_lock, ip);
         current->lockdep_recursion = 0;
         raw_local_irq_restore(flags);
 }
···
                   unsigned long ip)
 {
         unsigned long flags;
-
-        if (unlikely(!lock_stat && !prove_locking))
-                return;

         if (unlikely(current->lockdep_recursion))
                 return;
···
 found_it:
         hlock->waittime_stamp = sched_clock();

-        point = lock_contention_point(hlock->class, ip);
+        point = lock_contention_point(hlock_class(hlock), ip);

-        stats = get_lock_stats(hlock->class);
+        stats = get_lock_stats(hlock_class(hlock));
         if (point < ARRAY_SIZE(stats->contention_point))
                 stats->contention_point[i]++;
         if (lock->cpu != smp_processor_id())
···
                 hlock->holdtime_stamp = now;
         }

-        stats = get_lock_stats(hlock->class);
+        stats = get_lock_stats(hlock_class(hlock));
         if (waittime) {
                 if (hlock->read)
                         lock_time_inc(&stats->read_waittime, waittime);
···
         list_del_rcu(&class->hash_entry);
         list_del_rcu(&class->lock_entry);

+        class->key = NULL;
 }

 static inline int within(const void *addr, void *start, unsigned long size)
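Two ideas in the kernel/lockdep.c hunks above are worth calling out. First, held_lock no longer stores a lock_class pointer: __lock_acquire() records class - lock_classes + 1 in class_idx, so the value 0 can mean "no class set" and hlock_class() can warn on it before translating the index back into a pointer. Second, every recursive walk of the dependency graph (check_noncircular(), find_usage_forwards()/find_usage_backwards(), print_lock_dependencies(), and the new __lockdep_count_*_deps()) now starts with lockdep_dependency_visit(), which stamps each lock_class with a per-walk generation id so shared sub-graphs are expanded only once. Below is a minimal user-space sketch of that generation-counter pruning; struct node and its names are invented for illustration, not taken from the kernel:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct lock_class: a node with forward edges. */
struct node {
        const char *name;
        unsigned int dep_gen_id;        /* last generation this node was visited in */
        struct node *after[4];          /* NULL-terminated dependency list */
};

static unsigned int dependency_gen_id;

/* Same idea as lockdep_dependency_visit(): bump the generation once per
 * top-level walk, and skip any node already stamped with it. */
static bool dependency_visit(struct node *n, unsigned int depth)
{
        if (!depth)
                dependency_gen_id++;
        if (n->dep_gen_id == dependency_gen_id)
                return true;
        n->dep_gen_id = dependency_gen_id;
        return false;
}

/* Counts each reachable node exactly once, like __lockdep_count_forward_deps(). */
static unsigned long count_forward(struct node *n, unsigned int depth)
{
        unsigned long ret = 1;

        if (dependency_visit(n, depth))
                return 0;
        for (int i = 0; n->after[i]; i++)
                ret += count_forward(n->after[i], depth + 1);
        return ret;
}

int main(void)
{
        /* Diamond: A -> B, A -> C, B -> D, C -> D.  Without the generation
         * check, D would be expanded (and counted) twice. */
        struct node d = { "D", 0, { NULL } };
        struct node b = { "B", 0, { &d, NULL } };
        struct node c = { "C", 0, { &d, NULL } };
        struct node a = { "A", 0, { &b, &c, NULL } };

        printf("forward deps of A: %lu\n", count_forward(&a, 0)); /* prints 4 */
        return 0;
}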
kernel/lockdep_internals.h (+3 -3)

  */
 #define MAX_LOCKDEP_ENTRIES        8192UL

-#define MAX_LOCKDEP_KEYS_BITS        11
-#define MAX_LOCKDEP_KEYS        (1UL << MAX_LOCKDEP_KEYS_BITS)
-
 #define MAX_LOCKDEP_CHAINS_BITS        14
 #define MAX_LOCKDEP_CHAINS        (1UL << MAX_LOCKDEP_CHAINS_BITS)
···
 extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
+
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);

 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
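Note that MAX_LOCKDEP_KEYS_BITS/MAX_LOCKDEP_KEYS only leave this private header, not the design: the lockdep.c hunks above still size lock_classes[MAX_LOCKDEP_KEYS] and bounds-check class_idx against MAX_LOCKDEP_KEYS, so the constants presumably move to the public lockdep header, where struct held_lock can size its new class_idx bitfield. A hedged sketch of the kind of layout that implies (the field widths and the key count are illustrative, not taken from this excerpt):

/* Illustrative sketch only -- not the include/linux/lockdep.h hunk itself. */
#define MAX_LOCKDEP_KEYS_BITS        13
#define MAX_LOCKDEP_KEYS        (1UL << MAX_LOCKDEP_KEYS_BITS)

struct held_lock {
        u64                        prev_chain_key;
        unsigned long                acquire_ip;
        struct lockdep_map        *instance;
        struct lockdep_map        *nest_lock;        /* new: vouches for same-class nesting */
#ifdef CONFIG_LOCK_STAT
        u64                        waittime_stamp;
        u64                        holdtime_stamp;
#endif
        /*
         * class_idx is an index into lock_classes[] plus one, so that 0 can
         * mean "no class"; storing a small index instead of a lock_class
         * pointer is what shrinks held_lock.
         */
        unsigned int                class_idx:MAX_LOCKDEP_KEYS_BITS;
        unsigned int                irq_context:2;
        unsigned int                trylock:1;
        unsigned int                read:2;
        unsigned int                check:2;
        unsigned int                hardirqs_off:1;
};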
kernel/lockdep_proc.c (+6 -31)

 {
 }

-static unsigned long count_forward_deps(struct lock_class *class)
-{
-        struct lock_list *entry;
-        unsigned long ret = 1;
-
-        /*
-         * Recurse this class's dependency list:
-         */
-        list_for_each_entry(entry, &class->locks_after, entry)
-                ret += count_forward_deps(entry->class);
-
-        return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-        struct lock_list *entry;
-        unsigned long ret = 1;
-
-        /*
-         * Recurse this class's dependency list:
-         */
-        list_for_each_entry(entry, &class->locks_before, entry)
-                ret += count_backward_deps(entry->class);
-
-        return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
         char str[128];
···
 #ifdef CONFIG_DEBUG_LOCKDEP
         seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-        nr_forward_deps = count_forward_deps(class);
         seq_printf(m, " FD:%5ld", nr_forward_deps);

-        nr_backward_deps = count_backward_deps(class);
         seq_printf(m, " BD:%5ld", nr_backward_deps);

         get_usage_chars(class, &c1, &c2, &c3, &c4);
···

         for (i = 0; i < chain->depth; i++) {
                 class = lock_chain_get_class(chain, i);
                 seq_printf(m, "[%p] ", class->key);
                 print_name(m, class);
                 seq_puts(m, "\n");
···
                 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
                         nr_hardirq_read_unsafe++;

-                sum_forward_deps += count_forward_deps(class);
         }
 #ifdef CONFIG_DEBUG_LOCKDEP
         DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
kernel/sched_rt.c

 #define RT_MAX_TRIES 3

 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
···
                         break;

                 /* try again */
-                spin_unlock(&lowest_rq->lock);
+                double_unlock_balance(rq, lowest_rq);
                 lowest_rq = NULL;
         }
···

         resched_task(lowest_rq->curr);

-        spin_unlock(&lowest_rq->lock);
+        double_unlock_balance(rq, lowest_rq);

         ret = 1;
 out:
···
         }
  skip:
-                spin_unlock(&src_rq->lock);
+                double_unlock_balance(this_rq, src_rq);
         }

         return ret;
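The sched_rt.c hunks only declare double_unlock_balance() and convert the call sites; its body is not part of this excerpt. A hedged sketch of what such a helper would plausibly look like, assuming its job is to undo double_lock_balance() and to reset the lockdep subclass that double_lock_balance() may have used on this_rq->lock (lock_set_subclass() is the new annotation added in kernel/lockdep.c above):

/* Hedged sketch -- the real body is not shown in this excerpt. */
static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
{
        /* Drop the remote runqueue lock taken by double_lock_balance()... */
        spin_unlock(&busiest->lock);
        /*
         * ...and re-annotate this_rq->lock as subclass 0, in case
         * double_lock_balance() had to re-acquire it in a nested subclass
         * while avoiding an ABBA deadlock.
         */
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}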
kernel/spinlock.c (+11)

 }

 EXPORT_SYMBOL(_spin_lock_nested);
+
 unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 {
         unsigned long flags;
···
 }

 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
+
+void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
+                                     struct lockdep_map *nest_lock)
+{
+        preempt_disable();
+        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nest_lock);

 #endif
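_spin_lock_nest_lock() is the out-of-line helper behind the spin_lock_nest_lock() annotation that mm/mmap.c uses below; together with the nest_lock plumbing through lock_acquire()/__lock_acquire() and the "if (nest) return 2;" test in check_deadlock(), it lets one task hold many locks of the same class as long as it also holds the designated outer lock. A hedged usage sketch with invented names (the bucket table and table_lock are not from this patch, only the locking pattern matters):

/* Hedged sketch: invented data structure, illustrating the nest-lock pattern. */
struct bucket {
        spinlock_t lock;
};

struct table {
        struct mutex table_lock;        /* outer lock: serializes bulk locking */
        struct bucket buckets[8];
};

static void table_lock_all(struct table *t)
{
        int i;

        mutex_lock(&t->table_lock);
        /*
         * All bucket locks share one lock class; without the nest-lock
         * annotation lockdep would flag the second acquisition as a possible
         * recursive deadlock.  Holding table_lock vouches that no two tasks
         * can interleave here, just as mmap_sem does for mm_take_all_locks()
         * in the mm/mmap.c hunks below.
         */
        for (i = 0; i < 8; i++)
                spin_lock_nest_lock(&t->buckets[i].lock, &t->table_lock);
}

static void table_unlock_all(struct table *t)
{
        int i;

        for (i = 7; i >= 0; i--)
                spin_unlock(&t->buckets[i].lock);
        mutex_unlock(&t->table_lock);
}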
lib/debug_locks.c

  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/kernel.h>
 #include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
···
 {
         if (xchg(&debug_locks, 0)) {
                 if (!debug_locks_silent) {
+                        oops_in_progress = 1;
                         console_verbose();
                         return 1;
                 }
mm/mmap.c (+13 -7)


 static DEFINE_MUTEX(mm_all_locks_mutex);

-static void vm_lock_anon_vma(struct anon_vma *anon_vma)
+static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
         if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
                 /*
                  * The LSB of head.next can't change from under us
                  * because we hold the mm_all_locks_mutex.
                  */
-                spin_lock(&anon_vma->lock);
+                spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
                 /*
                  * We can safely modify head.next after taking the
                  * anon_vma->lock. If some other vma in this mm shares
···
         }
 }

-static void vm_lock_mapping(struct address_space *mapping)
+static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 {
         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
                 /*
···
                  */
                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                         BUG();
-                spin_lock(&mapping->i_mmap_lock);
+                spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
         }
 }
···
         for (vma = mm->mmap; vma; vma = vma->vm_next) {
                 if (signal_pending(current))
                         goto out_unlock;
-                if (vma->anon_vma)
-                        vm_lock_anon_vma(vma->anon_vma);
                 if (vma->vm_file && vma->vm_file->f_mapping)
-                        vm_lock_mapping(vma->vm_file->f_mapping);
+                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
         }
+
+        for (vma = mm->mmap; vma; vma = vma->vm_next) {
+                if (signal_pending(current))
+                        goto out_unlock;
+                if (vma->anon_vma)
+                        vm_lock_anon_vma(mm, vma->anon_vma);
+        }
+
         ret = 0;

 out_unlock: