Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
"Two lockdep fixes"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Fix lock_chain::base size
locking/lockdep: Fix ->irq_context calculation

+41 -6
+5 -3
include/linux/lockdep.h
··· 196 196 * We record lock dependency chains, so that we can cache them: 197 197 */ 198 198 struct lock_chain { 199 - u8 irq_context; 200 - u8 depth; 201 - u16 base; 199 + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ 200 + unsigned int irq_context : 2, 201 + depth : 6, 202 + base : 24; 203 + /* 4 byte hole */ 202 204 struct hlist_node entry; 203 205 u64 chain_key; 204 206 };
+34 -3
kernel/locking/lockdep.c
··· 2176 2176 chain->irq_context = hlock->irq_context; 2177 2177 i = get_first_held_lock(curr, hlock); 2178 2178 chain->depth = curr->lockdep_depth + 1 - i; 2179 + 2180 + BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks)); 2181 + BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); 2182 + BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes)); 2183 + 2179 2184 if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) { 2180 2185 chain->base = nr_chain_hlocks; 2181 - nr_chain_hlocks += chain->depth; 2182 2186 for (j = 0; j < chain->depth - 1; j++, i++) { 2183 2187 int lock_id = curr->held_locks[i].class_idx - 1; 2184 2188 chain_hlocks[chain->base + j] = lock_id; 2185 2189 } 2186 2190 chain_hlocks[chain->base + j] = class - lock_classes; 2187 2191 } 2192 + 2193 + if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS) 2194 + nr_chain_hlocks += chain->depth; 2195 + 2196 + #ifdef CONFIG_DEBUG_LOCKDEP 2197 + /* 2198 + * Important for check_no_collision(). 2199 + */ 2200 + if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) { 2201 + if (debug_locks_off_graph_unlock()) 2202 + return 0; 2203 + 2204 + print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!"); 2205 + dump_stack(); 2206 + return 0; 2207 + } 2208 + #endif 2209 + 2188 2210 hlist_add_head_rcu(&chain->entry, hash_head); 2189 2211 debug_atomic_inc(chain_lookup_misses); 2190 2212 inc_chains(); ··· 2954 2932 return 1; 2955 2933 } 2956 2934 2935 + static inline unsigned int task_irq_context(struct task_struct *task) 2936 + { 2937 + return 2 * !!task->hardirq_context + !!task->softirq_context; 2938 + } 2939 + 2957 2940 static int separate_irq_context(struct task_struct *curr, 2958 2941 struct held_lock *hlock) 2959 2942 { ··· 2967 2940 /* 2968 2941 * Keep track of points where we cross into an interrupt context: 2969 2942 */ 2970 - hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2971 - curr->softirq_context; 2972 2943 if (depth) { 2973 2944 struct held_lock *prev_hlock; 2974 2945 ··· 2996 2971 struct held_lock *hlock) 2997 2972 { 2998 2973 return 1; 2974 + } 2975 + 2976 + static inline unsigned int task_irq_context(struct task_struct *task) 2977 + { 2978 + return 0; 2999 2979 } 3000 2980 3001 2981 static inline int separate_irq_context(struct task_struct *curr, ··· 3271 3241 hlock->acquire_ip = ip; 3272 3242 hlock->instance = lock; 3273 3243 hlock->nest_lock = nest_lock; 3244 + hlock->irq_context = task_irq_context(curr); 3274 3245 hlock->trylock = trylock; 3275 3246 hlock->read = read; 3276 3247 hlock->check = check;
+2
kernel/locking/lockdep_proc.c
··· 141 141 int i; 142 142 143 143 if (v == SEQ_START_TOKEN) { 144 + if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS) 145 + seq_printf(m, "(buggered) "); 144 146 seq_printf(m, "all lock chains:\n"); 145 147 return 0; 146 148 }