Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects fixes from Thomas Gleixner:
"Two fixes for debugobjects:

- Prevent the allocation path from waking up kswapd.

That's a long-standing issue due to the GFP_ATOMIC allocation flag.
As debug objects can be invoked from pretty much any context, waking
kswapd can end up in arbitrary lock chains versus the waitqueue
lock.

- Correct the explicit lockdep wait-type violation in
debug_object_fill_pool()"

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
debugobjects: Don't wake up kswapd from fill_pool()
debugobjects,locking: Annotate debug_object_fill_pool() wait type violation

Changed files
+50 -10
include
kernel
locking
lib
+14
include/linux/lockdep.h
··· 344 344 #define lockdep_repin_lock(l,c) lock_repin_lock(&(l)->dep_map, (c)) 345 345 #define lockdep_unpin_lock(l,c) lock_unpin_lock(&(l)->dep_map, (c)) 346 346 347 + /* 348 + * Must use lock_map_aquire_try() with override maps to avoid 349 + * lockdep thinking they participate in the block chain. 350 + */ 351 + #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ 352 + struct lockdep_map _name = { \ 353 + .name = #_name "-wait-type-override", \ 354 + .wait_type_inner = _wait_type, \ 355 + .lock_type = LD_LOCK_WAIT_OVERRIDE, } 356 + 347 357 #else /* !CONFIG_LOCKDEP */ 348 358 349 359 static inline void lockdep_init_task(struct task_struct *task) ··· 441 431 #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) 442 432 #define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0) 443 433 #define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0) 434 + 435 + #define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type) \ 436 + struct lockdep_map __maybe_unused _name = {} 444 437 445 438 #endif /* !LOCKDEP */ 446 439 ··· 569 556 #define rwsem_release(l, i) lock_release(l, i) 570 557 571 558 #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) 559 + #define lock_map_acquire_try(l) lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_) 572 560 #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) 573 561 #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) 574 562 #define lock_map_release(l) lock_release(l, _THIS_IP_)
+1
include/linux/lockdep_types.h
··· 33 33 enum lockdep_lock_type { 34 34 LD_LOCK_NORMAL = 0, /* normal, catch all */ 35 35 LD_LOCK_PERCPU, /* percpu */ 36 + LD_LOCK_WAIT_OVERRIDE, /* annotation */ 36 37 LD_LOCK_MAX, 37 38 }; 38 39
+21 -7
kernel/locking/lockdep.c
··· 2263 2263 2264 2264 static inline bool usage_skip(struct lock_list *entry, void *mask) 2265 2265 { 2266 + if (entry->class->lock_type == LD_LOCK_NORMAL) 2267 + return false; 2268 + 2266 2269 /* 2267 2270 * Skip local_lock() for irq inversion detection. 2268 2271 * ··· 2292 2289 * As a result, we will skip local_lock(), when we search for irq 2293 2290 * inversion bugs. 2294 2291 */ 2295 - if (entry->class->lock_type == LD_LOCK_PERCPU) { 2296 - if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) 2297 - return false; 2292 + if (entry->class->lock_type == LD_LOCK_PERCPU && 2293 + DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG)) 2294 + return false; 2298 2295 2299 - return true; 2300 - } 2296 + /* 2297 + * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually 2298 + * a lock and only used to override the wait_type. 2299 + */ 2301 2300 2302 - return false; 2301 + return true; 2303 2302 } 2304 2303 2305 2304 /* ··· 4773 4768 4774 4769 for (; depth < curr->lockdep_depth; depth++) { 4775 4770 struct held_lock *prev = curr->held_locks + depth; 4776 - u8 prev_inner = hlock_class(prev)->wait_type_inner; 4771 + struct lock_class *class = hlock_class(prev); 4772 + u8 prev_inner = class->wait_type_inner; 4777 4773 4778 4774 if (prev_inner) { 4779 4775 /* ··· 4784 4778 * Also due to trylocks. 4785 4779 */ 4786 4780 curr_inner = min(curr_inner, prev_inner); 4781 + 4782 + /* 4783 + * Allow override for annotations -- this is typically 4784 + * only valid/needed for code that only exists when 4785 + * CONFIG_PREEMPT_RT=n. 4786 + */ 4787 + if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE)) 4788 + curr_inner = prev_inner; 4787 4789 } 4788 4790 } 4789 4791
+14 -3
lib/debugobjects.c
··· 126 126 127 127 static void fill_pool(void) 128 128 { 129 - gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 129 + gfp_t gfp = __GFP_HIGH | __GFP_NOWARN; 130 130 struct debug_obj *obj; 131 131 unsigned long flags; 132 132 ··· 591 591 { 592 592 /* 593 593 * On RT enabled kernels the pool refill must happen in preemptible 594 - * context: 594 + * context -- for !RT kernels we rely on the fact that spinlock_t and 595 + * raw_spinlock_t are basically the same type and this lock-type 596 + * inversion works just fine. 595 597 */ 596 - if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) 598 + if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) { 599 + /* 600 + * Annotate away the spinlock_t inside raw_spinlock_t warning 601 + * by temporarily raising the wait-type to WAIT_SLEEP, matching 602 + * the preemptible() condition above. 603 + */ 604 + static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP); 605 + lock_map_acquire_try(&fill_pool_map); 597 606 fill_pool(); 607 + lock_map_release(&fill_pool_map); 608 + } 598 609 } 599 610 600 611 static void