Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects fixes from Thomas Gleixner:
"Two fixes for debugobjects:

- Prevent the allocation path from waking up kswapd.

That's a long-standing issue due to the GFP_ATOMIC allocation flag.
As debug objects can be invoked from pretty much any context, waking
kswapd can end up in arbitrary lock chains versus the waitqueue
lock.

- Correct the explicit lockdep wait-type violation in
debug_object_fill_pool()"
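
For reference on the first fix: GFP_ATOMIC is defined as (__GFP_HIGH |
__GFP_KSWAPD_RECLAIM), so switching to a bare __GFP_HIGH keeps the
high-priority access to the atomic reserves while dropping the implicit
kswapd wakeup. A minimal sketch of the flag change (illustrative
kernel-style C with invented variable names, not a quote of the patch):

	/*
	 * Old flags: GFP_ATOMIC implies __GFP_KSWAPD_RECLAIM, so an allocation
	 * below the low watermark wakes kswapd and takes its waitqueue lock
	 * from whatever context the debugobjects caller happens to be in.
	 */
	gfp_t old_gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	/*
	 * New flags: keep access to the atomic reserves, never wake kswapd.
	 * __GFP_NORETRY is dropped as well; it has no effect for allocations
	 * that cannot enter direct reclaim.
	 */
	gfp_t new_gfp = __GFP_HIGH | __GFP_NOWARN;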

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
debugobjects: Don't wake up kswapd from fill_pool()
debugobjects,locking: Annotate debug_object_fill_pool() wait type violation

4 files changed, 50 insertions(+), 10 deletions(-)

include/linux/lockdep.h (+14)
···
 #define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
 #define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
+/*
+ * Must use lock_map_aquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map _name = {			\
+		.name = #_name "-wait-type-override",	\
+		.wait_type_inner = _wait_type,		\
+		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
···
 #define lockdep_pin_lock(l)			({ struct pin_cookie cookie = { }; cookie; })
 #define lockdep_repin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)		do { (void)(l); (void)(c); } while (0)
+
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map __maybe_unused _name = {}
 
 #endif /* !LOCKDEP */
 
···
 #define rwsem_release(l, i)			lock_release(l, i)
 
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)			lock_release(l, _THIS_IP_)
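
Note on the hunk above: DEFINE_WAIT_OVERRIDE_MAP() creates a pure annotation.
The map is not a real lock; while held it only overrides the effective wait
type, and it must be acquired with the new lock_map_acquire_try() so lockdep
never adds it to the dependency chain. A minimal usage sketch (mirroring the
lib/debugobjects.c hunk further below; the map name and callee here are
hypothetical):

	/* Annotation-only map: lock_type LD_LOCK_WAIT_OVERRIDE, inner wait type LD_WAIT_SLEEP. */
	static DEFINE_WAIT_OVERRIDE_MAP(my_override_map, LD_WAIT_SLEEP);

	/*
	 * Trylock variant: the map is recorded as held (so the wait-type check
	 * sees it) but creates no dependency edges, hence no false deadlock
	 * reports against real locks.
	 */
	lock_map_acquire_try(&my_override_map);
	do_something_that_takes_config_level_locks();	/* hypothetical callee */
	lock_map_release(&my_override_map);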

include/linux/lockdep_types.h (+1)
···
 enum lockdep_lock_type {
 	LD_LOCK_NORMAL = 0,	/* normal, catch all */
 	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
 	LD_LOCK_MAX,
 };
 

kernel/locking/lockdep.c (+21 -7)
···
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+	if (entry->class->lock_type == LD_LOCK_NORMAL)
+		return false;
+
 	/*
 	 * Skip local_lock() for irq inversion detection.
 	 *
···
 	 * As a result, we will skip local_lock(), when we search for irq
 	 * inversion bugs.
 	 */
-	if (entry->class->lock_type == LD_LOCK_PERCPU) {
-		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-			return false;
+	if (entry->class->lock_type == LD_LOCK_PERCPU &&
+	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+		return false;
 
-		return true;
-	}
+	/*
+	 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+	 * a lock and only used to override the wait_type.
+	 */
 
-	return false;
+	return true;
 }
 
 /*
···
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		u8 prev_inner = hlock_class(prev)->wait_type_inner;
+		struct lock_class *class = hlock_class(prev);
+		u8 prev_inner = class->wait_type_inner;
 
 		if (prev_inner) {
 			/*
···
 			 * Also due to trylocks.
 			 */
 			curr_inner = min(curr_inner, prev_inner);
+
+			/*
+			 * Allow override for annotations -- this is typically
+			 * only valid/needed for code that only exists when
+			 * CONFIG_PREEMPT_RT=n.
+			 */
+			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+				curr_inner = prev_inner;
 		}
 	}
 
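
To make the check_wait_context() change above concrete: the held-lock scan
clamps curr_inner to the strictest inner wait type seen so far, and a held
LD_LOCK_WAIT_OVERRIDE map, being the most recently acquired entry, then resets
curr_inner to its own wait type. A hypothetical !PREEMPT_RT sequence (lock and
map names invented for illustration):

	raw_spin_lock(&some_raw_lock);		/* clamps curr_inner to LD_WAIT_SPIN */

	/*
	 * Without an override, acquiring a spinlock_t (LD_WAIT_CONFIG) here
	 * would trigger print_lock_invalid_wait_context(), since
	 * LD_WAIT_CONFIG > LD_WAIT_SPIN.
	 */

	lock_map_acquire_try(&override_map);	/* LD_WAIT_SLEEP override, scanned last */
	spin_lock(&some_lock);			/* LD_WAIT_CONFIG <= LD_WAIT_SLEEP: accepted */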

lib/debugobjects.c (+14 -3)
···
 
 static void fill_pool(void)
 {
-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 	struct debug_obj *obj;
 	unsigned long flags;
 
···
 {
 	/*
 	 * On RT enabled kernels the pool refill must happen in preemptible
-	 * context:
+	 * context -- for !RT kernels we rely on the fact that spinlock_t and
+	 * raw_spinlock_t are basically the same type and this lock-type
+	 * inversion works just fine.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+		/*
+		 * Annotate away the spinlock_t inside raw_spinlock_t warning
+		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
+		 * the preemptible() condition above.
+		 */
+		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+		lock_map_acquire_try(&fill_pool_map);
 		fill_pool();
+		lock_map_release(&fill_pool_map);
+	}
 }
 
 static void