Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/lockdep: Remove unused @nested argument from lock_release()

Since the following commit:

b4adfe8e05f1 ("locking/lockdep: Remove unused argument in __lock_release")

@nested is no longer used in lock_release(), so remove it from all
lock_release() calls and friends.

Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: airlied@linux.ie
Cc: akpm@linux-foundation.org
Cc: alexander.levin@microsoft.com
Cc: daniel@iogearbox.net
Cc: davem@davemloft.net
Cc: dri-devel@lists.freedesktop.org
Cc: duyuyang@gmail.com
Cc: gregkh@linuxfoundation.org
Cc: hannes@cmpxchg.org
Cc: intel-gfx@lists.freedesktop.org
Cc: jack@suse.com
Cc: jlbec@evilplan.org
Cc: joonas.lahtinen@linux.intel.com
Cc: joseph.qi@linux.alibaba.com
Cc: jslaby@suse.com
Cc: juri.lelli@redhat.com
Cc: maarten.lankhorst@linux.intel.com
Cc: mark@fasheh.com
Cc: mhocko@kernel.org
Cc: mripard@kernel.org
Cc: ocfs2-devel@oss.oracle.com
Cc: rodrigo.vivi@intel.com
Cc: sean@poorly.run
Cc: st@kernel.org
Cc: tj@kernel.org
Cc: tytso@mit.edu
Cc: vdavydov.dev@gmail.com
Cc: vincent.guittot@linaro.org
Cc: viro@zeniv.linux.org.uk
Link: https://lkml.kernel.org/r/1568909380-32199-1-git-send-email-cai@lca.pw
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Qian Cai and committed by Ingo Molnar
5facae4f e950cca3

+90 -93
+1 -1
drivers/gpu/drm/drm_connector.c
··· 719 719 __drm_connector_put_safe(iter->conn); 720 720 spin_unlock_irqrestore(&config->connector_list_lock, flags); 721 721 } 722 - lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); 722 + lock_release(&connector_list_iter_dep_map, _RET_IP_); 723 723 } 724 724 EXPORT_SYMBOL(drm_connector_list_iter_end); 725 725
+3 -3
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
··· 509 509 I915_MM_SHRINKER, 0, _RET_IP_); 510 510 511 511 mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); 512 - mutex_release(&mutex->dep_map, 0, _RET_IP_); 512 + mutex_release(&mutex->dep_map, _RET_IP_); 513 513 514 - mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); 514 + mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_); 515 515 516 516 fs_reclaim_release(GFP_KERNEL); 517 517 518 518 if (unlock) 519 - mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_); 519 + mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_); 520 520 } 521 521 522 522 #define obj_to_i915(obj__) to_i915((obj__)->base.dev)
+1 -1
drivers/gpu/drm/i915/gt/intel_engine_pm.c
··· 52 52 static inline void __timeline_mark_unlock(struct intel_context *ce, 53 53 unsigned long flags) 54 54 { 55 - mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_); 55 + mutex_release(&ce->timeline->mutex.dep_map, _THIS_IP_); 56 56 local_irq_restore(flags); 57 57 } 58 58
+1 -1
drivers/gpu/drm/i915/i915_request.c
··· 1456 1456 dma_fence_remove_callback(&rq->fence, &wait.cb); 1457 1457 1458 1458 out: 1459 - mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_); 1459 + mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_); 1460 1460 trace_i915_request_wait_end(rq); 1461 1461 return timeout; 1462 1462 }
+4 -4
drivers/tty/tty_ldsem.c
··· 303 303 if (count <= 0) { 304 304 lock_contended(&sem->dep_map, _RET_IP_); 305 305 if (!down_read_failed(sem, count, timeout)) { 306 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 306 + rwsem_release(&sem->dep_map, _RET_IP_); 307 307 return 0; 308 308 } 309 309 } ··· 322 322 if ((count & LDSEM_ACTIVE_MASK) != LDSEM_ACTIVE_BIAS) { 323 323 lock_contended(&sem->dep_map, _RET_IP_); 324 324 if (!down_write_failed(sem, count, timeout)) { 325 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 325 + rwsem_release(&sem->dep_map, _RET_IP_); 326 326 return 0; 327 327 } 328 328 } ··· 390 390 { 391 391 long count; 392 392 393 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 393 + rwsem_release(&sem->dep_map, _RET_IP_); 394 394 395 395 count = atomic_long_add_return(-LDSEM_READ_BIAS, &sem->count); 396 396 if (count < 0 && (count & LDSEM_ACTIVE_MASK) == 0) ··· 404 404 { 405 405 long count; 406 406 407 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 407 + rwsem_release(&sem->dep_map, _RET_IP_); 408 408 409 409 count = atomic_long_add_return(-LDSEM_WRITE_BIAS, &sem->count); 410 410 if (count < 0)
+1 -1
fs/dcache.c
··· 1319 1319 1320 1320 if (!list_empty(&dentry->d_subdirs)) { 1321 1321 spin_unlock(&this_parent->d_lock); 1322 - spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); 1322 + spin_release(&dentry->d_lock.dep_map, _RET_IP_); 1323 1323 this_parent = dentry; 1324 1324 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); 1325 1325 goto repeat;
+2 -2
fs/jbd2/transaction.c
··· 713 713 if (need_to_start) 714 714 jbd2_log_start_commit(journal, tid); 715 715 716 - rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 716 + rwsem_release(&journal->j_trans_commit_map, _THIS_IP_); 717 717 handle->h_buffer_credits = nblocks; 718 718 /* 719 719 * Restore the original nofs context because the journal restart ··· 1848 1848 wake_up(&journal->j_wait_transaction_locked); 1849 1849 } 1850 1850 1851 - rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_); 1851 + rwsem_release(&journal->j_trans_commit_map, _THIS_IP_); 1852 1852 1853 1853 if (wait_for_commit) 1854 1854 err = jbd2_log_wait_commit(journal, tid);
+2 -2
fs/kernfs/dir.c
··· 438 438 return; 439 439 440 440 if (kernfs_lockdep(kn)) 441 - rwsem_release(&kn->dep_map, 1, _RET_IP_); 441 + rwsem_release(&kn->dep_map, _RET_IP_); 442 442 v = atomic_dec_return(&kn->active); 443 443 if (likely(v != KN_DEACTIVATED_BIAS)) 444 444 return; ··· 476 476 477 477 if (kernfs_lockdep(kn)) { 478 478 lock_acquired(&kn->dep_map, _RET_IP_); 479 - rwsem_release(&kn->dep_map, 1, _RET_IP_); 479 + rwsem_release(&kn->dep_map, _RET_IP_); 480 480 } 481 481 482 482 kernfs_drain_open_files(kn);
+1 -1
fs/ocfs2/dlmglue.c
··· 1687 1687 spin_unlock_irqrestore(&lockres->l_lock, flags); 1688 1688 #ifdef CONFIG_DEBUG_LOCK_ALLOC 1689 1689 if (lockres->l_lockdep_map.key != NULL) 1690 - rwsem_release(&lockres->l_lockdep_map, 1, caller_ip); 1690 + rwsem_release(&lockres->l_lockdep_map, caller_ip); 1691 1691 #endif 1692 1692 } 1693 1693
+1 -1
include/linux/jbd2.h
··· 1170 1170 #define jbd2_might_wait_for_commit(j) \ 1171 1171 do { \ 1172 1172 rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \ 1173 - rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \ 1173 + rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \ 1174 1174 } while (0) 1175 1175 1176 1176 /* journal feature predicate functions */
+10 -11
include/linux/lockdep.h
··· 349 349 int trylock, int read, int check, 350 350 struct lockdep_map *nest_lock, unsigned long ip); 351 351 352 - extern void lock_release(struct lockdep_map *lock, int nested, 353 - unsigned long ip); 352 + extern void lock_release(struct lockdep_map *lock, unsigned long ip); 354 353 355 354 /* 356 355 * Same "read" as for lock_acquire(), except -1 means any. ··· 427 428 } 428 429 429 430 # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) 430 - # define lock_release(l, n, i) do { } while (0) 431 + # define lock_release(l, i) do { } while (0) 431 432 # define lock_downgrade(l, i) do { } while (0) 432 433 # define lock_set_class(l, n, k, s, i) do { } while (0) 433 434 # define lock_set_subclass(l, s, i) do { } while (0) ··· 590 591 591 592 #define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 592 593 #define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 593 - #define spin_release(l, n, i) lock_release(l, n, i) 594 + #define spin_release(l, i) lock_release(l, i) 594 595 595 596 #define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 596 597 #define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) 597 - #define rwlock_release(l, n, i) lock_release(l, n, i) 598 + #define rwlock_release(l, i) lock_release(l, i) 598 599 599 600 #define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 600 601 #define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) 601 - #define seqcount_release(l, n, i) lock_release(l, n, i) 602 + #define seqcount_release(l, i) lock_release(l, i) 602 603 603 604 #define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) 604 605 #define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 605 - #define mutex_release(l, n, i) lock_release(l, n, i) 606 + #define mutex_release(l, i) lock_release(l, i) 606 607 607 608 #define rwsem_acquire(l, s, t, i) 
lock_acquire_exclusive(l, s, t, NULL, i) 608 609 #define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) 609 610 #define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) 610 - #define rwsem_release(l, n, i) lock_release(l, n, i) 611 + #define rwsem_release(l, i) lock_release(l, i) 611 612 612 613 #define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) 613 614 #define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) 614 615 #define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) 615 - #define lock_map_release(l) lock_release(l, 1, _THIS_IP_) 616 + #define lock_map_release(l) lock_release(l, _THIS_IP_) 616 617 617 618 #ifdef CONFIG_PROVE_LOCKING 618 619 # define might_lock(lock) \ 619 620 do { \ 620 621 typecheck(struct lockdep_map *, &(lock)->dep_map); \ 621 622 lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ 622 - lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ 623 + lock_release(&(lock)->dep_map, _THIS_IP_); \ 623 624 } while (0) 624 625 # define might_lock_read(lock) \ 625 626 do { \ 626 627 typecheck(struct lockdep_map *, &(lock)->dep_map); \ 627 628 lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ 628 - lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ 629 + lock_release(&(lock)->dep_map, _THIS_IP_); \ 629 630 } while (0) 630 631 631 632 #define lockdep_assert_irqs_enabled() do { \
+2 -2
include/linux/percpu-rwsem.h
··· 93 93 __percpu_up_read(sem); /* Unconditional memory barrier */ 94 94 preempt_enable(); 95 95 96 - rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); 96 + rwsem_release(&sem->rw_sem.dep_map, _RET_IP_); 97 97 } 98 98 99 99 extern void percpu_down_write(struct percpu_rw_semaphore *); ··· 118 118 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, 119 119 bool read, unsigned long ip) 120 120 { 121 - lock_release(&sem->rw_sem.dep_map, 1, ip); 121 + lock_release(&sem->rw_sem.dep_map, ip); 122 122 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER 123 123 if (!read) 124 124 atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
+1 -1
include/linux/rcupdate.h
··· 210 210 211 211 static inline void rcu_lock_release(struct lockdep_map *map) 212 212 { 213 - lock_release(map, 1, _THIS_IP_); 213 + lock_release(map, _THIS_IP_); 214 214 } 215 215 216 216 extern struct lockdep_map rcu_lock_map;
+8 -8
include/linux/rwlock_api_smp.h
··· 215 215 216 216 static inline void __raw_write_unlock(rwlock_t *lock) 217 217 { 218 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 218 + rwlock_release(&lock->dep_map, _RET_IP_); 219 219 do_raw_write_unlock(lock); 220 220 preempt_enable(); 221 221 } 222 222 223 223 static inline void __raw_read_unlock(rwlock_t *lock) 224 224 { 225 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 225 + rwlock_release(&lock->dep_map, _RET_IP_); 226 226 do_raw_read_unlock(lock); 227 227 preempt_enable(); 228 228 } ··· 230 230 static inline void 231 231 __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 232 232 { 233 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 233 + rwlock_release(&lock->dep_map, _RET_IP_); 234 234 do_raw_read_unlock(lock); 235 235 local_irq_restore(flags); 236 236 preempt_enable(); ··· 238 238 239 239 static inline void __raw_read_unlock_irq(rwlock_t *lock) 240 240 { 241 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 241 + rwlock_release(&lock->dep_map, _RET_IP_); 242 242 do_raw_read_unlock(lock); 243 243 local_irq_enable(); 244 244 preempt_enable(); ··· 246 246 247 247 static inline void __raw_read_unlock_bh(rwlock_t *lock) 248 248 { 249 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 249 + rwlock_release(&lock->dep_map, _RET_IP_); 250 250 do_raw_read_unlock(lock); 251 251 __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 252 252 } ··· 254 254 static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, 255 255 unsigned long flags) 256 256 { 257 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 257 + rwlock_release(&lock->dep_map, _RET_IP_); 258 258 do_raw_write_unlock(lock); 259 259 local_irq_restore(flags); 260 260 preempt_enable(); ··· 262 262 263 263 static inline void __raw_write_unlock_irq(rwlock_t *lock) 264 264 { 265 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 265 + rwlock_release(&lock->dep_map, _RET_IP_); 266 266 do_raw_write_unlock(lock); 267 267 local_irq_enable(); 268 268 preempt_enable(); ··· 270 270 271 271 static inline void 
__raw_write_unlock_bh(rwlock_t *lock) 272 272 { 273 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 273 + rwlock_release(&lock->dep_map, _RET_IP_); 274 274 do_raw_write_unlock(lock); 275 275 __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 276 276 }
+2 -2
include/linux/seqlock.h
··· 79 79 80 80 local_irq_save(flags); 81 81 seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); 82 - seqcount_release(&l->dep_map, 1, _RET_IP_); 82 + seqcount_release(&l->dep_map, _RET_IP_); 83 83 local_irq_restore(flags); 84 84 } 85 85 ··· 384 384 385 385 static inline void write_seqcount_end(seqcount_t *s) 386 386 { 387 - seqcount_release(&s->dep_map, 1, _RET_IP_); 387 + seqcount_release(&s->dep_map, _RET_IP_); 388 388 raw_write_seqcount_end(s); 389 389 } 390 390
+4 -4
include/linux/spinlock_api_smp.h
··· 147 147 148 148 static inline void __raw_spin_unlock(raw_spinlock_t *lock) 149 149 { 150 - spin_release(&lock->dep_map, 1, _RET_IP_); 150 + spin_release(&lock->dep_map, _RET_IP_); 151 151 do_raw_spin_unlock(lock); 152 152 preempt_enable(); 153 153 } ··· 155 155 static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, 156 156 unsigned long flags) 157 157 { 158 - spin_release(&lock->dep_map, 1, _RET_IP_); 158 + spin_release(&lock->dep_map, _RET_IP_); 159 159 do_raw_spin_unlock(lock); 160 160 local_irq_restore(flags); 161 161 preempt_enable(); ··· 163 163 164 164 static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) 165 165 { 166 - spin_release(&lock->dep_map, 1, _RET_IP_); 166 + spin_release(&lock->dep_map, _RET_IP_); 167 167 do_raw_spin_unlock(lock); 168 168 local_irq_enable(); 169 169 preempt_enable(); ··· 171 171 172 172 static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) 173 173 { 174 - spin_release(&lock->dep_map, 1, _RET_IP_); 174 + spin_release(&lock->dep_map, _RET_IP_); 175 175 do_raw_spin_unlock(lock); 176 176 __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); 177 177 }
+1 -1
include/linux/ww_mutex.h
··· 182 182 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) 183 183 { 184 184 #ifdef CONFIG_DEBUG_MUTEXES 185 - mutex_release(&ctx->dep_map, 0, _THIS_IP_); 185 + mutex_release(&ctx->dep_map, _THIS_IP_); 186 186 187 187 DEBUG_LOCKS_WARN_ON(ctx->acquired); 188 188 if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
+1 -1
include/net/sock.h
··· 1484 1484 sk->sk_lock.owned = 0; 1485 1485 1486 1486 /* The sk_lock has mutex_unlock() semantics: */ 1487 - mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 1487 + mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 1488 1488 } 1489 1489 } 1490 1490
+1 -1
kernel/bpf/stackmap.c
··· 338 338 * up_read_non_owner(). The rwsem_release() is called 339 339 * here to release the lock from lockdep's perspective. 340 340 */ 341 - rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_); 341 + rwsem_release(&current->mm->mmap_sem.dep_map, _RET_IP_); 342 342 } 343 343 } 344 344
+1 -1
kernel/cpu.c
··· 336 336 337 337 static void lockdep_release_cpus_lock(void) 338 338 { 339 - rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_); 339 + rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, _THIS_IP_); 340 340 } 341 341 342 342 /*
+1 -2
kernel/locking/lockdep.c
··· 4491 4491 } 4492 4492 EXPORT_SYMBOL_GPL(lock_acquire); 4493 4493 4494 - void lock_release(struct lockdep_map *lock, int nested, 4495 - unsigned long ip) 4494 + void lock_release(struct lockdep_map *lock, unsigned long ip) 4496 4495 { 4497 4496 unsigned long flags; 4498 4497
+2 -2
kernel/locking/mutex.c
··· 1091 1091 err_early_kill: 1092 1092 spin_unlock(&lock->wait_lock); 1093 1093 debug_mutex_free_waiter(&waiter); 1094 - mutex_release(&lock->dep_map, 1, ip); 1094 + mutex_release(&lock->dep_map, ip); 1095 1095 preempt_enable(); 1096 1096 return ret; 1097 1097 } ··· 1225 1225 DEFINE_WAKE_Q(wake_q); 1226 1226 unsigned long owner; 1227 1227 1228 - mutex_release(&lock->dep_map, 1, ip); 1228 + mutex_release(&lock->dep_map, ip); 1229 1229 1230 1230 /* 1231 1231 * Release the lock before (potentially) taking the spinlock such that
+3 -3
kernel/locking/rtmutex.c
··· 1517 1517 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); 1518 1518 ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); 1519 1519 if (ret) 1520 - mutex_release(&lock->dep_map, 1, _RET_IP_); 1520 + mutex_release(&lock->dep_map, _RET_IP_); 1521 1521 1522 1522 return ret; 1523 1523 } ··· 1561 1561 RT_MUTEX_MIN_CHAINWALK, 1562 1562 rt_mutex_slowlock); 1563 1563 if (ret) 1564 - mutex_release(&lock->dep_map, 1, _RET_IP_); 1564 + mutex_release(&lock->dep_map, _RET_IP_); 1565 1565 1566 1566 return ret; 1567 1567 } ··· 1600 1600 */ 1601 1601 void __sched rt_mutex_unlock(struct rt_mutex *lock) 1602 1602 { 1603 - mutex_release(&lock->dep_map, 1, _RET_IP_); 1603 + mutex_release(&lock->dep_map, _RET_IP_); 1604 1604 rt_mutex_fastunlock(lock, rt_mutex_slowunlock); 1605 1605 } 1606 1606 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+5 -5
kernel/locking/rwsem.c
··· 1504 1504 rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); 1505 1505 1506 1506 if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) { 1507 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 1507 + rwsem_release(&sem->dep_map, _RET_IP_); 1508 1508 return -EINTR; 1509 1509 } 1510 1510 ··· 1546 1546 1547 1547 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, 1548 1548 __down_write_killable)) { 1549 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 1549 + rwsem_release(&sem->dep_map, _RET_IP_); 1550 1550 return -EINTR; 1551 1551 } 1552 1552 ··· 1573 1573 */ 1574 1574 void up_read(struct rw_semaphore *sem) 1575 1575 { 1576 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 1576 + rwsem_release(&sem->dep_map, _RET_IP_); 1577 1577 __up_read(sem); 1578 1578 } 1579 1579 EXPORT_SYMBOL(up_read); ··· 1583 1583 */ 1584 1584 void up_write(struct rw_semaphore *sem) 1585 1585 { 1586 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 1586 + rwsem_release(&sem->dep_map, _RET_IP_); 1587 1587 __up_write(sem); 1588 1588 } 1589 1589 EXPORT_SYMBOL(up_write); ··· 1639 1639 1640 1640 if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, 1641 1641 __down_write_killable)) { 1642 - rwsem_release(&sem->dep_map, 1, _RET_IP_); 1642 + rwsem_release(&sem->dep_map, _RET_IP_); 1643 1643 return -EINTR; 1644 1644 } 1645 1645
+5 -5
kernel/printk/printk.c
··· 248 248 { 249 249 unsigned long flags; 250 250 251 - mutex_release(&console_lock_dep_map, 1, ip); 251 + mutex_release(&console_lock_dep_map, ip); 252 252 253 253 printk_safe_enter_irqsave(flags); 254 254 up(&console_sem); ··· 1679 1679 raw_spin_unlock(&console_owner_lock); 1680 1680 1681 1681 if (!waiter) { 1682 - spin_release(&console_owner_dep_map, 1, _THIS_IP_); 1682 + spin_release(&console_owner_dep_map, _THIS_IP_); 1683 1683 return 0; 1684 1684 } 1685 1685 1686 1686 /* The waiter is now free to continue */ 1687 1687 WRITE_ONCE(console_waiter, false); 1688 1688 1689 - spin_release(&console_owner_dep_map, 1, _THIS_IP_); 1689 + spin_release(&console_owner_dep_map, _THIS_IP_); 1690 1690 1691 1691 /* 1692 1692 * Hand off console_lock to waiter. The waiter will perform 1693 1693 * the up(). After this, the waiter is the console_lock owner. 1694 1694 */ 1695 - mutex_release(&console_lock_dep_map, 1, _THIS_IP_); 1695 + mutex_release(&console_lock_dep_map, _THIS_IP_); 1696 1696 return 1; 1697 1697 } 1698 1698 ··· 1746 1746 /* Owner will clear console_waiter on hand off */ 1747 1747 while (READ_ONCE(console_waiter)) 1748 1748 cpu_relax(); 1749 - spin_release(&console_owner_dep_map, 1, _THIS_IP_); 1749 + spin_release(&console_owner_dep_map, _THIS_IP_); 1750 1750 1751 1751 printk_safe_exit_irqrestore(flags); 1752 1752 /*
+1 -1
kernel/sched/core.c
··· 3105 3105 * do an early lockdep release here: 3106 3106 */ 3107 3107 rq_unpin_lock(rq, rf); 3108 - spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 3108 + spin_release(&rq->lock.dep_map, _THIS_IP_); 3109 3109 #ifdef CONFIG_DEBUG_SPINLOCK 3110 3110 /* this is a valid case when another task releases the spinlock */ 3111 3111 rq->lock.owner = next;
+12 -12
lib/locking-selftest.c
··· 1475 1475 1476 1476 mutex_lock(&o2.base); 1477 1477 o2.ctx = &t2; 1478 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1478 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1479 1479 1480 1480 WWAI(&t); 1481 1481 t2 = t; ··· 1500 1500 int ret; 1501 1501 1502 1502 mutex_lock(&o2.base); 1503 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1503 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1504 1504 o2.ctx = &t2; 1505 1505 1506 1506 WWAI(&t); ··· 1527 1527 1528 1528 mutex_lock(&o2.base); 1529 1529 o2.ctx = &t2; 1530 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1530 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1531 1531 1532 1532 WWAI(&t); 1533 1533 t2 = t; ··· 1551 1551 int ret; 1552 1552 1553 1553 mutex_lock(&o2.base); 1554 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1554 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1555 1555 o2.ctx = &t2; 1556 1556 1557 1557 WWAI(&t); ··· 1576 1576 int ret; 1577 1577 1578 1578 mutex_lock(&o2.base); 1579 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1579 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1580 1580 o2.ctx = &t2; 1581 1581 1582 1582 WWAI(&t); ··· 1597 1597 int ret; 1598 1598 1599 1599 mutex_lock(&o2.base); 1600 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1600 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1601 1601 o2.ctx = &t2; 1602 1602 1603 1603 WWAI(&t); ··· 1618 1618 int ret; 1619 1619 1620 1620 mutex_lock(&o2.base); 1621 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1621 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1622 1622 o2.ctx = &t2; 1623 1623 1624 1624 mutex_lock(&o3.base); 1625 - mutex_release(&o3.base.dep_map, 1, _THIS_IP_); 1625 + mutex_release(&o3.base.dep_map, _THIS_IP_); 1626 1626 o3.ctx = &t2; 1627 1627 1628 1628 WWAI(&t); ··· 1644 1644 int ret; 1645 1645 1646 1646 mutex_lock(&o2.base); 1647 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1647 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1648 1648 o2.ctx = &t2; 1649 1649 1650 1650 mutex_lock(&o3.base); 1651 - 
mutex_release(&o3.base.dep_map, 1, _THIS_IP_); 1651 + mutex_release(&o3.base.dep_map, _THIS_IP_); 1652 1652 o3.ctx = &t2; 1653 1653 1654 1654 WWAI(&t); ··· 1669 1669 int ret; 1670 1670 1671 1671 mutex_lock(&o2.base); 1672 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1672 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1673 1673 o2.ctx = &t2; 1674 1674 1675 1675 WWAI(&t); ··· 1694 1694 int ret; 1695 1695 1696 1696 mutex_lock(&o2.base); 1697 - mutex_release(&o2.base.dep_map, 1, _THIS_IP_); 1697 + mutex_release(&o2.base.dep_map, _THIS_IP_); 1698 1698 o2.ctx = &t2; 1699 1699 1700 1700 WWAI(&t);
+1 -1
mm/memcontrol.c
··· 1800 1800 struct mem_cgroup *iter; 1801 1801 1802 1802 spin_lock(&memcg_oom_lock); 1803 - mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_); 1803 + mutex_release(&memcg_oom_lock_dep_map, _RET_IP_); 1804 1804 for_each_mem_cgroup_tree(iter, memcg) 1805 1805 iter->oom_lock = false; 1806 1806 spin_unlock(&memcg_oom_lock);
+1 -1
net/core/sock.c
··· 521 521 522 522 rc = sk_backlog_rcv(sk, skb); 523 523 524 - mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 524 + mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 525 525 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { 526 526 bh_unlock_sock(sk); 527 527 atomic_inc(&sk->sk_drops);
+1 -2
tools/lib/lockdep/include/liblockdep/common.h
··· 42 42 void lock_acquire(struct lockdep_map *lock, unsigned int subclass, 43 43 int trylock, int read, int check, 44 44 struct lockdep_map *nest_lock, unsigned long ip); 45 - void lock_release(struct lockdep_map *lock, int nested, 46 - unsigned long ip); 45 + void lock_release(struct lockdep_map *lock, unsigned long ip); 47 46 void lockdep_reset_lock(struct lockdep_map *lock); 48 47 void lockdep_register_key(struct lock_class_key *key); 49 48 void lockdep_unregister_key(struct lock_class_key *key);
+1 -1
tools/lib/lockdep/include/liblockdep/mutex.h
··· 42 42 43 43 static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock) 44 44 { 45 - lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); 45 + lock_release(&lock->dep_map, (unsigned long)_RET_IP_); 46 46 return pthread_mutex_unlock(&lock->mutex); 47 47 } 48 48
+1 -1
tools/lib/lockdep/include/liblockdep/rwlock.h
··· 44 44 45 45 static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock) 46 46 { 47 - lock_release(&lock->dep_map, 0, (unsigned long)_RET_IP_); 47 + lock_release(&lock->dep_map, (unsigned long)_RET_IP_); 48 48 return pthread_rwlock_unlock(&lock->rwlock); 49 49 } 50 50
+8 -8
tools/lib/lockdep/preload.c
··· 270 270 */ 271 271 r = ll_pthread_mutex_lock(mutex); 272 272 if (r) 273 - lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 273 + lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_); 274 274 275 275 return r; 276 276 } ··· 284 284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); 285 285 r = ll_pthread_mutex_trylock(mutex); 286 286 if (r) 287 - lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 287 + lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_); 288 288 289 289 return r; 290 290 } ··· 295 295 296 296 try_init_preload(); 297 297 298 - lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 298 + lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_); 299 299 /* 300 300 * Just like taking a lock, only in reverse! 301 301 * ··· 355 355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_); 356 356 r = ll_pthread_rwlock_rdlock(rwlock); 357 357 if (r) 358 - lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 358 + lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_); 359 359 360 360 return r; 361 361 } ··· 369 369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_); 370 370 r = ll_pthread_rwlock_tryrdlock(rwlock); 371 371 if (r) 372 - lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 372 + lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_); 373 373 374 374 return r; 375 375 } ··· 383 383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_); 384 384 r = ll_pthread_rwlock_trywrlock(rwlock); 385 385 if (r) 386 - lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 386 + lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_); 387 387 388 388 return r; 389 389 } ··· 397 397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, 
NULL, (unsigned long)_RET_IP_); 398 398 r = ll_pthread_rwlock_wrlock(rwlock); 399 399 if (r) 400 - lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 400 + lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_); 401 401 402 402 return r; 403 403 } ··· 408 408 409 409 init_preload(); 410 410 411 - lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 411 + lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_); 412 412 r = ll_pthread_rwlock_unlock(rwlock); 413 413 if (r) 414 414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);