/* include/linux/smp_lock.h — as of Linux v2.6.23 (51 lines, 1.4 kB) */
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

/*
 * Big Kernel Lock (BKL) interface.
 *
 * current->lock_depth encodes BKL ownership for the running task:
 * -1 means the BKL is not held; >= 0 means it is held (the depth
 * counts recursive lock_kernel() calls, as the comparisons below rely on).
 */
#define kernel_locked()		(current->lock_depth >= 0)

/* Low-level worker routines; implementations live outside this header. */
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler
 */
/* Drop the BKL across a context switch if (and only if) @tsk holds it. */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)

/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif

/*
 * Re-take the BKL after a context switch if @task held it before.
 *
 * Note the textual trick: on SMP without CONFIG_PREEMPT_BKL,
 * return_value_on_smp expands to "return", so this returns whatever
 * __reacquire_kernel_lock() yields; otherwise the macro expands to
 * nothing, the call's result is discarded, and the function falls
 * through to the constant "return 0".
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return_value_on_smp __reacquire_kernel_lock();
	return 0;
}

/* Acquire/release the BKL; __acquires/__releases are sparse annotations
 * for static lock-balance checking and generate no code. */
extern void __lockfunc lock_kernel(void)	__acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void)	__releases(kernel_lock);

#else

/*
 * CONFIG_LOCK_KERNEL disabled: the BKL compiles away entirely.
 * kernel_locked() reports 1 so callers behave as if the lock is
 * always held, and reacquire_kernel_lock() is the constant 0 that
 * schedule() expects.
 */
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */