/* include/linux/smp_lock.h — as of Linux v2.6.17 (1.4 kB) */
/*
 * Global kernel lock interface.
 *
 * With CONFIG_LOCK_KERNEL, a task's nesting count for the lock lives in
 * ->lock_depth (negative means "not held"); the scheduler uses the
 * release/reacquire helpers below to drop the lock across a context
 * switch and re-take it afterwards.  Without CONFIG_LOCK_KERNEL every
 * operation here compiles away to a no-op.
 */
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#include <linux/config.h>
#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>
#include <linux/spinlock.h>

/* True iff the current task holds the kernel lock (lock_depth >= 0). */
#define kernel_locked()		(current->lock_depth >= 0)

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Release/re-acquire global kernel lock for the scheduler.
 * Only drops the lock when @tsk actually holds it (lock_depth >= 0),
 * which is marked unlikely() since most tasks never take the lock.
 */
#define release_kernel_lock(tsk) do { 		\
	if (unlikely((tsk)->lock_depth >= 0))	\
		__release_kernel_lock();	\
} while (0)

/*
 * Non-SMP kernels will never block on the kernel lock,
 * so we are better off returning a constant zero from
 * reacquire_kernel_lock() so that the compiler can see
 * it at compile-time.
 *
 * On SMP without CONFIG_PREEMPT_BKL, return_value_on_smp expands to
 * "return", propagating __reacquire_kernel_lock()'s result; otherwise
 * it expands to nothing, the result is discarded, and the function
 * falls through to the constant "return 0" below.
 */
#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
# define return_value_on_smp return
#else
# define return_value_on_smp
#endif

/*
 * Re-take the kernel lock for @task if it held it before scheduling.
 * Returns __reacquire_kernel_lock()'s value on SMP (see macro above),
 * 0 otherwise.
 */
static inline int reacquire_kernel_lock(struct task_struct *task)
{
	if (unlikely(task->lock_depth >= 0))
		return_value_on_smp __reacquire_kernel_lock();
	return 0;
}

/* __acquires/__releases are sparse annotations for static lock checking. */
extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);

#else

/*
 * CONFIG_LOCK_KERNEL disabled: there is no kernel lock to take, so all
 * operations are no-ops, reacquire always reports 0, and the lock is
 * reported as always held.
 */
#define lock_kernel()				do { } while(0)
#define unlock_kernel()				do { } while(0)
#define release_kernel_lock(task)		do { } while(0)
#define reacquire_kernel_lock(task)		0
#define kernel_locked()				1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */