/* include/linux/smp_lock.h — Big Kernel Lock (BKL) helpers, Linux v2.6.37 */
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);

/*
 * Scheduler helpers: drop the global kernel lock on context switch out,
 * and take it back when the task is scheduled in again.  A task holds
 * the BKL iff its ->lock_depth is >= 0.
 */
#define release_kernel_lock(t) do {			\
	if (unlikely((t)->lock_depth >= 0))		\
		__release_kernel_lock();		\
} while (0)

static inline int reacquire_kernel_lock(struct task_struct *tsk)
{
	if (unlikely(tsk->lock_depth >= 0))
		return __reacquire_kernel_lock();
	return 0;
}

extern void __lockfunc
_lock_kernel(const char *func, const char *file, int line)
__acquires(kernel_lock);

extern void __lockfunc
_unlock_kernel(const char *func, const char *file, int line)
__releases(kernel_lock);

/* Record the call site (function/file/line) for lock debugging/tracing. */
#define lock_kernel() do {				\
	_lock_kernel(__func__, __FILE__, __LINE__);	\
} while (0)

#define unlock_kernel() do {				\
	_unlock_kernel(__func__, __FILE__, __LINE__);	\
} while (0)

/*
 * Various legacy drivers don't really need the BKL in a specific
 * function, but they *do* need to know that the BKL became available.
 * This function just avoids wrapping a bunch of lock/unlock pairs
 * around code which doesn't really need it.
 */
static inline void cycle_kernel_lock(void)
{
	lock_kernel();
	unlock_kernel();
}

#else

#ifdef CONFIG_BKL /* provoke build bug if not set */
#define lock_kernel()
#define unlock_kernel()
#define cycle_kernel_lock() do { } while(0)
#endif /* CONFIG_BKL */

#define release_kernel_lock(task) do { } while(0)
#define reacquire_kernel_lock(task) 0

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */