lib/kernel_lock.c at v2.6.25
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        struct task_struct *task = current;
        int saved_lock_depth = task->lock_depth;

        BUG_ON(saved_lock_depth < 0);

        task->lock_depth = -1;
        preempt_enable_no_resched();

        down(&kernel_sem);

        preempt_disable();
        task->lock_depth = saved_lock_depth;

        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        up(&kernel_sem);
}

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
        struct task_struct *task = current;
        int depth = task->lock_depth + 1;

        if (likely(!depth))
                /*
                 * No recursion worries - we set up lock_depth _after_
                 */
                down(&kernel_sem);

        task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        struct task_struct *task = current;

        BUG_ON(task->lock_depth < 0);

        if (likely(--task->lock_depth < 0))
                up(&kernel_sem);
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
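
/*
 * Illustration only -- not part of lib/kernel_lock.c.  A minimal sketch of
 * how legacy code uses the exported API above; the function names here are
 * hypothetical.  lock_kernel() nests: a callee may take the BKL while its
 * caller already holds it, and only the outermost unlock_kernel() releases
 * kernel_sem.  The lock is also dropped transparently if the task sleeps in
 * schedule() and reacquired before it runs again (see
 * __release_kernel_lock()/__reacquire_kernel_lock() above).
 */
#include <linux/smp_lock.h>

static int legacy_helper(void)
{
        lock_kernel();          /* already held: only bumps lock_depth */
        /* ... touch BKL-protected legacy state ... */
        unlock_kernel();        /* lock_depth stays >= 0: caller still holds it */
        return 0;
}

static int legacy_entry_point(void)
{
        int ret;

        lock_kernel();          /* lock_depth -1 -> 0: down(&kernel_sem) */
        ret = legacy_helper();
        unlock_kernel();        /* lock_depth 0 -> -1: up(&kernel_sem) */

        return ret;
}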