Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

docs: kernel-locking: Convert semaphore references

I converted some of the document to reflect mutex usage instead of
semaphore usage, since we shouldn't be promoting semaphore usage when
it's on its way out.

Signed-off-by: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Daniel Walker and committed by
Linus Torvalds
66656ebb ec5b1157

+16 -16
+16 -16
Documentation/DocBook/kernel-locking.tmpl
··· 717 717 <para> 718 718 For our first example, we assume that all operations are in user 719 719 context (ie. from system calls), so we can sleep. This means we can 720 - use a semaphore to protect the cache and all the objects within 720 + use a mutex to protect the cache and all the objects within 721 721 it. Here's the code: 722 722 </para> 723 723 ··· 725 725 #include &lt;linux/list.h&gt; 726 726 #include &lt;linux/slab.h&gt; 727 727 #include &lt;linux/string.h&gt; 728 - #include &lt;asm/semaphore.h&gt; 728 + #include &lt;linux/mutex.h&gt; 729 729 #include &lt;asm/errno.h&gt; 730 730 731 731 struct object ··· 737 737 }; 738 738 739 739 /* Protects the cache, cache_num, and the objects within it */ 740 - static DECLARE_MUTEX(cache_lock); 740 + static DEFINE_MUTEX(cache_lock); 741 741 static LIST_HEAD(cache); 742 742 static unsigned int cache_num = 0; 743 743 #define MAX_CACHE_SIZE 10 ··· 789 789 obj-&gt;id = id; 790 790 obj-&gt;popularity = 0; 791 791 792 - down(&amp;cache_lock); 792 + mutex_lock(&amp;cache_lock); 793 793 __cache_add(obj); 794 - up(&amp;cache_lock); 794 + mutex_unlock(&amp;cache_lock); 795 795 return 0; 796 796 } 797 797 798 798 void cache_delete(int id) 799 799 { 800 - down(&amp;cache_lock); 800 + mutex_lock(&amp;cache_lock); 801 801 __cache_delete(__cache_find(id)); 802 - up(&amp;cache_lock); 802 + mutex_unlock(&amp;cache_lock); 803 803 } 804 804 805 805 int cache_find(int id, char *name) ··· 807 807 struct object *obj; 808 808 int ret = -ENOENT; 809 809 810 - down(&amp;cache_lock); 810 + mutex_lock(&amp;cache_lock); 811 811 obj = __cache_find(id); 812 812 if (obj) { 813 813 ret = 0; 814 814 strcpy(name, obj-&gt;name); 815 815 } 816 - up(&amp;cache_lock); 816 + mutex_unlock(&amp;cache_lock); 817 817 return ret; 818 818 } 819 819 </programlisting> ··· 853 853 int popularity; 854 854 }; 855 855 856 - -static DECLARE_MUTEX(cache_lock); 856 + -static DEFINE_MUTEX(cache_lock); 857 857 +static spinlock_t cache_lock = SPIN_LOCK_UNLOCKED; 858 858 
static LIST_HEAD(cache); 859 859 static unsigned int cache_num = 0; ··· 870 870 obj-&gt;id = id; 871 871 obj-&gt;popularity = 0; 872 872 873 - - down(&amp;cache_lock); 873 + - mutex_lock(&amp;cache_lock); 874 874 + spin_lock_irqsave(&amp;cache_lock, flags); 875 875 __cache_add(obj); 876 - - up(&amp;cache_lock); 876 + - mutex_unlock(&amp;cache_lock); 877 877 + spin_unlock_irqrestore(&amp;cache_lock, flags); 878 878 return 0; 879 879 } 880 880 881 881 void cache_delete(int id) 882 882 { 883 - - down(&amp;cache_lock); 883 + - mutex_lock(&amp;cache_lock); 884 884 + unsigned long flags; 885 885 + 886 886 + spin_lock_irqsave(&amp;cache_lock, flags); 887 887 __cache_delete(__cache_find(id)); 888 - - up(&amp;cache_lock); 888 + - mutex_unlock(&amp;cache_lock); 889 889 + spin_unlock_irqrestore(&amp;cache_lock, flags); 890 890 } 891 891 ··· 895 895 int ret = -ENOENT; 896 896 + unsigned long flags; 897 897 898 - - down(&amp;cache_lock); 898 + - mutex_lock(&amp;cache_lock); 899 899 + spin_lock_irqsave(&amp;cache_lock, flags); 900 900 obj = __cache_find(id); 901 901 if (obj) { 902 902 ret = 0; 903 903 strcpy(name, obj-&gt;name); 904 904 } 905 - - up(&amp;cache_lock); 905 + - mutex_unlock(&amp;cache_lock); 906 906 + spin_unlock_irqrestore(&amp;cache_lock, flags); 907 907 return ret; 908 908 }