Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.25-rc8 149 lines 3.1 kB view raw
#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996, 2000 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

/*
 * Counting semaphore.  'count' is the number of remaining acquisitions:
 * down() decrements it and up() increments it; a negative result after
 * a decrement sends the caller to the out-of-line sleep path (see
 * __down()/__up() below).
 */
struct semaphore {
	atomic_t count;		/* semaphore value; goes negative under contention */
	wait_queue_head_t wait;	/* tasks blocked in the contended slow path */
};

/* Static initializer: semaphore value n, empty wait queue. */
#define __SEMAPHORE_INITIALIZER(name, n)			\
{								\
	.count	= ATOMIC_INIT(n),				\
	.wait	= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait),	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* A mutex is simply a semaphore with an initial value of 1. */
#define DECLARE_MUTEX(name)	__DECLARE_SEMAPHORE_GENERIC(name,1)

/* Run-time initialization of a semaphore to value 'val'. */
static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * Logically,
	 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc produces better initializing by parts yet.
	 */

	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

/* Initialize as an unlocked mutex (value 1). */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

/* Initialize as an already-held mutex (value 0); a later up() releases it. */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

/*
 * Public entry points.  When CONFIG_DEBUG_SEMAPHORE is set these are
 * real external functions; otherwise the 'extern inline' definitions at
 * the bottom of this header provide the fast paths.  The __down_failed*
 * and __up_wakeup helpers are the contention slow paths, implemented in
 * arch/alpha/kernel/semaphore.c.
 */
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int down_interruptible(struct semaphore *);
extern int __down_failed_interruptible(struct semaphore *);
extern int down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);

/*
 * Hidden out of line code is fun, but extremely messy. Rely on newer
 * compilers to do a respectable job with this. The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */

/*
 * Uncontended down(): atomically decrement the count.  A negative
 * result means the semaphore was already held, so fall into the
 * out-of-line sleep path.
 */
static inline void __down(struct semaphore *sem)
{
	long count;
	might_sleep();
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		__down_failed(sem);
}

/*
 * As __down(), but the slow-path sleep may be interrupted by a signal.
 * Returns 0 on success, or whatever __down_failed_interruptible()
 * reports (presumably -EINTR on a signal -- defined out of line in
 * arch/alpha/kernel/semaphore.c).
 */
static inline int __down_interruptible(struct semaphore *sem)
{
	long count;
	might_sleep();
	count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		return __down_failed_interruptible(sem);
	return 0;
}

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 */

static inline int __down_trylock(struct semaphore *sem)
{
	long ret;

	/* "Equivalent" C:

	   do {
		ret = ldl_l;
		--ret;
		if (ret < 0)
			break;
		ret = stl_c = ret;
	   } while (ret == 0);
	*/
	/*
	 * Alpha load-locked/store-conditional loop: decrement the count
	 * only if the result stays >= 0.  If the decremented value would
	 * be negative we branch to 2 and report failure without storing.
	 * A failed stl_c (leaves 0 in %0) branches to the out-of-line
	 * retry stub at 3, which restarts at 1.  The trailing mb is a
	 * memory barrier ordering the successful acquire before later
	 * accesses.
	 */
	__asm__ __volatile__(
		"1: ldl_l %0,%1\n"
		" subl %0,1,%0\n"
		" blt %0,2f\n"
		" stl_c %0,%1\n"
		" beq %0,3f\n"
		" mb\n"
		"2:\n"
		".subsection 2\n"
		"3: br 1b\n"
		".previous"
		: "=&r" (ret), "=m" (sem->count)
		: "m" (sem->count));

	return ret < 0;
}

/*
 * Uncontended up(): atomically increment the count.  A result <= 0
 * means tasks are waiting, so let the out-of-line path wake one
 * (implemented in arch/alpha/kernel/semaphore.c).
 */
static inline void __up(struct semaphore *sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up_wakeup(sem);
}

/*
 * Without semaphore debugging, the public entry points are just the
 * inline fast paths above.  Note these are 'extern inline' (GNU89
 * semantics, the kernel's dialect): no out-of-line copy is emitted
 * from this header.
 */
#if !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif

#endif /* _ALPHA_SEMAPHORE_H */