/*
 * linux/include/asm-arm/semaphore.h
 */
#ifndef __ASM_ARM_SEMAPHORE_H
#define __ASM_ARM_SEMAPHORE_H

#include <linux/linkage.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <asm/atomic.h>
#include <asm/proc/locks.h>

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INIT(name,count)		\
	{ ATOMIC_INIT(count), 0,		\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INIT(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)	\
	struct semaphore name = __SEMAPHORE_INIT(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
extern inline void sema_init(struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	sem->sleepers = 0;
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
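/*
 * Example (illustrative only; the names below are hypothetical, not
 * part of this header): a semaphore may be initialized statically
 * with the DECLARE_* macros above, or at run time with sema_init():
 *
 *	static DECLARE_MUTEX(config_sem);	count starts at 1
 *
 *	struct semaphore io_sem;
 *	sema_init(&io_sem, 4);			allow four concurrent holders
 */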
/*
 * special register calling convention
 */
asmlinkage void __down_failed(void);
asmlinkage int  __down_interruptible_failed(void);
asmlinkage int  __down_trylock_failed(void);
asmlinkage void __up_wakeup(void);

extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern int  __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);
/*
 * This is ugly, but we want the default case to fall through.
 * "__down" is the actual routine that waits...
 */
extern inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__down_op(sem, __down_failed);
}
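/*
 * A rough sketch of what the fast path above amounts to (the real
 * __down_op is ARM assembly in <asm/proc/locks.h>; this C-like
 * pseudo-code is only illustrative):
 *
 *	atomically decrement sem->count;
 *	if (the new count is negative)
 *		__down_failed();	saves registers, calls __down()
 *
 * so the uncontended case takes no out-of-line branch at all.
 */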
/*
 * This is ugly, but we want the default case to fall through.
 * "__down_interruptible" is the actual routine that waits...
 */
extern inline int down_interruptible(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return __down_op_ret(sem, __down_interruptible_failed);
}
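/*
 * Example (illustrative only; "my_sem" is hypothetical):
 * down_interruptible() returns 0 once the semaphore is acquired and
 * -EINTR if the sleep was broken by a signal, so callers must check
 * the return value:
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;
 */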
extern inline int down_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	return __down_op_ret(sem, __down_trylock_failed);
}
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
extern inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__up_op(sem, __up_wakeup);
}
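/*
 * Typical usage (a minimal sketch; "my_dev_sem" and the surrounding
 * code are hypothetical): down()/up() bracket a sleepable critical
 * section, and down_trylock() returns 0 only if the semaphore could
 * be taken without sleeping.
 *
 *	static DECLARE_MUTEX(my_dev_sem);
 *
 *	down(&my_dev_sem);
 *	... touch device state ...
 *	up(&my_dev_sem);
 *
 *	if (down_trylock(&my_dev_sem) == 0) {
 *		... got it without sleeping ...
 *		up(&my_dev_sem);
 *	}
 */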
/* rw mutexes (should that be mutices? =) -- throw rw
 * spinlocks and semaphores together, and this is what we
 * end up with...
 *
 * The lock is initialized to BIAS.  This way, a writer
 * subtracts BIAS and gets 0 for the case of an uncontended
 * lock.  Readers decrement by 1 and see a positive value
 * when uncontended, negative if there are writers waiting
 * (in which case it goes to sleep).
 *
 * In terms of fairness, this should result in the lock
 * flopping back and forth between readers and writers
 * under heavy use.
 */
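/*
 * Worked example of the bias arithmetic described above (illustrative;
 * RW_LOCK_BIAS is assumed to be a large constant, e.g. 0x01000000):
 *
 *	count == BIAS		unlocked
 *	count == BIAS - 1	one reader holds the lock (still positive)
 *	count == 0		one writer holds the lock, uncontended
 *	count < 0		contended; the new arrival goes to sleep
 */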
struct rw_semaphore {
	atomic_t		count;
	volatile unsigned char	write_bias_granted;
	volatile unsigned char	read_bias_granted;
	volatile unsigned char	pad1;
	volatile unsigned char	pad2;
	wait_queue_head_t	wait;
	wait_queue_head_t	write_bias_wait;
#if WAITQUEUE_DEBUG
	long			__magic;
	atomic_t		readers;
	atomic_t		writers;
#endif
};
#if WAITQUEUE_DEBUG
#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name,count) \
	{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }

#define __DECLARE_RWSEM_GENERIC(name,count) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)

#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
extern inline void init_rwsem(struct rw_semaphore *sem)
{
	atomic_set(&sem->count, RW_LOCK_BIAS);
	sem->read_bias_granted = 0;
	sem->write_bias_granted = 0;
	init_waitqueue_head(&sem->wait);
	init_waitqueue_head(&sem->write_bias_wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
	atomic_set(&sem->readers, 0);
	atomic_set(&sem->writers, 0);
#endif
}
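/*
 * Example (illustrative only; "map_sem" and "dyn_sem" are hypothetical):
 *
 *	static DECLARE_RWSEM(map_sem);		statically initialized, unlocked
 *
 *	struct rw_semaphore dyn_sem;
 *	init_rwsem(&dyn_sem);			run-time equivalent
 */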
extern struct rw_semaphore *__down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *__down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);
extern inline void down_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__down_op_read(sem, __down_read_failed);
#if WAITQUEUE_DEBUG
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_inc(&sem->readers);
#endif
}
extern inline void down_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__down_op_write(sem, __down_write_failed);
#if WAITQUEUE_DEBUG
	if (atomic_read(&sem->writers))
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (sem->read_bias_granted)
		BUG();
	if (sem->write_bias_granted)
		BUG();
	atomic_inc(&sem->writers);
#endif
}
extern inline void up_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_dec(&sem->readers);
#endif
	__up_op_read(sem, __rwsem_wake);
}
extern inline void up_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->read_bias_granted)
		BUG();
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (atomic_read(&sem->writers) != 1)
		BUG();
	atomic_dec(&sem->writers);
#endif
	__up_op_write(sem, __rwsem_wake);
}
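/*
 * Typical usage (a minimal sketch; "tree_sem" and the surrounding code
 * are hypothetical): many readers may hold the lock at once, while a
 * writer gets exclusive access.
 *
 *	static DECLARE_RWSEM(tree_sem);
 *
 *	down_read(&tree_sem);
 *	... look up the tree, no modifications ...
 *	up_read(&tree_sem);
 *
 *	down_write(&tree_sem);
 *	... rebalance the tree ...
 *	up_write(&tree_sem);
 */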
#endif