* the original code and to make semaphore waits
* interruptible so that processes waiting on
* semaphores can be killed.
+ * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
+ * functions in asm/semaphore-helper.h while fixing a
+ * potential and subtle race discovered by Ulrich Schmid
+ * in down_interruptible(). Since I started to play here I
+ * also implemented the `trylock' semaphore operation.
+ * 1999-07-02 Artur Skawina <skawina@geocities.com>
+ * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
+ * do this). Changed calling sequences from push/jmp to
+ * traditional call/ret.
*
* If you would like to see an analysis of this implementation, please
* ftp to gcom.com and download the file
#include <asm/system.h>
#include <asm/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
struct semaphore {
atomic_t count;
- int waking;
- struct wait_queue * wait;
+ int sleepers;
+ wait_queue_head_t wait;
+#if WAITQUEUE_DEBUG
+ long __magic;
+#endif
};
-#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
-#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
+#if WAITQUEUE_DEBUG
+# define __SEM_DEBUG_INIT(name) \
+ , (int)&(name).__magic
+#else
+# define __SEM_DEBUG_INIT(name)
+#endif
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
+#define __SEMAPHORE_INITIALIZER(name,count) \
+{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
+ __SEM_DEBUG_INIT(name) }
-extern void __down(struct semaphore * sem);
-extern void __up(struct semaphore * sem);
+#define __MUTEX_INITIALIZER(name) \
+ __SEMAPHORE_INITIALIZER(name,1)
-#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
+#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
+#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
+
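With the initializers above, a statically allocated semaphore is declared with DECLARE_MUTEX()/DECLARE_MUTEX_LOCKED() rather than assigning the old MUTEX/MUTEX_LOCKED compound literals. A minimal illustration (the names here are invented for this note, not part of the patch):

static DECLARE_MUTEX(foo_lock);		/* count 1: first down() succeeds */
static DECLARE_MUTEX_LOCKED(foo_ready);	/* count 0: down() blocks until up() */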
+extern inline void sema_init (struct semaphore *sem, int val)
+{
/*
- * These two _must_ execute atomically wrt each other.
- *
- * This is trivially done with load_locked/store_cond,
- * but on the x86 we need an external synchronizer.
- * Currently this is just the global interrupt lock,
- * bah. Go for a smaller spinlock some day.
+ * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*
- * (On the other hand this shouldn't be in any critical
- * path, so..)
+ * I'd rather use the more flexible initialization above, but sadly
+ * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
*/
-static inline void wake_one_more(struct semaphore * sem)
-{
- unsigned long flags;
+ atomic_set(&sem->count, val);
+ sem->sleepers = 0;
+ init_waitqueue_head(&sem->wait);
+#if WAITQUEUE_DEBUG
+ sem->__magic = (int)&sem->__magic;
+#endif
+}
- save_flags(flags);
- cli();
- sem->waking++;
- restore_flags(flags);
+static inline void init_MUTEX (struct semaphore *sem)
+{
+ sema_init(sem, 1);
}
-static inline int waking_non_zero(struct semaphore *sem)
+static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
- unsigned long flags;
- int ret = 0;
-
- save_flags(flags);
- cli();
- if (sem->waking > 0) {
- sem->waking--;
- ret = 1;
- }
- restore_flags(flags);
- return ret;
+ sema_init(sem, 0);
}
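For a semaphore embedded in a dynamically allocated structure, the new inline initializers replace the old sema_init() macro. A sketch of the usual pattern, with a hypothetical foo_dev structure and allocator (assumes <linux/malloc.h> for kmalloc()):

#include <linux/malloc.h>

struct foo_dev {
	struct semaphore lock;		/* protects the fields below */
	int users;
};

static struct foo_dev *foo_alloc(void)
{
	struct foo_dev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);

	if (dev) {
		init_MUTEX(&dev->lock);	/* equivalent to sema_init(&dev->lock, 1) */
		dev->users = 0;
	}
	return dev;
}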
+asmlinkage void __down_failed(void /* special register calling convention */);
+asmlinkage int __down_failed_interruptible(void /* params in registers */);
+asmlinkage int __down_failed_trylock(void /* params in registers */);
+asmlinkage void __up_wakeup(void /* special register calling convention */);
+
+asmlinkage void __down(struct semaphore * sem);
+asmlinkage int __down_interruptible(struct semaphore * sem);
+asmlinkage int __down_trylock(struct semaphore * sem);
+asmlinkage void __up(struct semaphore * sem);
+
+extern spinlock_t semaphore_wake_lock;
+
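The cli()-based wake_one_more()/waking_non_zero() pair is gone; the C slow paths declared above now serialize on semaphore_wake_lock instead. Purely as an illustration of the idea, and not the code added by this patch, a __down()-style slow path can keep sem->sleepers consistent under that spinlock while sleeping on sem->wait (assumes <linux/sched.h> and an atomic_add_negative()-style primitive):

static void __down_sketch(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue(&sem->wait, &wait);

	spin_lock_irq(&semaphore_wake_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/* fold the other sleepers' decrements back into the count */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;		/* count stayed non-negative: it's ours */
		}
		sem->sleepers = 1;	/* only our own decrement stays pending */
		spin_unlock_irq(&semaphore_wake_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_wake_lock);
	}
	spin_unlock_irq(&semaphore_wake_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);		/* pass any surplus wakeup along */
}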
/*
* This is ugly, but we want the default case to fall through.
* "down_failed" is a special asm handler that calls the C
@@ -83,44 +107,79 @@ static inline int waking_non_zero(struct semaphore *sem)
 */
extern inline void down(struct semaphore * sem)
{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
__asm__ __volatile__(
"# atomic down operation\n\t"
- "movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%0)\n\t"
- "js " SYMBOL_NAME_STR(__down_failed)
- "\n1:"
+ "decl (%0)\n\t" /* --sem->count */
+ "js 2f\n"
+ "1:\n"
+ ".section .text.lock,\"ax\"\n"
+ "2:\tcall __down_failed\n\t"
+ "jmp 1b\n"
+ ".previous"
:/* no outputs */
:"c" (sem)
- :"ax","memory");
+ :"memory");
}
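The branch target placed in .section .text.lock keeps the uncontended path free of taken jumps. In plain C the fast path amounts to roughly the following sketch (illustrative only; assumes atomic_add_negative(), as available on i386):

static inline void down_in_c(struct semaphore *sem)
{
	/* atomically --sem->count and test whether it went negative */
	if (atomic_add_negative(-1, &sem->count))
		__down(sem);	/* contended: sleep until up() hands it over */
}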
-/*
- * This version waits in interruptible state so that the waiting
- * process can be killed. The down_failed_interruptible routine
- * returns negative for signalled and zero for semaphore acquired.
- */
extern inline int down_interruptible(struct semaphore * sem)
{
- int ret;
+ int result;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
__asm__ __volatile__(
"# atomic interruptible down operation\n\t"
- "movl $1f,%0\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%1)\n\t"
- "js " SYMBOL_NAME_STR(__down_failed_interruptible) "\n\t"
- "xorl %0,%0"
- "\n1:"
- :"=a" (ret)
+ "decl (%1)\n\t" /* --sem->count */
+ "js 2f\n\t"
+ "xorl %0,%0\n"
+ "1:\n"
+ ".section .text.lock,\"ax\"\n"
+ "2:\tcall __down_failed_interruptible\n\t"
+ "jmp 1b\n"
+ ".previous"
+ :"=a" (result)
:"c" (sem)
:"memory");
+ return result;
+}
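down_interruptible() returns zero once the semaphore is held and a negative errno (historically -EINTR) when a signal cut the sleep short, so callers must be prepared to back out without touching the protected state. A hypothetical caller (foo_sem is invented for this note):

static DECLARE_MUTEX(foo_sem);

static int foo_do_locked(void)
{
	if (down_interruptible(&foo_sem))
		return -EINTR;	/* signalled: semaphore was NOT acquired */
	/* ... critical section ... */
	up(&foo_sem);
	return 0;
}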
+
+extern inline int down_trylock(struct semaphore * sem)
+{
+ int result;
+
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
- return ret;
+ __asm__ __volatile__(
+ "# atomic interruptible down operation\n\t"
+#ifdef __SMP__
+ "lock ; "
+#endif
+ "decl (%1)\n\t" /* --sem->count */
+ "js 2f\n\t"
+ "xorl %0,%0\n"
+ "1:\n"
+ ".section .text.lock,\"ax\"\n"
+ "2:\tcall __down_failed_trylock\n\t"
+ "jmp 1b\n"
+ ".previous"
+ :"=a" (result)
+ :"c" (sem)
+ :"memory");
+ return result;
}
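down_trylock() never sleeps: it returns zero when the semaphore was acquired and non-zero when it was already held, with the slow path repairing the count on failure. Reusing the hypothetical foo_sem above:

static int foo_do_if_free(void)
{
	if (down_trylock(&foo_sem))
		return -EBUSY;	/* held elsewhere: caller must not block */
	/* ... short critical section ... */
	up(&foo_sem);
	return 0;
}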
/*
@@ -131,18 +190,24 @@ extern inline int down_interruptible(struct semaphore * sem)
 */
extern inline void up(struct semaphore * sem)
{
+#if WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
__asm__ __volatile__(
"# atomic up operation\n\t"
- "movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "incl 0(%0)\n\t"
- "jle " SYMBOL_NAME_STR(__up_wakeup)
- "\n1:"
+ "incl (%0)\n\t" /* ++sem->count */
+ "jle 2f\n"
+ "1:\n"
+ ".section .text.lock,\"ax\"\n"
+ "2:\tcall __up_wakeup\n\t"
+ "jmp 1b\n"
+ ".previous"
:/* no outputs */
:"c" (sem)
- :"ax", "memory");
+ :"memory");
}
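Because up() never sleeps and may be issued from a different context than the matching down(), a DECLARE_MUTEX_LOCKED() semaphore also serves as a simple completion handshake. A hedged sketch (names invented for this note):

static DECLARE_MUTEX_LOCKED(work_done);	/* starts locked: waiter blocks */

static void wait_for_work(void)
{
	down(&work_done);	/* sleeps until the worker calls up() */
}

static void finish_work(void)	/* e.g. called from another process */
{
	up(&work_done);		/* releases one waiter */
}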
#endif