Linux 2.1.116: include/asm-i386/semaphore.h (davej-history.git)
#ifndef _I386_SEMAPHORE_H
#define _I386_SEMAPHORE_H

#include <linux/linkage.h>

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                      the original code and to make semaphore waits
 *                      interruptible so that processes waiting on
 *                      semaphores can be killed.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/spinlock.h>
struct semaphore {
	atomic_t count;
	int waking;
	struct wait_queue * wait;
};

#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

extern void __down(struct semaphore * sem);
extern void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

#define sema_init(sem, val)	atomic_set(&((sem)->count), (val))
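
/*
 * A minimal usage sketch of the declarations above: how code of this
 * era would typically declare and initialize semaphores. The names
 * ex_mutex, ex_events, ex_slots and ex_init are hypothetical, and the
 * block is guarded by "#if 0" since this header only provides the
 * primitives.
 */
#if 0
static struct semaphore ex_mutex  = MUTEX;		/* free: count = 1 */
static struct semaphore ex_events = MUTEX_LOCKED;	/* taken: count = 0 */
static struct semaphore ex_slots;			/* counting semaphore */

static void ex_init(void)
{
	sema_init(&ex_slots, 4);	/* allow up to four holders at once */
}
#endif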
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * but on the x86 we need an external synchronizer.
 * Currently this is just the global interrupt lock,
 * bah. Go for a smaller spinlock some day.
 *
 * (On the other hand this shouldn't be in any critical
 * path, so..)
 */
static inline void wake_one_more(struct semaphore * sem)
{
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	sem->waking++;
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
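
/*
 * A simplified, hypothetical sketch of how the out-of-line sleeping
 * path (__down, not defined in this header) consumes the "waking"
 * credits handed out by wake_one_more(): a woken sleeper may only
 * proceed once it has atomically claimed one credit, otherwise it
 * sleeps again. The real __down also handles the interruptible case
 * and timeouts of this protocol more carefully. Assumes
 * <linux/sched.h> for current, schedule() and the wait-queue calls;
 * ex_down_slow_path is a made-up name.
 */
#if 0
static void ex_down_slow_path(struct semaphore *sem)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		current->state = TASK_UNINTERRUPTIBLE;
		if (waking_non_zero(sem))	/* claimed a wakeup credit */
			break;
		schedule();			/* __up_wakeup will wake us */
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}
#endif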
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
extern inline void down(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%0)\n\t"
		"js 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed\n"
		".previous"
		:/* no outputs */
		:"c" (sem)
		:"memory");
}
extern inline int down_interruptible(struct semaphore * sem)
{
	int result;

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%1)\n\t"
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed_interruptible\n"
		".previous"
		:"=a" (result)
		:"c" (sem)
		:"memory");
	return result;
}
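
/*
 * A sketch of the conventional calling pattern for
 * down_interruptible(): it returns 0 once the semaphore has been
 * acquired and non-zero if the sleep was broken by a signal, in
 * which case the caller backs out, typically with -EINTR or
 * -ERESTARTSYS. ex_do_op is a hypothetical caller; assumes
 * <linux/errno.h>.
 */
#if 0
static int ex_do_op(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -EINTR;		/* signalled while waiting */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif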
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
extern inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"incl 0(%0)\n\t"
		"jle 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __up_wakeup\n"
		".previous"
		:/* no outputs */
		:"c" (sem)
		:"memory");
}
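
/*
 * A sketch of the uncontended mutex pattern the comment above
 * describes: down() takes the count from 1 to 0 with no jump, and
 * up() takes it from 0 back to 1 with no jump; only a negative
 * count (waiters present) diverts into the .text.lock slow paths.
 * ex_lock, ex_shared_counter and ex_bump are hypothetical.
 */
#if 0
static struct semaphore ex_lock = MUTEX;
static int ex_shared_counter;

static void ex_bump(void)
{
	down(&ex_lock);		/* may sleep; never call from interrupt context */
	ex_shared_counter++;	/* shared state protected by ex_lock */
	up(&ex_lock);
}
#endif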
#endif