Import 2.1.36pre1
[davej-history.git] / include / asm-i386 / semaphore.h
blob4395dfce026163986adbc306e90aaca2bbbd5906
1 #ifndef _I386_SEMAPHORE_H
2 #define _I386_SEMAPHORE_H
4 #include <linux/linkage.h>
/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 * the original code and to make semaphore waits
 * interruptible so that processes waiting on
 * semaphores can be killed.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */
22 #include <asm/system.h>
23 #include <asm/atomic.h>
25 struct semaphore {
26 atomic_t count;
27 int waking;
28 struct wait_queue * wait;
31 #define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
32 #define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
34 asmlinkage void__down_failed(void/* special register calling convention */);
35 asmlinkage int__down_failed_interruptible(void/* params in registers */);
36 asmlinkage void__up_wakeup(void/* special register calling convention */);
38 externvoid__down(struct semaphore * sem);
39 externvoid__up(struct semaphore * sem);
41 #define sema_init(sem, val) atomic_set(&((sem)->count), (val))
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * but on the x86 we need an external synchronizer.
 * Currently this is just the global interrupt lock,
 * bah. Go for a smaller spinlock some day.
 *
 * (On the other hand this shouldn't be in any critical
 * path, so..)
 */
54 staticinlinevoidwake_one_more(struct semaphore * sem)
56 unsigned long flags;
58 save_flags(flags);
59 cli();
60 sem->waking++;
61 restore_flags(flags);
64 staticinlineintwaking_non_zero(struct semaphore *sem)
66 unsigned long flags;
67 int ret =0;
69 save_flags(flags);
70 cli();
71 if(sem->waking >0) {
72 sem->waking--;
73 ret =1;
75 restore_flags(flags);
76 return ret;
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
84 extern inlinevoiddown(struct semaphore * sem)
86 __asm__ __volatile__(
87 "# atomic down operation\n\t"
88 "movl $1f,%%eax\n\t"
89 #ifdef __SMP__
90 "lock ; "
91 #endif
92 "decl 0(%0)\n\t"
93 "js "SYMBOL_NAME_STR(__down_failed)
94 "\n1:"
95 :/* no outputs */
96 :"c"(sem)
97 :"ax","memory");
/*
 * This version waits in interruptible state so that the waiting
 * process can be killed. The down_failed_interruptible routine
 * returns negative for signalled and zero for semaphore acquired.
 */
105 extern inlineintdown_interruptible(struct semaphore * sem)
107 int ret;
109 __asm__ __volatile__(
110 "# atomic interruptible down operation\n\t"
111 "movl $1f,%0\n\t"
112 #ifdef __SMP__
113 "lock ; "
114 #endif
115 "decl 0(%1)\n\t"
116 "js "SYMBOL_NAME_STR(__down_failed_interruptible)"\n\t"
117 "xorl %0,%0"
118 "\n1:"
119 :"=a"(ret)
120 :"c"(sem)
121 :"memory");
123 return ret;
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
132 extern inlinevoidup(struct semaphore * sem)
134 __asm__ __volatile__(
135 "# atomic up operation\n\t"
136 "movl $1f,%%eax\n\t"
137 #ifdef __SMP__
138 "lock ; "
139 #endif
140 "incl 0(%0)\n\t"
141 "jle "SYMBOL_NAME_STR(__up_wakeup)
142 "\n1:"
143 :/* no outputs */
144 :"c"(sem)
145 :"ax","memory");
148 #endif
close