/* include/asm-i386/semaphore.h (davej-history.git, Linux 2.1.37) */

#ifndef _I386_SEMAPHORE_H
#define _I386_SEMAPHORE_H

#include <linux/linkage.h>

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                      the original code and to make semaphore waits
 *                      interruptible so that processes waiting on
 *                      semaphores can be killed.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */

#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
	atomic_t count;
	int waking;
	struct wait_queue * wait;
};

#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

extern void __down(struct semaphore * sem);
extern void __up(struct semaphore * sem);

#define sema_init(sem, val)	atomic_set(&((sem)->count), (val))
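
/*
 * Initialization sketch (illustrative only; "action_sem" and
 * "gate_sem" are made-up names, not part of this header). A
 * semaphore can be set up statically or at run time:
 *
 *	struct semaphore action_sem = MUTEX;		-- starts free
 *	struct semaphore gate_sem   = MUTEX_LOCKED;	-- starts held
 *
 *	sema_init(&action_sem, 1);			-- run-time (re)init
 *
 * Note that sema_init() only sets the count; the waking counter and
 * the wait queue are left alone, so it is meant for semaphores that
 * nobody is sleeping on yet.
 */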

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * but on the x86 we need an external synchronizer.
 * Currently this is just the global interrupt lock,
 * bah. Go for a smaller spinlock some day.
 *
 * (On the other hand this shouldn't be in any critical
 * path, so..)
 */
static inline void wake_one_more(struct semaphore * sem)
{
	unsigned long flags;

	save_flags(flags);
	cli();
	sem->waking++;
	restore_flags(flags);
}

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	save_flags(flags);
	cli();
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	restore_flags(flags);
	return ret;
}
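
/*
 * How the pair above gets used (a sketch only; the real sleeper
 * lives in the C code behind __down(), not in this header): __up()
 * bumps "waking" via wake_one_more() before waking the queue, and
 * each task sleeping in __down() loops until waking_non_zero() lets
 * it consume exactly one wakeup, roughly like:
 *
 *	struct wait_queue wait = { current, NULL };
 *
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		current->state = TASK_UNINTERRUPTIBLE;
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 *
 * Because "waking" is a counter rather than a flag, a wakeup that
 * arrives before the sleeper re-checks cannot be lost.
 */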

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
extern inline void do_down(struct semaphore * sem, void (*failed)(void))
{
	__asm__ __volatile__(
		"# atomic down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%0)\n\t"
		"js 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp %1\n"
		".previous"
		:/* no outputs */
		:"c" (sem), "m" (*(unsigned long *)failed)
		:"memory");
}

#define down(sem) do_down((sem),__down_failed)
#define down_interruptible(sem) do_down((sem),__down_failed_interruptible)
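
/*
 * A note on the "pushl $1b ; jmp" pair in do_down() above (and in
 * up() below): pushing the address of label 1 plants a fake return
 * address, so when the out-of-line stub in arch/i386/lib/semaphore.S
 * finishes and executes "ret", control lands right back on the
 * fall-through path. The uncontended case therefore pays for the
 * single locked decl/incl and nothing else.
 */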

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
extern inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"incl 0(%0)\n\t"
		"jle 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp %1\n"
		".previous"
		:/* no outputs */
		:"c" (sem), "m" (*(unsigned long *)__up_wakeup)
		:"memory");
}

#endif