/*
 * Alpha semaphore implementation.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999, 2000 Richard Henderson
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>

/*
 * This is basically the PPC semaphore scheme ported to use
 * the Alpha ll/sc sequences, so see the PPC code for
 * credits.
 */

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	long old_count, tmp = 0;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%2\n"
	"	cmovgt	%0,%0,%1\n"
	"	addl	%1,%3,%1\n"
	"	stl_c	%1,%2\n"
	"	beq	%1,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "Ir" (incr), "1" (tmp), "m" (sem->count));

	return old_count;
}
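
/*
 * Illustration only, not compiled: a plain C sketch of what the ll/sc
 * sequence above computes, ignoring atomicity.  The cmovgt instruction
 * is what implements the MAX(old_count, 0) step, and stl_c performs
 * the conditional store.  The function name is hypothetical.
 */
#if 0
static inline int __sem_update_count_sketch(struct semaphore *sem, int incr)
{
	int old_count = atomic_read(&sem->count);
	int tmp = (old_count > 0 ? old_count : 0) + incr;

	atomic_set(&sem->count, tmp);	/* stl_c in the real sequence */
	return old_count;
}
#endif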

/*
 * Perform the "down" function.  Return zero if the semaphore was
 * acquired; return a negative value if we were signalled out of
 * the wait.
 *
 * If called from down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */

void __sched
__down_failed(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	tsk->state = TASK_UNINTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down acquired(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif
}

int __sched
__down_failed_interruptible(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	long ret = 0;

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down failed(%p)\n",
	       tsk->comm, tsk->pid, sem);
#endif

	tsk->state = TASK_INTERRUPTIBLE;
	wmb();
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			ret = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down %s(%p)\n",
	       current->comm, current->pid,
	       (ret < 0 ? "interrupted" : "acquired"), sem);
#endif
	return ret;
}

void
__up_wakeup(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
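
/*
 * Illustration only, not compiled: a rough, hypothetical sketch of how
 * up()'s fast path relates to __up_wakeup() per the comment above --
 * the count is incremented first, and __up_wakeup() is entered only
 * when the result is still <= 0, i.e. when sleepers may need waking.
 * This is not the actual asm fast path; the name is hypothetical.
 */
#if 0
static inline void __up_sketch(struct semaphore *sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up_wakeup(sem);
}
#endif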

void __sched
down(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__down(sem);
}

int __sched
down_interruptible(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	return __down_interruptible(sem);
}
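
/*
 * Illustration only (hypothetical caller, not part of this file):
 * down_interruptible()'s negative return must be checked, typically
 * by bailing out so the pending signal can be handled.
 */
#if 0
static int example_wait_for_resource(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -EINTR;		/* signalled before acquiring */
	/* ... critical section ... */
	up(sem);
	return 0;
}
#endif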

int
down_trylock(struct semaphore *sem)
{
	int ret;

#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	ret = __down_trylock(sem);

#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): down_trylock %s from %p\n",
	       current->comm, current->pid,
	       ret ? "failed" : "acquired",
	       __builtin_return_address(0));
#endif

	return ret;
}
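
/*
 * Illustration only (hypothetical caller): down_trylock() returns
 * nonzero when the semaphore could not be taken without sleeping,
 * which makes it the only form usable where blocking is not allowed.
 */
#if 0
static int example_try_fast_path(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;		/* contended; caller must not sleep */
	/* ... short critical section ... */
	up(sem);
	return 0;
}
#endif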

void
up(struct semaphore *sem)
{
#ifdef WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
#ifdef CONFIG_DEBUG_SEMAPHORE
	printk("%s(%d): up(%p) <count=%d> from %p\n",
	       current->comm, current->pid, sem,
	       atomic_read(&sem->count), __builtin_return_address(0));
#endif
	__up(sem);
}
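
/*
 * Illustration only (hypothetical, not part of this file): the classic
 * pairing of down()/up() around a critical section, using a semaphore
 * initialised to 1 as a mutex.  The names are hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);	/* count starts at 1 */

static void example_critical_section(void)
{
	down(&example_sem);		/* may sleep, uninterruptibly */
	/* ... exclusive access ... */
	up(&example_sem);
}
#endif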