]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ |
2 | * | |
3 | * Written by David Howells (dhowells@redhat.com). | |
4 | * | |
99122a3f | 5 | * Derived from asm-x86/semaphore.h |
1da177e4 LT |
6 | * |
7 | * | |
8 | * The MSW of the count is the negated number of active writers and waiting | |
9 | * lockers, and the LSW is the total number of active locks | |
10 | * | |
11 | * The lock count is initialized to 0 (no active and no waiting lockers). | |
12 | * | |
13 | * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an | |
14 | * uncontended lock. This can be determined because XADD returns the old value. | |
15 | * Readers increment by 1 and see a positive value when uncontended, negative | |
16 | * if there are writers (and maybe) readers waiting (in which case it goes to | |
17 | * sleep). | |
18 | * | |
19 | * The value of WAITING_BIAS supports up to 32766 waiting processes. This can | |
20 | * be extended to 65534 by manually checking the whole MSW rather than relying | |
21 | * on the S flag. | |
22 | * | |
23 | * The value of ACTIVE_BIAS supports up to 65535 active processes. | |
24 | * | |
25 | * This should be totally fair - if anything is waiting, a process that wants a | |
26 | * lock will go to the back of the queue. When the currently active lock is | |
27 | * released, if there's a writer at the front of the queue, then that and only | |
6a6256f9 | 28 | * that will be woken up; if there's a bunch of consecutive readers at the |
1da177e4 LT |
29 | * front, then they'll all be woken up, but no other readers will be. |
30 | */ | |
31 | ||
1965aae3 PA |
32 | #ifndef _ASM_X86_RWSEM_H |
33 | #define _ASM_X86_RWSEM_H | |
1da177e4 LT |
34 | |
35 | #ifndef _LINUX_RWSEM_H | |
36 | #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" | |
37 | #endif | |
38 | ||
39 | #ifdef __KERNEL__ | |
1838ef1d | 40 | #include <asm/asm.h> |
1da177e4 | 41 | |
/*
 * The bias values and the counter type limits the number of
 * potential readers/writers to 32767 for 32 bits and 2147483647
 * for 64 bits.
 */

#ifdef CONFIG_X86_64
/* LSW of the count: active-locker field is the low 32 bits on 64-bit */
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
/* LSW of the count: active-locker field is the low 16 bits on 32-bit */
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

/* no active and no waiting lockers (see the header comment above) */
#define RWSEM_UNLOCKED_VALUE		0x00000000L
/* one active locker in the LSW */
#define RWSEM_ACTIVE_BIAS		0x00000001L
/* one unit of the negated MSW: marks a waiting/active writer presence */
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
/* a reader contributes one active unit */
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
/* a writer contributes one active unit plus the waiting bias
 * (0xffff0001 on 32-bit, per the header comment) */
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
6e5609a9 | 59 | |
1da177e4 LT |
60 | /* |
61 | * lock for reading | |
62 | */ | |
63 | static inline void __down_read(struct rw_semaphore *sem) | |
64 | { | |
6e5609a9 | 65 | asm volatile("# beginning down_read\n\t" |
1838ef1d | 66 | LOCK_PREFIX _ASM_INC "(%1)\n\t" |
b4bcb4c2 | 67 | /* adds 0x00000001 */ |
6e5609a9 JP |
68 | " jns 1f\n" |
69 | " call call_rwsem_down_read_failed\n" | |
70 | "1:\n\t" | |
71 | "# ending down_read\n\t" | |
72 | : "+m" (sem->count) | |
73 | : "a" (sem) | |
74 | : "memory", "cc"); | |
1da177e4 LT |
75 | } |
76 | ||
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * cmpxchg loop: snapshot the count, compute count + ACTIVE_READ_BIAS,
 * and only install the new value if it would be positive (i.e. no
 * writer active or waiting).  On cmpxchg failure (count changed under
 * us) retry; on a non-positive candidate value give up without
 * touching the count.
 */
static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
	long result, tmp;
	asm volatile("# beginning __down_read_trylock\n\t"
		     " mov %0,%1\n\t"	/* result = sem->count snapshot */
		     "1:\n\t"
		     " mov %1,%2\n\t"
		     " add %3,%2\n\t"	/* tmp = result + ACTIVE_READ_BIAS */
		     " jle 2f\n\t"	/* would not be positive: fail */
		     LOCK_PREFIX " cmpxchg %2,%0\n\t"	/* install tmp if count == result */
		     " jnz 1b\n\t"	/* raced: result holds fresh count, retry */
		     "2:\n\t"
		     "# ending __down_read_trylock\n\t"
		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
		     : "i" (RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
	/* success iff we exited via the taken-cmpxchg path (result >= 0) */
	return result >= 0;
}
98 | ||
/*
 * lock for writing
 *
 * Common body for the write-lock fast path: XADD the full write bias
 * (0xffff0001 on 32-bit) into the count and test the active-locker
 * field of the *old* value returned by XADD.  If it was zero the lock
 * was uncontended and we own it; otherwise call the named out-of-line
 * slow path.  Evaluates to the slow path's return value (in ax), which
 * the killable variant inspects with IS_ERR().
 */
#define ____down_write(sem, slow_path)			\
({							\
	long tmp;					\
	struct rw_semaphore* ret;			\
	asm volatile("# beginning down_write\n\t"	\
		     LOCK_PREFIX " xadd %1,(%3)\n\t"	\
		     /* adds 0xffff0001, returns the old value */ \
		     /* __ASM_SEL picks the 16-bit (%w1) active field on */ \
		     /* 32-bit, the 32-bit (%k1) field on 64-bit */ \
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
		     /* was the active mask 0 before? */\
		     " jz 1f\n"				\
		     " call " slow_path "\n"		\
		     "1:\n"				\
		     "# ending down_write"		\
		     : "+m" (sem->count), "=d" (tmp), "=a" (ret)	\
		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
		     : "memory", "cc");			\
	ret;						\
})
120 | ||
/* Take the write lock; call_rwsem_down_write_failed handles contention. */
static inline void __down_write(struct rw_semaphore *sem)
{
	____down_write(sem, "call_rwsem_down_write_failed");
}
125 | ||
126 | static inline int __down_write_killable(struct rw_semaphore *sem) | |
127 | { | |
128 | if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable"))) | |
129 | return -EINTR; | |
130 | ||
131 | return 0; | |
1da177e4 LT |
132 | } |
133 | ||
134 | /* | |
135 | * trylock for writing -- returns 1 if successful, 0 if contention | |
136 | */ | |
117780ee | 137 | static inline bool __down_write_trylock(struct rw_semaphore *sem) |
1da177e4 | 138 | { |
117780ee PA |
139 | bool result; |
140 | long tmp0, tmp1; | |
a31a369b ML |
141 | asm volatile("# beginning __down_write_trylock\n\t" |
142 | " mov %0,%1\n\t" | |
143 | "1:\n\t" | |
144 | " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" | |
145 | /* was the active mask 0 before? */ | |
146 | " jnz 2f\n\t" | |
147 | " mov %1,%2\n\t" | |
117780ee | 148 | " add %4,%2\n\t" |
a31a369b ML |
149 | LOCK_PREFIX " cmpxchg %2,%0\n\t" |
150 | " jnz 1b\n\t" | |
151 | "2:\n\t" | |
117780ee | 152 | " sete %3\n\t" |
a31a369b | 153 | "# ending __down_write_trylock\n\t" |
117780ee PA |
154 | : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1), |
155 | "=qm" (result) | |
a31a369b ML |
156 | : "er" (RWSEM_ACTIVE_WRITE_BIAS) |
157 | : "memory", "cc"); | |
158 | return result; | |
1da177e4 LT |
159 | } |
160 | ||
161 | /* | |
162 | * unlock after reading | |
163 | */ | |
164 | static inline void __up_read(struct rw_semaphore *sem) | |
165 | { | |
bde11efb | 166 | long tmp; |
6e5609a9 | 167 | asm volatile("# beginning __up_read\n\t" |
59c33fa7 | 168 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
6e5609a9 JP |
169 | /* subtracts 1, returns the old value */ |
170 | " jns 1f\n\t" | |
b4bcb4c2 | 171 | " call call_rwsem_wake\n" /* expects old value in %edx */ |
6e5609a9 JP |
172 | "1:\n" |
173 | "# ending __up_read\n" | |
174 | : "+m" (sem->count), "=d" (tmp) | |
b4bcb4c2 | 175 | : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS) |
6e5609a9 | 176 | : "memory", "cc"); |
1da177e4 LT |
177 | } |
178 | ||
179 | /* | |
180 | * unlock after writing | |
181 | */ | |
182 | static inline void __up_write(struct rw_semaphore *sem) | |
183 | { | |
bde11efb | 184 | long tmp; |
6e5609a9 | 185 | asm volatile("# beginning __up_write\n\t" |
59c33fa7 | 186 | LOCK_PREFIX " xadd %1,(%2)\n\t" |
a751bd85 ML |
187 | /* subtracts 0xffff0001, returns the old value */ |
188 | " jns 1f\n\t" | |
b4bcb4c2 | 189 | " call call_rwsem_wake\n" /* expects old value in %edx */ |
6e5609a9 JP |
190 | "1:\n\t" |
191 | "# ending __up_write\n" | |
59c33fa7 LT |
192 | : "+m" (sem->count), "=d" (tmp) |
193 | : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) | |
194 | : "memory", "cc"); | |
1da177e4 LT |
195 | } |
196 | ||
197 | /* | |
198 | * downgrade write lock to read lock | |
199 | */ | |
200 | static inline void __downgrade_write(struct rw_semaphore *sem) | |
201 | { | |
6e5609a9 | 202 | asm volatile("# beginning __downgrade_write\n\t" |
1838ef1d | 203 | LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" |
0d1622d7 AK |
204 | /* |
205 | * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) | |
206 | * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) | |
207 | */ | |
6e5609a9 JP |
208 | " jns 1f\n\t" |
209 | " call call_rwsem_downgrade_wake\n" | |
210 | "1:\n\t" | |
211 | "# ending __downgrade_write\n" | |
212 | : "+m" (sem->count) | |
0d1622d7 | 213 | : "a" (sem), "er" (-RWSEM_WAITING_BIAS) |
6e5609a9 | 214 | : "memory", "cc"); |
1da177e4 LT |
215 | } |
216 | ||
217 | /* | |
218 | * implement atomic add functionality | |
219 | */ | |
bde11efb | 220 | static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) |
1da177e4 | 221 | { |
1838ef1d | 222 | asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" |
6e5609a9 | 223 | : "+m" (sem->count) |
1838ef1d | 224 | : "er" (delta)); |
1da177e4 LT |
225 | } |
226 | ||
227 | /* | |
228 | * implement exchange and add functionality | |
229 | */ | |
bde11efb | 230 | static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) |
1da177e4 | 231 | { |
8b8bc2f7 | 232 | return delta + xadd(&sem->count, delta); |
1da177e4 LT |
233 | } |
234 | ||
235 | #endif /* __KERNEL__ */ | |
1965aae3 | 236 | #endif /* _ASM_X86_RWSEM_H */ |