]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/x86/include/asm/rwsem.h
License cleanup: add SPDX GPL-2.0 license identifier to files with no license
[mirror_ubuntu-bionic-kernel.git] / arch / x86 / include / asm / rwsem.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
3 *
4 * Written by David Howells (dhowells@redhat.com).
5 *
6 * Derived from asm-x86/semaphore.h
7 *
8 *
9 * The MSW of the count is the negated number of active writers and waiting
10 * lockers, and the LSW is the total number of active locks
11 *
12 * The lock count is initialized to 0 (no active and no waiting lockers).
13 *
14 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
15 * uncontended lock. This can be determined because XADD returns the old value.
16 * Readers increment by 1 and see a positive value when uncontended, negative
17 * if there are writers (and maybe) readers waiting (in which case it goes to
18 * sleep).
19 *
20 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
21 * be extended to 65534 by manually checking the whole MSW rather than relying
22 * on the S flag.
23 *
24 * The value of ACTIVE_BIAS supports up to 65535 active processes.
25 *
26 * This should be totally fair - if anything is waiting, a process that wants a
27 * lock will go to the back of the queue. When the currently active lock is
28 * released, if there's a writer at the front of the queue, then that and only
29 * that will be woken up; if there's a bunch of consecutive readers at the
30 * front, then they'll all be woken up, but no other readers will be.
31 */
32
33 #ifndef _ASM_X86_RWSEM_H
34 #define _ASM_X86_RWSEM_H
35
36 #ifndef _LINUX_RWSEM_H
37 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
38 #endif
39
40 #ifdef __KERNEL__
41 #include <asm/asm.h>
42
43 /*
44 * The bias values and the counter type limits the number of
45 * potential readers/writers to 32767 for 32 bits and 2147483647
46 * for 64 bits.
47 */
48
#ifdef CONFIG_X86_64
/* 64-bit: the active-lockers field is the low 32 bits of the count */
# define RWSEM_ACTIVE_MASK		0xffffffffL
#else
/* 32-bit: the active-lockers field is the low 16 bits of the count */
# define RWSEM_ACTIVE_MASK		0x0000ffffL
#endif

/* no active and no waiting lockers */
#define RWSEM_UNLOCKED_VALUE		0x00000000L
/* one active locker (reader or writer) */
#define RWSEM_ACTIVE_BIAS		0x00000001L
/*
 * Subtracts one from the MSW without touching the active field
 * (0xffff0000 / 0xffffffff00000000), driving the count negative while
 * anything is queued.
 */
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
/* a reader just bumps the active count */
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
/* a writer adds one active count plus the waiting bias in a single xadd */
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
60
/*
 * lock for reading
 *
 * Fast path: atomically increment the whole count by one (the
 * ACTIVE_READ_BIAS).  If the new value is non-negative (sign flag
 * clear) no writer is active or waiting and the read lock is taken;
 * otherwise fall into the out-of-line slow path, which is handed the
 * semaphore in %eax/%rax via the "a" input constraint.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	asm volatile("# beginning down_read\n\t"
		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
		     /* adds 0x00000001 */
		     " jns 1f\n"
		     " call call_rwsem_down_read_failed\n"
		     "1:\n\t"
		     "# ending down_read\n\t"
		     : "+m" (sem->count)
		     : "a" (sem)
		     : "memory", "cc");
}
77
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 *
 * Optimistic cmpxchg loop: snapshot the count into "result" (%eax),
 * and as long as adding READ_BIAS would leave it positive (no writer
 * active or waiting) try to install the incremented value.  On a
 * cmpxchg miss the current count is reloaded into %eax and we retry;
 * on the bail-out path "result" holds a value whose increment would be
 * <= 0, i.e. negative.  Either way, success iff the pre-increment
 * count was >= 0 — no sleeping, no slow path.
 */
static inline bool __down_read_trylock(struct rw_semaphore *sem)
{
	long result, tmp;
	asm volatile("# beginning __down_read_trylock\n\t"
		     " mov %0,%1\n\t"
		     "1:\n\t"
		     " mov %1,%2\n\t"
		     /* tmp = snapshot + READ_BIAS */
		     " add %3,%2\n\t"
		     /* writer active or waiting -> give up */
		     " jle 2f\n\t"
		     LOCK_PREFIX " cmpxchg %2,%0\n\t"
		     /* raced: %eax now holds the fresh count, retry */
		     " jnz 1b\n\t"
		     "2:\n\t"
		     "# ending __down_read_trylock\n\t"
		     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
		     : "i" (RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
	return result >= 0;
}
99
/*
 * lock for writing
 *
 * Common fast path shared by __down_write() and __down_write_killable().
 * xadd's ACTIVE_WRITE_BIAS into the count and tests the *active* part
 * of the old value (low word on 32-bit, low dword on 64-bit, selected
 * by __ASM_SEL): if it was zero nobody held the lock and the write
 * lock is ours; otherwise call the given out-of-line slow path.
 * Whatever the slow path leaves in %eax/%rax lands in "ret" via the
 * "=a" output — the killable variant inspects it with IS_ERR(); on the
 * fast path ret is simply the semaphore pointer passed in via "a" (sem).
 * ASM_CALL_CONSTRAINT keeps the "call" ordered against stack setup.
 */
#define ____down_write(sem, slow_path)			\
({							\
	long tmp;					\
	struct rw_semaphore* ret;			\
							\
	asm volatile("# beginning down_write\n\t"	\
		     LOCK_PREFIX " xadd %1,(%4)\n\t"	\
		     /* adds 0xffff0001, returns the old value */ \
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
		     /* was the active mask 0 before? */\
		     " jz 1f\n"				\
		     " call " slow_path "\n"		\
		     "1:\n"				\
		     "# ending down_write"		\
		     : "+m" (sem->count), "=d" (tmp),	\
		       "=a" (ret), ASM_CALL_CONSTRAINT	\
		     : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
		     : "memory", "cc");			\
	ret;						\
})
123
/*
 * lock for writing (uninterruptible): slow path return value is ignored.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	____down_write(sem, "call_rwsem_down_write_failed");
}
128
/*
 * lock for writing, allowing the sleep in the slow path to be aborted
 * by a fatal signal.  Returns 0 on success, -EINTR when the killable
 * slow path hands back an error pointer instead of the semaphore.
 */
static inline int __down_write_killable(struct rw_semaphore *sem)
{
	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
		return -EINTR;

	return 0;
}
136
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 *
 * Loop: if the active part of the snapshot is non-zero the lock is
 * held and we bail with ZF clear; otherwise try to cmpxchg the full
 * WRITE_BIAS-ed value in.  A cmpxchg miss reloads %eax with the fresh
 * count and retries.  The final ZF — set by a successful cmpxchg,
 * clear on the bail-out "test" — becomes the boolean result directly
 * via CC_SET/CC_OUT, which is also why "cc" must NOT appear in the
 * clobber list here (GCC forbids clobbering flags that are an output).
 */
static inline bool __down_write_trylock(struct rw_semaphore *sem)
{
	bool result;
	long tmp0, tmp1;
	asm volatile("# beginning __down_write_trylock\n\t"
		     " mov %0,%1\n\t"
		     "1:\n\t"
		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
		     /* was the active mask 0 before? */
		     " jnz 2f\n\t"
		     " mov %1,%2\n\t"
		     /* tmp1 = snapshot + WRITE_BIAS */
		     " add %4,%2\n\t"
		     LOCK_PREFIX " cmpxchg %2,%0\n\t"
		     /* raced -> retest the fresh count */
		     " jnz 1b\n\t"
		     "2:\n\t"
		     CC_SET(e)
		     "# ending __down_write_trylock\n\t"
		     : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
		       CC_OUT(e) (result)
		     : "er" (RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory");
	return result;
}
163
/*
 * unlock after reading
 *
 * xadd's -1 into the count; the pre-decrement value is left in tmp,
 * pinned to %edx/%rdx by the "=d"/"1" constraint tie.  If the new
 * count is negative there are queued lockers that may need waking, so
 * call the out-of-line wakeup helper.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_read\n\t"
		     LOCK_PREFIX " xadd %1,(%2)\n\t"
		     /* subtracts 1, returns the old value */
		     " jns 1f\n\t"
		     " call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n"
		     "# ending __up_read\n"
		     : "+m" (sem->count), "=d" (tmp)
		     : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
		     : "memory", "cc");
}
181
/*
 * unlock after writing
 *
 * Same shape as __up_read(), but removes the full ACTIVE_WRITE_BIAS
 * the writer added in one xadd.  The pre-subtract value stays in
 * %edx/%rdx for the wakeup helper; if the new count is negative,
 * waiters are queued and call_rwsem_wake sorts out who to run.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long tmp;
	asm volatile("# beginning __up_write\n\t"
		     LOCK_PREFIX " xadd %1,(%2)\n\t"
		     /* subtracts 0xffff0001, returns the old value */
		     " jns 1f\n\t"
		     " call call_rwsem_wake\n" /* expects old value in %edx */
		     "1:\n\t"
		     "# ending __up_write\n"
		     : "+m" (sem->count), "=d" (tmp)
		     : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
		     : "memory", "cc");
}
199
/*
 * downgrade write lock to read lock
 *
 * Adding -WAITING_BIAS turns the writer's ACTIVE_WRITE_BIAS into a
 * plain ACTIVE_READ_BIAS: the MSW gains one while the active field
 * keeps its single count.  If the count is still negative afterwards,
 * other tasks are queued — call the out-of-line helper to wake them
 * (per the queueing policy described in the file header).
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	asm volatile("# beginning __downgrade_write\n\t"
		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
		     /*
		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
		      */
		     " jns 1f\n\t"
		     " call call_rwsem_downgrade_wake\n"
		     "1:\n\t"
		     "# ending __downgrade_write\n"
		     : "+m" (sem->count)
		     : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
		     : "memory", "cc");
}
219
220 #endif /* __KERNEL__ */
221 #endif /* _ASM_X86_RWSEM_H */