#ifndef _X86_SPINLOCK_H_
#define _X86_SPINLOCK_H_

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#define CLI_STI_CLOBBERS
#define CLI_STI_INPUT_ARGS
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_X86_32
typedef char _slock_t;
# define LOCK_INS_DEC "decb"
# define LOCK_INS_XCH "xchgb"
# define LOCK_INS_MOV "movb"
# define LOCK_INS_CMP "cmpb"
# define LOCK_PTR_REG "a"
#else
typedef int _slock_t;
# define LOCK_INS_DEC "decl"
# define LOCK_INS_XCH "xchgl"
# define LOCK_INS_MOV "movl"
# define LOCK_INS_CMP "cmpl"
# define LOCK_PTR_REG "D"
#endif

#if (NR_CPUS > 256)
#error spinlock supports a maximum of 256 CPUs
#endif

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = *(volatile signed int *)(&(lock)->slock);

	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}
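
/*
 * Worked example of the ticket encoding (editor's illustration, not in
 * the original header): slock == 0x0201 means head == 0x01 and
 * tail == 0x02, so ticket 1 owns the lock and ticket 2 is the next to
 * be handed out: locked (head != tail) but uncontended
 * (tail - head == 1). With slock == 0x0401, two more CPUs are queued
 * behind the owner (tail - head == 3 > 1), so
 * __raw_spin_is_contended() returns true.
 */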

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	/*
	 * Ticket locks are conceptually two bytes, one indicating the current
	 * head of the queue, and the other indicating the current tail. The
	 * lock is acquired by atomically noting the tail and incrementing it
	 * by one (thus adding ourselves to the queue and noting our position),
	 * then waiting until the head becomes equal to the initial value of
	 * the tail.
	 *
	 * This uses a 16-bit xadd to increment the tail and also load the
	 * position of the head, which takes care of memory ordering issues
	 * and should be optimal for the uncontended case. Note the tail must
	 * be in the high byte, otherwise the 16-bit wide increment of the low
	 * byte would carry up and contaminate the high byte.
	 */

	__asm__ __volatile__ (
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"
		"je 2f\n\t"
		"rep ; nop\n\t"
		"movb %1, %b0\n\t"
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		:"+Q" (inc), "+m" (lock->slock)
		:
		:"memory", "cc");
}
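
/*
 * Illustrative C equivalent of the acquire path above (an editor's
 * sketch, not part of the original header). xadd16() is a hypothetical
 * atomic 16-bit fetch-and-add standing in for "lock; xaddw", and the
 * byte load relies on x86 being little-endian (the head is the low,
 * lowest-addressed byte):
 *
 *	unsigned short old = xadd16(&lock->slock, 0x0100);
 *	unsigned char my_ticket = old >> 8;	// the tail byte we reserved
 *	while (*(volatile unsigned char *)&lock->slock != my_ticket)
 *		cpu_relax();			// wait for head == our ticket
 */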

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	short new;

	asm volatile(
		"movw %2,%w0\n\t"
		"cmpb %h0,%b0\n\t"
		"jne 1f\n\t"
		"movw %w0,%w1\n\t"
		"incb %h1\n\t"
		"lock ; cmpxchgw %w1,%2\n\t"
		"1:"
		"sete %b1\n\t"
		"movzbl %b1,%0\n\t"
		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
		:
		: "memory", "cc");

	return tmp;
}
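
/*
 * Illustrative C equivalent of the trylock above (editor's sketch, not
 * the original code): take the lock only when head == tail, with a
 * single atomic compare-and-swap. cmpxchg16() is hypothetical, standing
 * in for "lock; cmpxchgw":
 *
 *	unsigned short old = *(volatile unsigned short *)&lock->slock;
 *	if ((old >> 8) != (old & 0xff))
 *		return 0;			// held or queued: give up
 *	unsigned short new = old + 0x0100;	// bump the tail byte
 *	return cmpxchg16(&lock->slock, old, new) == old;
 */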

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
		UNLOCK_LOCK_PREFIX "incb %0"
		:"+m" (lock->slock)
		:
		:"memory", "cc");
}
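
/*
 * Editor's note, not in the original header: unlock bumps only the head
 * byte, handing the lock to the next ticket in line. E.g. with
 * slock == 0x0301 (head 1, tail 3), "incb" yields 0x0302 and the CPU
 * spinning with ticket 2 acquires the lock on its next reload.
 */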

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */

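/*
 * Sketch of the counter states (editor's illustration; the concrete
 * values assume RW_LOCK_BIAS == 0x01000000 from asm/rwlock.h):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - N	held by N readers
 *	lock == 0			held by one writer
 *	lock <  0			a writer has subtracted the bias
 *					while readers still hold the lock
 */
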
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}
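
/*
 * Illustrative C shape of the two lock routines above (editor's sketch,
 * not the original code); the inline asm is the fast path and only the
 * failure case calls out of line:
 *
 *	// read: negative result means a writer holds (or claimed) the lock
 *	if (atomic_sub_return(1, (atomic_t *)rw) < 0)
 *		__read_lock_failed(rw);		// back off, spin, retry
 *
 *	// write: nonzero result means readers or another writer hold it
 *	if (atomic_sub_return(RW_LOCK_BIAS, (atomic_t *)rw) != 0)
 *		__write_lock_failed(rw);	// back off, spin, retry
 */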

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif