/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/processor.h> /* for cpu_relax */
f5e706ad | 13 | |
0199c4e6 | 14 | #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) |
f5e706ad | 15 | |
0199c4e6 TG |
16 | #define arch_spin_unlock_wait(lock) \ |
17 | do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) | |
f5e706ad | 18 | |
0199c4e6 | 19 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
f5e706ad SR |
20 | { |
21 | __asm__ __volatile__( | |
22 | "\n1:\n\t" | |
23 | "ldstub [%0], %%g2\n\t" | |
24 | "orcc %%g2, 0x0, %%g0\n\t" | |
25 | "bne,a 2f\n\t" | |
26 | " ldub [%0], %%g2\n\t" | |
27 | ".subsection 2\n" | |
28 | "2:\n\t" | |
29 | "orcc %%g2, 0x0, %%g0\n\t" | |
30 | "bne,a 2b\n\t" | |
31 | " ldub [%0], %%g2\n\t" | |
32 | "b,a 1b\n\t" | |
33 | ".previous\n" | |
34 | : /* no outputs */ | |
35 | : "r" (lock) | |
36 | : "g2", "memory", "cc"); | |
37 | } | |
38 | ||
0199c4e6 | 39 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
f5e706ad SR |
40 | { |
41 | unsigned int result; | |
42 | __asm__ __volatile__("ldstub [%1], %0" | |
43 | : "=r" (result) | |
44 | : "r" (lock) | |
45 | : "memory"); | |
46 | return (result == 0); | |
47 | } | |
48 | ||
0199c4e6 | 49 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
f5e706ad SR |
50 | { |
51 | __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); | |
52 | } | |
53 | ||
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8  7    0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
e5931943 | 80 | static inline void __arch_read_lock(arch_rwlock_t *rw) |
f5e706ad | 81 | { |
fb3a6bbc | 82 | register arch_rwlock_t *lp asm("g1"); |
f5e706ad SR |
83 | lp = rw; |
84 | __asm__ __volatile__( | |
85 | "mov %%o7, %%g4\n\t" | |
86 | "call ___rw_read_enter\n\t" | |
87 | " ldstub [%%g1 + 3], %%g2\n" | |
88 | : /* no outputs */ | |
89 | : "r" (lp) | |
90 | : "g2", "g4", "memory", "cc"); | |
91 | } | |
92 | ||
e5931943 | 93 | #define arch_read_lock(lock) \ |
f5e706ad SR |
94 | do { unsigned long flags; \ |
95 | local_irq_save(flags); \ | |
e5931943 | 96 | __arch_read_lock(lock); \ |
f5e706ad SR |
97 | local_irq_restore(flags); \ |
98 | } while(0) | |
99 | ||
e5931943 | 100 | static inline void __arch_read_unlock(arch_rwlock_t *rw) |
f5e706ad | 101 | { |
fb3a6bbc | 102 | register arch_rwlock_t *lp asm("g1"); |
f5e706ad SR |
103 | lp = rw; |
104 | __asm__ __volatile__( | |
105 | "mov %%o7, %%g4\n\t" | |
106 | "call ___rw_read_exit\n\t" | |
107 | " ldstub [%%g1 + 3], %%g2\n" | |
108 | : /* no outputs */ | |
109 | : "r" (lp) | |
110 | : "g2", "g4", "memory", "cc"); | |
111 | } | |
112 | ||
e5931943 | 113 | #define arch_read_unlock(lock) \ |
f5e706ad SR |
114 | do { unsigned long flags; \ |
115 | local_irq_save(flags); \ | |
e5931943 | 116 | __arch_read_unlock(lock); \ |
f5e706ad SR |
117 | local_irq_restore(flags); \ |
118 | } while(0) | |
119 | ||
e5931943 | 120 | static inline void arch_write_lock(arch_rwlock_t *rw) |
f5e706ad | 121 | { |
fb3a6bbc | 122 | register arch_rwlock_t *lp asm("g1"); |
f5e706ad SR |
123 | lp = rw; |
124 | __asm__ __volatile__( | |
125 | "mov %%o7, %%g4\n\t" | |
126 | "call ___rw_write_enter\n\t" | |
127 | " ldstub [%%g1 + 3], %%g2\n" | |
128 | : /* no outputs */ | |
129 | : "r" (lp) | |
130 | : "g2", "g4", "memory", "cc"); | |
131 | *(volatile __u32 *)&lp->lock = ~0U; | |
132 | } | |
133 | ||
3f6aa0b1 MP |
134 | static void inline arch_write_unlock(arch_rwlock_t *lock) |
135 | { | |
136 | __asm__ __volatile__( | |
137 | " st %%g0, [%0]" | |
138 | : /* no outputs */ | |
139 | : "r" (lock) | |
140 | : "memory"); | |
141 | } | |
142 | ||
e5931943 | 143 | static inline int arch_write_trylock(arch_rwlock_t *rw) |
f5e706ad SR |
144 | { |
145 | unsigned int val; | |
146 | ||
147 | __asm__ __volatile__("ldstub [%1 + 3], %0" | |
148 | : "=r" (val) | |
149 | : "r" (&rw->lock) | |
150 | : "memory"); | |
151 | ||
152 | if (val == 0) { | |
153 | val = rw->lock & ~0xff; | |
154 | if (val) | |
155 | ((volatile u8*)&rw->lock)[3] = 0; | |
156 | else | |
157 | *(volatile u32*)&rw->lock = ~0U; | |
158 | } | |
159 | ||
160 | return (val == 0); | |
161 | } | |
162 | ||
e5931943 | 163 | static inline int __arch_read_trylock(arch_rwlock_t *rw) |
f5e706ad | 164 | { |
fb3a6bbc | 165 | register arch_rwlock_t *lp asm("g1"); |
f5e706ad SR |
166 | register int res asm("o0"); |
167 | lp = rw; | |
168 | __asm__ __volatile__( | |
169 | "mov %%o7, %%g4\n\t" | |
170 | "call ___rw_read_try\n\t" | |
171 | " ldstub [%%g1 + 3], %%g2\n" | |
172 | : "=r" (res) | |
173 | : "r" (lp) | |
174 | : "g2", "g4", "memory", "cc"); | |
175 | return res; | |
176 | } | |
177 | ||
e5931943 | 178 | #define arch_read_trylock(lock) \ |
f5e706ad SR |
179 | ({ unsigned long flags; \ |
180 | int res; \ | |
181 | local_irq_save(flags); \ | |
e5931943 | 182 | res = __arch_read_trylock(lock); \ |
f5e706ad SR |
183 | local_irq_restore(flags); \ |
184 | res; \ | |
185 | }) | |
186 | ||
0199c4e6 | 187 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) |
e5931943 TG |
188 | #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) |
189 | #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) | |
f5e706ad | 190 | |
0199c4e6 TG |
191 | #define arch_spin_relax(lock) cpu_relax() |
192 | #define arch_read_relax(lock) cpu_relax() | |
193 | #define arch_write_relax(lock) cpu_relax() | |
f5e706ad | 194 | |
e5931943 TG |
195 | #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) |
196 | #define arch_write_can_lock(rw) (!(rw)->lock) | |
#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */