/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>
#define arch_spin_is_locked(x)	((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
	do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"	/* atomically swap tmp <-> lock->slock */
	"	breq  %0, %2, 1b	\n"	/* retry while the old value was LOCKED */
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");
	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}
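/*
 * A sketch of what the one-way ACQUIRE barrier above rules out, with a
 * hypothetical caller ("mylock", "shared" and "unrelated" are illustrative
 * names, not from this file):
 *
 *	unrelated = 1;			// MAY sink into the critical
 *					// section ("leak-in" allowed)
 *	arch_spin_lock(&mylock);
 *	shared++;			// may NOT be hoisted above the
 *					// lock ("bleed-up" forbidden)
 *	arch_spin_unlock(&mylock);
 */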
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"	/* single swap, no retry loop */
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"	/* swap UNLOCKED into lock->slock */
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 *
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */
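/*
 * Sketch of the @counter protocol implied by the routines below (the
 * concrete value of __ARCH_RW_LOCK_UNLOCKED__ comes from spinlock_types.h):
 *
 *	counter == __ARCH_RW_LOCK_UNLOCKED__	lock is free
 *	0 < counter < __ARCH_RW_LOCK_UNLOCKED__	reader(s) hold it; each
 *						reader decrements on lock
 *						and increments on unlock
 *	counter == 0				writer holds it exclusively
 */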
/* Would read_trylock() succeed? */
#define arch_read_can_lock(x)	((x)->counter > 0)

/* Would write_trylock() succeed? */
#define arch_write_can_lock(x)	((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * zero means writer holds the lock exclusively, deny Reader.
	 * Otherwise grant lock to first/subseq reader
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}
/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;

	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny writer. Otherwise if unlocked grant to writer.
	 * Hence the claim that Linux rwlocks are unfair to writers
	 * (they can be starved for an indefinite time by readers).
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));

	return ret;
}
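/*
 * The starvation claim above, made concrete: arch_write_lock() below simply
 * spins on arch_write_trylock(), which succeeds only once @counter has
 * drained back to __ARCH_RW_LOCK_UNLOCKED__. With overlapping readers, e.g.
 *
 *	CPU0: arch_read_lock(&rw);	// counter drops below UNLOCKED
 *	CPU1: arch_write_lock(&rw);	// trylock fails, spins
 *	CPU2: arch_read_lock(&rw);	// granted; counter never drains
 *
 * the writer makes no progress for as long as new readers keep arriving.
 */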
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
}
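/*
 * Hypothetical usage, roughly as the generic rwlock layer would drive these
 * primitives ("stats_lock" and "stats" are illustrative names only):
 *
 *	arch_read_lock(&stats_lock);	// any number of concurrent readers
 *	x = stats.packets;
 *	arch_read_unlock(&stats_lock);
 *
 *	arch_write_lock(&stats_lock);	// excludes readers and other writers
 *	stats.packets++;
 *	arch_write_unlock(&stats_lock);
 */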
#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
#endif /* __ASM_SPINLOCK_H */