/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

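/*
 * PA-RISC's only atomic read-modify-write primitive is LDCW (load and
 * clear word): it returns the old value of a word and atomically zeroes
 * it.  The lock word is therefore used in the inverted sense: 1 means
 * unlocked, 0 means locked.  LDCW also requires a 16-byte-aligned
 * operand, which is what __ldcw_align() extracts from the padded
 * arch_spinlock_t.
 */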
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)

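/*
 * Acquire the lock with LDCW.  While the lock is contended we spin with
 * plain loads in the inner loop, so the lock cacheline is not bounced
 * between CPUs by repeated atomic operations, and retry LDCW only once
 * the word reads as free.  If the caller's saved PSW had the
 * interrupt-enable bit set (PSW_SM_I), interrupts are re-enabled while
 * we wait so that spinning does not inflate interrupt latency.
 */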
static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                        unsigned long flags)
{
        volatile unsigned int *a;

        mb();
        a = __ldcw_align(x);
        while (__ldcw(a) == 0)          /* 0 => somebody else holds it */
                while (*a == 0)         /* spin with ordinary loads */
                        if (flags & PSW_SM_I) {
                                local_irq_enable();
                                cpu_relax();
                                local_irq_disable();
                        } else
                                cpu_relax();
        mb();
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        mb();
        a = __ldcw_align(x);
        *a = 1;         /* storing 1 marks the lock free again */
        mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int ret;

        mb();
        a = __ldcw_align(x);
        ret = __ldcw(a) != 0;
        mb();

        return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Linux rwlocks are unfair to writers; they can be starved for an
 * indefinite time by readers.  With care, they can also be taken in
 * interrupt context.
 *
 * In the PA-RISC implementation, we have a spinlock and a counter.
 * Readers use the lock to serialise their access to the counter (which
 * records how many readers currently hold the lock).
 * Writers hold the spinlock, preventing any readers or other writers from
 * grabbing the rwlock.
 */
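/*
 * Counter states:
 *   counter == 0   rwlock is free
 *   counter  > 0   held by that many readers
 *   counter == -1  held by a writer (who also keeps rw->lock held)
 */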

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter++;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);
        rw->counter--;
        arch_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to grab the same read lock */
static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                rw->counter++;
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);
                return 1;
        }

        local_irq_restore(flags);
        /* If write-locked, we fail to acquire the lock */
        if (rw->counter < 0)
                return 0;

        /* Wait until we have a realistic chance at the lock */
        while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
                cpu_relax();

        goto retry;
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long flags;
retry:
        local_irq_save(flags);
        arch_spin_lock_flags(&rw->lock, flags);

        if (rw->counter != 0) {
                /* Readers still hold the rwlock: back off and wait for
                 * them to drain before trying again */
                arch_spin_unlock(&rw->lock);
                local_irq_restore(flags);

                while (rw->counter != 0)
                        cpu_relax();

                goto retry;
        }

        rw->counter = -1; /* mark as write-locked */
        mb();
        local_irq_restore(flags);
        /* Note: rw->lock stays held until arch_write_unlock() */
}

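/*
 * The writer still owns rw->lock at this point, so resetting the
 * counter before dropping the spinlock means nobody can observe the
 * rwlock as free until both have happened: readers serialise on
 * rw->lock before touching the counter.
 */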
static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->counter = 0;
        arch_spin_unlock(&rw->lock);
}

/* Note that we have to ensure interrupts are disabled in case we're
 * interrupted by some other code that wants to read_trylock() this lock */
static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long flags;
        int result = 0;

        local_irq_save(flags);
        if (arch_spin_trylock(&rw->lock)) {
                if (rw->counter == 0) {
                        rw->counter = -1;
                        result = 1;
                } else {
                        /* Read-locked.  Oh well. */
                        arch_spin_unlock(&rw->lock);
                }
        }
        local_irq_restore(flags);

        return result;
}

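/*
 * The two queries below read rw->counter without taking rw->lock, so
 * they are only unsynchronised hints: the answer may already be stale
 * by the time the caller acts on it.
 */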
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
        return rw->counter >= 0;
}

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
        return !rw->counter;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* __ASM_SPINLOCK_H */