#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/intrinsics.h>
#include <asm/system.h>

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
#define spin_lock_init(x)	((x)->lock = 0)
#ifdef ASM_SUPPORTED
/*
 * Try to get the lock.  If we fail to get the lock, make a non-standard call to
 * ia64_spinlock_contention().  We do not use a normal call because that would force all
 * callers of spin_lock() to be non-leaf routines.  Instead, ia64_spinlock_contention() is
 * carefully coded to touch only those registers that spin_lock() marks "clobbered".
 */

#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory"

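/*
 * Note: the caller's interrupt state ("flags") is handed to the contention
 * path in r27.  This presumably lets the out-of-line spin loop re-enable
 * interrupts while it waits whenever the caller had them enabled; the
 * contention routines themselves are implemented out of line in the ia64
 * arch code.
 */
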
static inline void
_raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
	register volatile unsigned int *ptr asm ("r31") = &lock->lock;

#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "mov r27=%2\n\t"
		      "(p14) br.cond.spnt.many b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("{\n\t"
		      "  mov ar.ccv = r0\n\t"
		      "  mov r28 = ip\n\t"
		      "  mov r30 = 1;;\n\t"
		      "}\n\t"
		      "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov r27=%2\n\t"
		      "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#else
# ifdef CONFIG_ITANIUM
	/* don't use brl on Itanium... */
	/* mis-declare, so we get the entry-point, not its function descriptor: */
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t"
		      "movl r29 = ia64_spinlock_contention;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "mov b6 = r29;;\n\t"
		      "(p14) br.call.spnt.many b6 = b6"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# else
	asm volatile ("mov r30 = 1\n\t"
		      "mov r27=%2\n\t"
		      "mov ar.ccv = r0;;\n\t"
		      "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t"
		      "cmp4.ne p14, p0 = r30, r0\n\t"
		      "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;"
		      : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS);
# endif /* CONFIG_ITANIUM */
#endif
}
#define _raw_spin_lock(lock)	_raw_spin_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)
#define _raw_spin_lock(x)								\
do {											\
	__u32 *ia64_spinlock_ptr = (__u32 *) (x);					\
	__u64 ia64_spinlock_val;							\
	ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);			\
	if (unlikely(ia64_spinlock_val)) {						\
		do {									\
			while (*ia64_spinlock_ptr)					\
				ia64_barrier();						\
			ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
		} while (ia64_spinlock_val);						\
	}										\
} while (0)
#endif /* !ASM_SUPPORTED */
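
/*
 * The C fallback above is a test-and-test-and-set loop: after a failed
 * cmpxchg it spins on plain loads of the lock word until the lock looks
 * free, and only then retries the atomic cmpxchg, which keeps the cache
 * line in a shared state while waiting instead of bouncing it around.
 */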

#define spin_is_locked(x)	((x)->lock != 0)
#define _raw_spin_unlock(x)	do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define _raw_spin_trylock(x)	(cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
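
/*
 * Usage sketch (illustrative only; "my_lock" is a hypothetical lock):
 * the trylock primitive lets a caller avoid spinning entirely:
 *
 *	if (spin_trylock(&my_lock)) {
 *		... lock acquired without waiting ...
 *		spin_unlock(&my_lock);
 *	}
 */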

typedef struct {
	volatile unsigned int read_counter	: 31;
	volatile unsigned int write_lock	:  1;
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
#define write_can_lock(rw)	(*(volatile int *)(rw) == 0)
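
/*
 * Layout note: read_counter occupies bits 0-30 and write_lock bit 31, so
 * when the word is viewed as a signed int it is negative exactly while a
 * writer holds the lock.  read_can_lock()/write_can_lock() above and the
 * fetchadd-based read path below rely on this.
 */
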
#define _raw_read_lock(rw)								\
do {											\
	rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)
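
/*
 * The read path above optimistically increments read_counter with acquire
 * semantics; a negative result means a writer holds the lock, so the
 * increment is backed out with a matching fetchadd of -1 (release) and the
 * reader spins on plain loads until the word turns non-negative.
 */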

#define _raw_read_unlock(rw)					\
do {								\
	rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

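/*
 * Both _raw_write_lock() flavours below acquire the lock by using
 * cmpxchg4.acq to change the whole 32-bit word from 0 to 0x80000000,
 * i.e. with only the write_lock bit set.  "dep r29 = -1, r0, 31, 1" in
 * the asm versions and ia64_dep_mi(-1, 0, 31, 1) in the C versions both
 * construct that constant.
 */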
#ifdef ASM_SUPPORTED
#define _raw_write_lock(rw)							\
do {										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"1:\n"								\
		"ld4 r2 = [%0];;\n"						\
		"cmp4.eq p0,p7 = r0,r2\n"					\
		"(p7) br.cond.spnt.few 1b \n"					\
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"			\
		"cmp4.eq p0,p7 = r0, r2\n"					\
		"(p7) br.cond.spnt.few 1b;;\n"					\
		:: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory");		\
} while(0)

#define _raw_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

#else /* !ASM_SUPPORTED */

#define _raw_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define _raw_write_trylock(rw)							\
({										\
	__u64 ia64_val;								\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);				\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);		\
	(ia64_val == 0);							\
})

#endif /* !ASM_SUPPORTED */

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#define _raw_write_unlock(x)								\
({											\
	smp_mb__before_clear_bit();	/* need barrier before releasing lock... */	\
	clear_bit(31, (x));								\
})
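
/*
 * Write-unlock only needs to clear bit 31 (the write_lock bit); the
 * smp_mb__before_clear_bit() above orders all critical-section stores
 * before the lock is seen as released.
 */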

#endif /* _ASM_IA64_SPINLOCK_H */