#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero.  Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
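
/*
 * For reference, a rough sketch of the helpers this file relies on.
 * The real definitions live in <asm/system.h> and <asm/spinlock_types.h>,
 * so the details here are approximate:
 *
 *	typedef struct {
 *		volatile unsigned int lock[4];	(wide enough that one word
 *	} raw_spinlock_t;			 can be 16-byte aligned)
 *
 *	__ldcw(a):	 load-and-clear-word, a single "ldcw 0(%1),%0" insn
 *			 that atomically returns *a and stores 0 into it
 *	__ldcw_align(x): the address of x's lock word, rounded up to the
 *			 16-byte boundary that ldcw needs on older CPUs
 */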

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	return *a == 0;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
		do { cpu_relax(); } while (__raw_spin_is_locked(x))
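
/*
 * Acquire protocol: the outer ldcw loop both tests and takes the lock
 * (reading a 1 means it was free, and the word is now zeroed, i.e. ours);
 * the inner loop spins on a plain read so we don't hammer the cache line
 * with atomic operations while someone else holds the lock.
 */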

static inline void __raw_spin_lock(raw_spinlock_t *x)
{
	volatile unsigned int *a;

	mb();
	a = __ldcw_align(x);
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
	mb();
}

static inline void __raw_spin_unlock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	mb();
	a = __ldcw_align(x);
	*a = 1;
	mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	mb();
	a = __ldcw_align(x);
	ret = __ldcw(a) != 0;
	mb();

	return ret;
}
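
/*
 * None of these __raw_* primitives are called directly; the generic
 * spinlock code in <linux/spinlock.h> wraps them, roughly:
 *
 *	spin_lock(l):    preempt_disable(); __raw_spin_lock(&l->raw_lock);
 *	spin_trylock(l): preempt_disable();
 *			 if (!__raw_spin_trylock(&l->raw_lock))
 *				preempt_enable();
 */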

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
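
/*
 * rw->counter encodes the state: a positive value is the number of
 * readers holding the lock, 0 means unlocked, and -1 means a writer
 * holds it.  rw->lock only serializes updates to the counter.
 */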

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

/* read_lock and read_unlock are pretty straightforward.  Of course it
 * somehow sucks that we end up saving/restoring flags twice for
 * read_lock_irqsave and so on. */

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter++;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_spin_lock(&rw->lock);

	rw->counter--;

	__raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so, we release the lock and wait until
 * they're all gone before trying again.  We have to drop rw->lock while
 * waiting: readers need it to decrement the counter, so spinning with it
 * held would deadlock.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers, someone fucked up and we'd dead-lock
 * sooner or later anyway.   prumpf */

static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
{
retry:
	__raw_spin_lock(&rw->lock);

	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		while (rw->counter != 0)
			cpu_relax();

		goto retry;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
}

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
{
	rw->counter = 0;
	__raw_spin_unlock(&rw->lock);
}

/* Note that write_trylock only "tries" against the reader count; it can
 * still spin briefly on rw->lock itself while a reader updates the
 * counter. */
static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
{
	__raw_spin_lock(&rw->lock);
	if (rw->counter != 0) {
		/* this basically never happens */
		__raw_spin_unlock(&rw->lock);

		return 0;
	}

	/* got it.  now leave without unlocking */
	rw->counter = -1; /* remember we are locked */
	return 1;
}

static __inline__ int __raw_is_read_locked(raw_rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int __raw_is_write_locked(raw_rwlock_t *rw)
{
	return rw->counter < 0;
}

#endif /* __ASM_SPINLOCK_H */