/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2009 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <sys/types.h>
#include <linux/rwsem.h>

typedef enum {
        RW_DRIVER  = 2,
        RW_DEFAULT = 4
} krw_type_t;

typedef enum {
        RW_NONE   = 0,
        RW_WRITER = 1,
        RW_READER = 2
} krw_t;

typedef struct {
        struct rw_semaphore rw_rwlock;
        kthread_t *rw_owner;
} krwlock_t;

/*
 * For the generic implementation of rw-semaphores the following is true.
 * If your semaphore implementation internally represents the semaphore
 * state differently, then special case handling will be required so that
 * RW_COUNT() provides these semantics:
 * - if activity/count is 0 then there are no active readers or writers
 * - if activity/count is positive then that is the number of active readers
 * - if activity/count is -1 then there is one active writer
 * (An illustrative sketch of these semantics follows the definitions below.)
 */
#define SEM(rwp)                        ((struct rw_semaphore *)(rwp))

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
# define RW_COUNT(rwp)                  (SEM(rwp)->activity)
# define rw_exit_locked(rwp)            __up_read_locked(rwp)
# define rw_tryenter_locked(rwp)        __down_write_trylock_locked(rwp)
extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);
#else
/*
 * 2.6.x - 2.6.27 use guard macro _I386_RWSEM_H
 * 2.6.28 - 2.6.32+ use guard macro _ASM_X86_RWSEM_H
 */
# if defined(_I386_RWSEM_H) || defined(_ASM_X86_RWSEM_H)
#  define RW_COUNT(rwp)                 ((SEM(rwp)->count < 0) ? (-1) : \
                                        (SEM(rwp)->count & RWSEM_ACTIVE_MASK))
# else
#  define RW_COUNT(rwp)                 (SEM(rwp)->count & RWSEM_ACTIVE_MASK)
# endif
# define rw_exit_locked(rwp)            up_read(rwp)
# define rw_tryenter_locked(rwp)        down_write_trylock(rwp)
#endif
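
/*
 * Illustrative helper, added for exposition; it is not part of the
 * original SPL API and the name spl_rw_state_name() is hypothetical.
 * It decodes the lock state implied by the RW_COUNT() semantics above,
 * combined with the rw_owner field, which is non-NULL only while a
 * writer holds the lock.  Sampling the count without the rwsem's
 * wait_lock is inherently racy, so this is suitable for debugging only.
 */
static inline const char *
spl_rw_state_name(krwlock_t *rwp)
{
        if (RW_COUNT(rwp) == 0)
                return "unlocked";      /* no active readers or writers */
        else if (RW_COUNT(rwp) > 0)
                return "read-held";     /* RW_COUNT() active readers */
        else
                return "write-held";    /* count == -1, rw_owner is the writer */
}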

static inline kthread_t *
spl_rw_get_owner(krwlock_t *rwp)
{
        return rwp->rw_owner;
}

static inline void
spl_rw_set_owner(krwlock_t *rwp)
{
        unsigned long flags;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rwp->rw_owner = current;
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline void
spl_rw_clear_owner(krwlock_t *rwp)
{
        unsigned long flags;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rwp->rw_owner = NULL;
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline kthread_t *
rw_owner(krwlock_t *rwp)
{
        unsigned long flags;
        kthread_t *owner;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        owner = spl_rw_get_owner(rwp);
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return owner;
}

static inline int
RW_READ_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = ((RW_COUNT(rwp) > 0) && (spl_rw_get_owner(rwp) == NULL));
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}

static inline int
RW_WRITE_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = ((RW_COUNT(rwp) < 0) && (spl_rw_get_owner(rwp) == current));
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}

static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = (RW_COUNT(rwp) != 0);
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}
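
/*
 * Usage note (added commentary): RW_WRITE_HELD() is true only when the
 * calling thread is the writer (rw_owner == current), whereas
 * RW_READ_HELD() only reports that some thread holds the lock as a
 * reader, not necessarily the caller.  A hypothetical caller might
 * therefore assert:
 *
 *   ASSERT(RW_WRITE_HELD(&my_lock));    we must be the writer here
 *   ASSERT(RW_LOCK_HELD(&my_lock));     someone must hold the lock
 */
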
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native Linux semaphore functions (down/up)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.  An illustrative usage
 * example follows these definitions.
 */
#define rw_init(rwp, name, type, arg)                                   \
({                                                                      \
        static struct lock_class_key __key;                            \
                                                                        \
        __init_rwsem(SEM(rwp), #rwp, &__key);                           \
        spl_rw_clear_owner(rwp);                                        \
})

#define rw_destroy(rwp)                                                 \
({                                                                      \
        VERIFY(!RW_LOCK_HELD(rwp));                                     \
})

#define rw_tryenter(rwp, rw)                                            \
({                                                                      \
        int _rc_ = 0;                                                   \
                                                                        \
        switch (rw) {                                                   \
        case RW_READER:                                                 \
                _rc_ = down_read_trylock(SEM(rwp));                     \
                break;                                                  \
        case RW_WRITER:                                                 \
                if ((_rc_ = down_write_trylock(SEM(rwp))))              \
                        spl_rw_set_owner(rwp);                          \
                break;                                                  \
        default:                                                        \
                SBUG();                                                 \
        }                                                               \
        _rc_;                                                           \
})

#define rw_enter(rwp, rw)                                               \
({                                                                      \
        switch (rw) {                                                   \
        case RW_READER:                                                 \
                down_read(SEM(rwp));                                    \
                break;                                                  \
        case RW_WRITER:                                                 \
                down_write(SEM(rwp));                                   \
                spl_rw_set_owner(rwp);                                  \
                break;                                                  \
        default:                                                        \
                SBUG();                                                 \
        }                                                               \
})

#define rw_exit(rwp)                                                    \
({                                                                      \
        if (RW_WRITE_HELD(rwp)) {                                       \
                spl_rw_clear_owner(rwp);                                \
                up_write(SEM(rwp));                                     \
        } else {                                                        \
                ASSERT(RW_READ_HELD(rwp));                              \
                up_read(SEM(rwp));                                      \
        }                                                               \
})

#define rw_downgrade(rwp)                                               \
({                                                                      \
        spl_rw_clear_owner(rwp);                                        \
        downgrade_write(SEM(rwp));                                      \
})
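
/*
 * Example usage (an illustrative sketch added for exposition; the
 * variable "lock" and the name "example" are hypothetical):
 *
 *   krwlock_t lock;
 *
 *   rw_init(&lock, "example", RW_DEFAULT, NULL);
 *
 *   rw_enter(&lock, RW_WRITER);         take the write lock
 *   ASSERT(RW_WRITE_HELD(&lock));       we are now the sole writer
 *   rw_downgrade(&lock);                atomically become a reader
 *   rw_exit(&lock);                     drop the read lock
 *
 *   rw_destroy(&lock);                  the lock must be unheld here
 */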

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define rw_tryupgrade(rwp)                                              \
({                                                                      \
        unsigned long _flags_;                                          \
        int _rc_ = 0;                                                   \
                                                                        \
        spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);               \
        if (list_empty(&SEM(rwp)->wait_list) && (RW_COUNT(rwp) == 1)) { \
                rw_exit_locked(SEM(rwp));                               \
                VERIFY(_rc_ = rw_tryenter_locked(SEM(rwp)));            \
                (rwp)->rw_owner = current;                              \
        }                                                               \
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);          \
        _rc_;                                                           \
})
#else
/*
 * This can be done correctly, but for each supported arch we would need
 * a custom cmpxchg() to atomically check and promote the rwsem.  That's
 * not worth the trouble for now, so rw_tryupgrade() will always fail.
 * (A caller pattern handling this failure is sketched below.)
 */
#define rw_tryupgrade(rwp)              ({ 0; })
#endif
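
/*
 * Caller pattern (an illustrative sketch added for exposition; "lock"
 * and the revalidation step are hypothetical): because rw_tryupgrade()
 * may always fail on newer kernels, callers must be prepared to drop
 * the read lock, reacquire as a writer, and revalidate any state that
 * was examined under the read lock:
 *
 *   rw_enter(&lock, RW_READER);
 *   ...
 *   if (!rw_tryupgrade(&lock)) {
 *           rw_exit(&lock);
 *           rw_enter(&lock, RW_WRITER);
 *           ... revalidate, another thread may have run ...
 *   }
 */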

int spl_rw_init(void);
void spl_rw_fini(void);

#endif /* _SPL_RWLOCK_H */