When using x86 specific rwsem correctly interpret rwsem->count.
[mirror_spl-debian.git] / include / sys / rwlock.h
/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2009 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <sys/types.h>
#include <linux/rwsem.h>

typedef enum {
        RW_DRIVER  = 2,
        RW_DEFAULT = 4
} krw_type_t;

typedef enum {
        RW_NONE   = 0,
        RW_WRITER = 1,
        RW_READER = 2
} krw_t;

typedef struct {
        struct rw_semaphore rw_rwlock;
        kthread_t *rw_owner;
} krwlock_t;

/*
 * For the generic implementation of rw-semaphores the following is true.
 * If your semaphore implementation internally represents the semaphore
 * state differently, then special case handling will be required so that
 * RW_COUNT() provides these semantics:
 * - if activity/count is 0 then there are no active readers or writers
 * - if activity/count is +ve then that is the number of active readers
 * - if activity/count is -1 then there is one active writer
 * (An illustrative sketch of these semantics follows the definitions below.)
 */
#define SEM(rwp)                        ((struct rw_semaphore *)(rwp))

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
# define RW_COUNT(rwp)                  (SEM(rwp)->activity)
# define rw_exit_locked(rwp)            __up_read_locked(rwp)
# define rw_tryenter_locked(rwp)        __down_write_trylock_locked(rwp)
extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);
#else
# ifdef _I386_RWSEM_H
#  define RW_COUNT(rwp)                 ((SEM(rwp)->count < 0) ? (-1) :       \
                                         (SEM(rwp)->count & RWSEM_ACTIVE_MASK))
# else
#  define RW_COUNT(rwp)                 (SEM(rwp)->count & RWSEM_ACTIVE_MASK)
# endif
# define rw_exit_locked(rwp)            up_read(rwp)
# define rw_tryenter_locked(rwp)        down_write_trylock(rwp)
#endif
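
/*
 * Illustrative sketch only; spl_rw_state_sketch() is a hypothetical helper
 * and not part of the SPL API.  It simply restates the RW_COUNT() semantics
 * described above as code.  Like the held checks below, it assumes the
 * caller samples the count while holding the semaphore's wait_lock.
 */
static inline const char *
spl_rw_state_sketch(krwlock_t *rwp)
{
        long count = RW_COUNT(rwp);

        if (count == 0)
                return "unlocked";      /* no active readers or writers */
        else if (count > 0)
                return "read locked";   /* 'count' active readers */
        else
                return "write locked";  /* count == -1, one active writer */
}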

static inline kthread_t *
spl_rw_get_owner(krwlock_t *rwp)
{
        return rwp->rw_owner;
}

static inline void
spl_rw_set_owner(krwlock_t *rwp)
{
        unsigned long flags;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rwp->rw_owner = current;
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline void
spl_rw_clear_owner(krwlock_t *rwp)
{
        unsigned long flags;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rwp->rw_owner = NULL;
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline kthread_t *
rw_owner(krwlock_t *rwp)
{
        unsigned long flags;
        kthread_t *owner;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        owner = spl_rw_get_owner(rwp);
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return owner;
}

static inline int
RW_READ_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = ((RW_COUNT(rwp) > 0) && (spl_rw_get_owner(rwp) == NULL));
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}

static inline int
RW_WRITE_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = ((RW_COUNT(rwp) < 0) && (spl_rw_get_owner(rwp) == current));
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}

static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
        rc = (RW_COUNT(rwp) != 0);
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

        return rc;
}
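
/*
 * Illustrative sketch only; spl_rw_assert_sketch() is a hypothetical
 * function and not part of the SPL API.  As in the Solaris kernel, the
 * held checks above are intended for use in assertions rather than as
 * locking primitives in their own right.
 */
static inline void
spl_rw_assert_sketch(krwlock_t *rwp)
{
        /* Some thread holds rwp as either reader or writer. */
        ASSERT(RW_LOCK_HELD(rwp));

        /*
         * RW_WRITE_HELD() is only true for the thread that took the write
         * lock, so callers typically assert it about themselves.
         */
        if (RW_WRITE_HELD(rwp))
                ASSERT(rw_owner(rwp) == current);
}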

/*
 * The following functions must be #defines and not static inlines.
 * This ensures that the native Linux semaphore functions (down/up)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.  An illustrative usage
 * sketch follows the macro definitions below.
 */
#define rw_init(rwp, name, type, arg)                           \
({                                                              \
        static struct lock_class_key __key;                    \
                                                                \
        __init_rwsem(SEM(rwp), #rwp, &__key);                   \
        spl_rw_clear_owner(rwp);                                \
})

#define rw_destroy(rwp)                                         \
({                                                              \
        VERIFY(!RW_LOCK_HELD(rwp));                             \
})

#define rw_tryenter(rwp, rw)                                    \
({                                                              \
        int _rc_ = 0;                                           \
                                                                \
        switch (rw) {                                           \
        case RW_READER:                                         \
                _rc_ = down_read_trylock(SEM(rwp));             \
                break;                                          \
        case RW_WRITER:                                         \
                if ((_rc_ = down_write_trylock(SEM(rwp))))      \
                        spl_rw_set_owner(rwp);                  \
                break;                                          \
        default:                                                \
                SBUG();                                         \
        }                                                       \
        _rc_;                                                   \
})

#define rw_enter(rwp, rw)                                       \
({                                                              \
        switch (rw) {                                           \
        case RW_READER:                                         \
                down_read(SEM(rwp));                            \
                break;                                          \
        case RW_WRITER:                                         \
                down_write(SEM(rwp));                           \
                spl_rw_set_owner(rwp);                          \
                break;                                          \
        default:                                                \
                SBUG();                                         \
        }                                                       \
})

#define rw_exit(rwp)                                            \
({                                                              \
        if (RW_WRITE_HELD(rwp)) {                               \
                spl_rw_clear_owner(rwp);                        \
                up_write(SEM(rwp));                             \
        } else {                                                \
                ASSERT(RW_READ_HELD(rwp));                      \
                up_read(SEM(rwp));                              \
        }                                                       \
})

#define rw_downgrade(rwp)                                       \
({                                                              \
        spl_rw_clear_owner(rwp);                                \
        downgrade_write(SEM(rwp));                              \
})

/*
 * The upgrade can only succeed atomically if no other thread holds or is
 * waiting for the semaphore: with the wait_lock held, the wait list must
 * be empty and the calling thread must be the sole active reader.
 */
#define rw_tryupgrade(rwp)                                      \
({                                                              \
        unsigned long _flags_;                                  \
        int _rc_ = 0;                                           \
                                                                \
        spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);       \
        if (list_empty(&SEM(rwp)->wait_list) && (RW_COUNT(rwp) == 1)) { \
                rw_exit_locked(SEM(rwp));                       \
                VERIFY(_rc_ = rw_tryenter_locked(SEM(rwp)));    \
                (rwp)->rw_owner = current;                      \
        }                                                       \
        spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);  \
        _rc_;                                                   \
})
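
/*
 * Illustrative usage sketch only; spl_rw_usage_sketch() and its lock are
 * hypothetical and not part of the SPL API.  It shows the expected life
 * cycle of a krwlock_t using the macros defined above.
 */
static inline void
spl_rw_usage_sketch(void)
{
        krwlock_t lock;

        rw_init(&lock, "sketch", RW_DEFAULT, NULL);

        /* Take the lock shared; many readers may hold it concurrently. */
        rw_enter(&lock, RW_READER);

        /* Try a lossless upgrade; on failure re-acquire as a writer. */
        if (!rw_tryupgrade(&lock)) {
                rw_exit(&lock);
                rw_enter(&lock, RW_WRITER);
        }
        ASSERT(RW_WRITE_HELD(&lock));

        /* Drop back to shared mode, then release and tear down. */
        rw_downgrade(&lock);
        rw_exit(&lock);
        rw_destroy(&lock);
}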

int spl_rw_init(void);
void spl_rw_fini(void);

#endif /* _SPL_RWLOCK_H */