/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <sys/types.h>
#include <linux/rwsem.h>

typedef enum {
	RW_DRIVER  = 2,
	RW_DEFAULT = 4
} krw_type_t;

typedef enum {
	RW_NONE   = 0,
	RW_WRITER = 1,
	RW_READER = 2
} krw_t;

typedef struct {
	struct rw_semaphore rw_rwlock;
	kthread_t *rw_owner;
} krwlock_t;
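
/*
 * Example (a hedged sketch, not part of this header): a krwlock_t is
 * normally embedded in the structure whose fields it protects.  The
 * 'my_node_t' type below is hypothetical.
 *
 *	typedef struct my_node {
 *		krwlock_t	n_lock;		// protects n_refs
 *		int		n_refs;
 *	} my_node_t;
 */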

/*
 * SEM() relies on rw_rwlock being the first member of krwlock_t, so a
 * krwlock_t pointer may be passed directly to the native rw_semaphore
 * primitives.
 */
#define SEM(rwp) ((struct rw_semaphore *)(rwp))

static inline kthread_t *
spl_rw_get_owner(krwlock_t *rwp)
{
	return rwp->rw_owner;
}

static inline void
spl_rw_set_owner(krwlock_t *rwp)
{
	unsigned long flags;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rwp->rw_owner = current;
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline void
spl_rw_clear_owner(krwlock_t *rwp)
{
	unsigned long flags;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rwp->rw_owner = NULL;
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline kthread_t *
rw_owner(krwlock_t *rwp)
{
	unsigned long flags;
	kthread_t *owner;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	owner = spl_rw_get_owner(rwp);
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return owner;
}

static inline int
RW_READ_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = (rwsem_is_locked(SEM(rwp)) && spl_rw_get_owner(rwp) == NULL);
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}

static inline int
RW_WRITE_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = (rwsem_is_locked(SEM(rwp)) && spl_rw_get_owner(rwp) == current);
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}

static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = rwsem_is_locked(SEM(rwp));
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}
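
/*
 * Example (a hedged sketch): the RW_*_HELD() helpers are typically used
 * in assertions to document a function's locking requirements.  The
 * 'my_node_update' function and 'my_node_t' type are hypothetical.
 *
 *	static void
 *	my_node_update(my_node_t *np)
 *	{
 *		ASSERT(RW_WRITE_HELD(&np->n_lock));	// caller holds writer
 *		np->n_refs++;
 *	}
 */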

/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native Linux semaphore functions (down/up)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.
 */
#define rw_init(rwp, name, type, arg)                           \
({                                                              \
	static struct lock_class_key __key;                     \
                                                                \
	__init_rwsem(SEM(rwp), #rwp, &__key);                   \
	spl_rw_clear_owner(rwp);                                \
})

#define rw_destroy(rwp)                                         \
({                                                              \
	VERIFY(!RW_LOCK_HELD(rwp));                             \
})

#define rw_tryenter(rwp, rw)                                    \
({                                                              \
	int _rc_ = 0;                                           \
                                                                \
	switch (rw) {                                           \
	case RW_READER:                                         \
		_rc_ = down_read_trylock(SEM(rwp));             \
		break;                                          \
	case RW_WRITER:                                         \
		if ((_rc_ = down_write_trylock(SEM(rwp))))      \
			spl_rw_set_owner(rwp);                  \
		break;                                          \
	default:                                                \
		VERIFY(0);                                      \
	}                                                       \
	_rc_;                                                   \
})
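
/*
 * Example (a hedged sketch): rw_tryenter() returns non-zero when the
 * lock was acquired, so a caller can do other work instead of blocking.
 * 'my_lock' and 'my_flush' are hypothetical.
 *
 *	if (rw_tryenter(&my_lock, RW_WRITER)) {
 *		my_flush();		// lock held, do the work
 *		rw_exit(&my_lock);
 *	}				// otherwise retry later
 */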

#define rw_enter(rwp, rw)                                       \
({                                                              \
	switch (rw) {                                           \
	case RW_READER:                                         \
		down_read(SEM(rwp));                            \
		break;                                          \
	case RW_WRITER:                                         \
		down_write(SEM(rwp));                           \
		spl_rw_set_owner(rwp);                          \
		break;                                          \
	default:                                                \
		VERIFY(0);                                      \
	}                                                       \
})

#define rw_exit(rwp)                                            \
({                                                              \
	if (RW_WRITE_HELD(rwp)) {                               \
		spl_rw_clear_owner(rwp);                        \
		up_write(SEM(rwp));                             \
	} else {                                                \
		ASSERT(RW_READ_HELD(rwp));                      \
		up_read(SEM(rwp));                              \
	}                                                       \
})
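
/*
 * Example (a hedged sketch of the full lock life cycle; every name
 * other than the rwlock API itself is hypothetical):
 *
 *	krwlock_t my_lock;
 *
 *	rw_init(&my_lock, "my_lock", RW_DEFAULT, NULL);
 *
 *	rw_enter(&my_lock, RW_READER);
 *	// ... read shared state ...
 *	rw_exit(&my_lock);
 *
 *	rw_enter(&my_lock, RW_WRITER);
 *	// ... modify shared state ...
 *	rw_exit(&my_lock);
 *
 *	rw_destroy(&my_lock);
 */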

#define rw_downgrade(rwp)                                       \
({                                                              \
	spl_rw_clear_owner(rwp);                                \
	downgrade_write(SEM(rwp));                              \
})
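
/*
 * Example (a hedged sketch): rw_downgrade() converts a held writer lock
 * into a reader lock without any window in which the lock is dropped.
 *
 *	rw_enter(&my_lock, RW_WRITER);
 *	// ... modify shared state ...
 *	rw_downgrade(&my_lock);		// now held as RW_READER
 *	// ... continue reading the state just written ...
 *	rw_exit(&my_lock);
 */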

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
/*
 * The following is true for the generic (spinlock based) implementation
 * of rw-semaphores.  If your semaphore implementation internally
 * represents the semaphore state differently, then special case
 * handling is required:
 * - if activity/count is 0 then there are no active readers or writers
 * - if activity/count is positive then that is the number of active readers
 * - if activity/count is -1 then there is one active writer
 */

extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);

#define rw_tryupgrade(rwp)                                      \
({                                                              \
	unsigned long _flags_;                                  \
	int _rc_ = 0;                                           \
                                                                \
	spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);       \
	if ((list_empty(&SEM(rwp)->wait_list)) &&               \
	    (SEM(rwp)->activity == 1)) {                        \
		__up_read_locked(SEM(rwp));                     \
		VERIFY(_rc_ = __down_write_trylock_locked(SEM(rwp))); \
		(rwp)->rw_owner = current;                      \
	}                                                       \
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);  \
	_rc_;                                                   \
})
#else
/*
 * rw_tryupgrade() can be implemented correctly, but each supported
 * arch would need a custom implementation.  For the x86 implementation
 * it looks like a custom cmpxchg() to atomically check and promote the
 * rwsem would be safe.  For now that's not worth the trouble, so in
 * this case rw_tryupgrade() has simply been disabled.
 */
#define rw_tryupgrade(rwp) ({ 0; })
#endif
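
/*
 * Example (a hedged sketch): rw_tryupgrade() may fail, in which case
 * the caller must drop the reader lock, reacquire as a writer, and
 * revalidate any state examined while only the reader lock was held.
 *
 *	rw_enter(&my_lock, RW_READER);
 *	// ... inspect shared state ...
 *	if (!rw_tryupgrade(&my_lock)) {
 *		rw_exit(&my_lock);
 *		rw_enter(&my_lock, RW_WRITER);
 *		// ... revalidate; state may have changed ...
 *	}
 *	// ... modify shared state as writer ...
 *	rw_exit(&my_lock);
 */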

int spl_rw_init(void);
void spl_rw_fini(void);

#endif /* _SPL_RWLOCK_H */