/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <sys/types.h>
#include <linux/rwsem.h>

typedef enum {
	RW_DRIVER  = 2,
	RW_DEFAULT = 4
} krw_type_t;

typedef enum {
	RW_NONE   = 0,
	RW_WRITER = 1,
	RW_READER = 2
} krw_t;

typedef struct {
	struct rw_semaphore rw_rwlock;
	kthread_t *rw_owner;
} krwlock_t;

/*
 * For the generic implementation of rw-semaphores the following is true.
 * If your semaphore implementation internally represents the semaphore
 * state differently, then special case handling will be required so
 * that RW_COUNT() provides these semantics:
 * - if activity/count is 0 then there are no active readers or writers
 * - if activity/count is positive then that is the number of active readers
 * - if activity/count is -1 then there is one active writer
 */
#define SEM(rwp)			((struct rw_semaphore *)(rwp))

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
# define RW_COUNT(rwp)			(SEM(rwp)->activity)
# define rw_exit_locked(rwp)		__up_read_locked(rwp)
# define rw_tryenter_locked(rwp)	__down_write_trylock_locked(rwp)
extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);
#else
/*
 * 2.6.x - 2.6.27 use guard macro _I386_RWSEM_H
 * 2.6.28 - 2.6.32+ use guard macro _ASM_X86_RWSEM_H
 */
# if defined(_I386_RWSEM_H) || defined(_ASM_X86_RWSEM_H)
#  define RW_COUNT(rwp)		((SEM(rwp)->count < 0) ? (-1) : \
				(SEM(rwp)->count & RWSEM_ACTIVE_MASK))
# else
#  define RW_COUNT(rwp)		(SEM(rwp)->count & RWSEM_ACTIVE_MASK)
# endif
# define rw_exit_locked(rwp)		up_read(rwp)
# define rw_tryenter_locked(rwp)	down_write_trylock(rwp)
#endif

static inline kthread_t *
spl_rw_get_owner(krwlock_t *rwp)
{
	return rwp->rw_owner;
}

static inline void
spl_rw_set_owner(krwlock_t *rwp)
{
	unsigned long flags;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rwp->rw_owner = current;
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline void
spl_rw_clear_owner(krwlock_t *rwp)
{
	unsigned long flags;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rwp->rw_owner = NULL;
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);
}

static inline kthread_t *
rw_owner(krwlock_t *rwp)
{
	unsigned long flags;
	kthread_t *owner;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	owner = spl_rw_get_owner(rwp);
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return owner;
}

static inline int
RW_READ_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = ((RW_COUNT(rwp) > 0) && (spl_rw_get_owner(rwp) == NULL));
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}

static inline int
RW_WRITE_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = ((RW_COUNT(rwp) < 0) && (spl_rw_get_owner(rwp) == current));
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}

static inline int
RW_LOCK_HELD(krwlock_t *rwp)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&SEM(rwp)->wait_lock, flags);
	rc = (RW_COUNT(rwp) != 0);
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, flags);

	return rc;
}

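/*
 * Illustrative note (not part of the original source): the RW_*_HELD()
 * checks above are primarily intended for use in assertions.  A
 * hypothetical caller holding a hypothetical foo->f_lock as a writer
 * might verify that fact before touching protected state:
 *
 *	ASSERT(RW_WRITE_HELD(&foo->f_lock));
 *	foo->f_dirty = 1;
 *
 * RW_READ_HELD() only reports readers when no writer owner is recorded,
 * and RW_LOCK_HELD() is true when the semaphore is held in either mode.
 */
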
/*
 * The following functions must be #defines and not static inlines.
 * This ensures that the native Linux semaphore functions (down/up)
 * will be correctly located in the caller's code, which is important
 * for the built-in kernel lock analysis tools.
 */
#define rw_init(rwp, name, type, arg)					\
({									\
	static struct lock_class_key __key;				\
									\
	__init_rwsem(SEM(rwp), #rwp, &__key);				\
	spl_rw_clear_owner(rwp);					\
})

#define rw_destroy(rwp)							\
({									\
	VERIFY(!RW_LOCK_HELD(rwp));					\
})

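/*
 * Illustrative sketch (not part of the original source): a typical
 * lifecycle for an embedded krwlock_t, using hypothetical names.  The
 * name, type, and arg parameters are accepted for Solaris compatibility
 * but are not used by the macro above:
 *
 *	typedef struct my_node {
 *		krwlock_t	mn_lock;
 *		int		mn_value;
 *	} my_node_t;
 *
 *	static my_node_t node;
 *
 *	rw_init(&node.mn_lock, NULL, RW_DEFAULT, NULL);
 *	...
 *	rw_destroy(&node.mn_lock);	(must not be held at this point)
 */
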
#define rw_tryenter(rwp, rw)						\
({									\
	int _rc_ = 0;							\
									\
	switch (rw) {							\
	case RW_READER:							\
		_rc_ = down_read_trylock(SEM(rwp));			\
		break;							\
	case RW_WRITER:							\
		if ((_rc_ = down_write_trylock(SEM(rwp))))		\
			spl_rw_set_owner(rwp);				\
		break;							\
	default:							\
		SBUG();							\
	}								\
	_rc_;								\
})

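/*
 * Illustrative sketch (not part of the original source): rw_tryenter()
 * returns non-zero only when the lock was acquired, so callers must be
 * prepared to handle failure without blocking.  Names below are
 * hypothetical:
 *
 *	if (rw_tryenter(&node.mn_lock, RW_WRITER)) {
 *		node.mn_value++;
 *		rw_exit(&node.mn_lock);
 *	} else {
 *		(lock was contended; retry later or take a slow path)
 *	}
 */
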
#define rw_enter(rwp, rw)						\
({									\
	switch (rw) {							\
	case RW_READER:							\
		down_read(SEM(rwp));					\
		break;							\
	case RW_WRITER:							\
		down_write(SEM(rwp));					\
		spl_rw_set_owner(rwp);					\
		break;							\
	default:							\
		SBUG();							\
	}								\
})

#define rw_exit(rwp)							\
({									\
	if (RW_WRITE_HELD(rwp)) {					\
		spl_rw_clear_owner(rwp);				\
		up_write(SEM(rwp));					\
	} else {							\
		ASSERT(RW_READ_HELD(rwp));				\
		up_read(SEM(rwp));					\
	}								\
})

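/*
 * Illustrative sketch (not part of the original source): the common
 * blocking enter/exit pattern.  rw_exit() infers the mode to release
 * from the recorded owner, so no mode flag is passed on exit.  Names
 * below are hypothetical:
 *
 *	rw_enter(&node.mn_lock, RW_READER);
 *	value = node.mn_value;
 *	rw_exit(&node.mn_lock);
 *
 *	rw_enter(&node.mn_lock, RW_WRITER);
 *	node.mn_value = value + 1;
 *	rw_exit(&node.mn_lock);
 */
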
#define rw_downgrade(rwp)						\
({									\
	spl_rw_clear_owner(rwp);					\
	downgrade_write(SEM(rwp));					\
})

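/*
 * Illustrative sketch (not part of the original source): rw_downgrade()
 * converts a held write lock into a read lock without ever dropping it,
 * so a writer can publish an update and keep reading under the weaker
 * mode.  compute_value() and use_value() are hypothetical helpers:
 *
 *	rw_enter(&node.mn_lock, RW_WRITER);
 *	node.mn_value = compute_value();
 *	rw_downgrade(&node.mn_lock);
 *	use_value(node.mn_value);	(still read-locked here)
 *	rw_exit(&node.mn_lock);
 */
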
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
#define rw_tryupgrade(rwp)						\
({									\
	unsigned long _flags_;						\
	int _rc_ = 0;							\
									\
	spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);		\
	if (list_empty(&SEM(rwp)->wait_list) && (RW_COUNT(rwp) == 1)) {	\
		rw_exit_locked(SEM(rwp));				\
		VERIFY(_rc_ = rw_tryenter_locked(SEM(rwp)));		\
		(rwp)->rw_owner = current;				\
	}								\
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);		\
	_rc_;								\
})
#else
/*
 * This can be done correctly, but for each supported arch we will need
 * a custom cmpxchg() to atomically check and promote the rwsem.  That's
 * not worth the trouble for now, so rw_tryupgrade() will always fail.
 */
#define rw_tryupgrade(rwp)	({ 0; })
#endif
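
/*
 * Illustrative sketch (not part of the original source): because
 * rw_tryupgrade() can fail (and on non-generic rwsem builds always
 * fails), callers must be ready to drop the read lock and re-acquire it
 * as a writer, revalidating any state examined under the read lock.
 * node_needs_update() is a hypothetical predicate:
 *
 *	rw_enter(&node.mn_lock, RW_READER);
 *	if (node_needs_update(&node) && !rw_tryupgrade(&node.mn_lock)) {
 *		rw_exit(&node.mn_lock);
 *		rw_enter(&node.mn_lock, RW_WRITER);
 *		(recheck node_needs_update(); the state may have changed)
 *	}
 *	...
 *	rw_exit(&node.mn_lock);
 */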

int spl_rw_init(void);
void spl_rw_fini(void);

#endif /* _SPL_RWLOCK_H */