]>
git.proxmox.com Git - mirror_spl.git/blob - include/sys/rwlock.h
75356b95d8fb77bd91ee1d092c524b6c6069bae0
/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2009 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/types.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
45 struct rw_semaphore rw_rwlock
;
/*
 * For the generic and x86 implementations of rw-semaphores the following
 * is true.  If your semaphore implementation internally represents the
 * semaphore state differently, special case handling will be required:
 * - if activity/count is 0 then there are no active readers or writers
 * - if activity/count is +ve then that is the number of active readers
 * - if activity/count is -1 then there is one active writer
 */
/* Treat a krwlock_t pointer as its embedded rw_semaphore (first member). */
#define SEM(rwp)			((struct rw_semaphore *)(rwp))

#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
/*
 * Generic (spinlock-based) rwsems expose the reader/writer state in
 * ->activity; the *_locked helpers are variants that assume the rwsem's
 * internal wait_lock is already held by the caller (see rw_tryupgrade).
 */
# define RW_COUNT(rwp)			(SEM(rwp)->activity)
# define rw_exit_locked(rwp)		__up_read_locked(rwp)
# define rw_tryenter_locked(rwp)	__down_write_trylock_locked(rwp)
extern void __up_read_locked(struct rw_semaphore *);
extern int __down_write_trylock_locked(struct rw_semaphore *);
#else
/*
 * Arch-optimized (x86-style) rwsems pack the active count into the low
 * bits of ->count; mask off the waiter bias to recover it.
 * NOTE(review): the #else/#endif were lost in extraction and are
 * reconstructed — the duplicate RW_COUNT definition makes them implied.
 */
# define RW_COUNT(rwp)			(SEM(rwp)->count & RWSEM_ACTIVE_MASK)
# define rw_exit_locked(rwp)		up_read(rwp)
# define rw_tryenter_locked(rwp)	down_write_trylock(rwp)
#endif
71 static inline kthread_t
*
72 spl_rw_get_owner(krwlock_t
*rwp
)
78 spl_rw_set_owner(krwlock_t
*rwp
)
82 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
83 rwp
->rw_owner
= current
;
84 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
88 spl_rw_clear_owner(krwlock_t
*rwp
)
92 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
94 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
97 static inline kthread_t
*
98 rw_owner(krwlock_t
*rwp
)
103 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
104 owner
= spl_rw_get_owner(rwp
);
105 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
111 RW_READ_HELD(krwlock_t
*rwp
)
116 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
117 rc
= ((RW_COUNT(rwp
) > 0) && (spl_rw_get_owner(rwp
) == NULL
));
118 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
124 RW_WRITE_HELD(krwlock_t
*rwp
)
129 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
130 rc
= ((RW_COUNT(rwp
) < 0) && (spl_rw_get_owner(rwp
) == current
));
131 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
137 RW_LOCK_HELD(krwlock_t
*rwp
)
142 spin_lock_irqsave(&SEM(rwp
)->wait_lock
, flags
);
143 rc
= (RW_COUNT(rwp
) != 0);
144 spin_unlock_irqrestore(&SEM(rwp
)->wait_lock
, flags
);
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux semaphore functions (down/up)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.
 */
/*
 * Initialize a krwlock_t.  name/type/arg are accepted for Solaris API
 * compatibility but unused; the rwsem is named after the variable and
 * given a static lockdep class key.
 */
#define rw_init(rwp, name, type, arg)					\
({									\
	static struct lock_class_key __key;				\
									\
	__init_rwsem(SEM(rwp), #rwp, &__key);				\
	spl_rw_clear_owner(rwp);					\
})
/*
 * Destroy a krwlock_t.  Nothing to tear down for a Linux rwsem; just
 * verify nobody still holds the lock.
 */
#define rw_destroy(rwp)							\
({									\
	VERIFY(!RW_LOCK_HELD(rwp));					\
})
/*
 * Try to acquire the lock without blocking; returns non-zero on
 * success.  A successful write acquisition records the owner.
 * NOTE(review): the switch scaffolding and default case were lost in
 * extraction and are reconstructed (the surviving line spacing matches
 * this layout) — verify RW_READER/RW_WRITER/SBUG against the repository.
 */
#define rw_tryenter(rwp, rw)						\
({									\
	int _rc_ = 0;							\
									\
	switch (rw) {							\
	case RW_READER:							\
		_rc_ = down_read_trylock(SEM(rwp));			\
		break;							\
	case RW_WRITER:							\
		if ((_rc_ = down_write_trylock(SEM(rwp))))		\
			spl_rw_set_owner(rwp);				\
		break;							\
	default:							\
		SBUG();							\
	}								\
	_rc_;								\
})
/*
 * Acquire the lock, blocking until available.  A write acquisition
 * records the owner.
 * NOTE(review): the switch scaffolding and default case were lost in
 * extraction and are reconstructed (the surviving line spacing matches
 * this layout) — verify RW_READER/RW_WRITER/SBUG against the repository.
 */
#define rw_enter(rwp, rw)						\
({									\
	switch (rw) {							\
	case RW_READER:							\
		down_read(SEM(rwp));					\
		break;							\
	case RW_WRITER:							\
		down_write(SEM(rwp));					\
		spl_rw_set_owner(rwp);					\
		break;							\
	default:							\
		SBUG();							\
	}								\
})
/*
 * Release the lock.  The owner record distinguishes read from write
 * holds: clear it *before* up_write() so no window exists where the
 * lock is free but still claims an owner.
 * NOTE(review): the else branch's up_read() line was lost in extraction
 * and is reconstructed — implied by the ASSERT(RW_READ_HELD) before it.
 */
#define rw_exit(rwp)							\
({									\
	if (RW_WRITE_HELD(rwp)) {					\
		spl_rw_clear_owner(rwp);				\
		up_write(SEM(rwp));					\
	} else {							\
		ASSERT(RW_READ_HELD(rwp));				\
		up_read(SEM(rwp));					\
	}								\
})
/*
 * Atomically convert a write hold into a read hold.  The owner is
 * cleared first since after the downgrade the caller is only a reader.
 */
#define rw_downgrade(rwp)						\
({									\
	spl_rw_clear_owner(rwp);					\
	downgrade_write(SEM(rwp));					\
})
/*
 * Try to upgrade a read hold to a write hold; returns non-zero on
 * success.  This reaches into rwsem internals: holding wait_lock, the
 * upgrade can only succeed when no one is queued (wait_list empty) and
 * we are the sole active reader (count == 1).  In that case the read
 * hold is dropped and the write hold taken with the *_locked variants,
 * all without releasing wait_lock, so no other task can slip in — hence
 * the VERIFY that the trylock cannot fail.  The owner is written
 * directly since wait_lock is already held (spl_rw_set_owner would
 * self-deadlock).
 */
#define rw_tryupgrade(rwp)						\
({									\
	unsigned long _flags_;						\
	int _rc_ = 0;							\
									\
	spin_lock_irqsave(&SEM(rwp)->wait_lock, _flags_);		\
	if (list_empty(&SEM(rwp)->wait_list) && (RW_COUNT(rwp) == 1)) { \
		rw_exit_locked(SEM(rwp));				\
		VERIFY(_rc_ = rw_tryenter_locked(SEM(rwp)));		\
		(rwp)->rw_owner = current;				\
	}								\
	spin_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_);		\
	_rc_;								\
})
233 int spl_rw_init(void);
234 void spl_rw_fini(void);
236 #endif /* _SPL_RWLOCK_H */