]> git.proxmox.com Git - mirror_spl.git/blob - include/sys/mutex.h
Always use the generic mutex_destroy().
[mirror_spl.git] / include / sys / mutex.h
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2009 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 #ifndef _SPL_MUTEX_H
28 #define _SPL_MUTEX_H
29
30 #include <sys/types.h>
31 #include <linux/mutex.h>
32
/*
 * Solaris mutex types.  Only MUTEX_DEFAULT is supported by this port;
 * mutex_init() below asserts as much.  MUTEX_SPIN and MUTEX_ADAPTIVE
 * exist solely for Solaris source compatibility.
 */
typedef enum {
        MUTEX_DEFAULT = 0,
        MUTEX_SPIN = 1,
        MUTEX_ADAPTIVE = 2
} kmutex_type_t;
38
39 #ifdef HAVE_MUTEX_OWNER
40
/* When the kernel's struct mutex tracks its owner, a kmutex_t is just
 * the native mutex itself; no wrapper structure is required. */
typedef struct mutex kmutex_t;
42
43 static inline kthread_t *
44 mutex_owner(kmutex_t *mp)
45 {
46 if (mp->owner)
47 return (mp->owner)->task;
48
49 return NULL;
50 }
/* A mutex is "owned" iff the current task holds it. */
#define mutex_owned(mp)         (mutex_owner(mp) == current)
#define MUTEX_HELD(mp)          mutex_owned(mp)

/*
 * Initialize a kmutex_t.  Only MUTEX_DEFAULT is supported; 'name' and
 * 'ibc' are accepted for Solaris API compatibility but unused here.
 * The static lock_class_key gives each call site its own lock class
 * for the kernel lock validator, and #mp supplies the lock's name.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                             \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init((mp), #mp, &__key);                                \
})

/* Nothing to free for a native struct mutex; destruction only
 * verifies the caller is not destroying a mutex it still holds. */
#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY(!MUTEX_HELD(mp));                                        \
})

/* Map the Solaris primitives directly onto the Linux ones. */
#define mutex_tryenter(mp)      mutex_trylock(mp)
#define mutex_enter(mp)         mutex_lock(mp)
#define mutex_exit(mp)          mutex_unlock(mp)


#ifdef HAVE_GPL_ONLY_SYMBOLS
/* mutex_lock_nested() is GPL-only; without it, fall back to a plain
 * enter and forgo the lock-validator subclass annotation. */
# define mutex_enter_nested(mp, sc) mutex_lock_nested(mp, sc)
#else
# define mutex_enter_nested(mp, sc) mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
78
79 #else /* HAVE_MUTEX_OWNER */
80
/*
 * Without kernel owner tracking, the owning task is recorded next to
 * the native mutex so mutex_owner()/mutex_owned() can be implemented.
 */
typedef struct {
        struct mutex m_mutex;
        kthread_t *m_owner;
} kmutex_t;

#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
/* Without task_curr() the adaptive spin in mutex_enter() is disabled:
 * the spin condition below is always false. */
# define task_curr(owner)       0
# define spl_mutex_spin_max()   0
#endif /* HAVE_TASK_CURR */

/* Access the embedded native mutex; valid because m_mutex is the
 * first member of kmutex_t. */
#define MUTEX(mp)               ((struct mutex *)(mp))
94
95 static inline kthread_t *
96 spl_mutex_get_owner(kmutex_t *mp)
97 {
98 return mp->m_owner;
99 }
100
101 static inline void
102 spl_mutex_set_owner(kmutex_t *mp)
103 {
104 unsigned long flags;
105
106 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
107 mp->m_owner = current;
108 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
109 }
110
111 static inline void
112 spl_mutex_clear_owner(kmutex_t *mp)
113 {
114 unsigned long flags;
115
116 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
117 mp->m_owner = NULL;
118 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
119 }
120
121 static inline kthread_t *
122 mutex_owner(kmutex_t *mp)
123 {
124 unsigned long flags;
125 kthread_t *owner;
126
127 spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
128 owner = spl_mutex_get_owner(mp);
129 spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
130
131 return owner;
132 }
133
/* A mutex is "owned" iff the current task holds it. */
#define mutex_owned(mp)         (mutex_owner(mp) == current)
#define MUTEX_HELD(mp)          mutex_owned(mp)

/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools
 */
/* Only MUTEX_DEFAULT is supported; 'name' and 'ibc' are accepted for
 * Solaris API compatibility but unused.  The static lock_class_key
 * gives each call site its own class for the kernel lock validator. */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                             \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init(MUTEX(mp), #mp, &__key);                           \
        spl_mutex_clear_owner(mp);                                      \
})

/* Nothing to tear down beyond verifying the mutex is not held. */
#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY(!MUTEX_HELD(mp));                                        \
})
158
/* Try to take the mutex without blocking.  Evaluates to 1 on success
 * (recording the owner), 0 when the lock is already held. */
#define mutex_tryenter(mp)                                              \
({                                                                      \
        int _rc_;                                                       \
                                                                        \
        if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)                     \
                spl_mutex_set_owner(mp);                                \
                                                                        \
        _rc_;                                                           \
})
168
169 /*
170 * Adaptive mutexs assume that the lock may be held by a task running
171 * on a different cpu. The expectation is that the task will drop the
172 * lock before leaving the head of the run queue. So the ideal thing
173 * to do is spin until we acquire the lock and avoid a context switch.
174 * However it is also possible the task holding the lock yields the
175 * processor with out dropping lock. In this case, we know it's going
176 * to be a while so we stop spinning and go to sleep waiting for the
177 * lock to be available. This should strike the optimum balance
178 * between spinning and sleeping waiting for a lock.
179 */
180 #define mutex_enter(mp) \
181 ({ \
182 kthread_t *_owner_; \
183 int _rc_, _count_; \
184 \
185 _rc_ = 0; \
186 _count_ = 0; \
187 _owner_ = mutex_owner(mp); \
188 \
189 while (_owner_ && task_curr(_owner_) && \
190 _count_ <= spl_mutex_spin_max()) { \
191 if ((_rc_ = mutex_trylock(MUTEX(mp)))) \
192 break; \
193 \
194 _count_++; \
195 } \
196 \
197 if (!_rc_) \
198 mutex_lock(MUTEX(mp)); \
199 \
200 spl_mutex_set_owner(mp); \
201 })
202
/* Clear the recorded owner before releasing the native mutex. */
#define mutex_exit(mp)                                                  \
({                                                                      \
        spl_mutex_clear_owner(mp);                                      \
        mutex_unlock(MUTEX(mp));                                        \
})
208
#ifdef HAVE_GPL_ONLY_SYMBOLS
/*
 * Acquire the mutex with an explicit lock-validator subclass 'sc'.
 * Note: 'sc' must be passed to mutex_lock_nested(), not to MUTEX(),
 * which takes a single argument -- the previous expansion
 * MUTEX(mp, sc) was a preprocessor arity error and could not compile
 * when HAVE_GPL_ONLY_SYMBOLS was defined.
 */
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_lock_nested(MUTEX(mp), sc);                               \
        spl_mutex_set_owner(mp);                                        \
})
#else
/* mutex_lock_nested() is unavailable; fall back to a plain enter and
 * forgo the subclass annotation. */
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_enter(mp);                                                \
})
#endif
221
222 #endif /* HAVE_MUTEX_OWNER */
223
/* Module load/unload hooks for the SPL mutex subsystem. */
int spl_mutex_init(void);
void spl_mutex_fini(void);
226
227 #endif /* _SPL_MUTEX_H */