/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#include <sys/types.h>
#include <linux/mutex.h>
#include <linux/compiler_compat.h>

typedef enum {
        MUTEX_DEFAULT  = 0,
        MUTEX_SPIN     = 1,
        MUTEX_ADAPTIVE = 2
} kmutex_type_t;

#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP)

/*
 * We define a 1-field struct rather than a straight typedef to enforce type
 * safety.
 */
typedef struct {
        struct mutex m;
} kmutex_t;
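
/*
 * Illustration only (not part of the original header): because kmutex_t is
 * a distinct struct type, passing a raw struct mutex where a kmutex_t is
 * expected fails at compile time, which a plain typedef would not catch.
 * The variables below are hypothetical:
 *
 *      struct mutex raw;
 *      kmutex_t km;
 *
 *      mutex_enter(&km);       // ok: expands to mutex_lock(&km.m)
 *      mutex_enter(&raw);      // error: 'struct mutex' has no member 'm'
 */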

static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
        struct thread_info *owner;

        owner = ACCESS_ONCE(mp->m.owner);
        if (owner)
                return owner->task;

        return NULL;
}

static inline int
mutex_owned(kmutex_t *mp)
{
        return (ACCESS_ONCE(mp->m.owner) == current_thread_info());
}

#define MUTEX_HELD(mp)          mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)      (!MUTEX_HELD(mp))
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                            \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init(&(mp)->m, #mp, &__key);                            \
})

#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY3P(mutex_owner(mp), ==, NULL);                            \
})

#define mutex_tryenter(mp)      mutex_trylock(&(mp)->m)
#define mutex_enter(mp)         mutex_lock(&(mp)->m)

/* mutex->owner is not cleared when CONFIG_DEBUG_MUTEXES is set */
#ifdef CONFIG_DEBUG_MUTEXES
# define mutex_exit(mp)                                                 \
({                                                                      \
        mutex_unlock(&(mp)->m);                                         \
        (mp)->m.owner = NULL;                                           \
})
#else
# define mutex_exit(mp)         mutex_unlock(&(mp)->m)
#endif /* CONFIG_DEBUG_MUTEXES */

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)     mutex_lock_nested(&(mp)->m, sc)
#else
# define mutex_enter_nested(mp, sc)     mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */

#else /* HAVE_MUTEX_OWNER */

typedef struct {
        struct mutex m_mutex;
        kthread_t *m_owner;
} kmutex_t;

#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)       0
# define spl_mutex_spin_max()   0
#endif /* HAVE_TASK_CURR */

#define MUTEX(mp)               ((struct mutex *)(mp))

static inline kthread_t *
spl_mutex_get_owner(kmutex_t *mp)
{
        return mp->m_owner;
}

static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
        unsigned long flags;

        spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
        mp->m_owner = current;
        spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
}

static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
        unsigned long flags;

        spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
        mp->m_owner = NULL;
        spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
}

static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
        unsigned long flags;
        kthread_t *owner;

        spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
        owner = spl_mutex_get_owner(mp);
        spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);

        return owner;
}

#define mutex_owned(mp)         (mutex_owner(mp) == current)
#define MUTEX_HELD(mp)          mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)      (!MUTEX_HELD(mp))

/*
 * The following functions must be #define's and not static inline.
 * This ensures that the native Linux mutex functions (lock/unlock)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
        static struct lock_class_key __key;                            \
        ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
        __mutex_init(MUTEX(mp), #mp, &__key);                           \
        spl_mutex_clear_owner(mp);                                      \
})

#undef mutex_destroy
#define mutex_destroy(mp)                                               \
({                                                                      \
        VERIFY3P(mutex_owner(mp), ==, NULL);                            \
})

#define mutex_tryenter(mp)                                              \
({                                                                      \
        int _rc_;                                                       \
                                                                        \
        if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)                     \
                spl_mutex_set_owner(mp);                                \
                                                                        \
        _rc_;                                                           \
})
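
/*
 * Illustration only (not from the original source): mutex_tryenter()
 * follows the usual trylock pattern, returning non-zero when the lock
 * was acquired.  The kmutex_t and surrounding code here are hypothetical:
 *
 *      if (mutex_tryenter(&zp->z_lock)) {
 *              ...                             // critical section
 *              mutex_exit(&zp->z_lock);
 *      } else {
 *              ...                             // lock busy, take a slow path
 *      }
 */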

/*
 * Adaptive mutexes assume that the lock may be held by a task running
 * on a different CPU.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However, it is also possible the task holding the lock yields the
 * processor without dropping the lock.  In this case, we know it's going
 * to be a while, so we stop spinning and go to sleep waiting for the
 * lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 */
#define mutex_enter(mp)                                                 \
({                                                                      \
        kthread_t *_owner_;                                             \
        int _rc_, _count_;                                              \
                                                                        \
        _rc_ = 0;                                                       \
        _count_ = 0;                                                    \
        _owner_ = mutex_owner(mp);                                      \
                                                                        \
        while (_owner_ && task_curr(_owner_) &&                         \
               _count_ <= spl_mutex_spin_max()) {                       \
                if ((_rc_ = mutex_trylock(MUTEX(mp))))                  \
                        break;                                          \
                                                                        \
                _count_++;                                              \
        }                                                               \
                                                                        \
        if (!_rc_)                                                      \
                mutex_lock(MUTEX(mp));                                  \
                                                                        \
        spl_mutex_set_owner(mp);                                        \
})

#define mutex_exit(mp)                                                  \
({                                                                      \
        spl_mutex_clear_owner(mp);                                      \
        mutex_unlock(MUTEX(mp));                                        \
})

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_lock_nested(MUTEX(mp), sc);                               \
        spl_mutex_set_owner(mp);                                        \
})
#else
# define mutex_enter_nested(mp, sc)                                     \
({                                                                      \
        mutex_enter(mp);                                                \
})
#endif /* HAVE_GPL_ONLY_SYMBOLS */

#endif /* HAVE_MUTEX_OWNER */

int spl_mutex_init(void);
void spl_mutex_fini(void);
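
/*
 * Typical consumer usage, shown for illustration only; the variable and
 * the critical section below are hypothetical, not part of this header:
 *
 *      kmutex_t lock;
 *
 *      mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
 *
 *      mutex_enter(&lock);
 *      ASSERT(MUTEX_HELD(&lock));
 *      ...                             // critical section
 *      mutex_exit(&lock);
 *
 *      mutex_destroy(&lock);
 */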

#endif /* _SPL_MUTEX_H */