/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#include <sys/types.h>
#include <linux/mutex.h>

typedef enum {
	MUTEX_DEFAULT  = 0,
	MUTEX_SPIN     = 1,
	MUTEX_ADAPTIVE = 2
} kmutex_type_t;
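
/*
 * Regardless of which implementation below is selected at configure
 * time, consumers use the same Solaris-style interface.  Note that
 * mutex_init() only honors MUTEX_DEFAULT.  Illustrative sketch; the
 * example_lock name is hypothetical, not part of this header:
 *
 *	static kmutex_t example_lock;
 *
 *	mutex_init(&example_lock, NULL, MUTEX_DEFAULT, NULL);
 *
 *	mutex_enter(&example_lock);
 *	ASSERT(MUTEX_HELD(&example_lock));
 *	... critical section ...
 *	mutex_exit(&example_lock);
 *
 *	mutex_destroy(&example_lock);
 */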

#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP)

typedef struct mutex kmutex_t;

static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
	struct thread_info *owner;

	owner = ACCESS_ONCE(mp->owner);
	if (owner)
		return owner->task;

	return NULL;
}

static inline int
mutex_owned(kmutex_t *mp)
{
	return (ACCESS_ONCE(mp->owner) == current_thread_info());
}

#define MUTEX_HELD(mp)		mutex_owned(mp)

#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init((mp), #mp, &__key);			\
})

#undef mutex_destroy
#define mutex_destroy(mp)					\
({								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
})

#define mutex_tryenter(mp)	mutex_trylock(mp)
#define mutex_enter(mp)		mutex_lock(mp)

/* mutex->owner is not cleared when CONFIG_DEBUG_MUTEXES is set */
#ifdef CONFIG_DEBUG_MUTEXES
# define mutex_exit(mp)						\
({								\
	(mp)->owner = NULL;					\
	mutex_unlock(mp);					\
})
#else
# define mutex_exit(mp)		mutex_unlock(mp)
#endif /* CONFIG_DEBUG_MUTEXES */
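
/*
 * Without the explicit NULL store above, a CONFIG_DEBUG_MUTEXES kernel
 * leaves mutex->owner pointing at the previous holder after unlock, so
 * a later mutex_owned() check by that same task would incorrectly
 * report the lock as still held.  Illustrative sketch:
 *
 *	mutex_enter(mp);
 *	mutex_exit(mp);
 *	ASSERT(!MUTEX_HELD(mp));	// fails if the owner were stale
 */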

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)	mutex_lock_nested(mp, sc)
#else
# define mutex_enter_nested(mp, sc)	mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
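
/*
 * mutex_enter_nested() exists for lockdep's benefit: when two locks of
 * the same lock class must be held at once, the second acquisition is
 * taken with a distinct subclass so lockdep does not report a false
 * self-deadlock.  Illustrative sketch; the parent/child names are
 * hypothetical:
 *
 *	mutex_enter(&parent->lock);
 *	mutex_enter_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_exit(&child->lock);
 *	mutex_exit(&parent->lock);
 */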

#else /* HAVE_MUTEX_OWNER */

typedef struct {
	struct mutex m_mutex;
	kthread_t *m_owner;
} kmutex_t;

#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)		0
# define spl_mutex_spin_max()		0
#endif /* HAVE_TASK_CURR */
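
/*
 * Stubbing task_curr() and spl_mutex_spin_max() to 0 when task_curr()
 * is unavailable disables the adaptive spin in mutex_enter() below;
 * contended locks then always sleep in mutex_lock().
 */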

#define MUTEX(mp)		((struct mutex *)(mp))

static inline kthread_t *
spl_mutex_get_owner(kmutex_t *mp)
{
	return mp->m_owner;
}

static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	unsigned long flags;

	spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
	mp->m_owner = current;
	spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
}

static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	unsigned long flags;

	spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
	mp->m_owner = NULL;
	spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
}

static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
	unsigned long flags;
	kthread_t *owner;

	spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
	owner = spl_mutex_get_owner(mp);
	spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);

	return owner;
}

#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)

/*
 * The following functions must be #define's and not static inline.
 * This ensures that the native Linux mutex functions (lock/unlock)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init(MUTEX(mp), #mp, &__key);			\
	spl_mutex_clear_owner(mp);				\
})

#undef mutex_destroy
#define mutex_destroy(mp)					\
({								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
})

#define mutex_tryenter(mp)					\
({								\
	int _rc_;						\
								\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
								\
	_rc_;							\
})
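
/*
 * mutex_tryenter() returns non-zero when the lock was acquired and
 * never sleeps, making it suitable on paths that must not block.
 * Illustrative sketch; example_lock is a hypothetical name:
 *
 *	if (mutex_tryenter(&example_lock)) {
 *		... fast path, lock held ...
 *		mutex_exit(&example_lock);
 *	} else {
 *		... contended, defer the work or retry later ...
 *	}
 */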

/*
 * Adaptive mutexes assume the lock may be held by a task running on a
 * different CPU.  The expectation is that the task will drop the lock
 * before leaving the head of the run queue, so ideally we spin until
 * we acquire the lock and avoid a context switch.  However, it is also
 * possible that the task holding the lock yields the processor without
 * dropping the lock.  In that case we know it is going to be a while,
 * so we stop spinning and go to sleep waiting for the lock to become
 * available.  This should strike the optimum balance between spinning
 * and sleeping while waiting for a lock.
 */
#define mutex_enter(mp)						\
({								\
	kthread_t *_owner_;					\
	int _rc_, _count_;					\
								\
	_rc_ = 0;						\
	_count_ = 0;						\
	_owner_ = mutex_owner(mp);				\
								\
	while (_owner_ && task_curr(_owner_) &&			\
	       _count_ <= spl_mutex_spin_max()) {		\
		if ((_rc_ = mutex_trylock(MUTEX(mp))))		\
			break;					\
								\
		_count_++;					\
	}							\
								\
	if (!_rc_)						\
		mutex_lock(MUTEX(mp));				\
								\
	spl_mutex_set_owner(mp);				\
})
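
/*
 * Annotated sketch of the adaptive algorithm implemented by the macro
 * above (hypothetical function name; the real code stays a macro so
 * lock analysis tools attribute the lock site to the caller):
 *
 *	static inline void
 *	example_adaptive_enter(kmutex_t *mp)
 *	{
 *		kthread_t *owner = mutex_owner(mp);
 *		int count = 0;
 *
 *		// Owner is on a CPU: expect a quick release, so spin.
 *		while (owner && task_curr(owner) &&
 *		    count <= spl_mutex_spin_max()) {
 *			if (mutex_trylock(MUTEX(mp)))
 *				goto acquired;
 *			count++;
 *		}
 *
 *		// Owner is off-CPU or we spun too long: sleep instead.
 *		mutex_lock(MUTEX(mp));
 *	acquired:
 *		spl_mutex_set_owner(mp);
 *	}
 */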

#define mutex_exit(mp)						\
({								\
	spl_mutex_clear_owner(mp);				\
	mutex_unlock(MUTEX(mp));				\
})

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)				\
({								\
	mutex_lock_nested(MUTEX(mp), sc);			\
	spl_mutex_set_owner(mp);				\
})
#else
# define mutex_enter_nested(mp, sc)				\
({								\
	mutex_enter(mp);					\
})
#endif /* HAVE_GPL_ONLY_SYMBOLS */

#endif /* HAVE_MUTEX_OWNER */

int spl_mutex_init(void);
void spl_mutex_fini(void);

#endif /* _SPL_MUTEX_H */