/* Source: mirror_spl-debian.git — include/sys/mutex.h (commit 905eed50e62e0ead526500ee57bb747ece18da97) */
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 \*****************************************************************************/
24
25 #ifndef _SPL_MUTEX_H
26 #define _SPL_MUTEX_H
27
28 #include <sys/types.h>
29 #include <linux/mutex.h>
30 #include <linux/compiler_compat.h>
31
/*
 * Solaris-style mutex types.  Only MUTEX_DEFAULT is actually supported
 * here: both mutex_init() implementations below ASSERT that the caller
 * passed MUTEX_DEFAULT.  MUTEX_SPIN and MUTEX_ADAPTIVE exist so Solaris
 * callers compile, but they receive default (sleeping) mutex behavior.
 */
typedef enum {
	MUTEX_DEFAULT = 0,
	MUTEX_SPIN = 1,
	MUTEX_ADAPTIVE = 2
} kmutex_type_t;
37
38 #if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
39
/*
 * We define a 1-field struct rather than a straight typedef to enforce type
 * safety: a plain `typedef struct mutex kmutex_t` would let callers pass a
 * raw `struct mutex *` to the kmutex API (and vice versa) unnoticed.
 *
 * This variant is used when the kernel's struct mutex already records its
 * owner (HAVE_MUTEX_OWNER, SMP, !CONFIG_DEBUG_MUTEXES — see #if above), so
 * no separate owner field is needed.
 */
typedef struct {
	struct mutex m;
} kmutex_t;
47
/*
 * Return the task currently holding the mutex, or NULL if unowned.
 *
 * The read of m.owner goes through ACCESS_ONCE() so the compiler performs
 * exactly one load and does not re-read or cache it — the value is racy by
 * nature and only a snapshot.  Two kernel ABIs are handled:
 *  - HAVE_MUTEX_OWNER_TASK_STRUCT: m.owner is the task itself and can be
 *    returned directly (kthread_t presumably aliases struct task_struct —
 *    implied by the direct return; confirm against spl's types.h).
 *  - otherwise: m.owner is a struct thread_info, so dereference ->task,
 *    guarding against a NULL (unowned) snapshot.
 */
static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
#if defined(HAVE_MUTEX_OWNER_TASK_STRUCT)
	return ACCESS_ONCE(mp->m.owner);
#else
	struct thread_info *owner = ACCESS_ONCE(mp->m.owner);
	if (owner)
		return owner->task;

	return NULL;
#endif
}
61
/* Held-by-me test: compares the racy owner snapshot against `current`. */
#define mutex_owned(mp)			(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)			mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)		(!MUTEX_HELD(mp))

/*
 * Solaris mutex_init(mp, name, type, ibc).  The `name` and `ibc` arguments
 * are accepted for API compatibility but ignored — the lockdep name is the
 * stringized `mp` expression (#mp) instead, and only MUTEX_DEFAULT is
 * allowed (ASSERT).  A function-local static lock_class_key gives each
 * mutex_init() call site its own lockdep class.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init(&(mp)->m, #mp, &__key);			\
})

/*
 * Destruction only verifies the mutex is unowned; no kernel-side teardown
 * is performed in this variant.
 */
#undef mutex_destroy
#define mutex_destroy(mp)					\
({								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
})

/* Non-blocking acquire: returns nonzero on success (mutex_trylock). */
#define mutex_tryenter(mp)		mutex_trylock(&(mp)->m)

/* Blocking acquire; the ASSERT catches recursive entry by the same task. */
#define mutex_enter(mp)						\
({								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	mutex_lock(&(mp)->m);					\
})
#define mutex_exit(mp)			mutex_unlock(&(mp)->m)

/*
 * Nested (lockdep-annotated) entry.  mutex_lock_nested() is GPL-only, so
 * when GPL-only symbols are unavailable fall back to a plain mutex_enter()
 * and lose only the lockdep subclass annotation, not correctness.
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)	mutex_lock_nested(&(mp)->m, sc)
#else
# define mutex_enter_nested(mp, sc)	mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
93
94 #else /* HAVE_MUTEX_OWNER */
95
/*
 * Fallback kmutex_t for kernels whose struct mutex does not expose an
 * owner field: track the owner ourselves in m_owner, maintained by
 * spl_mutex_set_owner()/spl_mutex_clear_owner() around lock/unlock.
 */
typedef struct {
	struct mutex m_mutex;
	kthread_t *m_owner;
} kmutex_t;
100
/*
 * task_curr() (is the task on a CPU right now?) drives the adaptive spin
 * in mutex_enter().  When the kernel does not export it, stub both it and
 * the spin budget to 0 so mutex_enter() skips spinning and sleeps directly.
 */
#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)		0
# define spl_mutex_spin_max()		0
#endif /* HAVE_TASK_CURR */

/* Accessor for the embedded kernel mutex inside a kmutex_t. */
#define MUTEX(mp)			(&((mp)->m_mutex))
109
/*
 * Record the calling task as the mutex owner.  Called only after the
 * underlying kernel mutex has been acquired (see mutex_enter/tryenter),
 * so a plain store is sufficient.
 */
static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	mp->m_owner = current;
}
115
/*
 * Clear the recorded owner.  Called while the underlying kernel mutex is
 * still held (mutex_exit clears before unlocking) and at init time.
 */
static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	mp->m_owner = NULL;
}
121
/* Racy single-read snapshot of the tracked owner (may be stale). */
#define mutex_owner(mp)			(ACCESS_ONCE((mp)->m_owner))
#define mutex_owned(mp)			(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)			mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)		(!MUTEX_HELD(mp))
126
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools.
 */

/*
 * Solaris mutex_init(mp, name, type, ibc): `name` and `ibc` are ignored
 * (lockdep name comes from #mp), only MUTEX_DEFAULT is allowed (ASSERT),
 * and the owner field starts out NULL.  The per-call-site static
 * lock_class_key gives each init site its own lockdep class.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init(MUTEX(mp), #mp, &__key);			\
	spl_mutex_clear_owner(mp);				\
})

/* Destruction only verifies the mutex is unowned. */
#undef mutex_destroy
#define mutex_destroy(mp)					\
({								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
})

/*
 * Non-blocking acquire.  Evaluates to mutex_trylock()'s result (1 on
 * success, 0 on contention); the owner is recorded only on success.
 */
#define mutex_tryenter(mp)					\
({								\
	int _rc_;						\
								\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
								\
	_rc_;							\
})
158
/*
 * Adaptive mutexs assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor with out dropping lock.  In this case, we know it's going
 * to be a while so we stop spinning and go to sleep waiting for the
 * lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 *
 * Flow: assert non-recursive entry, then while the sampled owner is
 * on-CPU (task_curr) spin with mutex_trylock() up to spl_mutex_spin_max()
 * attempts; on spin failure (or if there was no running owner) fall back
 * to a blocking mutex_lock(), and finally record ourselves as owner.
 *
 * NOTE(review): _owner_ is sampled once before the loop and never
 * refreshed, so an ownership change mid-spin is not observed — the loop
 * is still bounded by _count_.  Also, spl_mutex_set_owner() runs after
 * acquisition, leaving a short window where the lock is held but
 * mutex_owner() still reads NULL; confirm callers tolerate this race.
 */
#define mutex_enter(mp)						\
({								\
	kthread_t *_owner_;					\
	int _rc_, _count_;					\
								\
	_rc_ = 0;						\
	_count_ = 0;						\
	_owner_ = mutex_owner(mp);				\
	ASSERT3P(_owner_, !=, current);				\
								\
	while (_owner_ && task_curr(_owner_) &&			\
	    _count_ <= spl_mutex_spin_max()) {			\
		if ((_rc_ = mutex_trylock(MUTEX(mp))))		\
			break;					\
								\
		_count_++;					\
	}							\
								\
	if (!_rc_)						\
		mutex_lock(MUTEX(mp));				\
								\
	spl_mutex_set_owner(mp);				\
})
193
/*
 * Release: clear the tracked owner first, while the kernel mutex is still
 * held, so no moment exists where the lock is free but m_owner is stale.
 */
#define mutex_exit(mp)						\
({								\
	spl_mutex_clear_owner(mp);				\
	mutex_unlock(MUTEX(mp));				\
})
199
/*
 * Nested (lockdep-annotated) entry for the owner-tracking variant.
 * mutex_lock_nested() is GPL-only; without GPL-only symbols fall back to
 * the adaptive mutex_enter() (which also records the owner), losing only
 * the lockdep subclass annotation.
 */
#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)				\
({								\
	mutex_lock_nested(MUTEX(mp), sc);			\
	spl_mutex_set_owner(mp);				\
})
#else
# define mutex_enter_nested(mp, sc)				\
({								\
	mutex_enter(mp);					\
})
#endif
212
213 #endif /* HAVE_MUTEX_OWNER */
214
/* Module-wide setup/teardown for the SPL mutex subsystem (defined in spl-mutex.c). */
int spl_mutex_init(void);
void spl_mutex_fini(void);
217
218 #endif /* _SPL_MUTEX_H */