/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#include <sys/types.h>
#include <linux/mutex.h>
#include <linux/compiler_compat.h>

typedef enum {
	MUTEX_DEFAULT	= 0,
	MUTEX_SPIN	= 1,
	MUTEX_ADAPTIVE	= 2
} kmutex_type_t;

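/*
 * When the kernel's struct mutex records its owner (HAVE_MUTEX_OWNER, on SMP
 * kernels built without CONFIG_DEBUG_MUTEXES), that field is used directly to
 * implement mutex_owner().  Otherwise the owner is tracked explicitly in a
 * separate m_owner field; see the #else branch below.
 */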
#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && \
	!defined(CONFIG_DEBUG_MUTEXES)

/*
 * We define a wrapper struct rather than a straight typedef of struct mutex
 * to enforce type safety.
 */
typedef struct {
	struct mutex	m;
	spinlock_t	m_lock;	/* used for serializing mutex_exit */
} kmutex_t;

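/*
 * mutex_owner() returns the task currently holding the mutex, or NULL when it
 * is unheld.  Depending on the kernel version, mutex.owner is either a
 * task_struct pointer or a thread_info pointer, hence the two cases below.
 */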
static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
#if defined(HAVE_MUTEX_OWNER_TASK_STRUCT)
	return ACCESS_ONCE(mp->m.owner);
#else
	struct thread_info *owner = ACCESS_ONCE(mp->m.owner);
	if (owner)
		return owner->task;

	return NULL;
#endif
}

#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
#undef mutex_init
#define mutex_init(mp, name, type, ibc)			\
({							\
	static struct lock_class_key __key;		\
	ASSERT(type == MUTEX_DEFAULT);			\
							\
	__mutex_init(&(mp)->m, #mp, &__key);		\
	spin_lock_init(&(mp)->m_lock);			\
})

#undef mutex_destroy
#define mutex_destroy(mp)				\
({							\
	VERIFY3P(mutex_owner(mp), ==, NULL);		\
})

#define mutex_tryenter(mp)	mutex_trylock(&(mp)->m)
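/*
 * mutex_enter() asserts that the calling thread does not already hold the
 * lock; a kmutex_t is not recursive, so re-entering it would self-deadlock.
 */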
#define mutex_enter(mp)					\
({							\
	ASSERT3P(mutex_owner(mp), !=, current);		\
	mutex_lock(&(mp)->m);				\
})
/*
 * The reason for the spinlock:
 *
 * The Linux mutex is designed with a fast-path/slow-path scheme and does not
 * guarantee serialization upon itself: a later acquirer can finish its
 * mutex_unlock() before an earlier acquirer has finished its own.
 *
 * That race makes a bare mutex unsafe for serializing the freeing of an
 * object in which the mutex is embedded: the later acquirer could free the
 * object while the earlier one is still inside mutex_unlock(), corrupting
 * freed memory.
 *
 * However, there are many places in ZFS where the mutex is used to serialize
 * object freeing, and that code is shared with other OSes which do not have
 * this issue.  Thus, we use the spinlock to force serialization on
 * mutex_exit().
 *
 * See http://lwn.net/Articles/575477/ for more information about the race.
 */
#define mutex_exit(mp)					\
({							\
	spin_lock(&(mp)->m_lock);			\
	mutex_unlock(&(mp)->m);				\
	spin_unlock(&(mp)->m_lock);			\
})
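
/*
 * Illustrative sketch (not part of this header) of the pattern the extra
 * m_lock serializes: freeing an object that embeds its own kmutex_t once the
 * last reference is dropped.
 *
 *	mutex_enter(&obj->lock);
 *	last = (--obj->refs == 0);
 *	mutex_exit(&obj->lock);
 *	if (last)
 *		kmem_free(obj, sizeof (*obj));
 *
 * Without the m_lock in mutex_exit(), the freeing thread could run while a
 * previous holder is still inside mutex_unlock() touching the embedded
 * struct mutex.
 */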

#else /* HAVE_MUTEX_OWNER */

typedef struct {
	struct mutex	m_mutex;
	spinlock_t	m_lock;
	kthread_t	*m_owner;
} kmutex_t;

#define MUTEX(mp)	(&((mp)->m_mutex))

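/*
 * In this branch the owner cannot be read back from the kernel's struct
 * mutex, so it is tracked explicitly in m_owner: set immediately after a
 * successful lock and cleared just before unlock.
 */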
static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	mp->m_owner = current;
}

static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	mp->m_owner = NULL;
}

#define mutex_owner(mp)		(ACCESS_ONCE((mp)->m_owner))
#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))

/*
 * The following functions must be #defines and not static inlines.  This
 * ensures that the native Linux mutex functions (lock/unlock) will be
 * correctly located in the user's code, which is important for the built-in
 * kernel lock analysis tools.
 */
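/*
 * Note that because mutex_init() is a macro, each call site expands to its
 * own static struct lock_class_key, so every statically distinct
 * mutex_init() site gets its own lock class for the kernel lock validator.
 */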
#undef mutex_init
#define mutex_init(mp, name, type, ibc)			\
({							\
	static struct lock_class_key __key;		\
	ASSERT(type == MUTEX_DEFAULT);			\
							\
	__mutex_init(MUTEX(mp), #mp, &__key);		\
	spin_lock_init(&(mp)->m_lock);			\
	spl_mutex_clear_owner(mp);			\
})

#undef mutex_destroy
#define mutex_destroy(mp)				\
({							\
	VERIFY3P(mutex_owner(mp), ==, NULL);		\
})

#define mutex_tryenter(mp)				\
({							\
	int _rc_;					\
							\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)	\
		spl_mutex_set_owner(mp);		\
							\
	_rc_;						\
})
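
/*
 * Illustrative use of mutex_tryenter() (obj is hypothetical): attempt the
 * lock without blocking and proceed only when it was acquired.
 *
 *	if (mutex_tryenter(&obj->lock)) {
 *		...
 *		mutex_exit(&obj->lock);
 *	}
 */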

#define mutex_enter(mp)					\
({							\
	ASSERT3P(mutex_owner(mp), !=, current);		\
	mutex_lock(MUTEX(mp));				\
	spl_mutex_set_owner(mp);			\
})

#define mutex_exit(mp)					\
({							\
	spin_lock(&(mp)->m_lock);			\
	spl_mutex_clear_owner(mp);			\
	mutex_unlock(MUTEX(mp));			\
	spin_unlock(&(mp)->m_lock);			\
})

#endif /* HAVE_MUTEX_OWNER */

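/* Subsystem setup and teardown entry points for the SPL mutex code. */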
int spl_mutex_init(void);
void spl_mutex_fini(void);

#endif /* _SPL_MUTEX_H */