/*
 * Retrieved from git.proxmox.com (mirror_spl.git), blob
 * include/spl/sys/mutex.h, commit "Prepare SPL repo to merge with ZFS repo".
 */
/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */
24
25 #ifndef _SPL_MUTEX_H
26 #define _SPL_MUTEX_H
27
28 #include <sys/types.h>
29 #include <linux/mutex.h>
30 #include <linux/lockdep.h>
31
/*
 * Solaris-style mutex types.  Only MUTEX_DEFAULT and MUTEX_NOLOCKDEP are
 * supported by this port (see the ASSERT in mutex_init()); the other two
 * exist so Solaris-derived callers compile.
 */
typedef enum {
	MUTEX_DEFAULT	= 0,	/* standard sleeping mutex */
	MUTEX_SPIN	= 1,	/* not supported (rejected by mutex_init ASSERT) */
	MUTEX_ADAPTIVE	= 2,	/* not supported (rejected by mutex_init ASSERT) */
	MUTEX_NOLOCKDEP	= 3	/* suppress lockdep (see *_lockdep_*_maybe) */
} kmutex_type_t;
38
/*
 * SPL mutex: wraps a native Linux mutex, adding an owner pointer for
 * mutex_owned()/MUTEX_HELD() and a spinlock that serializes mutex_exit()
 * (see the comment above mutex_exit below).
 */
typedef struct {
	struct mutex m_mutex;	/* underlying native Linux mutex */
	spinlock_t m_lock;	/* used for serializing mutex_exit */
	kthread_t *m_owner;	/* holder thread; NULL when unheld */
#ifdef CONFIG_LOCKDEP
	kmutex_type_t m_type;	/* MUTEX_NOLOCKDEP disables lockdep around ops */
#endif /* CONFIG_LOCKDEP */
} kmutex_t;
47
48 #define MUTEX(mp) (&((mp)->m_mutex))
49
/*
 * Record the running thread as owner.  Called only after the underlying
 * mutex has been acquired (see mutex_tryenter / mutex_enter_nested).
 * Plain store; readers snapshot it via ACCESS_ONCE in mutex_owner().
 */
static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	mp->m_owner = current;
}
55
/*
 * Clear the owner field.  mutex_exit() does this BEFORE unlocking so a
 * racing mutex_owned() never sees a stale owner after the lock is free.
 */
static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	mp->m_owner = NULL;
}
61
/* Lock-free snapshot of the owner thread; NULL means unheld. */
#define mutex_owner(mp)		(ACCESS_ONCE((mp)->m_owner))
/* True iff the calling thread currently holds mp. */
#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
66
/*
 * Lockdep integration.  MUTEX_NOLOCKDEP mutexes temporarily disable lockdep
 * tracking around lock/unlock to silence false positives.  Without
 * CONFIG_LOCKDEP these collapse to nothing.
 *
 * Fix: dropped the stray '\' line continuations left over from when these
 * inline functions were converted from macros; they were harmless splices
 * but incorrect style and misleading to read.
 */
#ifdef CONFIG_LOCKDEP
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
#define spl_mutex_set_type(mp, type)
#define spl_mutex_lockdep_off_maybe(mp)
#define spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
90
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools
 */
#undef mutex_init
/*
 * Initialize mp.  Only MUTEX_DEFAULT and MUTEX_NOLOCKDEP are accepted;
 * name/ibc mirror the Solaris signature (ibc is unused here).
 *
 * Fix: wrapped in do { } while (0) so the macro is a single statement
 * (a bare { } breaks "if (c) mutex_init(...); else ..."), and
 * parenthesized the 'type' macro argument in the ASSERT.
 */
#define mutex_init(mp, name, type, ibc)					\
do {									\
	/* one lockdep class key per mutex_init() call site */		\
	static struct lock_class_key __key;				\
	ASSERT((type) == MUTEX_DEFAULT || (type) == MUTEX_NOLOCKDEP);	\
									\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key);	\
	spin_lock_init(&(mp)->m_lock);					\
	spl_mutex_clear_owner(mp);					\
	spl_mutex_set_type(mp, type);					\
} while (0)
108
#undef mutex_destroy
/*
 * Destroy mp.  Nothing to free; just verify it is not held.
 * Fix: do { } while (0) so the macro is statement-safe (a bare { }
 * breaks "if (c) mutex_destroy(mp); else ...").
 */
#define mutex_destroy(mp)						\
do {									\
	VERIFY3P(mutex_owner(mp), ==, NULL);				\
} while (0)
114
/* BEGIN CSTYLED */
/*
 * Try to take mp without blocking.  Evaluates to 1 on success (owner is
 * recorded), 0 if the mutex is already locked.  Uses a GNU statement
 * expression so it is already safe as a single expression.
 */
#define mutex_tryenter(mp)						\
({									\
	int _rc_;							\
									\
	spl_mutex_lockdep_off_maybe(mp);				\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)			\
		spl_mutex_set_owner(mp);				\
	spl_mutex_lockdep_on_maybe(mp);					\
									\
	_rc_;								\
})
/* END CSTYLED */
128
/*
 * Acquire mp, sleeping if necessary.  The ASSERT catches recursive entry
 * by the same thread.  The lockdep subclass is honored only when
 * CONFIG_DEBUG_LOCK_ALLOC provides mutex_lock_nested().
 *
 * Fix: both variants wrapped in do { } while (0) so the macro is a single
 * statement (a bare { } breaks "if (c) mutex_enter(mp); else ...").
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define mutex_enter_nested(mp, subclass)				\
do {									\
	ASSERT3P(mutex_owner(mp), !=, current);				\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_lock_nested(MUTEX(mp), (subclass));			\
	spl_mutex_lockdep_on_maybe(mp);					\
	spl_mutex_set_owner(mp);					\
} while (0)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
#define mutex_enter_nested(mp, subclass)				\
do {									\
	ASSERT3P(mutex_owner(mp), !=, current);				\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_lock(MUTEX(mp));						\
	spl_mutex_lockdep_on_maybe(mp);					\
	spl_mutex_set_owner(mp);					\
} while (0)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
148
149 #define mutex_enter(mp) mutex_enter_nested((mp), 0)
150
/*
 * The reason for the spinlock:
 *
 * The Linux mutex is designed with a fast-path/slow-path design such that it
 * does not guarantee serialization upon itself, allowing a race where latter
 * acquirers finish mutex_unlock before former ones.
 *
 * The race renders it unsafe to be used for serializing the freeing of an
 * object in which the mutex is embedded, where the latter acquirer could go
 * on to free the object while the former one is still doing mutex_unlock and
 * causing memory corruption.
 *
 * However, there are many places in ZFS where the mutex is used for
 * serializing object freeing, and the code is shared among other OSes without
 * this issue. Thus, we need the spinlock to force the serialization on
 * mutex_exit().
 *
 * See http://lwn.net/Articles/575477/ for the information about the race.
 *
 * Fix: wrapped in do { } while (0) so the macro is a single statement
 * (a bare { } breaks "if (c) mutex_exit(mp); else ...").  The statement
 * order inside is deliberate and unchanged: owner is cleared first, and
 * the unlock is bracketed by m_lock to serialize against a racing freer.
 */
#define mutex_exit(mp)							\
do {									\
	spl_mutex_clear_owner(mp);					\
	spin_lock(&(mp)->m_lock);					\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_unlock(MUTEX(mp));					\
	spl_mutex_lockdep_on_maybe(mp);					\
	spin_unlock(&(mp)->m_lock);					\
	/* NOTE: do not dereference mp after this point */		\
} while (0)
180
/* Subsystem init/teardown hooks — presumably defined in spl-mutex.c; confirm. */
int spl_mutex_init(void);
void spl_mutex_fini(void);
183
184 #endif /* _SPL_MUTEX_H */