]>
Commit | Line | Data |
---|---|---|
70e083d2 TG |
1 | /* |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. | |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
6 | * UCRL-CODE-235197 | |
7 | * | |
8 | * This file is part of the SPL, Solaris Porting Layer. | |
9 | * For details, see <http://zfsonlinux.org/>. | |
10 | * | |
11 | * The SPL is free software; you can redistribute it and/or modify it | |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
15 | * | |
16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT | |
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. | |
23 | */ | |
24 | ||
25 | #ifndef _SPL_MUTEX_H | |
26 | #define _SPL_MUTEX_H | |
27 | ||
28 | #include <sys/types.h> | |
29 | #include <linux/mutex.h> | |
30 | #include <linux/compiler_compat.h> | |
86e3c28a | 31 | #include <linux/lockdep.h> |
70e083d2 TG |
32 | |
/*
 * Solaris mutex types.  Under Linux every type maps onto the native
 * struct mutex; MUTEX_NOLOCKDEP additionally opts the lock out of
 * lockdep validation (see the spl_mutex_lockdep_*_maybe helpers).
 */
typedef enum {
	MUTEX_DEFAULT = 0,
	MUTEX_SPIN = 1,
	MUTEX_ADAPTIVE = 2,
	MUTEX_NOLOCKDEP = 3
} kmutex_type_t;
39 | ||
/*
 * Solaris kmutex_t emulation.  Wraps a native Linux mutex, tracks the
 * owning thread explicitly (the kernel mutex does not expose this on
 * all configs), and carries a spinlock used to serialize mutex_exit()
 * (see the comment above mutex_exit below).
 */
typedef struct {
	struct mutex m_mutex;
	spinlock_t m_lock; /* used for serializing mutex_exit */
	kthread_t *m_owner; /* holder, or NULL when unheld */
#ifdef CONFIG_LOCKDEP
	kmutex_type_t m_type; /* kept only so NOLOCKDEP can bypass lockdep */
#endif /* CONFIG_LOCKDEP */
} kmutex_t;

/* Access the embedded native Linux mutex. */
#define MUTEX(mp) (&((mp)->m_mutex))
50 | ||
/* Record the calling thread as the mutex owner (call after acquiring). */
static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	mp->m_owner = current;
}
56 | ||
/* Clear the owner field (call before releasing the mutex). */
static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	mp->m_owner = NULL;
}
62 | ||
70e083d2 | 63 | #define mutex_owner(mp) (ACCESS_ONCE((mp)->m_owner)) |
70e083d2 TG |
64 | #define mutex_owned(mp) (mutex_owner(mp) == current) |
65 | #define MUTEX_HELD(mp) mutex_owned(mp) | |
66 | #define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp)) | |
67 | ||
86e3c28a CIK |
#ifdef CONFIG_LOCKDEP
/* Record the mutex type so NOLOCKDEP locks can bypass lockdep below. */
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}
/*
 * Temporarily disable lockdep checking for the current task if this
 * mutex was created with MUTEX_NOLOCKDEP.  NULL-safe so callers need
 * not guard.  (Stray '\' line continuations left over from a macro
 * conversion have been removed; they were harmless but misleading.)
 */
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}
/* Re-enable lockdep checking after a MUTEX_NOLOCKDEP lock operation. */
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else /* CONFIG_LOCKDEP */
#define spl_mutex_set_type(mp, type)
#define spl_mutex_lockdep_off_maybe(mp)
#define spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
91 | ||
70e083d2 TG |
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools.  mutex_init() in
 * particular must expand at the call site so that the static
 * lock_class_key is unique per initialization site (lockdep keys
 * lock classes off that address).
 *
 * do { } while (0) makes the macro a single statement so it composes
 * safely with unbraced if/else.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)					\
do {									\
	static struct lock_class_key __key;				\
	ASSERT((type) == MUTEX_DEFAULT || (type) == MUTEX_NOLOCKDEP);	\
									\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key);	\
	spin_lock_init(&(mp)->m_lock);					\
	spl_mutex_clear_owner(mp);					\
	spl_mutex_set_type(mp, type);					\
} while (0)
109 | ||
/*
 * Destroying a held mutex is a bug; there is no kernel-side teardown
 * required for struct mutex, so only the ownership invariant is checked.
 */
#undef mutex_destroy
#define mutex_destroy(mp)						\
do {									\
	VERIFY3P(mutex_owner(mp), ==, NULL);				\
} while (0)
115 | ||
/*
 * Try to acquire the mutex without blocking.  Evaluates to non-zero on
 * success (with the owner recorded), 0 if the lock is contended.
 * Implemented as a GNU statement expression so it yields a value.
 */
#define mutex_tryenter(mp)						\
({									\
	int _rc_;							\
									\
	spl_mutex_lockdep_off_maybe(mp);				\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)			\
		spl_mutex_set_owner(mp);				\
	spl_mutex_lockdep_on_maybe(mp);					\
									\
	_rc_;								\
})
127 | ||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Acquire the mutex, passing the lockdep subclass for nested-lock
 * annotation.  Recursive entry by the current thread is a bug and is
 * asserted against.  do { } while (0) keeps the macro if/else safe.
 */
#define mutex_enter_nested(mp, subclass)				\
do {									\
	ASSERT3P(mutex_owner(mp), !=, current);				\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_lock_nested(MUTEX(mp), (subclass));			\
	spl_mutex_lockdep_on_maybe(mp);					\
	spl_mutex_set_owner(mp);					\
} while (0)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
/* Without lock debugging the subclass is meaningless; plain mutex_lock. */
#define mutex_enter_nested(mp, subclass)				\
do {									\
	ASSERT3P(mutex_owner(mp), !=, current);				\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_lock(MUTEX(mp));						\
	spl_mutex_lockdep_on_maybe(mp);					\
	spl_mutex_set_owner(mp);					\
} while (0)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
147 | ||
/* Plain acquisition is nested entry at the default subclass (0). */
#define mutex_enter(mp) mutex_enter_nested((mp), 0)
/*
 * The reason for the spinlock:
 *
 * The Linux mutex is designed with a fast-path/slow-path design such that it
 * does not guarantee serialization upon itself, allowing a race where latter
 * acquirers finish mutex_unlock before former ones.
 *
 * The race renders it unsafe to be used for serializing the freeing of an
 * object in which the mutex is embedded, where the latter acquirer could go
 * on to free the object while the former one is still doing mutex_unlock and
 * causing memory corruption.
 *
 * However, there are many places in ZFS where the mutex is used for
 * serializing object freeing, and the code is shared among other OSes without
 * this issue. Thus, we need the spinlock to force the serialization on
 * mutex_exit().
 *
 * See http://lwn.net/Articles/575477/ for the information about the race.
 *
 * The statement order below is deliberate: the owner is cleared first,
 * and the spinlock brackets the native unlock so a racing acquirer
 * cannot free the object until this unlock has fully completed.
 * do { } while (0) keeps the macro safe in unbraced if/else bodies.
 */
#define mutex_exit(mp)							\
do {									\
	spl_mutex_clear_owner(mp);					\
	spin_lock(&(mp)->m_lock);					\
	spl_mutex_lockdep_off_maybe(mp);				\
	mutex_unlock(MUTEX(mp));					\
	spl_mutex_lockdep_on_maybe(mp);					\
	spin_unlock(&(mp)->m_lock);					\
	/* NOTE: do not dereference mp after this point */		\
} while (0)
179 | ||
/* Module-wide init/teardown hooks for the SPL mutex subsystem. */
int spl_mutex_init(void);
void spl_mutex_fini(void);

#endif /* _SPL_MUTEX_H */