]>
Commit | Line | Data |
---|---|---|
a900e28e | 1 | /* |
716154c5 BB |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
715f6251 | 6 | * UCRL-CODE-235197 |
7 | * | |
716154c5 | 8 | * This file is part of the SPL, Solaris Porting Layer. |
3d6af2dd | 9 | * For details, see <http://zfsonlinux.org/>. |
716154c5 BB |
10 | * |
11 | * The SPL is free software; you can redistribute it and/or modify it | |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
715f6251 | 15 | * |
716154c5 | 16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT |
715f6251 | 17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
716154c5 | 22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. |
a900e28e | 23 | */ |
715f6251 | 24 | |
09b414e8 | 25 | #ifndef _SPL_MUTEX_H |
a900e28e | 26 | #define _SPL_MUTEX_H |
f1ca4da6 | 27 | |
f4b37741 | 28 | #include <sys/types.h> |
4d54fdee | 29 | #include <linux/mutex.h> |
22cd0f19 | 30 | #include <linux/compiler_compat.h> |
/*
 * Mutex types accepted by mutex_init().  Only MUTEX_DEFAULT is
 * supported by this Linux port; mutex_init() asserts the type is
 * MUTEX_DEFAULT, the other values exist for Solaris API parity.
 */
typedef enum {
	MUTEX_DEFAULT	= 0,
	MUTEX_SPIN	= 1,
	MUTEX_ADAPTIVE	= 2
} kmutex_type_t;
f1b59d26 | 37 | |
/*
 * Solaris-style mutex layered on the native Linux mutex.  The extra
 * spinlock serializes mutex_exit() so the kmutex_t may be embedded in
 * an object that is freed immediately after the lock is dropped (see
 * the rationale comment above mutex_exit() below), and m_owner tracks
 * the holding thread so mutex_owner()/mutex_owned() can be provided.
 */
typedef struct {
	struct mutex m_mutex;	/* underlying Linux sleeping mutex */
	spinlock_t m_lock;	/* used for serializing mutex_exit */
	kthread_t *m_owner;	/* current holder, NULL when unheld */
} kmutex_t;

/* Access the embedded native Linux mutex. */
#define	MUTEX(mp)		(&((mp)->m_mutex))
ae26dd00 TC |
45 | |
46 | static inline void | |
47 | spl_mutex_set_owner(kmutex_t *mp) | |
48 | { | |
49 | mp->m_owner = current; | |
50 | } | |
51 | ||
52 | static inline void | |
53 | spl_mutex_clear_owner(kmutex_t *mp) | |
54 | { | |
55 | mp->m_owner = NULL; | |
56 | } | |
57 | ||
a900e28e BB |
/*
 * Owner-tracking helpers.  ACCESS_ONCE() forces a single, non-cached
 * read of m_owner, which may be concurrently written by other threads
 * entering or exiting the mutex.
 */
#define	mutex_owner(mp)		(ACCESS_ONCE((mp)->m_owner))
#define	mutex_owned(mp)		(mutex_owner(mp) == current)
#define	MUTEX_HELD(mp)		mutex_owned(mp)
#define	MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
f1ca4da6 | 62 | |
4d54fdee BB |
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools
 */
#undef mutex_init
/*
 * mutex_init(mp, name, type, ibc): initialize a kmutex_t.
 *   mp   - mutex to initialize
 *   name - lockdep class name; falls back to the stringified mp
 *   type - must be MUTEX_DEFAULT (asserted)
 *   ibc  - unused (Solaris interrupt-block cookie, ignored here)
 *
 * Wrapped in do/while(0) so the macro behaves as a single statement:
 * with a bare { } block the trailing ';' at call sites forms an empty
 * statement, breaking constructs like "if (x) mutex_init(...); else".
 */
#define	mutex_init(mp, name, type, ibc)				\
do {								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
	spin_lock_init(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
} while (0)
4d54fdee | 79 | |
#undef mutex_destroy
/*
 * mutex_destroy(mp): assert the mutex is not held at destruction time.
 * No resources are released; the body only verifies the owner is NULL.
 * do/while(0) makes the macro a single statement so a trailing ';' at
 * the call site does not produce an empty statement (CERT PRE10-C).
 */
#define	mutex_destroy(mp)					\
do {								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
} while (0)
d61e12af | 85 | |
a900e28e BB |
/*
 * mutex_tryenter(mp): attempt to acquire the mutex without sleeping.
 * Evaluates to 1 when the lock was acquired (mutex_trylock() success),
 * 0 otherwise.  On success the caller is recorded as owner so the
 * mutex_owned()/mutex_owner() helpers work.  Implemented as a GCC
 * statement expression so it can be used in conditionals.
 */
#define	mutex_tryenter(mp)					\
({								\
	int _rc_;						\
								\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
								\
	_rc_;							\
})
f1ca4da6 | 95 | |
79a0056e TC |
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * mutex_enter_nested(mp, subclass): acquire the mutex, sleeping if it
 * is contended.  When lock debugging is enabled the subclass is passed
 * through to lockdep so legitimate nested acquisitions of locks of the
 * same class are not flagged.  The mutex is not recursive: re-entry by
 * the current owner is asserted against.
 *
 * Wrapped in do/while(0) so the macro expands to a single statement
 * and remains safe in unbraced if/else bodies (CERT PRE10-C).
 */
#define	mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	mutex_lock_nested(MUTEX(mp), (subclass));		\
	spl_mutex_set_owner(mp);				\
} while (0)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
#define	mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	mutex_lock(MUTEX(mp));					\
	spl_mutex_set_owner(mp);				\
} while (0)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */

/* Common case: acquire with lockdep subclass 0. */
#define	mutex_enter(mp)		mutex_enter_nested((mp), 0)
4d54fdee | 113 | |
5f920fbe BB |
/*
 * The reason for the spinlock:
 *
 * The Linux mutex is designed with a fast-path/slow-path design such that it
 * does not guarantee serialization upon itself, allowing a race where latter
 * acquirers finish mutex_unlock before former ones.
 *
 * The race renders it unsafe to be used for serializing the freeing of an
 * object in which the mutex is embedded, where the latter acquirer could go
 * on to free the object while the former one is still doing mutex_unlock and
 * causing memory corruption.
 *
 * However, there are many places in ZFS where the mutex is used for
 * serializing object freeing, and the code is shared among other OSes without
 * this issue. Thus, we need the spinlock to force the serialization on
 * mutex_exit().
 *
 * See http://lwn.net/Articles/575477/ for the information about the race.
 *
 * do/while(0) makes the macro a single statement so a trailing ';' at
 * the call site does not break unbraced if/else bodies (CERT PRE10-C).
 */
#define	mutex_exit(mp)						\
do {								\
	spin_lock(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	mutex_unlock(MUTEX(mp));				\
	spin_unlock(&(mp)->m_lock);				\
} while (0)
4d54fdee | 140 | |
4d54fdee BB |
/*
 * NOTE(review): presumably invoked once at SPL module load/unload to
 * set up and tear down mutex subsystem state -- confirm against the
 * definitions in the corresponding .c file.
 */
int spl_mutex_init(void);
void spl_mutex_fini(void);

#endif /* _SPL_MUTEX_H */