]>
git.proxmox.com Git - mirror_spl.git/blob - include/sys/mutex.h
/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2009 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#include <sys/types.h>
#include <linux/mutex.h>

/*
 * NOTE(review): the include guard above and the kmutex_type_t enum below
 * were dropped by the page extraction and are reconstructed.  The guard
 * name matches the closing "#endif /* _SPL_MUTEX_H */" at the bottom of
 * the file; MUTEX_DEFAULT is required by the ASSERT() in mutex_init().
 * Verify the enum values against the upstream SPL sources.
 */
typedef enum {
	MUTEX_DEFAULT  = 0,
	MUTEX_SPIN     = 1,
	MUTEX_ADAPTIVE = 2
} kmutex_type_t;
39 #ifdef HAVE_MUTEX_OWNER
41 typedef struct mutex kmutex_t
;
43 static inline kthread_t
*
44 mutex_owner(kmutex_t
*mp
)
47 return (mp
->owner
)->task
;
51 #define mutex_owned(mp) (mutex_owner(mp) == current)
52 #define MUTEX_HELD(mp) mutex_owned(mp)
/*
 * mutex_init()/mutex_destroy() must be macros (not static inlines) so
 * lockdep attributes the static lock class key to the caller's site.
 * NOTE(review): the ({ ... }) statement-expression scaffolding and the
 * missing #else were dropped by the extraction and are restored here.
 */
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
	static struct lock_class_key __key;                             \
	ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
	__mutex_init((mp), #mp, &__key);                                \
})

#define mutex_destroy(mp)                                               \
({                                                                      \
	VERIFY(!MUTEX_HELD(mp));                                        \
})

/* Thin pass-throughs to the native kernel mutex primitives. */
#define mutex_tryenter(mp)		mutex_trylock(mp)
#define mutex_enter(mp)			mutex_lock(mp)
#define mutex_exit(mp)			mutex_unlock(mp)

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)	mutex_lock_nested(mp, sc)
#else /* HAVE_GPL_ONLY_SYMBOLS */
# define mutex_enter_nested(mp, sc)	mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */
79 #else /* HAVE_MUTEX_OWNER */
87 extern int spl_mutex_spin_max(void);
88 #else /* HAVE_TASK_CURR */
89 # define task_curr(owner) 0
90 # define spl_mutex_spin_max() 0
91 #endif /* HAVE_TASK_CURR */
93 #define MUTEX(mp) ((struct mutex *)(mp))
95 static inline kthread_t
*
96 spl_mutex_get_owner(kmutex_t
*mp
)
102 spl_mutex_set_owner(kmutex_t
*mp
)
106 spin_lock_irqsave(&MUTEX(mp
)->wait_lock
, flags
);
107 mp
->m_owner
= current
;
108 spin_unlock_irqrestore(&MUTEX(mp
)->wait_lock
, flags
);
112 spl_mutex_clear_owner(kmutex_t
*mp
)
116 spin_lock_irqsave(&MUTEX(mp
)->wait_lock
, flags
);
118 spin_unlock_irqrestore(&MUTEX(mp
)->wait_lock
, flags
);
121 static inline kthread_t
*
122 mutex_owner(kmutex_t
*mp
)
127 spin_lock_irqsave(&MUTEX(mp
)->wait_lock
, flags
);
128 owner
= spl_mutex_get_owner(mp
);
129 spin_unlock_irqrestore(&MUTEX(mp
)->wait_lock
, flags
);
134 #define mutex_owned(mp) (mutex_owner(mp) == current)
135 #define MUTEX_HELD(mp) mutex_owned(mp)
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native linux mutex functions (lock/unlock)
 * will be correctly located in the users code which is important
 * for the built in kernel lock analysis tools.
 * NOTE(review): the ({ ... }) scaffolding was dropped by the extraction
 * and is restored below.
 */
#define mutex_init(mp, name, type, ibc)                                 \
({                                                                      \
	static struct lock_class_key __key;                             \
	ASSERT(type == MUTEX_DEFAULT);                                  \
                                                                        \
	__mutex_init(MUTEX(mp), #mp, &__key);                           \
	spl_mutex_clear_owner(mp);                                      \
})

#define mutex_destroy(mp)                                               \
({                                                                      \
	VERIFY(!MUTEX_HELD(mp));                                        \
})

/* Evaluates to 1 when the lock was acquired, 0 when already held. */
#define mutex_tryenter(mp)                                              \
({                                                                      \
	int _rc_;                                                       \
                                                                        \
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)                     \
		spl_mutex_set_owner(mp);                                \
                                                                        \
	_rc_;                                                           \
})
/*
 * Adaptive mutexes assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor without dropping the lock.  In this case, we know it's
 * going to be a while so we stop spinning and go to sleep waiting for
 * the lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 * NOTE(review): the loop body (break/counter increment) and local
 * initializers were dropped by the extraction and are reconstructed.
 */
#define mutex_enter(mp)                                                 \
({                                                                      \
	kthread_t *_owner_;                                             \
	int _rc_, _count_;                                              \
                                                                        \
	_rc_ = 0;                                                       \
	_count_ = 0;                                                    \
	_owner_ = mutex_owner(mp);                                      \
                                                                        \
	/* Spin while the holder is on-cpu and under the spin budget */ \
	while (_owner_ && task_curr(_owner_) &&                         \
	    _count_ <= spl_mutex_spin_max()) {                          \
		if ((_rc_ = mutex_trylock(MUTEX(mp))))                  \
			break;                                          \
                                                                        \
		_count_++;                                              \
	}                                                               \
                                                                        \
	/* Spin budget exhausted (or never spun): sleep for the lock */ \
	if (!_rc_)                                                      \
		mutex_lock(MUTEX(mp));                                  \
                                                                        \
	spl_mutex_set_owner(mp);                                        \
})

#define mutex_exit(mp)                                                  \
({                                                                      \
	spl_mutex_clear_owner(mp);                                      \
	mutex_unlock(MUTEX(mp));                                        \
})
209 #ifdef HAVE_GPL_ONLY_SYMBOLS
210 # define mutex_enter_nested(mp, sc) \
212 mutex_lock_nested(MUTEX(mp, sc)); \
213 spl_mutex_set_owner(mp); \
216 # define mutex_enter_nested(mp, sc) \
222 #endif /* HAVE_MUTEX_OWNER */
224 int spl_mutex_init(void);
225 void spl_mutex_fini(void);
227 #endif /* _SPL_MUTEX_H */