/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <sys/mutex.h>

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_MUTEX

/* Mutex implementation based on those found in Solaris. This means
 * the MUTEX_DEFAULT type is an adaptive mutex. When calling
 * mutex_enter() your process will spin waiting for the lock if it's
 * likely the lock will be freed shortly. If it looks like the
 * lock will be held for a longer time we schedule and sleep waiting
 * for it. This determination is made by checking if the holder of
 * the lock is currently running on a CPU or sleeping waiting to be
 * scheduled. If the holder is currently running it's likely the
 * lock will be dropped shortly.
 *
 * XXX: This is basically a rough implementation to see if this
 * helps our performance. If it does, a more careful implementation
 * should be done, perhaps in assembly.
 */
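/* A brief usage sketch of the adaptive behavior from a caller's point of
 * view. The mutex_init()/mutex_enter()/mutex_exit()/mutex_destroy() names
 * are assumed to be the Solaris-style wrappers declared in <sys/mutex.h>;
 * this is an illustration only, not code compiled in this file:
 *
 *     kmutex_t lock;
 *
 *     mutex_init(&lock, "example-lock", MUTEX_DEFAULT, NULL);
 *     mutex_enter(&lock);      spins while the holder is on-CPU, else sleeps
 *     ... critical section ...
 *     mutex_exit(&lock);
 *     mutex_destroy(&lock);
 */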

/* 0: Never spin when trying to acquire lock
 * -1: Spin until acquired or holder yields without dropping lock
 * 1-MAX_INT: Spin for N attempts before sleeping for lock
 */
int mutex_spin_max = 0;

#ifdef DEBUG_MUTEX
int mutex_stats[MUTEX_STATS_SIZE] = { 0 };
spinlock_t mutex_stats_lock;
struct list_head mutex_stats_list;
#endif

int
__spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
{
        int flags = KM_SLEEP;

        ASSERT(mp);
        ASSERT(name);
        ASSERT(ibc == NULL);

        mp->km_name = NULL;
        mp->km_name_size = strlen(name) + 1;

        switch (type) {
                case MUTEX_DEFAULT:
                        mp->km_type = MUTEX_ADAPTIVE;
                        break;
                case MUTEX_SPIN:
                case MUTEX_ADAPTIVE:
                        mp->km_type = type;
                        break;
                default:
                        SBUG();
        }

        /* We may be called when there is a non-zero preempt_count or
         * interrupts are disabled, in which case we must not sleep.
         */
        if (current_thread_info()->preempt_count || irqs_disabled())
                flags = KM_NOSLEEP;

        /* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
        mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
        if (mp->km_sem == NULL)
                return -ENOMEM;

        mp->km_name = kmem_alloc(mp->km_name_size, flags);
        if (mp->km_name == NULL) {
                kmem_free(mp->km_sem, sizeof(struct semaphore));
                return -ENOMEM;
        }

        sema_init(mp->km_sem, 1);
        strncpy(mp->km_name, name, mp->km_name_size);

#ifdef DEBUG_MUTEX
        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, flags);
        if (mp->km_stats == NULL) {
                kmem_free(mp->km_name, mp->km_name_size);
                kmem_free(mp->km_sem, sizeof(struct semaphore));
                return -ENOMEM;
        }

        /* XXX - This appears to be a much more contended lock than I
         * would have expected. To run with this debugging enabled and
         * get reasonable performance we may need to be more clever and
         * do something like hash the mutex ptr onto one of several
         * lists to ease this single point of contention.
         */
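        /* A rough sketch of that idea using hypothetical names (an array of
         * locks and lists indexed by hashing the mutex pointer); nothing
         * below is implemented in this file:
         *
         *     i = hash_ptr(mp, MUTEX_STATS_HASH_BITS);
         *     spin_lock(&mutex_stats_locks[i]);
         *     list_add_tail(&mp->km_list, &mutex_stats_lists[i]);
         *     spin_unlock(&mutex_stats_locks[i]);
         */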
        spin_lock(&mutex_stats_lock);
        list_add_tail(&mp->km_list, &mutex_stats_list);
        spin_unlock(&mutex_stats_lock);
#endif
        mp->km_magic = KM_MAGIC;
        mp->km_owner = NULL;

        return 0;
}
EXPORT_SYMBOL(__spl_mutex_init);

void
__spl_mutex_destroy(kmutex_t *mp)
{
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);

#ifdef DEBUG_MUTEX
        spin_lock(&mutex_stats_lock);
        list_del_init(&mp->km_list);
        spin_unlock(&mutex_stats_lock);

        kmem_free(mp->km_stats, sizeof(int) * MUTEX_STATS_SIZE);
#endif
        kmem_free(mp->km_name, mp->km_name_size);
        kmem_free(mp->km_sem, sizeof(struct semaphore));

        memset(mp, KM_POISON, sizeof(*mp));
}
EXPORT_SYMBOL(__spl_mutex_destroy);

/* Return 1 if we acquired the mutex, else zero. */
int
__mutex_tryenter(kmutex_t *mp)
{
        int rc;
        ENTRY;

        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        MUTEX_STAT_INC(mutex_stats, MUTEX_TRYENTER_TOTAL);
        MUTEX_STAT_INC(mp->km_stats, MUTEX_TRYENTER_TOTAL);

        rc = down_trylock(mp->km_sem);
        if (rc == 0) {
                ASSERT(mp->km_owner == NULL);
                mp->km_owner = current;
                MUTEX_STAT_INC(mutex_stats, MUTEX_TRYENTER_NOT_HELD);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_TRYENTER_NOT_HELD);
        }

        RETURN(!rc);
}
EXPORT_SYMBOL(__mutex_tryenter);

#ifndef HAVE_TASK_CURR
#define task_curr(owner)        0
#endif

static void
mutex_enter_adaptive(kmutex_t *mp)
{
        struct task_struct *owner;
        int count = 0;

        /* Lock is not held so we expect to acquire the lock */
        if ((owner = mp->km_owner) == NULL) {
                down(mp->km_sem);
                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_NOT_HELD);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_NOT_HELD);
        } else {
                /* The lock is held by a currently running task which
                 * we expect will drop the lock before leaving the
                 * head of the runqueue. So the ideal thing to do
                 * is spin until we acquire the lock and avoid a
                 * context switch. However it is also possible the
                 * task holding the lock yields the processor without
                 * dropping the lock. In that case we know it's going
                 * to be a while, so we stop spinning and go to sleep
                 * waiting for the lock to be available. This should
                 * strike the optimum balance between spinning and
                 * sleeping waiting for a lock.
                 */
                while (task_curr(owner) && (count <= mutex_spin_max)) {
                        if (down_trylock(mp->km_sem) == 0) {
                                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SPIN);
                                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SPIN);
                                GOTO(out, count);
                        }
                        count++;
                }

                /* The lock is held by a sleeping task so it's going to
                 * cost us at least one context switch. We might as
                 * well sleep and yield the processor to other tasks.
                 */
                down(mp->km_sem);
                MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SLEEP);
                MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SLEEP);
        }
out:
        MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_TOTAL);
        MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_TOTAL);
}

void
__mutex_enter(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);

        switch (mp->km_type) {
                case MUTEX_SPIN:
                        while (down_trylock(mp->km_sem));
                        MUTEX_STAT_INC(mutex_stats, MUTEX_ENTER_SPIN);
                        MUTEX_STAT_INC(mp->km_stats, MUTEX_ENTER_SPIN);
                        break;
                case MUTEX_ADAPTIVE:
                        mutex_enter_adaptive(mp);
                        break;
        }

        ASSERT(mp->km_owner == NULL);
        mp->km_owner = current;

        EXIT;
}
EXPORT_SYMBOL(__mutex_enter);

void
__mutex_exit(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        ASSERT(mp->km_owner == current);
        mp->km_owner = NULL;
        up(mp->km_sem);
        EXIT;
}
EXPORT_SYMBOL(__mutex_exit);

/* Return 1 if mutex is held by current process, else zero. */
int
__mutex_owned(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        RETURN(mp->km_owner == current);
}
EXPORT_SYMBOL(__mutex_owned);

/* Return owner if mutex is owned, else NULL. */
kthread_t *
__spl_mutex_owner(kmutex_t *mp)
{
        ENTRY;
        ASSERT(mp);
        ASSERT(mp->km_magic == KM_MAGIC);
        RETURN(mp->km_owner);
}
EXPORT_SYMBOL(__spl_mutex_owner);

int
spl_mutex_init(void)
{
        ENTRY;
#ifdef DEBUG_MUTEX
        spin_lock_init(&mutex_stats_lock);
        INIT_LIST_HEAD(&mutex_stats_list);
#endif
        RETURN(0);
}

void
spl_mutex_fini(void)
{
        ENTRY;
#ifdef DEBUG_MUTEX
        ASSERT(list_empty(&mutex_stats_list));
#endif
        EXIT;
}

module_param(mutex_spin_max, int, 0644);
MODULE_PARM_DESC(mutex_spin_max, "Spin a maximum of N times to acquire lock");
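
/* A brief usage note: assuming this file is built into the "spl" module,
 * the tunable above can be set at load time or adjusted at runtime, e.g.:
 *
 *     modprobe spl mutex_spin_max=100
 *     echo 100 > /sys/module/spl/parameters/mutex_spin_max
 *
 * The module name and sysfs path are assumptions for illustration only.
 */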