/* module/splat/splat-mutex.c -- SPL (Solaris Porting Layer) SPLAT mutex tests */
/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Mutex Tests.
\*****************************************************************************/
#include <sys/mutex.h>
#include <sys/taskq.h>
#include "splat-internal.h"
/*
 * SPLAT mutex subsystem: names, descriptions, and test identifiers.
 * (Reconstructed from a corrupted extraction: stray web-view line-number
 * prefixes removed; every macro name and value is unchanged.)
 */
#define SPLAT_MUTEX_NAME                "mutex"
#define SPLAT_MUTEX_DESC                "Kernel Mutex Tests"

#define SPLAT_MUTEX_TEST1_ID            0x0401
#define SPLAT_MUTEX_TEST1_NAME          "tryenter"
#define SPLAT_MUTEX_TEST1_DESC          "Validate mutex_tryenter() correctness"

#define SPLAT_MUTEX_TEST2_ID            0x0402
#define SPLAT_MUTEX_TEST2_NAME          "race"
#define SPLAT_MUTEX_TEST2_DESC          "Many threads entering/exiting the mutex"

#define SPLAT_MUTEX_TEST3_ID            0x0403
#define SPLAT_MUTEX_TEST3_NAME          "owned"
#define SPLAT_MUTEX_TEST3_DESC          "Validate mutex_owned() correctness"

#define SPLAT_MUTEX_TEST4_ID            0x0404
#define SPLAT_MUTEX_TEST4_NAME          "owner"
#define SPLAT_MUTEX_TEST4_DESC          "Validate mutex_owner() correctness"

#define SPLAT_MUTEX_TEST_MAGIC          0x115599DDUL
#define SPLAT_MUTEX_TEST_NAME           "mutex_test"
#define SPLAT_MUTEX_TEST_TASKQ          "mutex_taskq"
#define SPLAT_MUTEX_TEST_COUNT          128
55 typedef struct mutex_priv
{
56 unsigned long mp_magic
;
64 splat_mutex_test1_func(void *arg
)
66 mutex_priv_t
*mp
= (mutex_priv_t
*)arg
;
67 ASSERT(mp
->mp_magic
== SPLAT_MUTEX_TEST_MAGIC
);
69 if (mutex_tryenter(&mp
->mp_mtx
)) {
71 mutex_exit(&mp
->mp_mtx
);
/*
 * SPLAT test 1 ("tryenter"): validates mutex_tryenter() by dispatching
 * splat_mutex_test1_func() to a taskq twice -- once while this thread
 * holds mp->mp_mtx (the tryenter must fail, leaving -EBUSY in mp->mp_rc)
 * and once while the mutex is unheld (mp->mp_rc must be 0).
 *
 * NOTE(review): this block is a corrupted HTML-to-text extraction.  The
 * leading decimal prefixes are the upstream file's line numbers; gaps in
 * that numbering (79-83, 85-87, 90-94, 96, 99-100, 104-107, 111-114, 117,
 * 123-125, 129-131, 135-136, 138, 141-144, 146, 152-153, 157-159, 161-165)
 * are lines lost in extraction -- local declarations, allocation/dispatch
 * error checks, else/goto branches, and the taskq_destroy()/kfree()
 * cleanup and return path.  Restore from the canonical source before
 * compiling; do not hand-repair from this fragment alone.
 */
78 splat_mutex_test1(struct file
*file
, void *arg
)
/* upstream 84: allocate private data (its NULL check, 85-87, was lost) */
84 mp
= (mutex_priv_t
*)kmalloc(sizeof(*mp
), GFP_KERNEL
);
/* upstream 88-89: single-thread taskq so the dispatched tryenter runs
 * concurrently with this thread (create-failure handling, 90-94, lost) */
88 tq
= taskq_create(SPLAT_MUTEX_TEST_TASKQ
, 1, maxclsyspri
,
89 50, INT_MAX
, TASKQ_PREPOPULATE
);
/* upstream 95-98: stamp magic, init the mutex, and enter it so the
 * first dispatched tryenter is guaranteed to find it held */
95 mp
->mp_magic
= SPLAT_MUTEX_TEST_MAGIC
;
97 mutex_init(&mp
->mp_mtx
, SPLAT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
98 mutex_enter(&mp
->mp_mtx
);
101 * Schedule a task function which will try and acquire the mutex via
102 * mutex_tryenter() while it's held. This should fail and the task
103 * function will indicate this status in the passed private data.
106 id
= taskq_dispatch(tq
, splat_mutex_test1_func
, mp
, TQ_SLEEP
);
/* upstream 108-110: dispatch-failure branch (its enclosing "if" on
 * ~107 was lost): drop the mutex and report the error */
108 mutex_exit(&mp
->mp_mtx
);
109 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
, "%s",
110 "taskq_dispatch() failed\n");
/* upstream 115-116: wait for the task (its tryenter fails immediately
 * since we still hold the mutex), then release the mutex */
115 taskq_wait_id(tq
, id
);
116 mutex_exit(&mp
->mp_mtx
);
118 /* Task function successfully acquired mutex, very bad! */
119 if (mp
->mp_rc
!= -EBUSY
) {
120 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
,
121 "mutex_trylock() incorrectly succeeded when "
122 "the mutex was held, %d/%d\n", id
, mp
->mp_rc
);
/* upstream 123-125 (rc bookkeeping / else) lost; 126-128 is the
 * success-path message of that branch */
126 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
, "%s",
127 "mutex_trylock() correctly failed when "
128 "the mutex was held\n");
132 * Schedule a task function which will try and acquire the mutex via
133 * mutex_tryenter() while it is not held. This should succeed and
134 * can be verified by checking the private data.
137 id
= taskq_dispatch(tq
, splat_mutex_test1_func
, mp
, TQ_SLEEP
);
139 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
, "%s",
140 "taskq_dispatch() failed\n");
145 taskq_wait_id(tq
, id
);
147 /* Task function failed to acquire mutex, very bad! */
148 if (mp
->mp_rc
!= 0) {
149 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
,
150 "mutex_trylock() incorrectly failed when "
151 "the mutex was not held, %d/%d\n", id
, mp
->mp_rc
);
154 splat_vprint(file
, SPLAT_MUTEX_TEST1_NAME
, "%s",
155 "mutex_trylock() correctly succeeded "
156 "when the mutex was not held\n");
/* upstream 160: destroy the test mutex; the surrounding cleanup labels,
 * taskq_destroy(), kfree() and return (157-159, 161-165) were lost */
160 mutex_destroy(&(mp
->mp_mtx
));
167 splat_mutex_test2_func(void *arg
)
169 mutex_priv_t
*mp
= (mutex_priv_t
*)arg
;
171 ASSERT(mp
->mp_magic
== SPLAT_MUTEX_TEST_MAGIC
);
173 /* Read the value before sleeping and write it after we wake up to
174 * maximize the chance of a race if mutexs are not working properly */
175 mutex_enter(&mp
->mp_mtx
);
177 set_current_state(TASK_INTERRUPTIBLE
);
178 schedule_timeout(HZ
/ 100); /* 1/100 of a second */
179 VERIFY(mp
->mp_rc
== rc
);
181 mutex_exit(&mp
->mp_mtx
);
/*
 * SPLAT test 2 ("race"): dispatches SPLAT_MUTEX_TEST_COUNT work items to
 * a taskq with one thread per online CPU; each item enters mp->mp_mtx,
 * sleeps, and increments the shared counter (see splat_mutex_test2_func).
 * The test passes when the counter reaches exactly SPLAT_MUTEX_TEST_COUNT,
 * i.e. no two tasks ever raced inside the critical section.
 *
 * NOTE(review): corrupted HTML-to-text extraction.  The decimal prefixes
 * are upstream line numbers; gaps (186-190, 192-194, 198-202, 204,
 * 206-208, 217, 222-227, 232, 236-239, 241-246) mark lines lost in
 * extraction -- locals, error handling for kmalloc/taskq_create and the
 * dispatch loop, mp->mp_rc initialization, the taskq_wait() barrier, rc
 * bookkeeping, and the taskq_destroy()/kfree()/return cleanup path.
 * Restore from the canonical source before compiling.
 */
185 splat_mutex_test2(struct file
*file
, void *arg
)
/* upstream 191: allocate private data (NULL check, 192-194, lost) */
191 mp
= (mutex_priv_t
*)kmalloc(sizeof(*mp
), GFP_KERNEL
);
195 /* Create several threads allowing tasks to race with each other */
196 tq
= taskq_create(SPLAT_MUTEX_TEST_TASKQ
, num_online_cpus(),
197 maxclsyspri
, 50, INT_MAX
, TASKQ_PREPOPULATE
);
/* upstream 203-205: stamp magic and initialize the mutex under test;
 * the mp->mp_rc = 0 initialization (~204/206) was lost */
203 mp
->mp_magic
= SPLAT_MUTEX_TEST_MAGIC
;
205 mutex_init(&(mp
->mp_mtx
), SPLAT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
209 * Schedule N work items to the work queue each of which enters the
210 * mutex, sleeps briefly, then exits the mutex. On a multiprocessor
211 * box these work items will be handled by all available CPUs. The
212 * task function checks to ensure the tracked shared variable is
213 * always only incremented by one. Additionally, the mutex itself
214 * is instrumented such that if any two processors are in the
215 * critical region at the same time the system will panic. If the
216 * mutex is implemented right this will never happy, that's a pass.
218 for (i
= 0; i
< SPLAT_MUTEX_TEST_COUNT
; i
++) {
219 if (!taskq_dispatch(tq
, splat_mutex_test2_func
, mp
, TQ_SLEEP
)) {
220 splat_vprint(file
, SPLAT_MUTEX_TEST2_NAME
,
221 "Failed to queue task %d\n", i
);
/* upstream 222-227 lost: rc bookkeeping, loop close, and the
 * taskq_wait() that must precede the final counter check */
228 if (mp
->mp_rc
== SPLAT_MUTEX_TEST_COUNT
) {
229 splat_vprint(file
, SPLAT_MUTEX_TEST2_NAME
, "%d racing threads "
230 "correctly entered/exited the mutex %d times\n",
231 num_online_cpus(), mp
->mp_rc
);
/* upstream 232 (else) lost; 233-235 is the failure-path message */
233 splat_vprint(file
, SPLAT_MUTEX_TEST2_NAME
, "%d racing threads "
234 "only processed %d/%d mutex work items\n",
235 num_online_cpus(),mp
->mp_rc
,SPLAT_MUTEX_TEST_COUNT
);
/* upstream 240: destroy the mutex; taskq_destroy()/kfree()/return lost */
240 mutex_destroy(&(mp
->mp_mtx
));
247 splat_mutex_owned(void *priv
)
249 mutex_priv_t
*mp
= (mutex_priv_t
*)priv
;
251 ASSERT(mp
->mp_magic
== SPLAT_MUTEX_TEST_MAGIC
);
252 mp
->mp_rc
= mutex_owned(&mp
->mp_mtx
);
253 mp
->mp_rc2
= MUTEX_HELD(&mp
->mp_mtx
);
/*
 * SPLAT test 3 ("owned"): validates mutex_owned(), which must report
 * ownership only to the owning thread.  Four combinations are checked:
 * held and queried from this thread (owned), held here but queried from
 * a taskq thread via splat_mutex_owned() (not owned), released and
 * queried here (not owned), released and queried from the taskq (not
 * owned).
 *
 * NOTE(review): corrupted HTML-to-text extraction.  The decimal prefixes
 * are upstream line numbers; gaps (258-262, 264, 266, 271-273, 275,
 * 280-283, 288-292, 297-300, 302, 307-310, 315-319, 324-327, 330-331,
 * 333, 335-339) mark lines lost in extraction -- locals (stack-allocated
 * mutex_priv_t mp, tq, rc), the taskq_wait() after each dispatch, rc
 * bookkeeping, goto-style cleanup labels, taskq_destroy(), and the
 * return.  Restore from the canonical source before compiling.
 *
 * NOTE(review): the visible format string at upstream line 306 ends in
 * "\b" where "\n" was almost certainly intended -- confirm upstream
 * before fixing (string literals left byte-identical here).
 */
257 splat_mutex_test3(struct file
*file
, void *arg
)
/* upstream 263-265: stamp magic, initialize the mutex on the stack */
263 mp
.mp_magic
= SPLAT_MUTEX_TEST_MAGIC
;
265 mutex_init(&mp
.mp_mtx
, SPLAT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
267 if ((tq
= taskq_create(SPLAT_MUTEX_TEST_NAME
, 1, maxclsyspri
,
268 50, INT_MAX
, TASKQ_PREPOPULATE
)) == NULL
) {
269 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Taskq '%s' "
270 "create failed\n", SPLAT_MUTEX_TEST3_NAME
);
274 mutex_enter(&mp
.mp_mtx
);
276 /* Mutex should be owned by current */
277 if (!mutex_owned(&mp
.mp_mtx
)) {
278 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Unowned mutex "
279 "should be owned by pid %d\n", current
->pid
);
/* upstream 284-287: query ownership from the taskq thread while this
 * thread still holds the mutex (the taskq_wait() after it was lost) */
284 if (taskq_dispatch(tq
, splat_mutex_owned
, &mp
, TQ_SLEEP
) == 0) {
285 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Failed to "
286 "dispatch function '%s' to taskq\n",
287 sym2str(splat_mutex_owned
));
293 /* Mutex should not be owned which checked from a different thread */
294 if (mp
.mp_rc
|| mp
.mp_rc2
) {
295 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Mutex owned by "
296 "pid %d not by taskq\n", current
->pid
);
301 mutex_exit(&mp
.mp_mtx
);
303 /* Mutex should not be owned by current */
304 if (mutex_owned(&mp
.mp_mtx
)) {
305 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Mutex owned by "
306 "pid %d it should be unowned\b", current
->pid
);
/* upstream 311-314: query ownership from the taskq thread with the
 * mutex released (the taskq_wait() after it was lost) */
311 if (taskq_dispatch(tq
, splat_mutex_owned
, &mp
, TQ_SLEEP
) == 0) {
312 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Failed to "
313 "dispatch function '%s' to taskq\n",
314 sym2str(splat_mutex_owned
));
320 /* Mutex should be owned by no one */
321 if (mp
.mp_rc
|| mp
.mp_rc2
) {
322 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "Mutex owned by "
323 "no one, %d/%d disagrees\n", mp
.mp_rc
, mp
.mp_rc2
);
328 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "%s",
329 "Correct mutex_owned() behavior\n");
/* upstream 332-334: cleanup -- note the mutex_exit() here belongs to an
 * error-path label (the mutex was already exited on the success path) */
332 mutex_exit(&mp
.mp_mtx
);
334 mutex_destroy(&mp
.mp_mtx
);
/*
 * SPLAT test 4 ("owner"): validates mutex_owner() -- it must return the
 * current task while the mutex is held by this thread, and NULL (with
 * MUTEX_HELD() false) once it has been dropped.
 *
 * NOTE(review): corrupted HTML-to-text extraction.  The decimal prefixes
 * are upstream line numbers; gaps (342-346, 348-349, 353-356, 360-365,
 * 372-377, 380, 383-386, 389-395) mark lines lost in extraction --
 * locals (kmutex_t mtx, kthread_t *owner, rc), the mutex_enter()/
 * mutex_exit() calls bracketing the ownership checks, rc bookkeeping,
 * mutex_destroy(), and the return.  Restore from the canonical source
 * before compiling.
 *
 * NOTE(review): the visible success message at upstream line 387 tags
 * its output with SPLAT_MUTEX_TEST3_NAME inside test 4 -- this looks
 * like a copy/paste slip; confirm upstream (left byte-identical here).
 */
341 splat_mutex_test4(struct file
*file
, void *arg
)
347 mutex_init(&mtx
, SPLAT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
350 * Verify mutex owner is cleared after being dropped. Depending
351 * on how you build your kernel this behavior changes, ensure the
352 * SPL mutex implementation is properly detecting this.
/* upstream 357-359: after an enter/exit cycle (lost, ~353-356) the
 * mutex must not report itself held; "bit is by" is upstream's typo */
357 if (MUTEX_HELD(&mtx
)) {
358 splat_vprint(file
, SPLAT_MUTEX_TEST4_NAME
, "Mutex should "
359 "not be held, bit is by %p\n", mutex_owner(&mtx
));
366 /* Mutex should be owned by current */
367 owner
= mutex_owner(&mtx
);
368 if (current
!= owner
) {
369 splat_vprint(file
, SPLAT_MUTEX_TEST4_NAME
, "Mutex should "
370 "be owned by pid %d but is owned by pid %d\n",
371 current
->pid
, owner
? owner
->pid
: -1);
378 /* Mutex should not be owned by any task */
379 owner
= mutex_owner(&mtx
);
/* upstream 380 (the "if (owner)" guard) lost; 381-382 is its body */
381 splat_vprint(file
, SPLAT_MUTEX_TEST4_NAME
, "Mutex should not "
382 "be owned but is owned by pid %d\n", owner
->pid
);
387 splat_vprint(file
, SPLAT_MUTEX_TEST3_NAME
, "%s",
388 "Correct mutex_owner() behavior\n");
396 splat_mutex_init(void)
398 splat_subsystem_t
*sub
;
400 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
404 memset(sub
, 0, sizeof(*sub
));
405 strncpy(sub
->desc
.name
, SPLAT_MUTEX_NAME
, SPLAT_NAME_SIZE
);
406 strncpy(sub
->desc
.desc
, SPLAT_MUTEX_DESC
, SPLAT_DESC_SIZE
);
407 INIT_LIST_HEAD(&sub
->subsystem_list
);
408 INIT_LIST_HEAD(&sub
->test_list
);
409 spin_lock_init(&sub
->test_lock
);
410 sub
->desc
.id
= SPLAT_SUBSYSTEM_MUTEX
;
412 SPLAT_TEST_INIT(sub
, SPLAT_MUTEX_TEST1_NAME
, SPLAT_MUTEX_TEST1_DESC
,
413 SPLAT_MUTEX_TEST1_ID
, splat_mutex_test1
);
414 SPLAT_TEST_INIT(sub
, SPLAT_MUTEX_TEST2_NAME
, SPLAT_MUTEX_TEST2_DESC
,
415 SPLAT_MUTEX_TEST2_ID
, splat_mutex_test2
);
416 SPLAT_TEST_INIT(sub
, SPLAT_MUTEX_TEST3_NAME
, SPLAT_MUTEX_TEST3_DESC
,
417 SPLAT_MUTEX_TEST3_ID
, splat_mutex_test3
);
418 SPLAT_TEST_INIT(sub
, SPLAT_MUTEX_TEST4_NAME
, SPLAT_MUTEX_TEST4_DESC
,
419 SPLAT_MUTEX_TEST4_ID
, splat_mutex_test4
);
425 splat_mutex_fini(splat_subsystem_t
*sub
)
428 SPLAT_TEST_FINI(sub
, SPLAT_MUTEX_TEST4_ID
);
429 SPLAT_TEST_FINI(sub
, SPLAT_MUTEX_TEST3_ID
);
430 SPLAT_TEST_FINI(sub
, SPLAT_MUTEX_TEST2_ID
);
431 SPLAT_TEST_FINI(sub
, SPLAT_MUTEX_TEST1_ID
);
/*
 * Returns the unique SPLAT subsystem id for the mutex tests.
 * NOTE(review): corrupted extraction -- the return-type line (upstream
 * 436) and closing brace (upstream 439) fall outside the visible text;
 * the definition ends mid-function here.
 */
437 splat_mutex_id(void) {
438 return SPLAT_SUBSYSTEM_MUTEX
;