]>
git.proxmox.com Git - mirror_zfs.git/blob - modules/splat/splat-mutex.c
3 #define KZT_SUBSYSTEM_MUTEX 0x0400
4 #define KZT_MUTEX_NAME "mutex"
5 #define KZT_MUTEX_DESC "Kernel Mutex Tests"
7 #define KZT_MUTEX_TEST1_ID 0x0401
8 #define KZT_MUTEX_TEST1_NAME "tryenter"
9 #define KZT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
11 #define KZT_MUTEX_TEST2_ID 0x0402
12 #define KZT_MUTEX_TEST2_NAME "race"
13 #define KZT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
15 #define KZT_MUTEX_TEST3_ID 0x0403
16 #define KZT_MUTEX_TEST3_NAME "owned"
17 #define KZT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
19 #define KZT_MUTEX_TEST4_ID 0x0404
20 #define KZT_MUTEX_TEST4_NAME "owner"
21 #define KZT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
23 #define KZT_MUTEX_TEST_MAGIC 0x115599DDUL
24 #define KZT_MUTEX_TEST_NAME "mutex_test"
25 #define KZT_MUTEX_TEST_WORKQ "mutex_wq"
26 #define KZT_MUTEX_TEST_COUNT 128
28 typedef struct mutex_priv
{
29 unsigned long mp_magic
;
31 struct work_struct mp_work
[KZT_MUTEX_TEST_COUNT
];
38 kzt_mutex_test1_work(void *priv
)
40 mutex_priv_t
*mp
= (mutex_priv_t
*)priv
;
42 ASSERT(mp
->mp_magic
== KZT_MUTEX_TEST_MAGIC
);
45 if (!mutex_tryenter(&mp
->mp_mtx
))
50 kzt_mutex_test1(struct file
*file
, void *arg
)
52 struct workqueue_struct
*wq
;
53 struct work_struct work
;
57 mp
= (mutex_priv_t
*)kmalloc(sizeof(*mp
), GFP_KERNEL
);
61 wq
= create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ
);
67 mutex_init(&(mp
->mp_mtx
), KZT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
68 mutex_enter(&(mp
->mp_mtx
));
70 mp
->mp_magic
= KZT_MUTEX_TEST_MAGIC
;
72 INIT_WORK(&work
, kzt_mutex_test1_work
, mp
);
74 /* Schedule a work item which will try and acquire the mutex via
75 * mutex_tryenter() while it is held. This should fail and the work
76 * item will indicate this status in the passed private data. */
77 if (!queue_work(wq
, &work
)) {
78 mutex_exit(&(mp
->mp_mtx
));
84 mutex_exit(&(mp
->mp_mtx
));
86 /* Work item successfully acquired mutex, very bad! */
87 if (mp
->mp_rc
!= -EBUSY
) {
92 kzt_vprint(file
, KZT_MUTEX_TEST1_NAME
, "%s",
93 "mutex_trylock() correctly failed when mutex held\n");
95 /* Schedule a work item which will try and acquire the mutex via
96 * mutex_tryenter() while it is not held. This should work and
97 * the item will indicate this status in the passed private data. */
98 if (!queue_work(wq
, &work
)) {
105 /* Work item failed to acquire mutex, very bad! */
106 if (mp
->mp_rc
!= 0) {
111 kzt_vprint(file
, KZT_MUTEX_TEST1_NAME
, "%s",
112 "mutex_trylock() correctly succeeded when mutex unheld\n");
114 mutex_destroy(&(mp
->mp_mtx
));
115 destroy_workqueue(wq
);
123 kzt_mutex_test2_work(void *priv
)
125 mutex_priv_t
*mp
= (mutex_priv_t
*)priv
;
128 ASSERT(mp
->mp_magic
== KZT_MUTEX_TEST_MAGIC
);
130 /* Read the value before sleeping and write it after we wake up to
131 * maximize the chance of a race if mutexes are not working properly */
132 mutex_enter(&mp
->mp_mtx
);
134 set_current_state(TASK_INTERRUPTIBLE
);
135 schedule_timeout(HZ
/ 100); /* 1/100 of a second */
137 mutex_exit(&mp
->mp_mtx
);
141 kzt_mutex_test2(struct file
*file
, void *arg
)
143 struct workqueue_struct
*wq
;
147 mp
= (mutex_priv_t
*)kmalloc(sizeof(*mp
), GFP_KERNEL
);
151 /* Create a thread per CPU items on queue will race */
152 wq
= create_workqueue(KZT_MUTEX_TEST_WORKQ
);
158 mutex_init(&(mp
->mp_mtx
), KZT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
160 mp
->mp_magic
= KZT_MUTEX_TEST_MAGIC
;
164 /* Schedule N work items to the work queue each of which enters the
165 * mutex, sleeps briefly, then exits the mutex. On a multiprocessor
166 * box these work items will be handled by all available CPUs. The
167 * mutex is instrumented such that if any two processors are in the
168 * critical region at the same time the system will panic. If the
169 * mutex is implemented right this will never happen, that's a pass. */
170 for (i
= 0; i
< KZT_MUTEX_TEST_COUNT
; i
++) {
171 INIT_WORK(&(mp
->mp_work
[i
]), kzt_mutex_test2_work
, mp
);
173 if (!queue_work(wq
, &(mp
->mp_work
[i
]))) {
174 kzt_vprint(file
, KZT_MUTEX_TEST2_NAME
,
175 "Failed to queue work id %d\n", i
);
182 if (mp
->mp_rc
== KZT_MUTEX_TEST_COUNT
) {
183 kzt_vprint(file
, KZT_MUTEX_TEST2_NAME
, "%d racing threads "
184 "correctly entered/exited the mutex %d times\n",
185 num_online_cpus(), mp
->mp_rc
);
187 kzt_vprint(file
, KZT_MUTEX_TEST2_NAME
, "%d racing threads "
188 "only processed %d/%d mutex work items\n",
189 num_online_cpus(), mp
->mp_rc
, KZT_MUTEX_TEST_COUNT
);
193 mutex_destroy(&(mp
->mp_mtx
));
194 destroy_workqueue(wq
);
202 kzt_mutex_test3(struct file
*file
, void *arg
)
207 mutex_init(&mtx
, KZT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
211 /* Mutex should be owned by current */
212 if (!mutex_owned(&mtx
)) {
213 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "Mutex should "
214 "be owned by pid %d but is owned by pid %d\n",
215 current
->pid
, mtx
.km_owner
? mtx
.km_owner
->pid
: -1);
222 /* Mutex should not be owned by any task */
223 if (mutex_owned(&mtx
)) {
224 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "Mutex should "
225 "not be owned but is owned by pid %d\n",
226 mtx
.km_owner
? mtx
.km_owner
->pid
: -1);
231 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "%s",
232 "Correct mutex_owned() behavior\n");
240 kzt_mutex_test4(struct file
*file
, void *arg
)
246 mutex_init(&mtx
, KZT_MUTEX_TEST_NAME
, MUTEX_DEFAULT
, NULL
);
250 /* Mutex should be owned by current */
251 owner
= mutex_owner(&mtx
);
252 if (current
!= owner
) {
253 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "Mutex should "
254 "be owned by pid %d but is owned by pid %d\n",
255 current
->pid
, owner
? owner
->pid
: -1);
262 /* Mutex should not be owned by any task */
263 owner
= mutex_owner(&mtx
);
265 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "Mutex should not "
266 "be owned but is owned by pid %d\n", owner
->pid
);
271 kzt_vprint(file
, KZT_MUTEX_TEST3_NAME
, "%s",
272 "Correct mutex_owner() behavior\n");
282 kzt_subsystem_t
*sub
;
284 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
288 memset(sub
, 0, sizeof(*sub
));
289 strncpy(sub
->desc
.name
, KZT_MUTEX_NAME
, KZT_NAME_SIZE
);
290 strncpy(sub
->desc
.desc
, KZT_MUTEX_DESC
, KZT_DESC_SIZE
);
291 INIT_LIST_HEAD(&sub
->subsystem_list
);
292 INIT_LIST_HEAD(&sub
->test_list
);
293 spin_lock_init(&sub
->test_lock
);
294 sub
->desc
.id
= KZT_SUBSYSTEM_MUTEX
;
296 KZT_TEST_INIT(sub
, KZT_MUTEX_TEST1_NAME
, KZT_MUTEX_TEST1_DESC
,
297 KZT_MUTEX_TEST1_ID
, kzt_mutex_test1
);
298 KZT_TEST_INIT(sub
, KZT_MUTEX_TEST2_NAME
, KZT_MUTEX_TEST2_DESC
,
299 KZT_MUTEX_TEST2_ID
, kzt_mutex_test2
);
300 KZT_TEST_INIT(sub
, KZT_MUTEX_TEST3_NAME
, KZT_MUTEX_TEST3_DESC
,
301 KZT_MUTEX_TEST3_ID
, kzt_mutex_test3
);
302 KZT_TEST_INIT(sub
, KZT_MUTEX_TEST4_NAME
, KZT_MUTEX_TEST4_DESC
,
303 KZT_MUTEX_TEST4_ID
, kzt_mutex_test4
);
309 kzt_mutex_fini(kzt_subsystem_t
*sub
)
312 KZT_TEST_FINI(sub
, KZT_MUTEX_TEST4_ID
);
313 KZT_TEST_FINI(sub
, KZT_MUTEX_TEST3_ID
);
314 KZT_TEST_FINI(sub
, KZT_MUTEX_TEST2_ID
);
315 KZT_TEST_FINI(sub
, KZT_MUTEX_TEST1_ID
);
322 return KZT_SUBSYSTEM_MUTEX
;