/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"
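
/*
 * Mutex regression tests for the splat test framework.  Each test below
 * is registered with the framework in splat_mutex_init() and is driven
 * from user space through the splat utility (as an assumption about the
 * tool shipped with this module, an invocation along the lines of
 * `splat -t mutex:all`; the exact flags may differ by version).
 */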
#define SPLAT_MUTEX_NAME                "mutex"
#define SPLAT_MUTEX_DESC                "Kernel Mutex Tests"

#define SPLAT_MUTEX_TEST1_ID            0x0401
#define SPLAT_MUTEX_TEST1_NAME          "tryenter"
#define SPLAT_MUTEX_TEST1_DESC          "Validate mutex_tryenter() correctness"

#define SPLAT_MUTEX_TEST2_ID            0x0402
#define SPLAT_MUTEX_TEST2_NAME          "race"
#define SPLAT_MUTEX_TEST2_DESC          "Many threads entering/exiting the mutex"

#define SPLAT_MUTEX_TEST3_ID            0x0403
#define SPLAT_MUTEX_TEST3_NAME          "owned"
#define SPLAT_MUTEX_TEST3_DESC          "Validate mutex_owned() correctness"

#define SPLAT_MUTEX_TEST4_ID            0x0404
#define SPLAT_MUTEX_TEST4_NAME          "owner"
#define SPLAT_MUTEX_TEST4_DESC          "Validate mutex_owner() correctness"

#define SPLAT_MUTEX_TEST_MAGIC          0x115599DDUL
#define SPLAT_MUTEX_TEST_NAME           "mutex_test"
#define SPLAT_MUTEX_TEST_TASKQ          "mutex_taskq"
#define SPLAT_MUTEX_TEST_COUNT          128
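
/*
 * Private data handed to each taskq function.  mp_magic guards against
 * a stale or corrupted structure, mp_mtx is the mutex under test, and
 * mp_rc carries results between the taskq function and the test that
 * dispatched it.
 */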
typedef struct mutex_priv {
        unsigned long mp_magic;
        struct file *mp_file;
        kmutex_t mp_mtx;
        int mp_rc;
} mutex_priv_t;
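
/*
 * Taskq function for the tryenter test: attempt to take the mutex with
 * the non-blocking mutex_tryenter() and record the outcome in mp->mp_rc,
 * 0 on success or -EBUSY if the mutex was already held.
 */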
static void
splat_mutex_test1_func(void *arg)
{
        mutex_priv_t *mp = (mutex_priv_t *)arg;
        ASSERT(mp->mp_magic == SPLAT_MUTEX_TEST_MAGIC);

        if (mutex_tryenter(&mp->mp_mtx)) {
                mp->mp_rc = 0;
                mutex_exit(&mp->mp_mtx);
        } else {
                mp->mp_rc = -EBUSY;
        }
}
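
/*
 * Test 1: dispatch splat_mutex_test1_func() twice on a single-threaded
 * taskq, first while the mutex is held (mutex_tryenter() must fail) and
 * again after it has been dropped (mutex_tryenter() must succeed).
 */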
static int
splat_mutex_test1(struct file *file, void *arg)
{
        mutex_priv_t *mp;
        taskq_t *tq;
        int id, rc = 0;

        mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
        if (mp == NULL)
                return -ENOMEM;

        /* A single-threaded taskq ensures the dispatched function runs
         * in a different thread context than the dispatcher. */
        tq = taskq_create(SPLAT_MUTEX_TEST_TASKQ, 1, maxclsyspri,
                          50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out2;
        }

        mp->mp_magic = SPLAT_MUTEX_TEST_MAGIC;
        mp->mp_file = file;
        mutex_init(&mp->mp_mtx, SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
        mutex_enter(&mp->mp_mtx);

        /*
         * Schedule a task function which will try to acquire the mutex via
         * mutex_tryenter() while it is held.  This should fail and the task
         * function will indicate this status in the passed private data.
         */
        mp->mp_rc = -EINVAL;
        id = taskq_dispatch(tq, splat_mutex_test1_func, mp, TQ_SLEEP);
        if (id == 0) {
                mutex_exit(&mp->mp_mtx);
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
                             "taskq_dispatch() failed\n");
                rc = -EINVAL;
                goto out;
        }

        taskq_wait_id(tq, id);
        mutex_exit(&mp->mp_mtx);

        /* Task function successfully acquired the mutex, very bad! */
        if (mp->mp_rc != -EBUSY) {
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME,
                             "mutex_tryenter() incorrectly succeeded when "
                             "the mutex was held, %d/%d\n", id, mp->mp_rc);
                rc = -EINVAL;
                goto out;
        } else {
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
                             "mutex_tryenter() correctly failed when "
                             "the mutex was held\n");
        }

        /*
         * Schedule a task function which will try to acquire the mutex via
         * mutex_tryenter() while it is not held.  This should succeed and
         * can be verified by checking the private data.
         */
        mp->mp_rc = -EINVAL;
        id = taskq_dispatch(tq, splat_mutex_test1_func, mp, TQ_SLEEP);
        if (id == 0) {
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
                             "taskq_dispatch() failed\n");
                rc = -EINVAL;
                goto out;
        }

        taskq_wait_id(tq, id);

        /* Task function failed to acquire the mutex, very bad! */
        if (mp->mp_rc != 0) {
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME,
                             "mutex_tryenter() incorrectly failed when "
                             "the mutex was not held, %d/%d\n", id, mp->mp_rc);
                rc = -EINVAL;
        } else {
                splat_vprint(file, SPLAT_MUTEX_TEST1_NAME, "%s",
                             "mutex_tryenter() correctly succeeded "
                             "when the mutex was not held\n");
        }
out:
        taskq_destroy(tq);
        mutex_destroy(&(mp->mp_mtx));
out2:
        kfree(mp);
        return rc;
}
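
/*
 * Taskq function for the race test: enter the mutex, sleep briefly to
 * widen the race window, verify the shared counter was not modified by
 * another thread while we slept, then increment it and exit the mutex.
 */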
static void
splat_mutex_test2_func(void *arg)
{
        mutex_priv_t *mp = (mutex_priv_t *)arg;
        int rc;
        ASSERT(mp->mp_magic == SPLAT_MUTEX_TEST_MAGIC);

        /* Read the value before sleeping and write it after we wake up to
         * maximize the chance of a race if mutexes are not working properly */
        mutex_enter(&mp->mp_mtx);
        rc = mp->mp_rc;
        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 100);     /* 1/100 of a second */
        VERIFY(mp->mp_rc == rc);
        mp->mp_rc = rc + 1;
        mutex_exit(&mp->mp_mtx);
}
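
/*
 * Test 2: dispatch SPLAT_MUTEX_TEST_COUNT instances of
 * splat_mutex_test2_func() on a taskq with one thread per online CPU,
 * then verify every increment of the shared counter was serialized by
 * the mutex.
 */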
static int
splat_mutex_test2(struct file *file, void *arg)
{
        mutex_priv_t *mp;
        taskq_t *tq;
        int i, rc = 0;

        mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
        if (mp == NULL)
                return -ENOMEM;

        /* Create several threads allowing tasks to race with each other */
        tq = taskq_create(SPLAT_MUTEX_TEST_TASKQ, num_online_cpus(),
                          maxclsyspri, 50, INT_MAX, TASKQ_PREPOPULATE);
        if (tq == NULL) {
                rc = -ENOMEM;
                goto out;
        }

        mp->mp_magic = SPLAT_MUTEX_TEST_MAGIC;
        mp->mp_file = file;
        mutex_init(&(mp->mp_mtx), SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
        mp->mp_rc = 0;

        /*
         * Schedule N work items to the work queue each of which enters the
         * mutex, sleeps briefly, then exits the mutex.  On a multiprocessor
         * box these work items will be handled by all available CPUs.  The
         * task function checks to ensure the tracked shared variable is
         * always only incremented by one.  Additionally, the mutex itself
         * is instrumented such that if any two processors are in the
         * critical region at the same time the system will panic.  If the
         * mutex is implemented correctly this will never happen, and that
         * is a pass.
         */
        for (i = 0; i < SPLAT_MUTEX_TEST_COUNT; i++) {
                if (!taskq_dispatch(tq, splat_mutex_test2_func, mp, TQ_SLEEP)) {
                        splat_vprint(file, SPLAT_MUTEX_TEST2_NAME,
                                     "Failed to queue task %d\n", i);
                        rc = -EINVAL;
                }
        }

        taskq_wait(tq);

        if (mp->mp_rc == SPLAT_MUTEX_TEST_COUNT) {
                splat_vprint(file, SPLAT_MUTEX_TEST2_NAME, "%d racing threads "
                             "correctly entered/exited the mutex %d times\n",
                             num_online_cpus(), mp->mp_rc);
        } else {
                splat_vprint(file, SPLAT_MUTEX_TEST2_NAME, "%d racing threads "
                             "only processed %d/%d mutex work items\n",
                             num_online_cpus(), mp->mp_rc,
                             SPLAT_MUTEX_TEST_COUNT);
                rc = -EINVAL;
        }

        taskq_destroy(tq);
        mutex_destroy(&(mp->mp_mtx));
out:
        kfree(mp);
        return rc;
}
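
/*
 * Test 3: verify that mutex_owned() returns true only while the calling
 * thread holds the mutex and false once it has been dropped.
 */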
static int
splat_mutex_test3(struct file *file, void *arg)
{
        kmutex_t mtx;
        int rc = 0;

        mutex_init(&mtx, SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);

        mutex_enter(&mtx);

        /* Mutex should be owned by current */
        if (!mutex_owned(&mtx)) {
                splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "Mutex should "
                             "be owned by pid %d but is owned by pid %d\n",
                             current->pid,
                             mtx.km_owner ? mtx.km_owner->pid : -1);
                rc = -EINVAL;
                goto out;
        }

        mutex_exit(&mtx);

        /* Mutex should not be owned by any task */
        if (mutex_owned(&mtx)) {
                splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "Mutex should "
                             "not be owned but is owned by pid %d\n",
                             mtx.km_owner ? mtx.km_owner->pid : -1);
                rc = -EINVAL;
                goto out;
        }

        splat_vprint(file, SPLAT_MUTEX_TEST3_NAME, "%s",
                     "Correct mutex_owned() behavior\n");
out:
        mutex_destroy(&mtx);

        return rc;
}
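
/*
 * Test 4: verify that mutex_owner() returns the task currently holding
 * the mutex and NULL once the mutex has been dropped.
 */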
static int
splat_mutex_test4(struct file *file, void *arg)
{
        kmutex_t mtx;
        kthread_t *owner;
        int rc = 0;

        mutex_init(&mtx, SPLAT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);

        mutex_enter(&mtx);

        /* Mutex should be owned by current */
        owner = mutex_owner(&mtx);
        if (current != owner) {
                splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "Mutex should "
                             "be owned by pid %d but is owned by pid %d\n",
                             current->pid, owner ? owner->pid : -1);
                rc = -EINVAL;
                goto out;
        }

        mutex_exit(&mtx);

        /* Mutex should not be owned by any task */
        owner = mutex_owner(&mtx);
        if (owner) {
                splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "Mutex should not "
                             "be owned but is owned by pid %d\n", owner->pid);
                rc = -EINVAL;
                goto out;
        }

        splat_vprint(file, SPLAT_MUTEX_TEST4_NAME, "%s",
                     "Correct mutex_owner() behavior\n");
out:
        mutex_destroy(&mtx);

        return rc;
}
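
/*
 * Register the mutex test subsystem and its four tests with the splat
 * framework so they can be invoked from user space.
 */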
splat_subsystem_t *
splat_mutex_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_MUTEX_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_MUTEX_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_MUTEX;

        SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST1_NAME, SPLAT_MUTEX_TEST1_DESC,
                        SPLAT_MUTEX_TEST1_ID, splat_mutex_test1);
        SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST2_NAME, SPLAT_MUTEX_TEST2_DESC,
                        SPLAT_MUTEX_TEST2_ID, splat_mutex_test2);
        SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST3_NAME, SPLAT_MUTEX_TEST3_DESC,
                        SPLAT_MUTEX_TEST3_ID, splat_mutex_test3);
        SPLAT_TEST_INIT(sub, SPLAT_MUTEX_TEST4_NAME, SPLAT_MUTEX_TEST4_DESC,
                        SPLAT_MUTEX_TEST4_ID, splat_mutex_test4);

        return sub;
}
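
/*
 * Unregister the tests in the reverse order of registration and free
 * the subsystem descriptor.
 */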
void
splat_mutex_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_MUTEX_TEST1_ID);

        kfree(sub);
}

int
splat_mutex_id(void) {
        return SPLAT_SUBSYSTEM_MUTEX;
}