]> git.proxmox.com Git - mirror_zfs.git/blob - modules/splat/splat-mutex.c
OK, everything builds now. My initial intent was to place all of
[mirror_zfs.git] / modules / splat / splat-mutex.c
1 #include <splat-ctl.h>
2
3 #define KZT_SUBSYSTEM_MUTEX 0x0400
4 #define KZT_MUTEX_NAME "mutex"
5 #define KZT_MUTEX_DESC "Kernel Mutex Tests"
6
7 #define KZT_MUTEX_TEST1_ID 0x0401
8 #define KZT_MUTEX_TEST1_NAME "tryenter"
9 #define KZT_MUTEX_TEST1_DESC "Validate mutex_tryenter() correctness"
10
11 #define KZT_MUTEX_TEST2_ID 0x0402
12 #define KZT_MUTEX_TEST2_NAME "race"
13 #define KZT_MUTEX_TEST2_DESC "Many threads entering/exiting the mutex"
14
15 #define KZT_MUTEX_TEST3_ID 0x0403
16 #define KZT_MUTEX_TEST3_NAME "owned"
17 #define KZT_MUTEX_TEST3_DESC "Validate mutex_owned() correctness"
18
19 #define KZT_MUTEX_TEST4_ID 0x0404
20 #define KZT_MUTEX_TEST4_NAME "owner"
21 #define KZT_MUTEX_TEST4_DESC "Validate mutex_owner() correctness"
22
23 #define KZT_MUTEX_TEST_MAGIC 0x115599DDUL
24 #define KZT_MUTEX_TEST_NAME "mutex_test"
25 #define KZT_MUTEX_TEST_WORKQ "mutex_wq"
26 #define KZT_MUTEX_TEST_COUNT 128
27
/* Private state shared between a test entry point and the work items
 * it schedules; passed through the opaque work-item argument. */
typedef struct mutex_priv {
        unsigned long mp_magic;         /* KZT_MUTEX_TEST_MAGIC, sanity check */
        struct file *mp_file;           /* handle for kzt_vprint() output */
        struct work_struct mp_work[KZT_MUTEX_TEST_COUNT]; /* items for the race test */
        kmutex_t mp_mtx;                /* the mutex under test */
        int mp_rc;                      /* result code / race counter written by workers */
} mutex_priv_t;
35
36
37 static void
38 kzt_mutex_test1_work(void *priv)
39 {
40 mutex_priv_t *mp = (mutex_priv_t *)priv;
41
42 ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
43 mp->mp_rc = 0;
44
45 if (!mutex_tryenter(&mp->mp_mtx))
46 mp->mp_rc = -EBUSY;
47 }
48
49 static int
50 kzt_mutex_test1(struct file *file, void *arg)
51 {
52 struct workqueue_struct *wq;
53 struct work_struct work;
54 mutex_priv_t *mp;
55 int rc = 0;
56
57 mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
58 if (mp == NULL)
59 return -ENOMEM;
60
61 wq = create_singlethread_workqueue(KZT_MUTEX_TEST_WORKQ);
62 if (wq == NULL) {
63 rc = -ENOMEM;
64 goto out2;
65 }
66
67 mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
68 mutex_enter(&(mp->mp_mtx));
69
70 mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
71 mp->mp_file = file;
72 INIT_WORK(&work, kzt_mutex_test1_work, mp);
73
74 /* Schedule a work item which will try and aquire the mutex via
75 * mutex_tryenter() while its held. This should fail and the work
76 * item will indicte this status in the passed private data. */
77 if (!queue_work(wq, &work)) {
78 mutex_exit(&(mp->mp_mtx));
79 rc = -EINVAL;
80 goto out;
81 }
82
83 flush_workqueue(wq);
84 mutex_exit(&(mp->mp_mtx));
85
86 /* Work item successfully aquired mutex, very bad! */
87 if (mp->mp_rc != -EBUSY) {
88 rc = -EINVAL;
89 goto out;
90 }
91
92 kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
93 "mutex_trylock() correctly failed when mutex held\n");
94
95 /* Schedule a work item which will try and aquire the mutex via
96 * mutex_tryenter() while it is not held. This should work and
97 * the item will indicte this status in the passed private data. */
98 if (!queue_work(wq, &work)) {
99 rc = -EINVAL;
100 goto out;
101 }
102
103 flush_workqueue(wq);
104
105 /* Work item failed to aquire mutex, very bad! */
106 if (mp->mp_rc != 0) {
107 rc = -EINVAL;
108 goto out;
109 }
110
111 kzt_vprint(file, KZT_MUTEX_TEST1_NAME, "%s",
112 "mutex_trylock() correctly succeeded when mutex unheld\n");
113 out:
114 mutex_destroy(&(mp->mp_mtx));
115 destroy_workqueue(wq);
116 out2:
117 kfree(mp);
118
119 return rc;
120 }
121
122 static void
123 kzt_mutex_test2_work(void *priv)
124 {
125 mutex_priv_t *mp = (mutex_priv_t *)priv;
126 int rc;
127
128 ASSERT(mp->mp_magic == KZT_MUTEX_TEST_MAGIC);
129
130 /* Read the value before sleeping and write it after we wake up to
131 * maximize the chance of a race if mutexs are not working properly */
132 mutex_enter(&mp->mp_mtx);
133 rc = mp->mp_rc;
134 set_current_state(TASK_INTERRUPTIBLE);
135 schedule_timeout(HZ / 100); /* 1/100 of a second */
136 mp->mp_rc = rc + 1;
137 mutex_exit(&mp->mp_mtx);
138 }
139
140 static int
141 kzt_mutex_test2(struct file *file, void *arg)
142 {
143 struct workqueue_struct *wq;
144 mutex_priv_t *mp;
145 int i, rc = 0;
146
147 mp = (mutex_priv_t *)kmalloc(sizeof(*mp), GFP_KERNEL);
148 if (mp == NULL)
149 return -ENOMEM;
150
151 /* Create a thread per CPU items on queue will race */
152 wq = create_workqueue(KZT_MUTEX_TEST_WORKQ);
153 if (wq == NULL) {
154 rc = -ENOMEM;
155 goto out;
156 }
157
158 mutex_init(&(mp->mp_mtx), KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
159
160 mp->mp_magic = KZT_MUTEX_TEST_MAGIC;
161 mp->mp_file = file;
162 mp->mp_rc = 0;
163
164 /* Schedule N work items to the work queue each of which enters the
165 * mutex, sleeps briefly, then exits the mutex. On a multiprocessor
166 * box these work items will be handled by all available CPUs. The
167 * mutex is instrumented such that if any two processors are in the
168 * critical region at the same time the system will panic. If the
169 * mutex is implemented right this will never happy, that's a pass. */
170 for (i = 0; i < KZT_MUTEX_TEST_COUNT; i++) {
171 INIT_WORK(&(mp->mp_work[i]), kzt_mutex_test2_work, mp);
172
173 if (!queue_work(wq, &(mp->mp_work[i]))) {
174 kzt_vprint(file, KZT_MUTEX_TEST2_NAME,
175 "Failed to queue work id %d\n", i);
176 rc = -EINVAL;
177 }
178 }
179
180 flush_workqueue(wq);
181
182 if (mp->mp_rc == KZT_MUTEX_TEST_COUNT) {
183 kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
184 "correctly entered/exited the mutex %d times\n",
185 num_online_cpus(), mp->mp_rc);
186 } else {
187 kzt_vprint(file, KZT_MUTEX_TEST2_NAME, "%d racing threads "
188 "only processed %d/%d mutex work items\n",
189 num_online_cpus(), mp->mp_rc, KZT_MUTEX_TEST_COUNT);
190 rc = -EINVAL;
191 }
192
193 mutex_destroy(&(mp->mp_mtx));
194 destroy_workqueue(wq);
195 out:
196 kfree(mp);
197
198 return rc;
199 }
200
201 static int
202 kzt_mutex_test3(struct file *file, void *arg)
203 {
204 kmutex_t mtx;
205 int rc = 0;
206
207 mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
208
209 mutex_enter(&mtx);
210
211 /* Mutex should be owned by current */
212 if (!mutex_owned(&mtx)) {
213 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
214 "be owned by pid %d but is owned by pid %d\n",
215 current->pid, mtx.km_owner ? mtx.km_owner->pid : -1);
216 rc = -EINVAL;
217 goto out;
218 }
219
220 mutex_exit(&mtx);
221
222 /* Mutex should not be owned by any task */
223 if (mutex_owned(&mtx)) {
224 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
225 "not be owned but is owned by pid %d\n",
226 mtx.km_owner ? mtx.km_owner->pid : -1);
227 rc = -EINVAL;
228 goto out;
229 }
230
231 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
232 "Correct mutex_owned() behavior\n");
233 out:
234 mutex_destroy(&mtx);
235
236 return rc;
237 }
238
239 static int
240 kzt_mutex_test4(struct file *file, void *arg)
241 {
242 kmutex_t mtx;
243 kthread_t *owner;
244 int rc = 0;
245
246 mutex_init(&mtx, KZT_MUTEX_TEST_NAME, MUTEX_DEFAULT, NULL);
247
248 mutex_enter(&mtx);
249
250 /* Mutex should be owned by current */
251 owner = mutex_owner(&mtx);
252 if (current != owner) {
253 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should "
254 "be owned by pid %d but is owned by pid %d\n",
255 current->pid, owner ? owner->pid : -1);
256 rc = -EINVAL;
257 goto out;
258 }
259
260 mutex_exit(&mtx);
261
262 /* Mutex should not be owned by any task */
263 owner = mutex_owner(&mtx);
264 if (owner) {
265 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "Mutex should not "
266 "be owned but is owned by pid %d\n", owner->pid);
267 rc = -EINVAL;
268 goto out;
269 }
270
271 kzt_vprint(file, KZT_MUTEX_TEST3_NAME, "%s",
272 "Correct mutex_owner() behavior\n");
273 out:
274 mutex_destroy(&mtx);
275
276 return rc;
277 }
278
279 kzt_subsystem_t *
280 kzt_mutex_init(void)
281 {
282 kzt_subsystem_t *sub;
283
284 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
285 if (sub == NULL)
286 return NULL;
287
288 memset(sub, 0, sizeof(*sub));
289 strncpy(sub->desc.name, KZT_MUTEX_NAME, KZT_NAME_SIZE);
290 strncpy(sub->desc.desc, KZT_MUTEX_DESC, KZT_DESC_SIZE);
291 INIT_LIST_HEAD(&sub->subsystem_list);
292 INIT_LIST_HEAD(&sub->test_list);
293 spin_lock_init(&sub->test_lock);
294 sub->desc.id = KZT_SUBSYSTEM_MUTEX;
295
296 KZT_TEST_INIT(sub, KZT_MUTEX_TEST1_NAME, KZT_MUTEX_TEST1_DESC,
297 KZT_MUTEX_TEST1_ID, kzt_mutex_test1);
298 KZT_TEST_INIT(sub, KZT_MUTEX_TEST2_NAME, KZT_MUTEX_TEST2_DESC,
299 KZT_MUTEX_TEST2_ID, kzt_mutex_test2);
300 KZT_TEST_INIT(sub, KZT_MUTEX_TEST3_NAME, KZT_MUTEX_TEST3_DESC,
301 KZT_MUTEX_TEST3_ID, kzt_mutex_test3);
302 KZT_TEST_INIT(sub, KZT_MUTEX_TEST4_NAME, KZT_MUTEX_TEST4_DESC,
303 KZT_MUTEX_TEST4_ID, kzt_mutex_test4);
304
305 return sub;
306 }
307
/* Unregister the mutex tests — in reverse order of their registration
 * in kzt_mutex_init() — and free the subsystem descriptor. */
void
kzt_mutex_fini(kzt_subsystem_t *sub)
{
        ASSERT(sub);
        KZT_TEST_FINI(sub, KZT_MUTEX_TEST4_ID);
        KZT_TEST_FINI(sub, KZT_MUTEX_TEST3_ID);
        KZT_TEST_FINI(sub, KZT_MUTEX_TEST2_ID);
        KZT_TEST_FINI(sub, KZT_MUTEX_TEST1_ID);

        kfree(sub);
}
319
320 int
321 kzt_mutex_id(void) {
322 return KZT_SUBSYSTEM_MUTEX;
323 }