/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Thread Tests.
\*****************************************************************************/
27 #include <sys/thread.h>
28 #include <sys/random.h>
29 #include <linux/delay.h>
30 #include <linux/mm_compat.h>
31 #include <linux/slab.h>
32 #include "splat-internal.h"
#define SPLAT_THREAD_NAME		"thread"
#define SPLAT_THREAD_DESC		"Kernel Thread Tests"

#define SPLAT_THREAD_TEST1_ID		0x0601
#define SPLAT_THREAD_TEST1_NAME		"create"
#define SPLAT_THREAD_TEST1_DESC		"Validate thread creation"

#define SPLAT_THREAD_TEST2_ID		0x0602
#define SPLAT_THREAD_TEST2_NAME		"exit"
#define SPLAT_THREAD_TEST2_DESC		"Validate thread exit"

/* Fixed from 0x6003: test IDs follow the 0x06xx subsystem numbering
 * used by TEST1 (0x0601) and TEST2 (0x0602). */
#define SPLAT_THREAD_TEST3_ID		0x0603
#define SPLAT_THREAD_TEST3_NAME		"tsd"
#define SPLAT_THREAD_TEST3_DESC		"Validate thread specific data"

#define SPLAT_THREAD_TEST_MAGIC		0x4488CC00UL
#define SPLAT_THREAD_TEST_KEYS		32
#define SPLAT_THREAD_TEST_THREADS	16
53 typedef struct thread_priv
{
54 unsigned long tp_magic
;
57 wait_queue_head_t tp_waitq
;
58 uint_t tp_keys
[SPLAT_THREAD_TEST_KEYS
];
65 splat_thread_rc(thread_priv_t
*tp
, int rc
)
69 spin_lock(&tp
->tp_lock
);
70 ret
= (tp
->tp_rc
== rc
);
71 spin_unlock(&tp
->tp_lock
);
77 splat_thread_count(thread_priv_t
*tp
, int count
)
81 spin_lock(&tp
->tp_lock
);
82 ret
= (tp
->tp_count
== count
);
83 spin_unlock(&tp
->tp_lock
);
89 splat_thread_work1(void *priv
)
91 thread_priv_t
*tp
= (thread_priv_t
*)priv
;
93 spin_lock(&tp
->tp_lock
);
94 ASSERT(tp
->tp_magic
== SPLAT_THREAD_TEST_MAGIC
);
96 wake_up(&tp
->tp_waitq
);
97 spin_unlock(&tp
->tp_lock
);
103 splat_thread_test1(struct file
*file
, void *arg
)
108 tp
.tp_magic
= SPLAT_THREAD_TEST_MAGIC
;
110 spin_lock_init(&tp
.tp_lock
);
111 init_waitqueue_head(&tp
.tp_waitq
);
114 thr
= (kthread_t
*)thread_create(NULL
, 0, splat_thread_work1
, &tp
, 0,
115 &p0
, TS_RUN
, minclsyspri
);
116 /* Must never fail under Solaris, but we check anyway since this
117 * can happen in the linux SPL, we may want to change this behavior */
121 /* Sleep until the thread sets tp.tp_rc == 1 */
122 wait_event(tp
.tp_waitq
, splat_thread_rc(&tp
, 1));
124 splat_vprint(file
, SPLAT_THREAD_TEST1_NAME
, "%s",
125 "Thread successfully started properly\n");
130 splat_thread_work2(void *priv
)
132 thread_priv_t
*tp
= (thread_priv_t
*)priv
;
134 spin_lock(&tp
->tp_lock
);
135 ASSERT(tp
->tp_magic
== SPLAT_THREAD_TEST_MAGIC
);
137 wake_up(&tp
->tp_waitq
);
138 spin_unlock(&tp
->tp_lock
);
142 /* The following code is unreachable when thread_exit() is
143 * working properly, which is exactly what we're testing */
144 spin_lock(&tp
->tp_lock
);
146 wake_up(&tp
->tp_waitq
);
147 spin_unlock(&tp
->tp_lock
);
151 splat_thread_test2(struct file
*file
, void *arg
)
157 tp
.tp_magic
= SPLAT_THREAD_TEST_MAGIC
;
159 spin_lock_init(&tp
.tp_lock
);
160 init_waitqueue_head(&tp
.tp_waitq
);
163 thr
= (kthread_t
*)thread_create(NULL
, 0, splat_thread_work2
, &tp
, 0,
164 &p0
, TS_RUN
, minclsyspri
);
165 /* Must never fail under Solaris, but we check anyway since this
166 * can happen in the linux SPL, we may want to change this behavior */
170 /* Sleep until the thread sets tp.tp_rc == 1 */
171 wait_event(tp
.tp_waitq
, splat_thread_rc(&tp
, 1));
173 /* Sleep until the thread sets tp.tp_rc == 2, or until we hit
174 * the timeout. If thread exit is working properly we should
175 * hit the timeout and never see to.tp_rc == 2. */
176 rc
= wait_event_timeout(tp
.tp_waitq
, splat_thread_rc(&tp
, 2), HZ
/ 10);
179 splat_vprint(file
, SPLAT_THREAD_TEST2_NAME
, "%s",
180 "Thread did not exit properly at thread_exit()\n");
182 splat_vprint(file
, SPLAT_THREAD_TEST2_NAME
, "%s",
183 "Thread successfully exited at thread_exit()\n");
190 splat_thread_work3_common(thread_priv_t
*tp
)
195 /* set a unique value for each key using a random value */
196 get_random_bytes((void *)&rnd
, 4);
197 for (i
= 0; i
< SPLAT_THREAD_TEST_KEYS
; i
++)
198 tsd_set(tp
->tp_keys
[i
], (void *)(i
+ rnd
));
200 /* verify the unique value for each key */
201 for (i
= 0; i
< SPLAT_THREAD_TEST_KEYS
; i
++)
202 if (tsd_get(tp
->tp_keys
[i
]) != (void *)(i
+ rnd
))
205 /* set the value to thread_priv_t for use by the destructor */
206 for (i
= 0; i
< SPLAT_THREAD_TEST_KEYS
; i
++)
207 tsd_set(tp
->tp_keys
[i
], (void *)tp
);
209 spin_lock(&tp
->tp_lock
);
210 if (rc
&& !tp
->tp_rc
)
214 wake_up_all(&tp
->tp_waitq
);
215 spin_unlock(&tp
->tp_lock
);
219 splat_thread_work3_wait(void *priv
)
221 thread_priv_t
*tp
= (thread_priv_t
*)priv
;
223 ASSERT(tp
->tp_magic
== SPLAT_THREAD_TEST_MAGIC
);
224 splat_thread_work3_common(tp
);
225 wait_event(tp
->tp_waitq
, splat_thread_count(tp
, 0));
230 splat_thread_work3_exit(void *priv
)
232 thread_priv_t
*tp
= (thread_priv_t
*)priv
;
234 ASSERT(tp
->tp_magic
== SPLAT_THREAD_TEST_MAGIC
);
235 splat_thread_work3_common(tp
);
240 splat_thread_dtor3(void *priv
)
242 thread_priv_t
*tp
= (thread_priv_t
*)priv
;
244 ASSERT(tp
->tp_magic
== SPLAT_THREAD_TEST_MAGIC
);
245 spin_lock(&tp
->tp_lock
);
247 spin_unlock(&tp
->tp_lock
);
/*
 * Create threads which set and verify SPLAT_THREAD_TEST_KEYS number of
 * keys.  These threads may then exit by calling thread_exit() which calls
 * tsd_exit() resulting in all their thread specific data being reclaimed.
 * Alternately, the thread may block in which case the thread specific
 * data will be reclaimed as part of tsd_destroy().  In either case all
 * thread specific data must be reclaimed, this is verified by ensuring
 * the registered destructor is called the correct number of times.
 */
260 splat_thread_test3(struct file
*file
, void *arg
)
262 int i
, rc
= 0, expected
, wait_count
= 0, exit_count
= 0;
265 tp
.tp_magic
= SPLAT_THREAD_TEST_MAGIC
;
267 spin_lock_init(&tp
.tp_lock
);
268 init_waitqueue_head(&tp
.tp_waitq
);
271 tp
.tp_dtor_count
= 0;
273 for (i
= 0; i
< SPLAT_THREAD_TEST_KEYS
; i
++) {
275 tsd_create(&tp
.tp_keys
[i
], splat_thread_dtor3
);
278 /* Start tsd wait threads */
279 for (i
= 0; i
< SPLAT_THREAD_TEST_THREADS
; i
++) {
280 if (thread_create(NULL
, 0, splat_thread_work3_wait
,
281 &tp
, 0, &p0
, TS_RUN
, minclsyspri
))
285 /* All wait threads have setup their tsd and are blocking. */
286 wait_event(tp
.tp_waitq
, splat_thread_count(&tp
, wait_count
));
288 if (tp
.tp_dtor_count
!= 0) {
289 splat_vprint(file
, SPLAT_THREAD_TEST3_NAME
,
290 "Prematurely ran %d tsd destructors\n", tp
.tp_dtor_count
);
295 /* Start tsd exit threads */
296 for (i
= 0; i
< SPLAT_THREAD_TEST_THREADS
; i
++) {
297 if (thread_create(NULL
, 0, splat_thread_work3_exit
,
298 &tp
, 0, &p0
, TS_RUN
, minclsyspri
))
302 /* All exit threads verified tsd and are in the process of exiting */
303 wait_event(tp
.tp_waitq
,splat_thread_count(&tp
, wait_count
+exit_count
));
306 expected
= (SPLAT_THREAD_TEST_KEYS
* exit_count
);
307 if (tp
.tp_dtor_count
!= expected
) {
308 splat_vprint(file
, SPLAT_THREAD_TEST3_NAME
,
309 "Expected %d exit tsd destructors but saw %d\n",
310 expected
, tp
.tp_dtor_count
);
315 /* Destroy all keys and associated tsd in blocked threads */
316 for (i
= 0; i
< SPLAT_THREAD_TEST_KEYS
; i
++)
317 tsd_destroy(&tp
.tp_keys
[i
]);
319 expected
= (SPLAT_THREAD_TEST_KEYS
* (exit_count
+ wait_count
));
320 if (tp
.tp_dtor_count
!= expected
) {
321 splat_vprint(file
, SPLAT_THREAD_TEST3_NAME
,
322 "Expected %d wait+exit tsd destructors but saw %d\n",
323 expected
, tp
.tp_dtor_count
);
328 /* Release the remaining wait threads, sleep briefly while they exit */
329 spin_lock(&tp
.tp_lock
);
331 wake_up_all(&tp
.tp_waitq
);
332 spin_unlock(&tp
.tp_lock
);
336 splat_vprint(file
, SPLAT_THREAD_TEST3_NAME
,
337 "Thread tsd_get()/tsd_set() error %d\n", tp
.tp_rc
);
341 splat_vprint(file
, SPLAT_THREAD_TEST3_NAME
, "%s",
342 "Thread specific data verified\n");
349 splat_thread_init(void)
351 splat_subsystem_t
*sub
;
353 sub
= kmalloc(sizeof(*sub
), GFP_KERNEL
);
357 memset(sub
, 0, sizeof(*sub
));
358 strncpy(sub
->desc
.name
, SPLAT_THREAD_NAME
, SPLAT_NAME_SIZE
);
359 strncpy(sub
->desc
.desc
, SPLAT_THREAD_DESC
, SPLAT_DESC_SIZE
);
360 INIT_LIST_HEAD(&sub
->subsystem_list
);
361 INIT_LIST_HEAD(&sub
->test_list
);
362 spin_lock_init(&sub
->test_lock
);
363 sub
->desc
.id
= SPLAT_SUBSYSTEM_THREAD
;
365 SPLAT_TEST_INIT(sub
, SPLAT_THREAD_TEST1_NAME
, SPLAT_THREAD_TEST1_DESC
,
366 SPLAT_THREAD_TEST1_ID
, splat_thread_test1
);
367 SPLAT_TEST_INIT(sub
, SPLAT_THREAD_TEST2_NAME
, SPLAT_THREAD_TEST2_DESC
,
368 SPLAT_THREAD_TEST2_ID
, splat_thread_test2
);
369 SPLAT_TEST_INIT(sub
, SPLAT_THREAD_TEST3_NAME
, SPLAT_THREAD_TEST3_DESC
,
370 SPLAT_THREAD_TEST3_ID
, splat_thread_test3
);
376 splat_thread_fini(splat_subsystem_t
*sub
)
379 SPLAT_TEST_FINI(sub
, SPLAT_THREAD_TEST3_ID
);
380 SPLAT_TEST_FINI(sub
, SPLAT_THREAD_TEST2_ID
);
381 SPLAT_TEST_FINI(sub
, SPLAT_THREAD_TEST1_ID
);
387 splat_thread_id(void) {
388 return SPLAT_SUBSYSTEM_THREAD
;