/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#ifdef _LP64
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif /* _LP64 */

#define SPLAT_KMEM_TEST12_ID		0x010c
#define SPLAT_KMEM_TEST12_NAME		"vmem_size"
#define SPLAT_KMEM_TEST12_DESC		"Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

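/* Validate kmem_alloc() for doubling sizes from PAGE_SIZE to 32*PAGE_SIZE */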
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/* We are intentionally going to push kmem_alloc to its max
	 * allocation size, so suppress the console warnings for now */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}

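/* Validate kmem_zalloc() zero-fills buffers over the same size range */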
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/* We are intentionally going to push kmem_alloc to its max
	 * allocation size, so suppress the console warnings for now */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each buffer has been zero filled, skipping any
		 * failed allocations which are accounted for below */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			if (!ptr[i])
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}

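/* Validate vmem_alloc() for doubling sizes from PAGE_SIZE to 1024*PAGE_SIZE */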
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

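/* Validate vmem_zalloc() zero-fills buffers over the same size range */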
static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each buffer has been zero filled, skipping any
		 * failed allocations which are accounted for below */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			if (!ptr[i])
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

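/*
 * Common infrastructure for the slab cache tests.  A kmem_cache_priv_t
 * (kcp) describes a single test run: the cache under test, the control
 * and thread wait queues, and any objects allocated directly off the
 * kcp.  Threaded tests give each worker a kmem_cache_thread_t (kct)
 * with its own object array, and every cached object is a
 * kmem_cache_data_t (kcd) stamped with a magic value by the constructor.
 */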
#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		20 /* percent */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	kmem_cache_t *kct_cache;
	spinlock_t kct_lock;
	int kct_id;
	int kct_kcd_count;
	kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
	int kcp_kcd_count;
	kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc, int count)
{
	kmem_cache_priv_t *kcp;

	kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;
	kcp->kcp_kcd_count = count;

	return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	vmem_free(kcp, sizeof(kmem_cache_priv_t) +
		  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
	kmem_cache_thread_t *kct;

	ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
	kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_cache = NULL;
	kct->kct_id = id;
	kct->kct_kcd_count = count;

	return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
	vmem_free(kct, sizeof(kmem_cache_thread_t) +
		  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

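/*
 * Object constructor/destructor.  Set (or clear) the magic value and
 * flag, fill the payload with a known pattern, and track the number of
 * live objects so the tests can verify every constructor had a matching
 * destructor call.
 */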
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	int i, j, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
	count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

	/* Objects directly attached to the kcp */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < kcp->kcp_kcd_count; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;

			if ((--count) == 0)
				break;
		}
	}
	spin_unlock(&kcp->kcp_lock);

	/* No threads containing objects to consider */
	if (kcp->kcp_kct_count == -1)
		return;

	/* Objects attached to a kct thread */
	for (i = 0; i < kcp->kcp_kct_count; i++) {
		spin_lock(&kcp->kcp_lock);
		kct = kcp->kcp_kct[i];
		if (!kct) {
			spin_unlock(&kcp->kcp_lock);
			continue;
		}

		spin_lock(&kct->kct_lock);
		count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

		for (j = 0; j < kct->kct_kcd_count; j++) {
			if (kct->kct_kcd[j]) {
				kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
				kct->kct_kcd[j] = NULL;

				if ((--count) == 0)
					break;
			}
		}
		spin_unlock(&kct->kct_lock);
		spin_unlock(&kcp->kcp_lock);
	}

	return;
}

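/* Check the registered thread count and ready flags under the kcp lock */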
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

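/*
 * Worker thread for the threaded cache tests.  Each thread registers
 * itself with the kcp, waits for the ready flag, then allocates and
 * frees its quota of objects while the reclaim callback may run
 * concurrently.  The last thread to finish wakes the control thread.
 */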
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id, i;
	void *obj;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/*
	 * Updates to kct->kct_kcd[] are performed under a spin_lock so
	 * they may safely run concurrent with the reclaim function.  If
	 * we are not in a low memory situation we have one lock per-
	 * thread so they are not expected to be contended.
	 */
	for (i = 0; i < kct->kct_kcd_count; i++) {
		obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kct->kct_lock);
		kct->kct_kcd[i] = obj;
		spin_unlock(&kct->kct_lock);
	}

	for (i = 0; i < kct->kct_kcd_count; i++) {
		spin_lock(&kct->kct_lock);
		if (kct->kct_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
			kct->kct_kcd[i] = NULL;
		}
		spin_unlock(&kct->kct_lock);
	}
out:
	spin_lock(&kcp->kcp_lock);
	if (kct) {
		splat_kmem_cache_test_kct_free(kct);
		kcp->kcp_kct[id] = kct = NULL;
	}

	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}

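/*
 * Create a cache with the requested size/align/flags, allocate a single
 * object, and verify the constructor ran and received the private data.
 * Then destroy the cache and confirm a destructor call was made for
 * every constructed object.
 */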
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_kcd[0] = NULL;
	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
				  kcp->kcp_size, kcp->kcp_align,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
			     "Unable to create '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
			     "Unable to allocate from '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
			     "Failed to run constructor for '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
			     "Failed to pass private data to constructor "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache which will force destructors to
	 * run and we can verify one was called for every object */
	kmem_cache_destroy(kcp->kcp_cache);
	if (kcp->kcp_count) {
		splat_vprint(file, name,
			     "Failed to run destructor on all slab objects "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
		     "Successfully ran ctors/dtors for %d elements in '%s'\n",
		     max, SPLAT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

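/*
 * Spawn SPLAT_KMEM_THREADS workers against a shared cache, release them
 * simultaneously for maximum concurrency, and report the elapsed time
 * and slab/object statistics.  The test fails if the run exceeds
 * max_time seconds or if any worker reported an error.
 */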
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate object alignment behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int i, rc;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
		if (rc)
			return rc;
	}

	return rc;
}

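/* Validate slab reaping; the reclaim hook should free objects on demand */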
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some slabs
	 * we will not want to immediately reclaim all of them because we may
	 * contend with cache allocs and thrash.  What we want to see is the
	 * slab size decrease more gradually as it becomes clear they will
	 * not be needed.  This should be achievable in less than a minute;
	 * if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

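/* Validate slab aging; empty slabs should be reclaimed without pressure */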
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects thus creating a
	 * large number of slabs and then free'd them all.  However since
	 * there should be little memory pressure at the moment those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not
	 * be needed.  This should be achievable in less than a minute;
	 * if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, rc = 0;

	for (size = 16; size <= 1024*1024; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
			     "time (sec)\tslabs \tobjs \thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
			     " \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed available memory.  We
			 * leverage availrmem here for some extra testing */
			if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
				SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
			if (rc)
				break;
		}
	}

	return rc;
}

#ifdef _LP64
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 256*1024;
	alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
		SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

	return rc;
}
#endif /* _LP64 */

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that vmem_size() values were updated properly within a
 * fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
	size_t alloc1, free1, total1;
	size_t alloc2, free2, total2;
	int size = 8*1024*1024;
	void *ptr;

	alloc1 = vmem_size(NULL, VMEM_ALLOC);
	free1 = vmem_size(NULL, VMEM_FREE);
	total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc1,
		     (unsigned long)free1, (unsigned long)total1);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
	ptr = vmem_alloc(size, KM_SLEEP);
	if (!ptr) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
			     "Failed to alloc %d bytes\n", size);
		return -ENOMEM;
	}

	alloc2 = vmem_size(NULL, VMEM_ALLOC);
	free2 = vmem_size(NULL, VMEM_FREE);
	total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc2,
		     (unsigned long)free2, (unsigned long)total2);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
	vmem_free(ptr, size);
	if (alloc2 < (alloc1 + size - (size / 100)) ||
	    alloc2 > (alloc1 + size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
			     (unsigned long)alloc2, (unsigned long)alloc1, size);
		return -ERANGE;
	}

	if (free2 < (free1 - size - (size / 100)) ||
	    free2 > (free1 - size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
			     (unsigned long)free2, (unsigned long)free1, size);
		return -ERANGE;
	}

	if (total1 != total2) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC | VMEM_FREE not constant: "
			     "%lu != %lu\n", (unsigned long)total2,
			     (unsigned long)total1);
		return -ERANGE;
	}

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
		     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
		     (long)abs(alloc1 + (long)size - alloc2), size);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
		     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
		     (long)abs((free1 - (long)size) - free2), size);

	return 0;
}

splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#ifdef _LP64
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif /* _LP64 */
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
			SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

	return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#ifdef _LP64
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif /* _LP64 */
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}

int
splat_kmem_id(void)
{
	return SPLAT_SUBSYSTEM_KMEM;
}