2 * This file is part of the SPL: Solaris Porting Layer.
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 #include "splat-internal.h"
/* SPLAT "kmem" subsystem identifier and registration strings. */
#define SPLAT_SUBSYSTEM_KMEM 0x0100
#define SPLAT_KMEM_NAME "kmem"
#define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"

/* Tests 1-4 exercise the raw kmem/vmem allocation interfaces. */
#define SPLAT_KMEM_TEST1_ID 0x0101
#define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID 0x0102
#define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID 0x0103
#define SPLAT_KMEM_TEST3_NAME "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID 0x0104
#define SPLAT_KMEM_TEST4_NAME "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)"

/* Tests 5-8 exercise the slab cache: ctors/dtors, reaping, locking. */
#define SPLAT_KMEM_TEST5_ID 0x0105
#define SPLAT_KMEM_TEST5_NAME "kmem_cache1"
#define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID 0x0106
#define SPLAT_KMEM_TEST6_NAME "kmem_cache2"
#define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID 0x0107
#define SPLAT_KMEM_TEST7_NAME "kmem_reap"
#define SPLAT_KMEM_TEST7_DESC "Slab reaping test"

#define SPLAT_KMEM_TEST8_ID 0x0108
#define SPLAT_KMEM_TEST8_NAME "kmem_lock"
#define SPLAT_KMEM_TEST8_DESC "Slab locking test"

/* Number of simultaneous buffers held per size step in tests 1-4. */
#define SPLAT_KMEM_ALLOC_COUNT 10
#define SPLAT_VMEM_ALLOC_COUNT 10
/* XXX - This test may fail under tight memory conditions */
/*
 * Test 1: allocate SPLAT_KMEM_ALLOC_COUNT buffers with kmem_alloc() at
 * doubling sizes from PAGE_SIZE up to 32 pages, free them, and log how
 * many allocations succeeded at each size.
 *
 * NOTE(review): several lines are missing from this extracted view
 * (return type, the size/i/count/rc declarations, the per-allocation
 * success accounting, and the function tail); only visible statements
 * are reproduced below.
 */
splat_kmem_test1(struct file *file, void *arg)
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];

	/* We are intentionally going to push kmem_alloc to its max
	 * allocation size, so suppress the console warnings for now */

	/* Walk sizes PAGE_SIZE, 2*PAGE_SIZE, ... while no failure seen. */
	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			/* [elided here: presumably counts successes] */

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_KMEM_ALLOC_COUNT);
		/* Any shortfall fails the size step — tail elided in view. */
		if (count != SPLAT_KMEM_ALLOC_COUNT)
/*
 * Test 2: same sweep as test 1 but with kmem_zalloc(); additionally
 * verify every returned buffer is fully zero-filled.
 *
 * NOTE(review): return type, success accounting, error branches and the
 * function tail are missing from this extracted view.
 */
splat_kmem_test2(struct file *file, void *arg)
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/* We are intentionally going to push kmem_alloc to its max
	 * allocation size, so suppress the console warnings for now */

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						   "%d-byte allocation was "
						   "not zeroed\n", size);
					/* [elided: presumably sets rc] */

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
/*
 * Test 3: allocate SPLAT_VMEM_ALLOC_COUNT buffers with vmem_alloc() at
 * doubling sizes up to 1024 pages (vmem supports much larger buffers
 * than kmem), free them, and log the success count per size.
 *
 * NOTE(review): return type, success accounting and the function tail
 * are missing from this extracted view.
 */
splat_kmem_test3(struct file *file, void *arg)
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
/*
 * Test 4: same sweep as test 3 but with vmem_zalloc(); additionally
 * verify every returned buffer is fully zero-filled.
 *
 * NOTE(review): return type, success accounting, error branches and the
 * function tail are missing from this extracted view.
 */
splat_kmem_test4(struct file *file, void *arg)
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						   "%d-byte allocation was "
						   "not zeroed\n", size);
					/* [elided: presumably sets rc] */

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			   "%d byte allocations, %d/%d successful\n",
			   size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
/* Shared state for the slab-cache tests (tests 5-8). */
#define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT 128
#define SPLAT_KMEM_OBJ_RECLAIM 16

/* Per-object payload placed in the test slab cache. */
typedef struct kmem_cache_data {
	unsigned long kcd_magic;	/* set by ctor from kcp_magic */
	/* [fields elided in this view: the ctor/dtor also reference
	 * kcd_flag and a trailing kcd_buf[] — TODO confirm] */

/* Private test context passed to ctor/dtor/reclaim callbacks. */
typedef struct kmem_cache_priv {
	unsigned long kcp_magic;	/* SPLAT_KMEM_TEST_MAGIC sentinel */
	struct file *kcp_file;		/* splat output handle */
	kmem_cache_t *kcp_cache;	/* cache under test */
	kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];
	/* [field elided in this view: presumably the kcp_lock spinlock
	 * used by test 8 — TODO confirm] */
	wait_queue_head_t kcp_waitq;	/* test-8 thread rendezvous */
	/* [tail elided: other code references kcp_size, kcp_count,
	 * kcp_alloc, kcp_threads and kcp_rc fields] */
/*
 * Slab constructor callback: stamp the object's magic from the private
 * test context and poison the trailing buffer with 0xaa so the
 * destructor / verification code can detect uninitialized use.
 *
 * NOTE(review): return type, NULL guards and the return statement are
 * missing from this extracted view.
 */
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	kcd->kcd_magic = kcp->kcp_magic;
	/* Poison everything after the header; kcp_size is the full
	 * object size including the kmem_cache_data_t header. */
	memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
/*
 * Slab destructor callback: re-poison the trailing buffer with 0xbb so
 * use-after-destroy is detectable.
 *
 * NOTE(review): return type, guards and accounting lines are missing
 * from this extracted view.
 */
splat_kmem_cache_test_destructor(void *ptr, void *priv)
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
/*
 * Common helper for tests 5 and 6: create a slab cache of 'size'-byte
 * objects with the given cache 'flags' (0, KMC_KMEM or KMC_VMEM),
 * allocate an object, verify the constructor ran and received the
 * private context, then destroy the cache and verify destructors ran
 * for every object.
 *
 * NOTE(review): error-branch bodies, rc handling and goto labels are
 * missing from this extracted view; only visible statements are shown.
 */
splat_kmem_cache_size_test(struct file *file, void *arg,
			 char *name, int size, int flags)
	kmem_cache_t *cache = NULL;
	kmem_cache_data_t *kcd = NULL;
	kmem_cache_priv_t kcp;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	/* [elided: remaining kcp field initialization] */

	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
				splat_kmem_cache_test_constructor,
				splat_kmem_cache_test_destructor,
				NULL, &kcp, NULL, flags);
		/* [elided: cache == NULL error branch] */
		splat_vprint(file, name,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);

	kcd = kmem_cache_alloc(cache, KM_SLEEP);
		/* [elided: kcd == NULL error branch] */
		splat_vprint(file, name,
			   "Unable to allocate from '%s'\n",
			   SPLAT_KMEM_CACHE_NAME);

	/* Constructor must have set the per-object flag. */
	if (!kcd->kcd_flag) {
		/* NOTE(review): "contructor" typo in the message below is
		 * in a runtime string and is preserved as-is. */
		splat_vprint(file, name,
			   "Failed to run contructor for '%s'\n",
			   SPLAT_KMEM_CACHE_NAME);

	/* Constructor must have copied the magic from the private data. */
	if (kcd->kcd_magic != kcp.kcp_magic) {
		splat_vprint(file, name,
			   "Failed to pass private data to constructor "
			   "for '%s'\n", SPLAT_KMEM_CACHE_NAME);

	kmem_cache_free(cache, kcd);

	/* Destroy the entire cache which will force destructors to
	 * run and we can verify one was called for every object */
	kmem_cache_destroy(cache);
		/* [elided: destructor-count mismatch branch] */
		splat_vprint(file, name,
			   "Failed to run destructor on all slab objects "
			   "for '%s'\n", SPLAT_KMEM_CACHE_NAME);

	splat_vprint(file, name,
		   "Successfully ran ctors/dtors for %d elements in '%s'\n",
		   max, SPLAT_KMEM_CACHE_NAME);
	/* [elided: success return path] */

	/* Error-path cleanup: free the object, then the cache. */
	kmem_cache_free(cache, kcd);

	kmem_cache_destroy(cache);
/* Validate small object cache behavior for dynamic/kmem/vmem caches */
/*
 * Test 5: run the common size test with 128-byte objects under each of
 * the three backing-store policies (default, KMC_KMEM, KMC_VMEM).
 *
 * NOTE(review): the return type, the rc declaration and the
 * "if (rc) return rc;" lines between the calls are missing from this
 * extracted view.
 */
splat_kmem_test5(struct file *file, void *arg)
	char *name = SPLAT_KMEM_TEST5_NAME;

	rc = splat_kmem_cache_size_test(file, arg, name, 128, 0);

	rc = splat_kmem_cache_size_test(file, arg, name, 128, KMC_KMEM);

	return splat_kmem_cache_size_test(file, arg, name, 128, KMC_VMEM);
386 /* Validate large object cache behavior for dynamic/kmem/vmem caches */
388 splat_kmem_test6(struct file
*file
, void *arg
)
390 char *name
= SPLAT_KMEM_TEST6_NAME
;
393 rc
= splat_kmem_cache_size_test(file
, arg
, name
, 128 * 1024, 0);
397 rc
= splat_kmem_cache_size_test(file
, arg
, name
, 128 * 1024, KMC_KMEM
);
401 return splat_kmem_cache_size_test(file
, arg
, name
, 128 * 1028, KMC_VMEM
);
/*
 * Cache reclaim callback for test 7: free up to SPLAT_KMEM_OBJ_RECLAIM
 * of the currently held objects back to the cache so the reaper can
 * observe the cache shrinking.
 *
 * NOTE(review): local declarations, the per-free count bookkeeping and
 * the function tail are missing from this extracted view.
 */
splat_kmem_cache_test_reclaim(void *priv)
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;

	/* Reclaim at most SPLAT_KMEM_OBJ_RECLAIM, never more than held. */
	count = min(SPLAT_KMEM_OBJ_RECLAIM, kcp->kcp_count);
	splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST7_NAME,
		   "Reaping %d objects from '%s'\n", count,
		   SPLAT_KMEM_CACHE_NAME);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			/* NULL the slot so cleanup won't double-free. */
			kcp->kcp_kcd[i] = NULL;
/*
 * Test 7: create a cache with a reclaim callback, fill it with up to
 * SPLAT_KMEM_OBJ_COUNT objects, then repeatedly call
 * kmem_cache_reap_now() (sleeping 1s between attempts, up to 60 tries)
 * until every object has been reclaimed, and report the result.
 *
 * NOTE(review): local declarations, several error branches, break
 * statements and the function tail are missing from this extracted
 * view.
 */
splat_kmem_test7(struct file *file, void *arg)
	kmem_cache_priv_t kcp;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	/* [elided: remaining kcp field initialization] */

	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
				splat_kmem_cache_test_constructor,
				splat_kmem_cache_test_destructor,
				splat_kmem_cache_test_reclaim,
		/* [elided: trailing create args and NULL-cache branch] */
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);

	kcp.kcp_cache = cache;

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		/* All allocations need not succeed */
		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, KM_SLEEP);
		if (!kcp.kcp_kcd[i]) {
			splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
				   "Unable to allocate from '%s'\n",
				   SPLAT_KMEM_CACHE_NAME);

	/* At least one allocation must have succeeded to test reaping. */
	ASSERT(kcp.kcp_count > 0);

	/* Request the slab cache free any objects it can. For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed. First off, due to fragmentation we may not be able to
	 * reclaim any slabs. Secondly, even if we do fully clear some
	 * slabs we will not want to immediately reclaim all of them because
	 * we may contend with cache allocs and thrash. What we want to see
	 * is slab size decrease more gradually as it becomes clear they
	 * will not be needed. This should be achievable in less than a
	 * minute; if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(cache);
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			   "%s cache objects %d, slabs %u/%u objs %u/%u\n",
			   SPLAT_KMEM_CACHE_NAME, kcp.kcp_count,
			   (unsigned)cache->skc_slab_alloc,
			   (unsigned)cache->skc_slab_total,
			   (unsigned)cache->skc_obj_alloc,
			   (unsigned)cache->skc_obj_total);

		/* Fully reclaimed — stop polling (break elided in view). */
		if (cache->skc_obj_total == 0)

		/* Sleep ~1s before the next reap attempt. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);

	if (cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			   "Successfully created %d objects "
			   "in cache %s and reclaimed them\n",
			   SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		/* [elided: else branch marker] */
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			   "Failed to reclaim %u/%d objects from cache %s\n",
			   (unsigned)cache->skc_obj_total, SPLAT_KMEM_OBJ_COUNT,
			   SPLAT_KMEM_CACHE_NAME);

	/* Cleanup our mess (for failure case of time expiring) */
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		/* [elided: guard — reclaim callback NULLs freed slots] */
		kmem_cache_free(cache, kcp.kcp_kcd[i]);

	kmem_cache_destroy(cache);
/*
 * Worker thread for test 8: allocate kcp_alloc objects from the shared
 * cache, free them all, then decrement the live-thread count under
 * kcp_lock and wake the waiter when the last thread finishes.
 *
 * NOTE(review): return type, error branches, the objs declaration,
 * goto labels and the thread-exit tail are missing from this extracted
 * view.
 */
splat_kmem_test8_thread(void *arg)
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	int count = kcp->kcp_alloc, rc = 0, i;

	/* Sanity-check we were handed the real test context. */
	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Scratch array to hold the allocated object pointers. */
	objs = vmem_zalloc(count * sizeof(void *), KM_SLEEP);
		/* [elided: objs == NULL error branch] */
		splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
			   "Unable to alloc objp array for cache '%s'\n",
			   kcp->kcp_cache->skc_name);

	for (i = 0; i < count; i++) {
		objs[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
			/* [elided: allocation-failure branch] */
			splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
				   "Unable to allocate from cache '%s'\n",
				   kcp->kcp_cache->skc_name);

	for (i = 0; i < count; i++)
		/* [elided: presumably a non-NULL guard per entry] */
		kmem_cache_free(kcp->kcp_cache, objs[i]);

	vmem_free(objs, count * sizeof(void *));

	spin_lock(&kcp->kcp_lock);
	/* [elided: presumably propagates rc into kcp — TODO confirm] */

	/* Last thread out wakes the test driver blocked in wait_event(). */
	if (--kcp->kcp_threads == 0)
		wake_up(&kcp->kcp_waitq);

	spin_unlock(&kcp->kcp_lock);
/*
 * wait_event() predicate for test 8: return nonzero when the number of
 * live worker threads equals 'threads' (the driver waits for 0).
 * Reads kcp_threads under kcp_lock for a consistent snapshot.
 *
 * NOTE(review): return type, the ret declaration and the return
 * statement are missing from this extracted view.
 */
splat_kmem_test8_count(kmem_cache_priv_t *kcp, int threads)
	spin_lock(&kcp->kcp_lock);
	ret = (kcp->kcp_threads == threads);
	spin_unlock(&kcp->kcp_lock);
/* This test will always pass and is simply here so I can easily
 * eyeball the slab cache locking overhead to ensure it is reasonable.
 */
/*
 * Test 8 driver: for allocation counts i = 1, 2, 4, ... up to 'count',
 * create a fresh cache of 'size'-byte objects, spawn 'threads' (32)
 * worker threads that each allocate/free i objects, wait for them all
 * to finish, and print the wall time plus slab/object statistics.
 *
 * NOTE(review): several declarations (thr, cache_name), kcp field
 * setup, error branches and the loop/function tail are missing from
 * this extracted view.
 */
splat_kmem_test8_sc(struct file *file, void *arg, int size, int count)
	kmem_cache_priv_t kcp;
	struct timespec start, stop, delta;
	int i, j, rc = 0, threads = 32;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	/* [elided: remaining kcp setup] */

	/* Two-line column header for the per-iteration stats below. */
	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "name",
		   "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "",
		   " \ttot/max/calc\ttot/max/calc\n");

	for (i = 1; i <= count; i *= 2) {
		/* [elided: per-iteration kcp re-initialization] */
		spin_lock_init(&kcp.kcp_lock);
		init_waitqueue_head(&kcp.kcp_waitq);

		/* Unique cache name per (size, count) iteration. */
		(void)snprintf(cache_name, 32, "%s-%d-%d",
			     SPLAT_KMEM_CACHE_NAME, size, i);
		kcp.kcp_cache = kmem_cache_create(cache_name, kcp.kcp_size, 0,
					splat_kmem_cache_test_constructor,
					splat_kmem_cache_test_destructor,
					NULL, &kcp, NULL, 0);
		if (!kcp.kcp_cache) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				   "Unable to create '%s' cache\n",
				   SPLAT_KMEM_CACHE_NAME);

		start = current_kernel_time();

		for (j = 0; j < threads; j++) {
			thr = thread_create(NULL, 0, splat_kmem_test8_thread,
					  &kcp, 0, &p0, TS_RUN, minclsyspri);
			/* [elided: thread-create failure branch and the
			 * kcp_threads increment — TODO confirm] */
			spin_lock(&kcp.kcp_lock);
			spin_unlock(&kcp.kcp_lock);

		/* Sleep until the thread sets kcp.kcp_threads == 0 */
		wait_event(kcp.kcp_waitq, splat_kmem_test8_count(&kcp, 0));
		stop = current_kernel_time();
		delta = timespec_sub(stop, start);

		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %2ld.%09ld\t"
			   "%lu/%lu/%lu\t%lu/%lu/%lu\n",
			   kcp.kcp_cache->skc_name,
			   delta.tv_sec, delta.tv_nsec,
			   (unsigned long)kcp.kcp_cache->skc_slab_total,
			   (unsigned long)kcp.kcp_cache->skc_slab_max,
			   (unsigned long)(kcp.kcp_alloc * threads /
					 SPL_KMEM_CACHE_OBJ_PER_SLAB),
			   (unsigned long)kcp.kcp_cache->skc_obj_total,
			   (unsigned long)kcp.kcp_cache->skc_obj_max,
			   (unsigned long)(kcp.kcp_alloc * threads));

		kmem_cache_destroy(kcp.kcp_cache);

		/* Propagate the first worker-thread failure (tail elided). */
		if (!rc && kcp.kcp_rc)
/*
 * Test 8 entry point: sweep object sizes 16 B to 1 MiB in 4x steps,
 * running the locking-overhead scenario at each size.
 *
 * NOTE(review): return type, the i/rc declarations and the loop/return
 * tail are missing from this extracted view.
 */
splat_kmem_test8(struct file *file, void *arg)
	/* Run through slab cache with objects size from
	 * 16-1Mb in 4x multiples with 1024 objects each */
	/* NOTE(review): the comment above says 1024 objects but the call
	 * passes count=256 — confirm which is intended. */
	for (i = 16; i <= 1024*1024; i *= 4) {
		rc = splat_kmem_test8_sc(file, arg, i, 256);
/*
 * Allocate and populate the kmem splat subsystem descriptor and
 * register all eight tests with it. Caller owns the returned subsystem
 * (freed via splat_kmem_fini).
 *
 * NOTE(review): return type, the NULL-check after kmalloc and the
 * final return are missing from this extracted view.
 */
splat_kmem_init(void)
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	/* [elided: presumably a NULL check before use] */

	memset(sub, 0, sizeof(*sub));
	/* NOTE(review): strncpy does not guarantee NUL-termination if the
	 * source fills the buffer — presumably the names fit; confirm. */
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	/* Register tests 1-8 in ascending id order. */
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
		      SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
		      SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
		      SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
		      SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
		      SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
		      SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
		      SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
		      SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
/*
 * Unregister all eight kmem tests from the subsystem, in reverse order
 * of registration (8 down to 1).
 *
 * NOTE(review): the return type, opening brace and function tail are
 * missing from this extracted view.
 */
splat_kmem_fini(splat_subsystem_t *sub)
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);
731 splat_kmem_id(void) {
732 return SPLAT_SUBSYSTEM_KMEM
;