/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_SUBSYSTEM_KMEM            0x0100
#define SPLAT_KMEM_NAME                 "kmem"
#define SPLAT_KMEM_DESC                 "Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID             0x0101
#define SPLAT_KMEM_TEST1_NAME           "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC           "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID             0x0102
#define SPLAT_KMEM_TEST2_NAME           "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC           "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID             0x0103
#define SPLAT_KMEM_TEST3_NAME           "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC           "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID             0x0104
#define SPLAT_KMEM_TEST4_NAME           "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC           "Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID             0x0105
#define SPLAT_KMEM_TEST5_NAME           "slab_small"
#define SPLAT_KMEM_TEST5_DESC           "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID             0x0106
#define SPLAT_KMEM_TEST6_NAME           "slab_large"
#define SPLAT_KMEM_TEST6_DESC           "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID             0x0107
#define SPLAT_KMEM_TEST7_NAME           "slab_align"
#define SPLAT_KMEM_TEST7_DESC           "Slab alignment test"

#define SPLAT_KMEM_TEST8_ID             0x0108
#define SPLAT_KMEM_TEST8_NAME           "slab_reap"
#define SPLAT_KMEM_TEST8_DESC           "Slab reaping test"

#define SPLAT_KMEM_TEST9_ID             0x0109
#define SPLAT_KMEM_TEST9_NAME           "slab_age"
#define SPLAT_KMEM_TEST9_DESC           "Slab aging test"

#define SPLAT_KMEM_TEST10_ID            0x010a
#define SPLAT_KMEM_TEST10_NAME          "slab_lock"
#define SPLAT_KMEM_TEST10_DESC          "Slab locking test"

#define SPLAT_KMEM_TEST11_ID            0x010b
#define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"

#define SPLAT_KMEM_TEST12_ID            0x010c
#define SPLAT_KMEM_TEST12_NAME          "vmem_size"
#define SPLAT_KMEM_TEST12_DESC          "Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT          10
#define SPLAT_VMEM_ALLOC_COUNT          10

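/*
 * Test 1: kmem_alloc() stress.  For each power-of-two size from
 * PAGE_SIZE up to 32*PAGE_SIZE allocate SPLAT_KMEM_ALLOC_COUNT buffers
 * with KM_SLEEP and fail with -ENOMEM if any allocation is refused.
 */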
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

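/*
 * Test 2: kmem_zalloc().  Identical to test 1 but additionally verifies
 * that every returned buffer is zero filled, failing with -EFAULT if a
 * non-zero byte is found.
 */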
static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_zalloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping
                 * any allocations which failed outright */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

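/*
 * Test 3: vmem_alloc().  Same pattern as test 1 but exercises the
 * virtual memory allocator for sizes up to 1024*PAGE_SIZE.
 */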
static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

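/*
 * Test 4: vmem_zalloc().  Same pattern as test 3 with the additional
 * zero-fill verification from test 2.
 */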
static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping
                 * any allocations which failed outright */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            1024
#define SPLAT_KMEM_OBJ_RECLAIM          20 /* percent */
#define SPLAT_KMEM_THREADS              32

#define KCP_FLAG_READY                  0x01

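/*
 * The cache test state is a three-level hierarchy: a single
 * kmem_cache_priv_t (kcp) describes the cache under test, one
 * kmem_cache_thread_t (kct) tracks the objects held by each worker
 * thread, and each allocated object is a kmem_cache_data_t (kcd)
 * stamped with a magic value so the constructor/destructor callbacks
 * can be verified.
 */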
typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

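/*
 * The constructor stamps each object with the private magic value and
 * fills the payload with 0xaa; the destructor clears the stamp and
 * refills the payload with 0xbb.  The kcp_count field tracks how many
 * constructed objects are currently outstanding.
 */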
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if ((--count) == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                spin_unlock(&kcp->kcp_lock);
                if (!kct)
                        continue;

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if ((--count) == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
        }

        return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

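/*
 * Worker thread body: assign ourselves an id, allocate a kct to track
 * our objects, wait for all threads to report ready, then allocate and
 * free kct_kcd_count objects from the shared cache.  The last thread
 * to finish wakes the control thread.
 */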
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrently with the reclaim function.  If
         * we are not in a low memory situation we have one lock per
         * thread, so the locks are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

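/*
 * Generic single-object cache test.  Create a cache with the given
 * size, alignment, and flags, allocate one object, verify both that
 * the constructor ran and that it received the private data, then
 * destroy the cache and confirm a destructor ran for every object.
 */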
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache, which forces the destructors to run,
         * so we can verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

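/*
 * Generic concurrency test.  Spawn SPLAT_KMEM_THREADS threads against a
 * shared cache, release them simultaneously for maximum contention, and
 * fail if the run exceeds max_time seconds or any thread reports an
 * error.
 */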
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, KMC_KMEM);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate cache object alignment behavior for power-of-two alignments */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc = 0;

        for (i = 8; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

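/*
 * Test 8: slab reaping.  Fill the cache with SPLAT_KMEM_OBJ_COUNT
 * objects, then repeatedly call kmem_cache_reap_now().  The registered
 * reclaim callback frees a percentage of the outstanding objects on
 * each pass, so the object total should reach zero within sixty
 * seconds.
 */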
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few reasons
         * this may not immediately result in more free memory even if objects
         * are freed.  First off, due to fragmentation we may not be able to
         * reclaim any slabs.  Secondly, even if we do fully clear some
         * slabs we will not want to immediately reclaim all of them because
         * we may contend with cache allocs and thrash.  What we want to see
         * is the slab size decrease more gradually as it becomes clear they
         * will not be needed.  This should be achievable in less than a
         * minute; if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

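/*
 * Test 9: slab aging.  Allocate and immediately free a large number of
 * objects (creating many slabs), then watch for up to sixty seconds as
 * the now-idle slabs are gradually released without an explicit reap.
 */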
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However, since
         * there should be little memory pressure at the moment those
         * slabs have not been freed.  What we want to see is the slab
         * size decrease gradually as it becomes clear they will not
         * be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no single test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, rc = 0;

        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system, so it is possible the test app may get OOM-killed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 256*1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
        ssize_t alloc1, free1, total1;
        ssize_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%d free=%d "
                     "total=%d\n", (int)alloc1, (int)free1, (int)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%d free=%d "
                     "total=%d\n", (int)alloc2, (int)free2, (int)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed VMEM_ALLOC size: %d != %d+%d (+/- 1%%)\n",
                             (int)alloc2, (int)alloc1, size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed VMEM_FREE size: %d != %d-%d (+/- 1%%)\n",
                             (int)free2, (int)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed VMEM_ALLOC | VMEM_FREE not constant: "
                             "%d != %d\n", (int)total2, (int)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%d%% (%d/%d)\n",
                     (int)(((alloc1 + size) - alloc2) * 100 / size),
                     (int)((alloc1 + size) - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance: ~%d%% (%d/%d)\n",
                     (int)(((free1 - size) - free2) * 100 / size),
                     (int)((free1 - size) - free2), size);

        return 0;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}