/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_KMEM_NAME                 "kmem"
#define SPLAT_KMEM_DESC                 "Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID             0x0101
#define SPLAT_KMEM_TEST1_NAME           "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC           "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID             0x0102
#define SPLAT_KMEM_TEST2_NAME           "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC           "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID             0x0103
#define SPLAT_KMEM_TEST3_NAME           "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC           "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID             0x0104
#define SPLAT_KMEM_TEST4_NAME           "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC           "Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID             0x0105
#define SPLAT_KMEM_TEST5_NAME           "slab_small"
#define SPLAT_KMEM_TEST5_DESC           "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID             0x0106
#define SPLAT_KMEM_TEST6_NAME           "slab_large"
#define SPLAT_KMEM_TEST6_DESC           "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID             0x0107
#define SPLAT_KMEM_TEST7_NAME           "slab_align"
#define SPLAT_KMEM_TEST7_DESC           "Slab alignment test"

#define SPLAT_KMEM_TEST8_ID             0x0108
#define SPLAT_KMEM_TEST8_NAME           "slab_reap"
#define SPLAT_KMEM_TEST8_DESC           "Slab reaping test"

#define SPLAT_KMEM_TEST9_ID             0x0109
#define SPLAT_KMEM_TEST9_NAME           "slab_age"
#define SPLAT_KMEM_TEST9_DESC           "Slab aging test"

#define SPLAT_KMEM_TEST10_ID            0x010a
#define SPLAT_KMEM_TEST10_NAME          "slab_lock"
#define SPLAT_KMEM_TEST10_DESC          "Slab locking test"

#define SPLAT_KMEM_TEST11_ID            0x010b
#define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"

#define SPLAT_KMEM_TEST12_ID            0x010c
#define SPLAT_KMEM_TEST12_NAME          "vmem_size"
#define SPLAT_KMEM_TEST12_DESC          "Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT          10
#define SPLAT_VMEM_ALLOC_COUNT          10

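/*
 * Validate kmem_alloc() by allocating SPLAT_KMEM_ALLOC_COUNT buffers at
 * every power-of-two size from PAGE_SIZE up to 32*PAGE_SIZE.  Each pass
 * must fully succeed; any failed allocation fails the test with -ENOMEM.
 */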
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

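/*
 * As above, but use kmem_zalloc() and additionally verify that every
 * byte of each returned buffer has been zero filled.
 */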
static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed outright */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

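/*
 * Validate vmem_alloc() by allocating SPLAT_VMEM_ALLOC_COUNT buffers at
 * every power-of-two size from PAGE_SIZE up to 1024*PAGE_SIZE.
 */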
static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

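/*
 * As above, but use vmem_zalloc() and verify the buffers are zero filled.
 */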
static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed outright */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            1024
#define SPLAT_KMEM_OBJ_RECLAIM          20 /* percent */
#define SPLAT_KMEM_THREADS              32

#define KCP_FLAG_READY                  0x01

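/*
 * Per-object (kcd), per-thread (kct), and per-test (kcp) state used by
 * the slab cache tests below.  The zero-length arrays are flexible array
 * members; the enclosing structures are allocated with enough trailing
 * space to hold the requested number of object pointers.
 */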
typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

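/*
 * Allocate and initialize the shared test state.  The structure is
 * sized to hold 'count' object pointers in the kcp_kcd[] flexible
 * array member, i.e. for count objects the allocation is:
 *
 *         sizeof(kmem_cache_priv_t) + count * sizeof(kmem_cache_data_t *)
 */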
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

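/*
 * Allocate the per-thread state, sized in the same way to hold 'count'
 * object pointers in the kct_kcd[] flexible array member.
 */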
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

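/*
 * Cache constructor/destructor pair.  The constructor stamps the magic
 * value, sets the flag, and fills the payload with 0xaa; the destructor
 * clears them and fills with 0xbb.  The shared kcp_count tracks how many
 * constructed objects currently exist, letting the tests verify that a
 * destructor was eventually run for every constructed object.
 */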
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if ((--count) == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                spin_unlock(&kcp->kcp_lock);
                if (!kct)
                        continue;

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache,
                                                kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if ((--count) == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
        }

        return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

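/*
 * Worker thread for the concurrency tests.  Each thread registers
 * itself with the kcp, waits until all threads are released together,
 * then allocates and frees its full quota of objects from the shared
 * cache.  The last thread to finish wakes the control thread.
 */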
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrent with the reclaim function.  If
         * we are not in a low memory situation we have one lock per
         * thread so they are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

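/*
 * Create a cache, allocate a single object, and verify the constructor
 * ran and received the private data.  Then free the object, destroy the
 * cache, and verify a destructor was run for every constructed object.
 */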
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
583 "Failed to run contructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache which will force destructors to
         * run and we can verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

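/*
 * Shared worker-thread driver: create a cache with a reclaim callback,
 * spawn SPLAT_KMEM_THREADS threads, release them simultaneously for
 * maximum concurrency, and report per-cache slab/object statistics and
 * the elapsed time.  The test fails if it exceeds max_time seconds.
 */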
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate object alignment behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc;

        for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

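/*
 * Allocate a large number of objects, then repeatedly call
 * kmem_cache_reap_now() and verify the cache releases all of its
 * slabs back to the system within 60 seconds.
 */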
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few reasons
         * this may not immediately result in more free memory even if objects
         * are freed.  First off, due to fragmentation we may not be able to
         * reclaim any slabs.  Secondly, even if we do fully clear some slabs
         * we will not want to immediately reclaim all of them because we may
         * contend with cache allocs and thrash.  What we want to see is the
         * slab size decrease more gradually as it becomes clear they will
         * not be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

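/*
 * Allocate and free a large number of objects, then watch the cache
 * age: even with no reclaim callback and little memory pressure the
 * unused slabs should be released gradually within 60 seconds.
 */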
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However, since
         * there should be little memory pressure at the moment those
         * slabs have not been freed.  What we want to see is the slab
         * size decrease gradually as it becomes clear they will not be
         * needed.  This should be achievable in less than a minute; if
         * it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, rc = 0;

        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 256*1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
        size_t alloc1, free1, total1;
        size_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc1,
                     (unsigned long)free1, (unsigned long)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc2,
                     (unsigned long)free2, (unsigned long)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
                             (unsigned long)alloc2, (unsigned long)alloc1,
                             size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
                             (unsigned long)free2, (unsigned long)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC | VMEM_FREE not constant: "
                             "%lu != %lu\n", (unsigned long)total2,
                             (unsigned long)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
                     (long)abs(alloc1 + (long)size - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
                     (long)abs((free1 - (long)size) - free2), size);

        return 0;
}

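/*
 * Register the kmem test subsystem with the SPLAT framework.  Once the
 * splat module is loaded these tests are normally driven from user
 * space with the splat utility (e.g. something along the lines of
 * "splat -t kmem:all"; the exact flag syntax may vary by version).
 */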
splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}