/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include "splat-internal.h"

#define SPLAT_KMEM_NAME                 "kmem"
#define SPLAT_KMEM_DESC                 "Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID             0x0101
#define SPLAT_KMEM_TEST1_NAME           "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC           "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID             0x0102
#define SPLAT_KMEM_TEST2_NAME           "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC           "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID             0x0103
#define SPLAT_KMEM_TEST3_NAME           "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC           "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID             0x0104
#define SPLAT_KMEM_TEST4_NAME           "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC           "Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID             0x0105
#define SPLAT_KMEM_TEST5_NAME           "slab_small"
#define SPLAT_KMEM_TEST5_DESC           "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID             0x0106
#define SPLAT_KMEM_TEST6_NAME           "slab_large"
#define SPLAT_KMEM_TEST6_DESC           "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID             0x0107
#define SPLAT_KMEM_TEST7_NAME           "slab_align"
#define SPLAT_KMEM_TEST7_DESC           "Slab alignment test"

#define SPLAT_KMEM_TEST8_ID             0x0108
#define SPLAT_KMEM_TEST8_NAME           "slab_reap"
#define SPLAT_KMEM_TEST8_DESC           "Slab reaping test"

#define SPLAT_KMEM_TEST9_ID             0x0109
#define SPLAT_KMEM_TEST9_NAME           "slab_age"
#define SPLAT_KMEM_TEST9_DESC           "Slab aging test"

#define SPLAT_KMEM_TEST10_ID            0x010a
#define SPLAT_KMEM_TEST10_NAME          "slab_lock"
#define SPLAT_KMEM_TEST10_DESC          "Slab locking test"

#ifdef _LP64
#define SPLAT_KMEM_TEST11_ID            0x010b
#define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"
#endif /* _LP64 */

#define SPLAT_KMEM_TEST12_ID            0x010c
#define SPLAT_KMEM_TEST12_NAME          "vmem_size"
#define SPLAT_KMEM_TEST12_DESC          "Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT          10
#define SPLAT_VMEM_ALLOC_COUNT          10

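/* Validate kmem_alloc() by performing SPLAT_KMEM_ALLOC_COUNT allocations
 * at each power-of-two size from PAGE_SIZE up to 32*PAGE_SIZE.  Every
 * allocation must succeed; __GFP_NOWARN suppresses the console warnings
 * the kernel would otherwise emit if a large allocation fails. */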
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP | __GFP_NOWARN);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

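/* Validate kmem_zalloc() over the same size range and additionally
 * verify that every returned buffer was zero filled. */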
static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP | __GFP_NOWARN);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed so we never dereference NULL */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

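/* Validate vmem_alloc() with SPLAT_VMEM_ALLOC_COUNT allocations at each
 * power-of-two size from PAGE_SIZE up to 1024*PAGE_SIZE. */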
static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

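/* Validate vmem_zalloc() over the same size range and verify zero fill. */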
static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed so we never dereference NULL */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            1024
#define SPLAT_KMEM_OBJ_RECLAIM          20 /* percent */
#define SPLAT_KMEM_THREADS              32

#define KCP_FLAG_READY                  0x01

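/* Each structure below ends in a zero-length array (a GCC extension) so
 * a single vmem_zalloc() of sizeof(struct) + count * sizeof(pointer) can
 * size the trailing object table at allocation time; see
 * splat_kmem_cache_test_kcp_alloc() and splat_kmem_cache_test_kct_alloc(). */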
typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

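/* Cache constructor/destructor pair.  The constructor stamps each object
 * with the test magic, sets kcd_flag, and fills the payload with 0xaa;
 * the destructor clears the magic and flag and fills with 0xbb.  The
 * shared kcp_count tracks outstanding constructed objects so the tests
 * can verify every constructor call was matched by a destructor call. */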
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if ((--count) == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                if (!kct) {
                        spin_unlock(&kcp->kcp_lock);
                        continue;
                }

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if ((--count) == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
                spin_unlock(&kcp->kcp_lock);
        }

        return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

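/* Worker thread body: register with the control structure, wait for the
 * KCP_FLAG_READY flag, then allocate and free kct_kcd_count objects from
 * the shared cache.  The last thread to exit wakes the control thread. */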
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrent with the reclaim function.  If
         * we are not in a low memory situation we have one lock per
         * thread so they are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

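/* Generic single-object cache test: create a cache, allocate one object,
 * verify the constructor ran and received the private data, free the
 * object, then destroy the cache and confirm a destructor ran for every
 * constructed object. */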
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache, which forces the destructors to run,
         * then verify one was called for every constructed object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

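/* Generic threaded cache test: spawn SPLAT_KMEM_THREADS workers against
 * a shared cache, release them simultaneously for maximum concurrency,
 * and fail with -ETIME if the run exceeds max_time seconds. */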
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate object alignment behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc = 0;

        for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

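/* Slab reaping test: allocate SPLAT_KMEM_OBJ_COUNT objects, then call
 * kmem_cache_reap_now() repeatedly, allowing up to a minute for all
 * objects and slabs to be released via the registered reclaim callback. */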
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few reasons
         * this may not immediately result in more free memory even if objects
         * are freed.  First, due to fragmentation we may not be able to
         * reclaim any slabs.  Second, even if we do fully clear some slabs
         * we will not want to immediately reclaim all of them because we may
         * contend with cache allocs and thrash.  What we want to see is the
         * slab size decrease more gradually as it becomes clear they will
         * not be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for the failure case of time expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

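/* Slab aging test: allocate and free a large number of objects, then
 * wait up to a minute for the cache's aging mechanism to release the
 * now-empty slabs on its own; no explicit reap is ever requested. */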
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However, since
         * there should be little memory pressure at the moment those
         * slabs have not been freed.  What we want to see is the slab
         * size decrease gradually as it becomes clear they will not
         * be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown likely indicates a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc;
        int rc = 0;

        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

#ifdef _LP64
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out-of-memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc;
        int rc;

        size = 256*1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}
#endif /* _LP64 */

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocating a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
        size_t alloc1, free1, total1;
        size_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc1,
                     (unsigned long)free1, (unsigned long)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc2,
                     (unsigned long)free2, (unsigned long)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
                             (unsigned long)alloc2, (unsigned long)alloc1, size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
                             (unsigned long)free2, (unsigned long)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC | VMEM_FREE not constant: "
                             "%lu != %lu\n", (unsigned long)total2,
                             (unsigned long)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
                     (long)abs(alloc1 + (long)size - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance:  ~%ld%% (%ld/%d)\n",
                     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
                     (long)abs((free1 - (long)size) - free2), size);

        return 0;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#ifdef _LP64
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif /* _LP64 */
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#ifdef _LP64
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif /* _LP64 */
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}