/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"

#define SPLAT_KMEM_TEST12_ID		0x010c
#define SPLAT_KMEM_TEST12_NAME		"vmem_size"
#define SPLAT_KMEM_TEST12_DESC		"Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10
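/*
 * For reference, the Solaris-style interface exercised by the tests
 * below pairs every allocation with a size-matched free; unlike
 * kfree(), the caller must remember the allocation size.  A minimal
 * sketch (illustrative only, not part of any test):
 *
 *	void *buf = kmem_alloc(len, KM_SLEEP);	// KM_SLEEP may block
 *	// ... use buf ...
 *	kmem_free(buf, len);			// len must match the alloc
 */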
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/* We are intentionally going to push kmem_alloc to its maximum
	 * allocation size, so suppress the console warnings for now. */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}
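/*
 * kmem_zalloc() is expected to behave like kmem_alloc() followed by a
 * zero fill of the returned buffer, which is what the next test
 * verifies by scanning every byte.  Illustrative equivalence (a sketch,
 * not the implementation):
 *
 *	ptr = kmem_alloc(size, KM_SLEEP);
 *	if (ptr)
 *		memset(ptr, 0, size);
 */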
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/* We are intentionally going to push kmem_zalloc to its maximum
	 * allocation size, so suppress the console warnings for now. */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
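/*
 * Note the different ceilings above and below: the kmem tests stop at
 * 32 pages while the vmem tests walk up to 1024 pages, since
 * vmem_alloc() returns virtually (not physically) contiguous memory
 * and can satisfy much larger requests.  The pairing mirrors kmem
 * (sketch only):
 *
 *	void *buf = vmem_alloc(size, KM_SLEEP);
 *	vmem_free(buf, size);
 */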
static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		20 /* percent */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	kmem_cache_t *kct_cache;
	spinlock_t kct_lock;
	int kct_id;
	int kct_kcd_count;
	kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
	int kcp_kcd_count;
	kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;
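/*
 * Both kmem_cache_thread_t and kmem_cache_priv_t end in a zero-length
 * array, so a single allocation carries the header plus a variable
 * number of object pointers.  The allocation size for 'count' tracked
 * objects is therefore (mirroring the alloc/free helpers below):
 *
 *	sizeof(kmem_cache_priv_t) + count * sizeof(kmem_cache_data_t *)
 */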
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc, int count)
{
	kmem_cache_priv_t *kcp;

	kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;
	kcp->kcp_kcd_count = count;

	return kcp;
}
static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	vmem_free(kcp, sizeof(kmem_cache_priv_t) +
		  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
	kmem_cache_thread_t *kct;

	ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
	kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_cache = NULL;
	kct->kct_id = id;
	kct->kct_kcd_count = count;

	return kct;
}
static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
	vmem_free(kct, sizeof(kmem_cache_thread_t) +
		  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}
static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}
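/*
 * The constructor/destructor pair above is deliberately symmetric: the
 * constructor sets kcd_magic/kcd_flag and fills the payload with 0xaa,
 * while the destructor clears them and fills with 0xbb.  Because
 * kcp_count is incremented in one and decremented in the other, a
 * kcp_count of zero after kmem_cache_destroy() proves a destructor ran
 * for every constructed object.
 */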
/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	int i, j, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
	count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

	/* Objects directly attached to the kcp */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < kcp->kcp_kcd_count; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;

			if ((--count) == 0)
				break;
		}
	}
	spin_unlock(&kcp->kcp_lock);

	/* No threads containing objects to consider */
	if (kcp->kcp_kct_count == -1)
		return;

	/* Objects attached to a kct thread */
	for (i = 0; i < kcp->kcp_kct_count; i++) {
		spin_lock(&kcp->kcp_lock);
		kct = kcp->kcp_kct[i];
		spin_unlock(&kcp->kcp_lock);
		if (!kct)
			continue;

		spin_lock(&kct->kct_lock);
		count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

		for (j = 0; j < kct->kct_kcd_count; j++) {
			if (kct->kct_kcd[j]) {
				kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
				kct->kct_kcd[j] = NULL;

				if ((--count) == 0)
					break;
			}
		}
		spin_unlock(&kct->kct_lock);
	}

	return;
}
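/*
 * The reclaim hook is registered with the cache at creation time and
 * can also be driven synchronously; a sketch of both paths as used
 * later in this file:
 *
 *	cache = kmem_cache_create(name, size, align, ctor, dtor,
 *				  splat_kmem_cache_test_reclaim,
 *				  priv, NULL, 0);
 *	kmem_cache_reap_now(cache);	// invokes the reclaim callback
 */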
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}
static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id, i;
	void *obj;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/*
	 * Updates to kct->kct_kcd[] are performed under a spin_lock so
	 * they may safely run concurrently with the reclaim function.
	 * If we are not in a low memory situation we have one lock per
	 * thread, so they are not expected to be contended.
	 */
	for (i = 0; i < kct->kct_kcd_count; i++) {
		obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kct->kct_lock);
		kct->kct_kcd[i] = obj;
		spin_unlock(&kct->kct_lock);
	}

	for (i = 0; i < kct->kct_kcd_count; i++) {
		spin_lock(&kct->kct_lock);
		if (kct->kct_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
			kct->kct_kcd[i] = NULL;
		}
		spin_unlock(&kct->kct_lock);
	}
out:
	spin_lock(&kcp->kcp_lock);
	if (kct) {
		splat_kmem_cache_test_kct_free(kct);
		kcp->kcp_kct[id] = kct = NULL;
	}

	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}
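/*
 * Thread rendezvous used above, in brief: each worker bumps
 * kcp_kct_count and sleeps on kcp_thr_waitq until the control thread
 * observes all SPLAT_KMEM_THREADS workers, sets KCP_FLAG_READY, and
 * calls wake_up_all().  The same kcp_ctl_waitq is then reused to wait
 * for kcp_kct_count to drain back to zero as the workers exit.
 */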
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_kcd[0] = NULL;
	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
				  kcp->kcp_size, kcp->kcp_align,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
			     "Unable to create '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
			     "Unable to allocate from '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
			     "Failed to run constructor for '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
			     "Failed to pass private data to constructor "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache, which forces the destructors to
	 * run so we can verify one was called for every object. */
	kmem_cache_destroy(kcp->kcp_cache);
	if (kcp->kcp_count) {
		splat_vprint(file, name,
			     "Failed to run destructor on all slab objects "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
		     "Successfully ran ctors/dtors for %d elements in '%s'\n",
		     max, SPLAT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
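/*
 * Timing above uses the kernel timespec helpers; the pattern, in
 * brief:
 *
 *	struct timespec start, stop, delta;
 *	start = current_kernel_time();
 *	// ... workload ...
 *	stop  = current_kernel_time();
 *	delta = timespec_sub(stop, start);	// delta.tv_sec vs. max_time
 */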
/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}
/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}
/* Validate object alignment cache behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int i, rc;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
		if (rc)
			return rc;
	}

	return rc;
}
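/*
 * The sweep above doubles the requested alignment from
 * SPL_KMEM_CACHE_ALIGN up to PAGE_SIZE while using an odd 157-byte
 * object, so every address handed out by the cache should satisfy
 * (sketch):
 *
 *	ASSERT(((uintptr_t)obj % align) == 0);
 */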
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some
	 * slabs we will not want to immediately reclaim all of them because
	 * we may contend with cache allocs and thrash.  What we want to see
	 * is the slab size decrease more gradually as it becomes clear they
	 * will not be needed.  This should be achievable in less than a
	 * minute; if it takes longer than that something has gone wrong. */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects, thus creating a
	 * large number of slabs, and then freed them all.  However, since
	 * there should be little memory pressure at the moment those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not be
	 * needed.  This should be achievable in less than a minute; if
	 * it takes longer than that something has gone wrong. */
	for (i = 0; i < 60; i++) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no single test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, rc = 0;

	for (size = 16; size <= 1024*1024; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s  %s", "name",
			     "time (sec)\tslabs       \tobjs\thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s  %s", "",
			     "          \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed available memory.  We
			 * leverage availrmem here for some extra testing. */
			if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
				SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
			if (rc)
				break;
		}
	}

	return rc;
}
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system, so it is possible the test app may get OOM killed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 256*1024;
	alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s  %s", "name",
		     "time (sec)\tslabs       \tobjs\thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s  %s", "",
		     "          \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
		SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

	return rc;
}
/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
	size_t alloc1, free1, total1;
	size_t alloc2, free2, total2;
	int size = 8*1024*1024;
	void *ptr;

	alloc1 = vmem_size(NULL, VMEM_ALLOC);
	free1  = vmem_size(NULL, VMEM_FREE);
	total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc1,
		     (unsigned long)free1, (unsigned long)total1);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
	ptr = vmem_alloc(size, KM_SLEEP);
	if (!ptr) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
			     "Failed to alloc %d bytes\n", size);
		return -ENOMEM;
	}

	alloc2 = vmem_size(NULL, VMEM_ALLOC);
	free2  = vmem_size(NULL, VMEM_FREE);
	total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc2,
		     (unsigned long)free2, (unsigned long)total2);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
	vmem_free(ptr, size);
	if (alloc2 < (alloc1 + size - (size / 100)) ||
	    alloc2 > (alloc1 + size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
			     (unsigned long)alloc2, (unsigned long)alloc1, size);
		return -ERANGE;
	}

	if (free2 < (free1 - size - (size / 100)) ||
	    free2 > (free1 - size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
			     (unsigned long)free2, (unsigned long)free1, size);
		return -ERANGE;
	}

	if (total1 != total2) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC | VMEM_FREE not constant: "
			     "%lu != %lu\n", (unsigned long)total2,
			     (unsigned long)total1);
		return -ERANGE;
	}

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
		     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
		     (long)abs(alloc1 + (long)size - alloc2), size);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_FREE within tolerance:  ~%ld%% (%ld/%d)\n",
		     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
		     (long)abs((free1 - (long)size) - free2), size);

	return 0;
}
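/*
 * Worked example of the 1% tolerance checks above: with
 * size = 8*1024*1024, size/100 is 83886 bytes, so alloc2 must land
 * within [alloc1 + size - 83886, alloc1 + size + 83886] (and free2
 * within the mirrored range) for the test to pass.
 */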
splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
			SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

	return sub;
}
void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}
int
splat_kmem_id(void) {
	return SPLAT_SUBSYSTEM_KMEM;
}