/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_SUBSYSTEM_KMEM            0x0100
#define SPLAT_KMEM_NAME                 "kmem"
#define SPLAT_KMEM_DESC                 "Kernel Malloc/Slab Tests"
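
/*
 * These tests are normally driven from user space with the splat
 * utility, e.g. `splat -t kmem:all` to run every test registered
 * below, or `splat -t kmem:slab_reap` for a single named test
 * (invocation syntax assumed from the SPLAT user tool).
 */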

#define SPLAT_KMEM_TEST1_ID             0x0101
#define SPLAT_KMEM_TEST1_NAME           "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC           "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID             0x0102
#define SPLAT_KMEM_TEST2_NAME           "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC           "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID             0x0103
#define SPLAT_KMEM_TEST3_NAME           "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC           "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID             0x0104
#define SPLAT_KMEM_TEST4_NAME           "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC           "Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID             0x0105
#define SPLAT_KMEM_TEST5_NAME           "slab_small"
#define SPLAT_KMEM_TEST5_DESC           "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID             0x0106
#define SPLAT_KMEM_TEST6_NAME           "slab_large"
#define SPLAT_KMEM_TEST6_DESC           "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID             0x0107
#define SPLAT_KMEM_TEST7_NAME           "slab_align"
#define SPLAT_KMEM_TEST7_DESC           "Slab alignment test"

#define SPLAT_KMEM_TEST8_ID             0x0108
#define SPLAT_KMEM_TEST8_NAME           "slab_reap"
#define SPLAT_KMEM_TEST8_DESC           "Slab reaping test"

#define SPLAT_KMEM_TEST9_ID             0x0109
#define SPLAT_KMEM_TEST9_NAME           "slab_age"
#define SPLAT_KMEM_TEST9_DESC           "Slab aging test"

#define SPLAT_KMEM_TEST10_ID            0x010a
#define SPLAT_KMEM_TEST10_NAME          "slab_lock"
#define SPLAT_KMEM_TEST10_DESC          "Slab locking test"

#define SPLAT_KMEM_TEST11_ID            0x010b
#define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"

#define SPLAT_KMEM_ALLOC_COUNT          10
#define SPLAT_VMEM_ALLOC_COUNT          10
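
/*
 * Tests 1-4 below sweep doubling allocation sizes through the four
 * basic allocators: kmem_alloc()/kmem_zalloc() up to 32 pages and
 * vmem_alloc()/vmem_zalloc() up to 1024 pages.  Every one of the
 * SPLAT_KMEM_ALLOC_COUNT allocations in a batch must succeed, and
 * the zalloc variants additionally verify the memory is zero filled.
 */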

static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_zalloc to its max
         * allocation size, so suppress the console warnings for now */
        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure buffer has been zero filled */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure buffer has been zero filled */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            1024
#define SPLAT_KMEM_OBJ_RECLAIM          20 /* percent */
#define SPLAT_KMEM_THREADS              32

#define KCP_FLAG_READY                  0x01
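
/*
 * KCP_FLAG_READY implements a simple start barrier: worker threads
 * block on kcp_thr_waitq until the control thread sets this flag in
 * kcp_flags, so all workers hit the cache concurrently.
 */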

typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;
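
/*
 * A single kcp is shared by all threads in a test run; per-thread
 * state lives in a kct hung off kcp_kct[].  Allocated objects are
 * stashed in the trailing flexible arrays (kcp_kcd[]/kct_kcd[]) so
 * the reclaim callback can locate, free, and NULL them under the
 * matching lock.
 */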

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_alloc = alloc;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}
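
/*
 * The constructor/destructor pair below tag each object with the
 * private magic and fill the payload with distinct patterns (0xaa on
 * construction, 0xbb on destruction) so stale objects stand out,
 * while kcp_count tracks live objects so the tests can verify every
 * destructor ran.
 */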
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if (--count == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                spin_unlock(&kcp->kcp_lock);
                if (!kct)
                        continue;

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache,
                                                kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if (--count == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
        }

        return;
}
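
/*
 * wait_event() predicates; both sample their condition under
 * kcp_lock so the control and worker threads observe consistent
 * values of kcp_kct_count and kcp_flags.
 */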
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrent with the reclaim function.  If
         * we are not in a low memory situation we have one lock per-
         * thread so they are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache, which forces the destructors to
         * run so we can verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}
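
/*
 * For reference, a minimal consumer of the SPL slab API follows the
 * same pattern exercised above (an illustrative sketch only; the
 * "my_*" names are hypothetical):
 *
 *      kmem_cache_t *cache;
 *      my_obj_t *obj;
 *
 *      cache = kmem_cache_create("my_cache", sizeof(my_obj_t), 0,
 *                                my_ctor, my_dtor, NULL, priv, NULL, 0);
 *      obj = kmem_cache_alloc(cache, KM_SLEEP);
 *      ...
 *      kmem_cache_free(cache, obj);
 *      kmem_cache_destroy(cache);
 */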
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, KMC_VMEM);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                splat_kmem_cache_test_kcp_free(kcp);
                return -ENOMEM;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        break;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

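        /*
         * The tot/max columns report what the cache actually used; the
         * calc column is the expected value: alloc objects per thread
         * times SPLAT_KMEM_THREADS, divided by objects-per-slab for
         * the slab figure.
         */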
        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate object alignment cache behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc;

        for (i = 8; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

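/*
 * Validate explicit slab reclaim: fill a cache, then repeatedly call
 * kmem_cache_reap_now() and watch skc_obj_total fall to zero.
 */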
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few reasons
         * this may not immediately result in more free memory even if objects
         * are freed.  First, due to fragmentation we may not be able to
         * reclaim any slabs.  Second, even if we do fully clear some slabs
         * we will not want to immediately reclaim all of them because we may
         * contend with cache allocs and thrash.  What we want to see is the
         * slab size decrease more gradually as it becomes clear they will
         * not be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong. */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

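/*
 * Validate slab aging: allocate and free a large number of objects,
 * then, with no explicit kmem_cache_reap_now(), watch the unused
 * slabs be released gradually by the cache's own aging.
 */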
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However, since
         * there should be little memory pressure at the moment those
         * slabs have not been freed.  What we want to see is the slab
         * size decrease gradually as it becomes clear they will not
         * be needed.  This should be achievable in less than a minute;
         * if it takes longer than this something has gone wrong. */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, free_mem, rc = 0;

        free_mem = nr_free_pages() * PAGE_SIZE;
        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed free memory */
                        if (size * alloc * SPLAT_KMEM_THREADS > free_mem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get OOM-killed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 256*1024;
        alloc = ((4 * num_physpages * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void) {
        return SPLAT_SUBSYSTEM_KMEM;
}