/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "splat-internal.h"

#define SPLAT_SUBSYSTEM_KMEM		0x0100
#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10
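
/*
 * Test 1: allocate kmem buffers from PAGE_SIZE up to 32 pages in
 * batches of SPLAT_KMEM_ALLOC_COUNT, verifying every batch succeeds.
 */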
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/* We are intentionally going to push kmem_alloc to its max
	 * allocation size, so suppress the console warnings for now */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}
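
/*
 * Test 2: same as test 1 but uses kmem_zalloc() and additionally
 * verifies that every returned buffer is fully zero filled.
 */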
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/* We are intentionally going to push kmem_zalloc to its max
	 * allocation size, so suppress the console warnings for now */
	kmem_set_warning(0);

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	kmem_set_warning(1);

	return rc;
}
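
/*
 * Test 3: allocate vmem buffers from PAGE_SIZE up to 1024 pages in
 * batches of SPLAT_VMEM_ALLOC_COUNT, verifying every batch succeeds.
 */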
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
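
/*
 * Test 4: same as test 3 but uses vmem_zalloc() and additionally
 * verifies that every returned buffer is fully zero filled.
 */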
static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}
#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		20 /* percent */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01
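
/*
 * Shared state for the slab cache tests: kmem_cache_data_t is the
 * per-object payload, kmem_cache_thread_t tracks the objects owned by
 * one worker thread, and kmem_cache_priv_t is the private data handed
 * to the cache constructor/destructor/reclaim callbacks.
 */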
typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	kmem_cache_t *kct_cache;
	spinlock_t kct_lock;
	int kct_id;
	int kct_kcd_count;
	kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
	int kcp_kcd_count;
	kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;
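
/*
 * Allocate and initialize the private state shared by a cache test;
 * it is sized to track up to 'count' directly attached objects.
 */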
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc, int count)
{
	kmem_cache_priv_t *kcp;

	kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;
	kcp->kcp_kcd_count = count;

	return kcp;
}
static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	vmem_free(kcp, sizeof(kmem_cache_priv_t) +
		  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}
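
/*
 * Allocate and initialize the per-thread state used by the locking
 * and overcommit tests; it is sized to track 'count' objects.
 */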
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
	kmem_cache_thread_t *kct;

	ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
	kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_cache = NULL;
	kct->kct_id = id;
	kct->kct_kcd_count = count;

	return kct;
}
static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
	vmem_free(kct, sizeof(kmem_cache_thread_t) +
		  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}
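
/*
 * Cache constructor/destructor pair.  The constructor stamps each new
 * object with the test magic and a 0xaa fill pattern, the destructor
 * overwrites it with 0xbb; kcp_count tracks live constructed objects.
 */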
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}
static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}
/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	int i, j, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
	count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

	/* Objects directly attached to the kcp */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < kcp->kcp_kcd_count; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;

			if ((--count) == 0)
				break;
		}
	}
	spin_unlock(&kcp->kcp_lock);

	/* No threads containing objects to consider */
	if (kcp->kcp_kct_count == -1)
		return;

	/* Objects attached to a kct thread */
	for (i = 0; i < kcp->kcp_kct_count; i++) {
		spin_lock(&kcp->kcp_lock);
		kct = kcp->kcp_kct[i];
		spin_unlock(&kcp->kcp_lock);
		if (!kct)
			continue;

		spin_lock(&kct->kct_lock);
		count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

		for (j = 0; j < kct->kct_kcd_count; j++) {
			if (kct->kct_kcd[j]) {
				kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
				kct->kct_kcd[j] = NULL;

				if ((--count) == 0)
					break;
			}
		}
		spin_unlock(&kct->kct_lock);
	}

	return;
}
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}
static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}
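
/*
 * Worker thread body: register with the shared kcp, wait for the
 * ready flag, then allocate and free kct_kcd_count objects from the
 * shared cache.  The last thread to finish wakes the control thread.
 */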
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id, i;
	void *obj;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/*
	 * Updates to kct->kct_kcd[] are performed under a spin_lock so
	 * they may safely run concurrent with the reclaim function.  If
	 * we are not in a low memory situation we have one lock per-
	 * thread so they are not expected to be contended.
	 */
	for (i = 0; i < kct->kct_kcd_count; i++) {
		obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kct->kct_lock);
		kct->kct_kcd[i] = obj;
		spin_unlock(&kct->kct_lock);
	}

	for (i = 0; i < kct->kct_kcd_count; i++) {
		spin_lock(&kct->kct_lock);
		if (kct->kct_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
			kct->kct_kcd[i] = NULL;
		}
		spin_unlock(&kct->kct_lock);
	}
out:
	spin_lock(&kcp->kcp_lock);
	if (kct) {
		splat_kmem_cache_test_kct_free(kct);
		kcp->kcp_kct[id] = kct = NULL;
	}

	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}
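
/*
 * Single-threaded cache sanity test: create a cache, allocate one
 * object, verify the constructor ran and received the private data,
 * then destroy the cache and verify the destructor was called for
 * every constructed object.
 */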
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
				  kcp->kcp_size, kcp->kcp_align,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
			     "Unable to create '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
			     "Unable to allocate from '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
			     "Failed to run constructor for '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
			     "Failed to pass private data to constructor "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache which will force destructors to
	 * run and we can verify one was called for every object */
	kmem_cache_destroy(kcp->kcp_cache);
	if (kcp->kcp_count) {
		splat_vprint(file, name,
			     "Failed to run destructor on all slab objects "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_vprint(file, name,
		     "Successfully ran ctors/dtors for %d elements in '%s'\n",
		     max, SPLAT_KMEM_CACHE_NAME);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
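
/*
 * Concurrency driver: spawn SPLAT_KMEM_THREADS workers against one
 * shared cache, release them simultaneously, and fail if the run
 * takes 5 seconds or longer (used by tests 10 and 11 below).
 */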
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, KMC_VMEM);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= 5)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}
/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}
/* Validate object alignment cache behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int i, rc;

	for (i = 8; i <= PAGE_SIZE; i *= 2) {
		rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
		if (rc)
			return rc;
	}

	return rc;
}
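
/*
 * Test 8: allocate SPLAT_KMEM_OBJ_COUNT objects, then repeatedly call
 * kmem_cache_reap_now() and poll for up to 60 seconds waiting for all
 * slabs to be reclaimed.
 */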
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some slabs
	 * we will not want to immediately reclaim all of them because we may
	 * contend with cache allocs and thrash.  What we want to see is the
	 * slab size decrease more gradually as it becomes clear they will
	 * not be needed.  This should be achievable in less than a minute;
	 * if it takes longer than this something has gone wrong. */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
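
/*
 * Test 9: allocate and free a large number of objects, then poll for
 * up to 60 seconds waiting for the unused slabs to age out without an
 * explicit reap.
 */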
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects, thus creating a
	 * large number of slabs, and then freed them all.  However, since
	 * there should be little memory pressure at the moment those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not be
	 * needed.  This should be achievable in less than a minute; if
	 * it takes longer than this something has gone wrong. */
	for (i = 0; i < 60; i++) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
			     SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		for_each_online_cpu(j)
			splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);

		splat_print(file, "%s\n", "");

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}
/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, free_mem, rc = 0;

	free_mem = nr_free_pages() * PAGE_SIZE;
	for (size = 16; size <= 1024*1024; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
			     "time (sec)\tslabs \tobjs \thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
			     " \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed free memory */
			if (size * alloc * SPLAT_KMEM_THREADS > free_mem / 2)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
				SPLAT_KMEM_TEST10_NAME, size, alloc);
			if (rc)
				break;
		}
	}

	return rc;
}
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 1024*1024;
	alloc = ((4 * num_physpages * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
		SPLAT_KMEM_TEST11_NAME, size, alloc);

	return rc;
}
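
/* Register the kmem subsystem and each of the tests defined above */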
splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);

	return sub;
}
void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}
int
splat_kmem_id(void) {
	return SPLAT_SUBSYSTEM_KMEM;
}