/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/random.h>
#include <sys/thread.h>
#include <sys/vmsystm.h>
#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

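/*
 * Each test below is registered with the SPLAT framework under the
 * "kmem" subsystem.  As an illustration (assuming the standard splat
 * user space utility), an individual test can be run with a command
 * of the form:
 *
 *	splat -t kmem:kmem_alloc
 */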
#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#if 0
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif

#define SPLAT_KMEM_TEST13_ID		0x010d
#define SPLAT_KMEM_TEST13_NAME		"slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC		"Slab direct memory reclaim test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

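/*
 * Validate kmem_alloc() by allocating a batch of buffers at every
 * power-of-two size from PAGE_SIZE up to spl_kmem_alloc_warn.  Every
 * allocation in a batch must succeed for the test to pass.
 */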
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

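/*
 * Validate kmem_zalloc() using the same doubling strategy as above,
 * additionally verifying that every returned buffer is zero filled.
 */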
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_alloc() size to ensure both
	 * the kmem_alloc() and vmem_alloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_zalloc() size to ensure both
	 * the kmem_zalloc() and vmem_zalloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		32 /* objects */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

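/*
 * Shared state for the slab cache tests.  A single kmem_cache_priv_t
 * (kcp) describes the cache under test, each worker thread gets a
 * kmem_cache_thread_t (kct), and every object allocated from the cache
 * is a kmem_cache_data_t (kcd) linked onto its thread's kct_list.
 */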
typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	struct list_head kcd_node;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	spinlock_t kct_lock;
	int kct_id;
	struct list_head kct_list;
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	spl_wait_queue_head_t kcp_ctl_waitq;
	spl_wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
} kmem_cache_priv_t;

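/*
 * Helpers to set up and tear down the shared kcp state and the
 * per-thread kct structures registered in kcp->kcp_kct[].
 */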
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc)
{
	kmem_cache_priv_t *kcp;

	kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;

	return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	kmem_free(kcp, sizeof(kmem_cache_priv_t));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
	kmem_cache_thread_t *kct;

	ASSERT3S(id, <, SPLAT_KMEM_THREADS);
	ASSERT(kcp->kcp_kct[id] == NULL);

	kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_id = id;
	INIT_LIST_HEAD(&kct->kct_list);

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[kct->kct_id] = NULL;
	spin_unlock(&kcp->kcp_lock);

	kmem_free(kct, sizeof(kmem_cache_thread_t));
}

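/*
 * Helpers to allocate 'count' objects from the cache onto a thread's
 * list and to drain that list back to the cache.  Note kct_lock is
 * dropped around each kmem_cache_free() so the reclaim callback can
 * make progress concurrently.
 */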
static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	kmem_cache_data_t *kcd;

	spin_lock(&kct->kct_lock);
	while (!list_empty(&kct->kct_list)) {
		kcd = list_entry(kct->kct_list.next,
				 kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		spin_unlock(&kct->kct_lock);

		kmem_cache_free(kcp->kcp_cache, kcd);

		spin_lock(&kct->kct_lock);
	}
	spin_unlock(&kct->kct_lock);
}

static int
splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
				kmem_cache_thread_t *kct, int count)
{
	kmem_cache_data_t *kcd;
	int i;

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd == NULL) {
			splat_kmem_cache_test_kcd_free(kcp, kct);
			return -ENOMEM;
		}

		spin_lock(&kct->kct_lock);
		list_add_tail(&kcd->kcd_node, &kct->kct_list);
		spin_unlock(&kct->kct_lock);
	}

	return 0;
}

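/*
 * Print a one line summary of the cache state: object counts, slab
 * usage, and per-CPU magazine fill levels when magazines are enabled.
 */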
static void
splat_kmem_cache_test_debug(struct file *file, char *name,
			    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name, "%s cache objects %d",
		     kcp->kcp_cache->skc_name, kcp->kcp_count);

	if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
			splat_vprint(file, name, "%s", "mags");

			for_each_online_cpu(j)
				splat_print(file, "%u/%u ",
					    kcp->kcp_cache->skc_mag[j]->skm_avail,
					    kcp->kcp_cache->skc_mag[j]->skm_size);
		}
	}

	splat_print(file, "%s\n", "");
}

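/*
 * Constructor/destructor callbacks passed to kmem_cache_create().  The
 * constructor stamps each object with the test magic and a 0xaa fill
 * pattern; the destructor clears the magic and overwrites the buffer
 * with 0xbb so stale objects are easy to spot.
 */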
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		INIT_LIST_HEAD(&kcd->kcd_node);
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	kmem_cache_data_t *kcd;
	LIST_HEAD(reclaim);
	int i, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* For each kct thread reclaim some objects */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		kct = kcp->kcp_kct[i];
		if (!kct)
			continue;

		spin_unlock(&kcp->kcp_lock);
		spin_lock(&kct->kct_lock);

		count = SPLAT_KMEM_OBJ_RECLAIM;
		while (count > 0 && !list_empty(&kct->kct_list)) {
			kcd = list_entry(kct->kct_list.next,
					 kmem_cache_data_t, kcd_node);
			list_del(&kcd->kcd_node);
			list_add(&kcd->kcd_node, &reclaim);
			count--;
		}

		spin_unlock(&kct->kct_lock);
		spin_lock(&kcp->kcp_lock);
	}
	spin_unlock(&kcp->kcp_lock);

	/* Freed outside the spin lock */
	while (!list_empty(&reclaim)) {
		kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		kmem_cache_free(kcp->kcp_cache, kcd);
	}

	return;
}

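/*
 * Locked predicates used as wait_event() conditions by the control
 * and worker threads below.
 */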
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

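/*
 * Worker thread body: claim an id, register a kct, wait for the ready
 * flag, then allocate and free kcp_alloc objects.  The last thread to
 * start and the last thread to finish each wake the control thread.
 */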
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(kcp, id);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/* Create and destroy objects */
	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
	splat_kmem_cache_test_kcd_free(kcp, kct);
out:
	if (kct)
		splat_kmem_cache_test_kct_free(kcp, kct);

	spin_lock(&kcp->kcp_lock);
	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}

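/*
 * Generic single threaded cache test.  Creates a cache with the given
 * size/align/flags, allocates several slabs worth of objects, and
 * verifies the constructor, destructor, and private data handling.
 */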
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	/* Limit size for low memory machines (1/128 of memory) */
	size = MIN(size, (physmem * PAGE_SIZE) >> 7);

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	    kcp->kcp_size, kcp->kcp_align,
	    splat_kmem_cache_test_constructor,
	    splat_kmem_cache_test_destructor,
	    NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = kcp->kcp_cache->skc_slab_objs * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Success: alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}

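/*
 * Generic multi-threaded cache test.  Spawns SPLAT_KMEM_THREADS
 * workers against a shared cache, releases them simultaneously for
 * maximum concurrency, and fails the run if it exceeds max_time
 * seconds.
 */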
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	getnstimeofday(&start);

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, defclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	getnstimeofday(&stop);
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int i, rc = 0;

	/* Randomly pick small object sizes and alignments. */
	for (i = 0; i < 100; i++) {
		int size, align, flags = 0;
		uint32_t rnd;

		/* Evenly distribute tests over all available cache types */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		switch (rnd & 0x03) {
		default:
		case 0x00:
			flags = 0;
			break;
		case 0x01:
			flags = KMC_KMEM;
			break;
		case 0x02:
			flags = KMC_VMEM;
			break;
		case 0x03:
			flags = KMC_SLAB;
			break;
		}

		/* The following flags are set with a 1/10 chance */
		flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
		flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

		/* Sizes from 32 bytes to PAGE_SIZE */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		size = MAX(rnd % (PAGE_SIZE + 1), 32);

		/* 2^N where (3 <= N <= PAGE_SHIFT) */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

		rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
		if (rc)
			return (rc);
	}

	return (rc);
}

/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int i, max_size, rc = 0;

	/* Randomly pick large object sizes and alignments. */
	for (i = 0; i < 100; i++) {
		int size, align, flags = 0;
		uint32_t rnd;

		/* Evenly distribute tests over all available cache types */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		switch (rnd & 0x03) {
		default:
		case 0x00:
			flags = 0;
			max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
			break;
		case 0x01:
			flags = KMC_KMEM;
			max_size = (SPL_MAX_ORDER_NR_PAGES - 2) * PAGE_SIZE;
			break;
		case 0x02:
			flags = KMC_VMEM;
			max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
			break;
		case 0x03:
			flags = KMC_SLAB;
			max_size = SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
			break;
		}

		/* The following flags are set with a 1/10 chance */
		flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
		flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

		/* Sizes from PAGE_SIZE to max_size */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		size = MAX(rnd % (max_size + 1), PAGE_SIZE);

		/* 2^N where (3 <= N <= PAGE_SHIFT) */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

		rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
		if (rc)
			return (rc);
	}

	return (rc);
}

/*
 * Validate object alignment behavior for caches
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
	int i, rc;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		uint32_t size;

		get_random_bytes((void *)&size, sizeof (uint32_t));
		size = MAX(size % (max_size + 1), 32);

		rc = splat_kmem_cache_test(file, arg, name, size, i, 0);
		if (rc)
			return rc;

		rc = splat_kmem_cache_test(file, arg, name, size, i,
					   KMC_OFFSLAB);
		if (rc)
			return rc;
	}

	return rc;
}

/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the number of slabs decrease more gradually as it becomes clear
 * they will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force reclaim every 1/10 of a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_count,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}

/*
 * Test cache aging: we have allocated a large number of objects thus
 * creating a large number of slabs and then free'd them all.  However,
 * since there should be little memory pressure at the moment those
 * slabs have not been freed.  What we want to see is the slab count
 * decrease gradually as it becomes clear they will not be needed.
 * This should be achievable in less than a minute.  If it takes longer
 * than this something has gone wrong.
 */
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	splat_kmem_cache_test_kcd_free(kcp, kct);

	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_count, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, maxsize, limit, rc = 0;

#if defined(CONFIG_64BIT)
	maxsize = (1024 * 1024);
#else
	maxsize = (128 * 1024);
#endif

	for (size = 32; size <= maxsize; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
			     "time (sec)\tslabs \tobjs \thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
			     " \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed 1/2 of memory. */
			limit = MIN(physmem * PAGE_SIZE,
				    vmem_size(NULL, VMEM_ALLOC | VMEM_FREE)) / 2;
			if (size * alloc * SPLAT_KMEM_THREADS > limit)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
			    SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
			if (rc)
				break;
		}
	}

	return rc;
}

#if 0
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get OOM-killed.  This
 * particular test has proven troublesome on 32-bit archs with limited
 * virtual address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 8 * 1024;
	alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
	    SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

	return rc;
}
#endif

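/*
 * Page-sized filler used by the direct reclaim test below to apply
 * memory pressure one page at a time.
 */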
typedef struct dummy_page {
	struct list_head dp_list;
	char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
} dummy_page_t;

/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	dummy_page_t *dp;
	struct list_head list;
	struct timespec start, stop, delta = { 0, 0 };
	int size, count, slabs, fails = 0;
	int i, rc = 0, max_time = 10;

	size = 128 * 1024;
	count = MIN(physmem * PAGE_SIZE, vmem_size(NULL,
	    VMEM_ALLOC | VMEM_FREE)) / 4 / size;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
					      size, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	i = 0;
	slabs = kcp->kcp_cache->skc_slab_total;
	INIT_LIST_HEAD(&list);
	getnstimeofday(&start);

	/* Apply memory pressure */
	while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

		if ((i % 10000) == 0)
			splat_kmem_cache_test_debug(
			    file, SPLAT_KMEM_TEST13_NAME, kcp);

		getnstimeofday(&stop);
		delta = timespec_sub(stop, start);
		if (delta.tv_sec >= max_time) {
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed to reclaim 3/4 of cache in %ds, "
				     "%u/%u slabs remain\n", max_time,
				     (unsigned)kcp->kcp_cache->skc_slab_total,
				     slabs);
			rc = -ETIME;
			break;
		}

		dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
		if (!dp) {
			fails++;
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed (%d) to allocate page with %u "
				     "slabs still in the cache\n", fails,
				     (unsigned)kcp->kcp_cache->skc_slab_total);
			continue;
		}

		list_add(&dp->dp_list, &list);
		i++;
	}

	if (rc == 0)
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Successfully created %u slabs and with %d alloc "
			     "failures reclaimed 3/4 of them in %d.%03ds\n",
			     slabs, fails,
			     (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

	/* Release memory pressure pages */
	while (!list_empty(&list)) {
		dp = list_entry(list.next, dummy_page_t, dp_list);
		list_del_init(&dp->dp_list);
		free_page((unsigned long)dp);
	}

	/* Release remaining kmem cache objects */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	return rc;
}

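/*
 * Register the kmem subsystem and each of the tests above with the
 * SPLAT framework.
 */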
splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	splat_test_init(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	splat_test_init(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	splat_test_init(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	splat_test_init(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	splat_test_init(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	splat_test_init(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	splat_test_init(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	splat_test_init(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	splat_test_init(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	splat_test_init(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#if 0
	splat_test_init(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif
	splat_test_init(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
			SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

	return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	splat_test_fini(sub, SPLAT_KMEM_TEST13_ID);
#if 0
	splat_test_fini(sub, SPLAT_KMEM_TEST11_ID);
#endif
	splat_test_fini(sub, SPLAT_KMEM_TEST10_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST9_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST8_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST7_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST6_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST5_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST4_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST3_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST2_ID);
	splat_test_fini(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}

int
splat_kmem_id(void)
{
	return SPLAT_SUBSYSTEM_KMEM;
}