/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/random.h>
#include <sys/thread.h>
#include <sys/vmsystm.h>
#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#if 0
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif

#define SPLAT_KMEM_TEST13_ID		0x010d
#define SPLAT_KMEM_TEST13_NAME		"slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC		"Slab direct memory reclaim test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

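/*
 * Validate kmem_alloc() by allocating and freeing SPLAT_KMEM_ALLOC_COUNT
 * buffers at every power-of-two size from PAGE_SIZE up to the
 * spl_kmem_alloc_warn limit.
 */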
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

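/*
 * Validate kmem_zalloc() as above, and additionally verify that every
 * returned buffer has been zero filled.
 */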
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each buffer that was allocated is zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			if (ptr[i] == NULL)
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_alloc() size to ensure both
	 * the kmem_alloc() and vmem_alloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_zalloc() size to ensure both
	 * the kmem_zalloc() and vmem_zalloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each buffer that was allocated is zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			if (ptr[i] == NULL)
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		32 /* objects */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

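/*
 * Shared state for the slab cache tests: a kmem_cache_data_t (kcd) is a
 * single object allocated from the cache, a kmem_cache_thread_t (kct)
 * tracks the list of objects owned by one worker thread, and a
 * kmem_cache_priv_t (kcp) holds the per-test private data shared by the
 * cache callbacks and all worker threads.
 */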
typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	struct list_head kcd_node;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	spinlock_t kct_lock;
	int kct_id;
	struct list_head kct_list;
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc)
{
	kmem_cache_priv_t *kcp;

	kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;

	return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	kmem_free(kcp, sizeof(kmem_cache_priv_t));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
	kmem_cache_thread_t *kct;

	ASSERT3S(id, <, SPLAT_KMEM_THREADS);
	ASSERT(kcp->kcp_kct[id] == NULL);

	kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_id = id;
	INIT_LIST_HEAD(&kct->kct_list);

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[kct->kct_id] = NULL;
	spin_unlock(&kcp->kcp_lock);

	kmem_free(kct, sizeof(kmem_cache_thread_t));
}

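/*
 * Free every object remaining on the given thread's list back to the
 * cache, dropping kct_lock across each kmem_cache_free() call.
 */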
static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	kmem_cache_data_t *kcd;

	spin_lock(&kct->kct_lock);
	while (!list_empty(&kct->kct_list)) {
		kcd = list_entry(kct->kct_list.next,
				 kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		spin_unlock(&kct->kct_lock);

		kmem_cache_free(kcp->kcp_cache, kcd);

		spin_lock(&kct->kct_lock);
	}
	spin_unlock(&kct->kct_lock);
}

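/*
 * Allocate 'count' objects from the cache and link them onto the given
 * thread's list.  On failure every object allocated so far is freed.
 */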
static int
splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
				kmem_cache_thread_t *kct, int count)
{
	kmem_cache_data_t *kcd;
	int i;

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd == NULL) {
			splat_kmem_cache_test_kcd_free(kcp, kct);
			return -ENOMEM;
		}

		spin_lock(&kct->kct_lock);
		list_add_tail(&kcd->kcd_node, &kct->kct_list);
		spin_unlock(&kct->kct_lock);
	}

	return 0;
}

static void
splat_kmem_cache_test_debug(struct file *file, char *name,
			    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name, "%s cache objects %d",
		     kcp->kcp_cache->skc_name, kcp->kcp_count);

	if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
			splat_vprint(file, name, "%s", "mags");

			for_each_online_cpu(j)
				splat_print(file, "%u/%u ",
					    kcp->kcp_cache->skc_mag[j]->skm_avail,
					    kcp->kcp_cache->skc_mag[j]->skm_size);
		}
	}

	splat_print(file, "%s\n", "");
}

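/*
 * Cache constructor/destructor callbacks.  The constructor stamps each
 * object with the test magic, initializes its list node, and fills the
 * payload with 0xaa; the destructor clears the magic and fills the
 * payload with 0xbb.  The number of live objects is tracked in kcp_count.
 */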
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		INIT_LIST_HEAD(&kcd->kcd_node);
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	kmem_cache_data_t *kcd;
	LIST_HEAD(reclaim);
	int i, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* For each kct thread reclaim some objects */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		kct = kcp->kcp_kct[i];
		if (!kct)
			continue;

		spin_unlock(&kcp->kcp_lock);
		spin_lock(&kct->kct_lock);

		count = SPLAT_KMEM_OBJ_RECLAIM;
		while (count > 0 && !list_empty(&kct->kct_list)) {
			kcd = list_entry(kct->kct_list.next,
					 kmem_cache_data_t, kcd_node);
			list_del(&kcd->kcd_node);
			list_add(&kcd->kcd_node, &reclaim);
			count--;
		}

		spin_unlock(&kct->kct_lock);
		spin_lock(&kcp->kcp_lock);
	}
	spin_unlock(&kcp->kcp_lock);

	/* Freed outside the spin lock */
	while (!list_empty(&reclaim)) {
		kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		kmem_cache_free(kcp->kcp_cache, kcd);
	}

	return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

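/*
 * Worker thread body: assign ourselves an id, register a kct, wait for
 * all sibling threads to be ready, then allocate and free kcp_alloc
 * objects from the shared cache.  The last thread to finish wakes the
 * control thread waiting on kcp_ctl_waitq.
 */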
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(kcp, id);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/* Create and destroy objects */
	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
	splat_kmem_cache_test_kcd_free(kcp, kct);
out:
	if (kct)
		splat_kmem_cache_test_kct_free(kcp, kct);

	spin_lock(&kcp->kcp_lock);
	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}

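/*
 * Create a cache with the requested size, alignment, and flags, then
 * allocate and free several slabs worth of objects while verifying that
 * the constructor, destructor, and private data are handled correctly.
 */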
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
					   kcp->kcp_size, kcp->kcp_align,
					   splat_kmem_cache_test_constructor,
					   splat_kmem_cache_test_destructor,
					   NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = SPL_KMEM_CACHE_OBJ_PER_SLAB * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
		}

		/* Already freed, keep the error path from freeing it again */
		kcd[i] = NULL;

		if (rc)
			goto out_free;
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Successfully alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}

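/*
 * Create SPLAT_KMEM_THREADS threads which concurrently allocate and free
 * 'alloc' objects of 'size' bytes from a shared cache, then report the
 * elapsed time along with the total/max/calculated slab and object counts.
 * A run which takes longer than 'max_time' seconds is treated as a failure.
 */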
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	getnstimeofday(&start);

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	getnstimeofday(&stop);
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int i, rc = 0;

	/* Randomly pick small object sizes and alignments. */
	for (i = 0; i < 100; i++) {
		int size, align, flags = 0;
		uint32_t rnd;

		/* Evenly distribute tests over all valid cache types */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		switch (rnd & 0x03) {
		default:
		case 0x00:
			flags = 0;
			break;
		case 0x01:
			flags = KMC_KMEM;
			break;
		case 0x02:
			flags = KMC_VMEM;
			break;
		case 0x03:
			flags = KMC_SLAB;
			break;
		}

		/* The following flags are set with a 1/10 chance */
		flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
		flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

		/* 32b - PAGE_SIZE */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		size = MAX(rnd % (PAGE_SIZE + 1), 32);

		/* 2^N where (3 <= N <= PAGE_SHIFT) */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

		rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
		if (rc)
			return (rc);
	}

	return (rc);
}

/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int i, max_size, rc = 0;

	/* Randomly pick large object sizes and alignments. */
	for (i = 0; i < 100; i++) {
		int size, align, flags = 0;
		uint32_t rnd;

		/* Evenly distribute tests over all valid cache types */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		switch (rnd & 0x03) {
		default:
		case 0x00:
			flags = 0;
			max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
			break;
		case 0x01:
			flags = KMC_KMEM;
			max_size = (SPL_MAX_ORDER_NR_PAGES - 2) * PAGE_SIZE;
			break;
		case 0x02:
			flags = KMC_VMEM;
			max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
			break;
		case 0x03:
			flags = KMC_SLAB;
			max_size = SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
			break;
		}

		/* The following flags are set with a 1/10 chance */
		flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
		flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

		/* PAGE_SIZE - max_size */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		size = MAX(rnd % (max_size + 1), PAGE_SIZE);

		/* 2^N where (3 <= N <= PAGE_SHIFT) */
		get_random_bytes((void *)&rnd, sizeof (uint32_t));
		align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

		rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
		if (rc)
			return (rc);
	}

	return (rc);
}

/*
 * Validate object alignment behavior for caches
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
	int i, rc = 0;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		uint32_t size;

		get_random_bytes((void *)&size, sizeof (uint32_t));
		size = MAX(size % (max_size + 1), 32);

		rc = splat_kmem_cache_test(file, arg, name, size, i, 0);
		if (rc)
			return rc;

		rc = splat_kmem_cache_test(file, arg, name, size, i,
					   KMC_OFFSLAB);
		if (rc)
			return rc;
	}

	return rc;
}

/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	/* Force a reclaim every 1/10 of a second for 60 seconds. */
	for (i = 0; i < 600; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_count,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}

/*
 * Test cache aging.  We allocate a large number of objects, thus creating
 * a large number of slabs, and then free them all.  However, since there
 * should be little memory pressure at the moment those slabs have not been
 * freed.  What we want to see is the slab size decrease gradually as it
 * becomes clear they will not be needed.  This should be achievable in
 * less than a minute.  If it takes longer than this something has gone
 * wrong.
 */
static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	unsigned int spl_kmem_cache_expire_old;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	/* Enable cache aging just for this test if it is disabled */
	spl_kmem_cache_expire_old = spl_kmem_cache_expire;
	spl_kmem_cache_expire = KMC_EXPIRE_AGE;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	splat_kmem_cache_test_kcd_free(kcp, kct);

	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_count == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_count == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_count, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	spl_kmem_cache_expire = spl_kmem_cache_expire_old;

	return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no single test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, rc = 0;

	for (size = 32; size <= 1024*1024; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
			     "time (sec)\tslabs \tobjs \thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
			     " \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed 1/2 of physical memory. */
			if (size * alloc * SPLAT_KMEM_THREADS > physmem / 2)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
			    SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
			if (rc)
				break;
		}
	}

	return rc;
}

#if 0
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 8 * 1024;
	alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
	    SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

	return rc;
}
#endif

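/*
 * Page-sized filler object used by the direct reclaim test below to
 * apply memory pressure one page at a time.
 */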
typedef struct dummy_page {
	struct list_head dp_list;
	char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
} dummy_page_t;

/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_thread_t *kct;
	dummy_page_t *dp;
	struct list_head list;
	struct timespec start, stop, delta = { 0, 0 };
	int size, count, slabs, fails = 0;
	int i, rc = 0, max_time = 10;

	size = 128 * 1024;
	count = ((physmem * PAGE_SIZE) / 4 / size);

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
					      size, 0, 0);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", "kcp");
		rc = -ENOMEM;
		goto out;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_kcp;
	}

	kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
	if (!kct) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", "kct");
		rc = -ENOMEM;
		goto out_cache;
	}

	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
	if (rc) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
			     "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
		goto out_kct;
	}

	i = 0;
	slabs = kcp->kcp_cache->skc_slab_total;
	INIT_LIST_HEAD(&list);
	getnstimeofday(&start);

	/* Apply memory pressure */
	while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

		if ((i % 10000) == 0)
			splat_kmem_cache_test_debug(
			    file, SPLAT_KMEM_TEST13_NAME, kcp);

		getnstimeofday(&stop);
		delta = timespec_sub(stop, start);
		if (delta.tv_sec >= max_time) {
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed to reclaim 3/4 of cache in %ds, "
				     "%u/%u slabs remain\n", max_time,
				     (unsigned)kcp->kcp_cache->skc_slab_total,
				     slabs);
			rc = -ETIME;
			break;
		}

		dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
		if (!dp) {
			fails++;
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed (%d) to allocate page with %u "
				     "slabs still in the cache\n", fails,
				     (unsigned)kcp->kcp_cache->skc_slab_total);
			continue;
		}

		list_add(&dp->dp_list, &list);
		i++;
	}

	if (rc == 0)
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Successfully created %u slabs and reclaimed "
			     "3/4 of them with %d alloc failures in %d.%03ds\n",
			     slabs, fails,
			     (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

	/* Release memory pressure pages */
	while (!list_empty(&list)) {
		dp = list_entry(list.next, dummy_page_t, dp_list);
		list_del_init(&dp->dp_list);
		free_page((unsigned long)dp);
	}

	/* Release remaining kmem cache objects */
	splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
	splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
out:
	return rc;
}

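/*
 * Register the kmem subsystem and each of the tests above with the SPLAT
 * framework.  Once the module is loaded the tests are normally driven
 * from user space with the splat(1) utility, e.g. `splat -t kmem:all` to
 * run every kmem test (exact invocation depends on the packaged splat
 * tool).
 */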
splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#if 0
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
			SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

	return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
#if 0
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}

int
splat_kmem_id(void)
{
	return SPLAT_SUBSYSTEM_KMEM;
}