/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include <sys/kmem.h>
#include <sys/thread.h>
#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#if 0
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif

#define SPLAT_KMEM_TEST12_ID		0x010c
#define SPLAT_KMEM_TEST12_NAME		"vmem_size"
#define SPLAT_KMEM_TEST12_DESC		"Memory zone test"

#define SPLAT_KMEM_TEST13_ID		0x010d
#define SPLAT_KMEM_TEST13_NAME		"slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC		"Slab direct memory reclaim test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

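/*
 * Note: these tests are normally driven from user space.  A typical
 * invocation sketch, assuming the splat(1) utility distributed with the
 * SPL packages (exact flags and test names may differ on your system):
 *
 *   # modprobe splat
 *   # splat -t kmem:kmem_alloc     (run a single test by name)
 *   # splat -t kmem:all            (run every test in this subsystem)
 */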

static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

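        /*
         * Sweep power-of-two sizes from PAGE_SIZE up to 32 pages.  The
         * KM_NODEBUG flag is passed along with KM_SLEEP, presumably to
         * quiet the SPL's debug warning for unusually large kmem
         * allocations, which the multi-page sizes here would otherwise
         * trip.
         */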
        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
                        if (ptr[i])
                                count++;
                }

                /* Ensure buffer has been zero filled */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure buffer has been zero filled */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		32 /* objects */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        struct list_head kcd_node;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        spinlock_t kct_lock;
        int kct_id;
        struct list_head kct_list;
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
} kmem_cache_priv_t;

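/*
 * Object hierarchy used by the cache tests: a single kmem_cache_priv_t
 * (kcp) describes the cache under test and owns up to SPLAT_KMEM_THREADS
 * kmem_cache_thread_t's (kct), one per worker thread.  Each kct in turn
 * anchors a list of kmem_cache_data_t's (kcd), the objects actually
 * allocated from the cache.  kcp_lock protects the kct[] table and the
 * shared counters; each kct_lock protects that thread's object list.
 */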
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc)
{
        kmem_cache_priv_t *kcp;

        kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        kmem_free(kcp, sizeof(kmem_cache_priv_t));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        ASSERT(kcp->kcp_kct[id] == NULL);

        kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_id = id;
        INIT_LIST_HEAD(&kct->kct_list);

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
                               kmem_cache_thread_t *kct)
{
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[kct->kct_id] = NULL;
        spin_unlock(&kcp->kcp_lock);

        kmem_free(kct, sizeof(kmem_cache_thread_t));
}

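/*
 * Note the lock juggling below: kct_lock is dropped around each
 * kmem_cache_free() call, presumably so the spinlock is never held
 * across the comparatively expensive cache free path and so the
 * reclaim callback, which also takes kct_lock, is never blocked on it.
 */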
static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
                               kmem_cache_thread_t *kct)
{
        kmem_cache_data_t *kcd;

        spin_lock(&kct->kct_lock);
        while (!list_empty(&kct->kct_list)) {
                kcd = list_entry(kct->kct_list.next,
                                 kmem_cache_data_t, kcd_node);
                list_del(&kcd->kcd_node);
                spin_unlock(&kct->kct_lock);

                kmem_cache_free(kcp->kcp_cache, kcd);

                spin_lock(&kct->kct_lock);
        }
        spin_unlock(&kct->kct_lock);
}

static int
splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
                                kmem_cache_thread_t *kct, int count)
{
        kmem_cache_data_t *kcd;
        int i;

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                if (kcd == NULL) {
                        splat_kmem_cache_test_kcd_free(kcp, kct);
                        return -ENOMEM;
                }

                spin_lock(&kct->kct_lock);
                list_add_tail(&kcd->kcd_node, &kct->kct_list);
                spin_unlock(&kct->kct_lock);
        }

        return 0;
}

static void
splat_kmem_cache_test_debug(struct file *file, char *name,
                            kmem_cache_priv_t *kcp)
{
        int j;

        splat_vprint(file, name, "%s cache objects %d",
                     kcp->kcp_cache->skc_name, kcp->kcp_count);

        if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
                splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
                        splat_vprint(file, name, "%s", "mags");

                        for_each_online_cpu(j)
                                splat_print(file, "%u/%u ",
                                            kcp->kcp_cache->skc_mag[j]->skm_avail,
                                            kcp->kcp_cache->skc_mag[j]->skm_size);
                }
        }

        splat_print(file, "%s\n", "");
}

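/*
 * The constructor/destructor pair below fills kcd_buf with the
 * distinguishable poison patterns 0xaa (constructed) and 0xbb
 * (destroyed), and balances kcp_count so the tests can verify that a
 * destructor ran for every constructed object.
 */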
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                INIT_LIST_HEAD(&kcd->kcd_node);
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        kmem_cache_data_t *kcd;
        LIST_HEAD(reclaim);
        int i, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* For each kct thread reclaim some objects */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                kct = kcp->kcp_kct[i];
                if (!kct)
                        continue;

                spin_unlock(&kcp->kcp_lock);
                spin_lock(&kct->kct_lock);

                count = SPLAT_KMEM_OBJ_RECLAIM;
                while (count > 0 && !list_empty(&kct->kct_list)) {
                        kcd = list_entry(kct->kct_list.next,
                                         kmem_cache_data_t, kcd_node);
                        list_del(&kcd->kcd_node);
                        list_add(&kcd->kcd_node, &reclaim);
                        count--;
                }

                spin_unlock(&kct->kct_lock);
                spin_lock(&kcp->kcp_lock);
        }
        spin_unlock(&kcp->kcp_lock);

        /* Freed outside the spin lock */
        while (!list_empty(&reclaim)) {
                kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
                list_del(&kcd->kcd_node);
                kmem_cache_free(kcp->kcp_cache, kcd);
        }

        return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

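/*
 * Worker thread body: claim the next thread id under kcp_lock, register
 * a kct, then rendezvous with the control thread.  The last thread to
 * arrive wakes the controller on kcp_ctl_waitq; every worker then sleeps
 * on kcp_thr_waitq until KCP_FLAG_READY is set, so that all threads hit
 * the cache at the same time.
 */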
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(kcp, id);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /* Create and destroy objects */
        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
        splat_kmem_cache_test_kcd_free(kcp, kct);
out:
        if (kct)
                splat_kmem_cache_test_kct_free(kcp, kct);

        spin_lock(&kcp->kcp_lock);
        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

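/*
 * Single-object sanity check for a cache created with the given size,
 * alignment, and flags: allocate one object, confirm the constructor ran
 * (kcd_flag) and received the right private data (kcd_magic), then
 * destroy the cache and confirm every constructed object was destructed
 * (kcp_count drops back to zero).
 */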
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd = NULL;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (!kcd->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcd->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        kmem_cache_free(kcp->kcp_cache, kcd);

        /* Destroy the entire cache, which will force the destructors to
         * run so we can verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcd)
                kmem_cache_free(kcp->kcp_cache, kcd);

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

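/*
 * Concurrency/performance harness: spawn SPLAT_KMEM_THREADS threads
 * against one shared cache, release them simultaneously, and time the
 * run.  The report columns are tot/max/calc for both slabs and objects,
 * where "calc" is the expected count derived from alloc * threads (and,
 * for slabs, divided by SPL_KMEM_CACHE_OBJ_PER_SLAB).
 */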
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        getnstimeofday(&start);

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        getnstimeofday(&stop);
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        /* On slab (default + kmem + vmem) */
        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
        if (rc)
                return rc;

        /* Off slab (default + kmem + vmem) */
        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_OFFSLAB);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0,
                                   KMC_KMEM | KMC_OFFSLAB);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0,
                                   KMC_VMEM | KMC_OFFSLAB);

        return rc;
}

/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        /* On slab (default + kmem + vmem) */
        rc = splat_kmem_cache_test(file, arg, name, 256*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 64*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 1024*1024, 0, KMC_VMEM);
        if (rc)
                return rc;

        /* Off slab (default + kmem + vmem) */
        rc = splat_kmem_cache_test(file, arg, name, 256*1024, 0, KMC_OFFSLAB);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 64*1024, 0,
                                   KMC_KMEM | KMC_OFFSLAB);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 1024*1024, 0,
                                   KMC_VMEM | KMC_OFFSLAB);

        return rc;
}

/*
 * Validate object alignment cache behavior for caches
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc;

        for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;

                rc = splat_kmem_cache_test(file, arg, name, 157, i,
                                           KMC_OFFSLAB);
                if (rc)
                        return rc;
        }

        return rc;
}

/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do fully
 * clear some slabs we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        unsigned int spl_kmem_cache_expire_old;
        int i, rc = 0;

        /* Enable cache aging just for this test if it is disabled */
        spl_kmem_cache_expire_old = spl_kmem_cache_expire;
        spl_kmem_cache_expire = KMC_EXPIRE_AGE;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
                             "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        /* Force reclaim every 1/10 of a second for 60 seconds. */
        for (i = 0; i < 600; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

                if (kcp->kcp_count == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ / 10);
        }

        if (kcp->kcp_count == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_count,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        spl_kmem_cache_expire = spl_kmem_cache_expire_old;

        return rc;
}

/* Test cache aging: we have allocated a large number of objects, thus
 * creating a large number of slabs, and then freed them all.  However,
 * since there should be little memory pressure at the moment those
 * slabs have not been freed.  What we want to see is the slab size
 * decrease gradually as it becomes clear they will not be needed.
 * This should be achievable in less than a minute.  If it takes longer
 * than this something has gone wrong.
 */
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        unsigned int spl_kmem_cache_expire_old;
        int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        /* Enable cache aging just for this test if it is disabled */
        spl_kmem_cache_expire_old = spl_kmem_cache_expire;
        spl_kmem_cache_expire = KMC_EXPIRE_AGE;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
988 "Unable to create '%s'\n", "kct");
989 rc = -ENOMEM;
990 goto out_cache;
991 }
992
993 rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
994 if (rc) {
995 splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to "
996 "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
997 goto out_kct;
998 }
999
1000 splat_kmem_cache_test_kcd_free(kcp, kct);
1001
1002 for (i = 0; i < 60; i++) {
1003 splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);
1004
1005 if (kcp->kcp_count == 0)
1006 break;
1007
1008 set_current_state(TASK_INTERRUPTIBLE);
1009 schedule_timeout(HZ);
1010 }
1011
1012 if (kcp->kcp_count == 0) {
1013 splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
1014 "Successfully created %d objects "
1015 "in cache %s and reclaimed them\n",
1016 count, SPLAT_KMEM_CACHE_NAME);
1017 } else {
1018 splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
1019 "Failed to reclaim %u/%d objects from cache %s\n",
1020 (unsigned)kcp->kcp_count, count,
1021 SPLAT_KMEM_CACHE_NAME);
1022 rc = -ENOMEM;
1023 }
1024
1025 out_kct:
1026 splat_kmem_cache_test_kct_free(kcp, kct);
1027 out_cache:
1028 kmem_cache_destroy(kcp->kcp_cache);
1029 out_kcp:
1030 splat_kmem_cache_test_kcp_free(kcp);
1031 out:
1032 spl_kmem_cache_expire = spl_kmem_cache_expire_old;
1033
1034 return rc;
1035 }

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete, it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, rc = 0;

        for (size = 32; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                            SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

#if 0
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system, so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 8 * 1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
            SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}
#endif

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocating a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify that total space always remains unchanged.
 */
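/*
 * For example, with the 8 MiB test allocation below the 1% tolerance
 * works out to size / 100 = 83886 bytes, so VMEM_ALLOC must grow and
 * VMEM_FREE must shrink by 8388608 +/- 83886 bytes for the test to pass.
 */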
static int
splat_kmem_test12(struct file *file, void *arg)
{
        size_t alloc1, free1, total1;
        size_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc1,
                     (unsigned long)free1, (unsigned long)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc2,
                     (unsigned long)free2, (unsigned long)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
                             (unsigned long)alloc2, (unsigned long)alloc1, size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
                             (unsigned long)free2, (unsigned long)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC | VMEM_FREE not constant: "
                             "%lu != %lu\n", (unsigned long)total2,
                             (unsigned long)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
                     (long)abs(alloc1 + (long)size - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
                     (long)abs((free1 - (long)size) - free2), size);

        return 0;
}

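/*
 * A dummy_page_t is padded out to exactly PAGE_SIZE, so each
 * __get_free_page() allocation below consumes one full page.  This lets
 * the test apply measurable memory pressure one page at a time while
 * keeping the pages on a list for later release.
 */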
typedef struct dummy_page {
        struct list_head dp_list;
        char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
} dummy_page_t;

/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects, thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        dummy_page_t *dp;
        struct list_head list;
        struct timespec start, stop, delta = { 0, 0 };
        int size, count, slabs, fails = 0;
        int i, rc = 0, max_time = 10;

        size = 128 * 1024;
        count = ((physmem * PAGE_SIZE) / 4 / size);

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
                                              size, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
                             "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        i = 0;
        slabs = kcp->kcp_cache->skc_slab_total;
        INIT_LIST_HEAD(&list);
        getnstimeofday(&start);

        /* Apply memory pressure */
        while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

                if ((i % 10000) == 0)
                        splat_kmem_cache_test_debug(
                            file, SPLAT_KMEM_TEST13_NAME, kcp);

                getnstimeofday(&stop);
                delta = timespec_sub(stop, start);
                if (delta.tv_sec >= max_time) {
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                                     "Failed to reclaim 3/4 of cache in %ds, "
                                     "%u/%u slabs remain\n", max_time,
                                     (unsigned)kcp->kcp_cache->skc_slab_total,
                                     slabs);
                        rc = -ETIME;
                        break;
                }

                dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
                if (!dp) {
                        fails++;
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                                     "Failed (%d) to allocate page with %u "
                                     "slabs still in the cache\n", fails,
                                     (unsigned)kcp->kcp_cache->skc_slab_total);
                        continue;
                }

                list_add(&dp->dp_list, &list);
                i++;
        }

        if (rc == 0)
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Successfully created %u slabs and with %d alloc "
                             "failures reclaimed 3/4 of them in %d.%03ds\n",
                             slabs, fails,
                             (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

        /* Release memory pressure pages */
        while (!list_empty(&list)) {
                dp = list_entry(list.next, dummy_page_t, dp_list);
                list_del_init(&dp->dp_list);
                free_page((unsigned long)dp);
        }

        /* Release remaining kmem cache objects */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        return rc;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#if 0
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
                        SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#if 0
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void) {
        return SPLAT_SUBSYSTEM_KMEM;
}