/*****************************************************************************\
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://github.com/behlendorf/spl/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 * Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/
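
/*
 * These kernel-side tests are driven from user space by the splat(1)
 * utility shipped with the SPL.  As a rough sketch (the exact option
 * syntax may vary between SPL releases):
 *
 *   # modprobe splat
 *   # splat -t kmem:all -v
 */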
26
27 #include "splat-internal.h"
28
29 #define SPLAT_KMEM_NAME "kmem"
30 #define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"
31
32 #define SPLAT_KMEM_TEST1_ID 0x0101
33 #define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
34 #define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
35
36 #define SPLAT_KMEM_TEST2_ID 0x0102
37 #define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
38 #define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
39
40 #define SPLAT_KMEM_TEST3_ID 0x0103
41 #define SPLAT_KMEM_TEST3_NAME "vmem_alloc"
42 #define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)"
43
44 #define SPLAT_KMEM_TEST4_ID 0x0104
45 #define SPLAT_KMEM_TEST4_NAME "vmem_zalloc"
46 #define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)"
47
48 #define SPLAT_KMEM_TEST5_ID 0x0105
49 #define SPLAT_KMEM_TEST5_NAME "slab_small"
50 #define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)"
51
52 #define SPLAT_KMEM_TEST6_ID 0x0106
53 #define SPLAT_KMEM_TEST6_NAME "slab_large"
54 #define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)"
55
56 #define SPLAT_KMEM_TEST7_ID 0x0107
57 #define SPLAT_KMEM_TEST7_NAME "slab_align"
58 #define SPLAT_KMEM_TEST7_DESC "Slab alignment test"
59
60 #define SPLAT_KMEM_TEST8_ID 0x0108
61 #define SPLAT_KMEM_TEST8_NAME "slab_reap"
62 #define SPLAT_KMEM_TEST8_DESC "Slab reaping test"
63
64 #define SPLAT_KMEM_TEST9_ID 0x0109
65 #define SPLAT_KMEM_TEST9_NAME "slab_age"
66 #define SPLAT_KMEM_TEST9_DESC "Slab aging test"
67
68 #define SPLAT_KMEM_TEST10_ID 0x010a
69 #define SPLAT_KMEM_TEST10_NAME "slab_lock"
70 #define SPLAT_KMEM_TEST10_DESC "Slab locking test"
71
72 #ifdef _LP64
73 #define SPLAT_KMEM_TEST11_ID 0x010b
74 #define SPLAT_KMEM_TEST11_NAME "slab_overcommit"
75 #define SPLAT_KMEM_TEST11_DESC "Slab memory overcommit test"
76 #endif /* _LP64 */
77
78 #define SPLAT_KMEM_TEST12_ID 0x010c
79 #define SPLAT_KMEM_TEST12_NAME "vmem_size"
80 #define SPLAT_KMEM_TEST12_DESC "Memory zone test"
81
82 #define SPLAT_KMEM_TEST13_ID 0x010d
83 #define SPLAT_KMEM_TEST13_NAME "slab_reclaim"
84 #define SPLAT_KMEM_TEST13_DESC "Slab direct memory reclaim test"
85
86 #define SPLAT_KMEM_ALLOC_COUNT 10
87 #define SPLAT_VMEM_ALLOC_COUNT 10
88
89
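/*
 * Validate kmem_alloc() by allocating batches of buffers in power-of-two
 * sizes from PAGE_SIZE up to 32*PAGE_SIZE, freeing each batch and failing
 * if any allocation in it could not be satisfied.
 */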
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

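/*
 * Validate kmem_zalloc() in the same way as kmem_alloc() above, and
 * additionally verify that every returned buffer is fully zeroed.
 */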
static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled; skip any
                 * allocation which failed rather than dereference NULL. */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

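/*
 * Validate vmem_alloc() with batches of larger allocations, from
 * PAGE_SIZE up to 1024*PAGE_SIZE.
 */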
static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

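/*
 * Validate vmem_zalloc() as above, additionally verifying that every
 * returned buffer is fully zeroed.
 */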
static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled; skip any
                 * allocation which failed rather than dereference NULL. */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT 1024
#define SPLAT_KMEM_OBJ_RECLAIM 1000 /* objects */
#define SPLAT_KMEM_THREADS 32

#define KCP_FLAG_READY 0x01

typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        struct list_head kcd_node;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        spinlock_t kct_lock;
        int kct_id;
        struct list_head kct_list;
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
} kmem_cache_priv_t;

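/*
 * Allocate and initialize the private data shared by a cache test:
 * locks, wait queues, object size/alignment, and result counters.
 */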
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc)
{
        kmem_cache_priv_t *kcp;

        kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        kmem_free(kcp, sizeof(kmem_cache_priv_t));
}

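/*
 * Allocate a per-thread context, initialize its object list, and
 * publish it in the kcp_kct[] slot for the given id under kcp_lock.
 */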
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        ASSERT(kcp->kcp_kct[id] == NULL);

        kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_id = id;
        INIT_LIST_HEAD(&kct->kct_list);

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
                               kmem_cache_thread_t *kct)
{
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[kct->kct_id] = NULL;
        spin_unlock(&kcp->kcp_lock);

        kmem_free(kct, sizeof(kmem_cache_thread_t));
}

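/*
 * Return every object queued on the thread's list to the cache.  The
 * list lock is dropped around each kmem_cache_free() so the free path
 * never runs with the lock held.
 */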
static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
                               kmem_cache_thread_t *kct)
{
        kmem_cache_data_t *kcd;

        spin_lock(&kct->kct_lock);
        while (!list_empty(&kct->kct_list)) {
                kcd = list_entry(kct->kct_list.next,
                                 kmem_cache_data_t, kcd_node);
                list_del(&kcd->kcd_node);
                spin_unlock(&kct->kct_lock);

                kmem_cache_free(kcp->kcp_cache, kcd);

                spin_lock(&kct->kct_lock);
        }
        spin_unlock(&kct->kct_lock);
}

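/*
 * Allocate 'count' objects from the cache and queue them on the
 * thread's list; on failure release everything already allocated.
 */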
static int
splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
                                kmem_cache_thread_t *kct, int count)
{
        kmem_cache_data_t *kcd;
        int i;

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                if (kcd == NULL) {
                        splat_kmem_cache_test_kcd_free(kcp, kct);
                        return -ENOMEM;
                }

                spin_lock(&kct->kct_lock);
                list_add_tail(&kcd->kcd_node, &kct->kct_list);
                spin_unlock(&kct->kct_lock);
        }

        return 0;
}

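/* Dump cache occupancy: slab, object, and per-CPU magazine counts. */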
static void
splat_kmem_cache_test_debug(struct file *file, char *name,
                            kmem_cache_priv_t *kcp)
{
        int j;

        splat_vprint(file, name,
                     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                     kcp->kcp_cache->skc_name, kcp->kcp_count,
                     (unsigned)kcp->kcp_cache->skc_slab_alloc,
                     (unsigned)kcp->kcp_cache->skc_slab_total,
                     (unsigned)kcp->kcp_cache->skc_obj_alloc,
                     (unsigned)kcp->kcp_cache->skc_obj_total);

        for_each_online_cpu(j)
                splat_print(file, "%u/%u ",
                            kcp->kcp_cache->skc_mag[j]->skm_avail,
                            kcp->kcp_cache->skc_mag[j]->skm_size);

        splat_print(file, "%s\n", "");
}

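/*
 * Cache constructor/destructor callbacks: stamp (or clear) the magic
 * and flag, fill the payload with a known pattern, and track the live
 * object count in the private data.
 */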
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                INIT_LIST_HEAD(&kcd->kcd_node);
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        kmem_cache_data_t *kcd;
        LIST_HEAD(reclaim);
        int i, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* For each kct thread reclaim some objects */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                kct = kcp->kcp_kct[i];
                if (!kct)
                        continue;

                spin_unlock(&kcp->kcp_lock);
                spin_lock(&kct->kct_lock);

                count = SPLAT_KMEM_OBJ_RECLAIM;
                while (count > 0 && !list_empty(&kct->kct_list)) {
                        kcd = list_entry(kct->kct_list.next,
                                         kmem_cache_data_t, kcd_node);
                        list_del(&kcd->kcd_node);
                        list_add(&kcd->kcd_node, &reclaim);
                        count--;
                }

                spin_unlock(&kct->kct_lock);
                spin_lock(&kcp->kcp_lock);
        }
        spin_unlock(&kcp->kcp_lock);

        /* Freed outside the spin lock */
        while (!list_empty(&reclaim)) {
                kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
                list_del(&kcd->kcd_node);
                kmem_cache_free(kcp->kcp_cache, kcd);
        }

        return;
}

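/* Lock-protected predicates used as wait_event() conditions below. */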
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

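/*
 * Worker thread body: claim a thread id, register a kct, wait for the
 * ready flag, then allocate and free kcp_alloc objects.  The last
 * thread to start and the last to finish wake the control thread.
 */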
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(kcp, id);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /* Create and destroy objects */
        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
        splat_kmem_cache_test_kcd_free(kcp, kct);
out:
        if (kct)
                splat_kmem_cache_test_kct_free(kcp, kct);

        spin_lock(&kcp->kcp_lock);
        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

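/*
 * Single-object cache test: create a cache with the given size,
 * alignment, and flags, allocate one object, and verify the
 * constructor ran and received the private data.  Destroying the
 * cache then checks that a destructor ran for every object.
 */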
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd = NULL;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (!kcd->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcd->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        kmem_cache_free(kcp->kcp_cache, kcd);

        /* Destroy the entire cache, which forces the destructors to run,
         * so we can verify one was called for every object. */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcd)
                kmem_cache_free(kcp->kcp_cache, kcd);

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

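/*
 * Multi-threaded cache test: spawn SPLAT_KMEM_THREADS workers against
 * a shared cache, release them simultaneously, and fail if the run
 * exceeds max_time seconds or any worker reports an error.
 */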
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 256*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 64*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 1024*1024, 0, KMC_VMEM);
}

/*
 * Validate object alignment behavior for caches
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc = 0;

        for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First, due to fragmentation we may not
 * be able to reclaim any slabs.  Second, even if we do fully clear some
 * slabs we will not want to immediately reclaim all of them because we may
 * contend with cache allocations and thrash.  What we want to see is the
 * slab size decrease more gradually as it becomes clear they will not be
 * needed.  This should be achievable in less than a minute.  If it takes
 * longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
                             "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        return rc;
}

/*
 * Test cache aging: we allocate a large number of objects, thus creating
 * a large number of slabs, and then free them all.  However, since there
 * should be little memory pressure at the moment, those slabs have not
 * been freed.  What we want to see is the slab size decrease gradually
 * as it becomes clear they will not be needed.  This should be achievable
 * in less than a minute.  If it takes longer than this something has
 * gone wrong.
 */
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to "
                             "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        splat_kmem_cache_test_kcd_free(kcp, kct);

        for (i = 0; i < 60; i++) {
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, rc = 0;

        for (size = 32; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing. */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

#ifdef _LP64
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 8 * 1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}
#endif /* _LP64 */

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocating a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
        size_t alloc1, free1, total1;
        size_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc1,
                     (unsigned long)free1, (unsigned long)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc2,
                     (unsigned long)free2, (unsigned long)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
                             (unsigned long)alloc2, (unsigned long)alloc1, size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
                             (unsigned long)free2, (unsigned long)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC | VMEM_FREE not constant: "
                             "%lu != %lu\n", (unsigned long)total2,
                             (unsigned long)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
                     (long)abs(alloc1 + (long)size - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
                     (long)abs((free1 - (long)size) - free2), size);

        return 0;
}

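/* Page-sized filler object used to apply memory pressure in test 13. */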
typedef struct dummy_page {
        struct list_head dp_list;
        char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
} dummy_page_t;

/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects, thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        dummy_page_t *dp;
        struct list_head list;
        struct timespec start, delta = { 0, 0 };
        int size, count, slabs, fails = 0;
        int i, rc = 0, max_time = 10;

        size = 128 * 1024;
        count = ((physmem * PAGE_SIZE) / 4 / size);

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
                                              size, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
                             "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        i = 0;
        slabs = kcp->kcp_cache->skc_slab_total;
        INIT_LIST_HEAD(&list);
        start = current_kernel_time();

        /* Apply memory pressure */
        while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

                if ((i % 10000) == 0)
                        splat_kmem_cache_test_debug(
                                file, SPLAT_KMEM_TEST13_NAME, kcp);

                delta = timespec_sub(current_kernel_time(), start);
                if (delta.tv_sec >= max_time) {
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                                     "Failed to reclaim 3/4 of cache in %ds, "
                                     "%u/%u slabs remain\n", max_time,
                                     (unsigned)kcp->kcp_cache->skc_slab_total,
                                     slabs);
                        rc = -ETIME;
                        break;
                }

                dp = (dummy_page_t *)__get_free_page(GFP_KERNEL | __GFP_NORETRY);
                if (!dp) {
                        fails++;
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                                     "Failed (%d) to allocate page with %u "
                                     "slabs still in the cache\n", fails,
                                     (unsigned)kcp->kcp_cache->skc_slab_total);
                        continue;
                }

                list_add(&dp->dp_list, &list);
                i++;
        }

        if (rc == 0)
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                             "Successfully created %u slabs and with %d alloc "
                             "failures reclaimed 3/4 of them in %d.%03ds\n",
                             slabs, fails,
                             (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

        /* Release memory pressure pages */
        while (!list_empty(&list)) {
                dp = list_entry(list.next, dummy_page_t, dp_list);
                list_del_init(&dp->dp_list);
                free_page((unsigned long)dp);
        }

        /* Release remaining kmem cache objects */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        return rc;
}

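/*
 * Register the kmem subsystem and its tests with the SPLAT framework;
 * splat_kmem_fini() tears them down in reverse order.
 */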
splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#ifdef _LP64
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif /* _LP64 */
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
                        SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#ifdef _LP64
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif /* _LP64 */
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}