/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_SUBSYSTEM_KMEM            0x0100
#define SPLAT_KMEM_NAME                 "kmem"
#define SPLAT_KMEM_DESC                 "Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID             0x0101
#define SPLAT_KMEM_TEST1_NAME           "kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC           "Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID             0x0102
#define SPLAT_KMEM_TEST2_NAME           "kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC           "Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID             0x0103
#define SPLAT_KMEM_TEST3_NAME           "vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC           "Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID             0x0104
#define SPLAT_KMEM_TEST4_NAME           "vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC           "Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID             0x0105
#define SPLAT_KMEM_TEST5_NAME           "slab_small"
#define SPLAT_KMEM_TEST5_DESC           "Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID             0x0106
#define SPLAT_KMEM_TEST6_NAME           "slab_large"
#define SPLAT_KMEM_TEST6_DESC           "Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID             0x0107
#define SPLAT_KMEM_TEST7_NAME           "slab_align"
#define SPLAT_KMEM_TEST7_DESC           "Slab alignment test"

#define SPLAT_KMEM_TEST8_ID             0x0108
#define SPLAT_KMEM_TEST8_NAME           "slab_reap"
#define SPLAT_KMEM_TEST8_DESC           "Slab reaping test"

#define SPLAT_KMEM_TEST9_ID             0x0109
#define SPLAT_KMEM_TEST9_NAME           "slab_age"
#define SPLAT_KMEM_TEST9_DESC           "Slab aging test"

#define SPLAT_KMEM_TEST10_ID            0x010a
#define SPLAT_KMEM_TEST10_NAME          "slab_lock"
#define SPLAT_KMEM_TEST10_DESC          "Slab locking test"

#define SPLAT_KMEM_TEST11_ID            0x010b
#define SPLAT_KMEM_TEST11_NAME          "slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC          "Slab memory overcommit test"

#define SPLAT_KMEM_ALLOC_COUNT          10
#define SPLAT_VMEM_ALLOC_COUNT          10

static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed so we never dereference NULL */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled, skipping any
                 * allocations which failed so we never dereference NULL */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC           0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME           "kmem_test"
#define SPLAT_KMEM_OBJ_COUNT            1024
#define SPLAT_KMEM_OBJ_RECLAIM          20 /* percent */
#define SPLAT_KMEM_THREADS              32

#define KCP_FLAG_READY                  0x01

typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

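/*
 * Note: the trailing zero-length arrays above (kcd_buf[0], kct_kcd[0],
 * kcp_kcd[0]) are the pre-C99 flexible-array idiom; each structure is
 * over-allocated so the array occupies the tail of the same buffer.  As
 * an illustrative sketch only (this helper is hypothetical and not used
 * by the tests below), the size needed to track `count` objects is:
 */
static inline size_t
splat_kmem_sketch_kcp_size(int count)
{
        /* fixed header plus one object pointer per tracked allocation */
        return sizeof(kmem_cache_priv_t) +
               count * sizeof(kmem_cache_data_t *);
}
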
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if ((--count) == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                spin_unlock(&kcp->kcp_lock);
                if (!kct)
                        continue;

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if ((--count) == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
        }

        return;
}

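/*
 * For reference, a minimal sketch of how the reclaim hook above gets
 * wired up: it is passed as the reclaim argument to kmem_cache_create()
 * and may also be driven directly with kmem_cache_reap_now(), exactly as
 * the threaded and reaping tests below do.  This helper is hypothetical
 * and is not part of the registered tests.
 */
static inline kmem_cache_t *
splat_kmem_sketch_reclaim_cache(kmem_cache_priv_t *kcp)
{
        kmem_cache_t *cache;

        /* Associate the private data and reclaim callback with the cache */
        cache = kmem_cache_create("kmem_sketch", kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (cache) {
                kcp->kcp_cache = cache;
                /* Ask the cache to free whatever objects it can right now */
                kmem_cache_reap_now(cache);
        }

        return cache;
}
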
static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrently with the reclaim function.
         * If we are not in a low memory situation we have one lock per
         * thread, so the locks are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

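/*
 * The rendezvous used above, reduced to its essentials (illustrative
 * only; this helper is hypothetical): the control thread publishes the
 * ready flag under the same lock the waiters test it with, then wakes
 * every worker blocked in wait_event() so the allocation phase begins
 * with maximum concurrency.
 */
static inline void
splat_kmem_sketch_release_threads(kmem_cache_priv_t *kcp)
{
        /* Publish the flag under kcp_lock so waiters observe it safely */
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);

        /* Wake all threads parked on the thread wait queue */
        wake_up_all(&kcp->kcp_thr_waitq);
}
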
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache, which forces the destructors to run
         * so we can verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, KMC_VMEM);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

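/*
 * Timing in the test above uses the kernel's coarse clock.  A minimal
 * sketch of the same measurement pattern (hypothetical helper, not a
 * registered test): capture a timespec before and after the work and
 * report the elapsed seconds.
 */
static inline long
splat_kmem_sketch_elapsed_sec(void (*fn)(void *), void *arg)
{
        struct timespec start, stop, delta;

        start = current_kernel_time();
        fn(arg);                        /* the work being timed */
        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        return delta.tv_sec;
}
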
/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate object alignment behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc;

        for (i = 8; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return 0;
}

static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few
         * reasons this may not immediately result in more free memory
         * even if objects are freed.  First off, due to fragmentation we
         * may not be able to reclaim any slabs.  Secondly, even if we do
         * fully clear some slabs we will not want to immediately reclaim
         * all of them because we may contend with cache allocs and
         * thrash.  What we want to see is the slab size decrease more
         * gradually as it becomes clear they will not be needed.  This
         * should be achievable in less than a minute; if it takes longer
         * than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Clean up our mess (for the failure case of the timeout expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

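/*
 * The reaping test above and the aging test below both poll for up to a
 * minute waiting for the slab to shrink to empty.  A sketch of that wait
 * loop as a generic helper (hypothetical, assuming the caller owns the
 * cache):
 */
static inline int
splat_kmem_sketch_wait_empty(kmem_cache_t *cache, int timeout_sec)
{
        int i;

        for (i = 0; i < timeout_sec; i++) {
                if (cache->skc_obj_total == 0)
                        return 0;       /* fully reclaimed */

                /* Sleep for one second, then re-check the object count */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        return -ETIME;                  /* still populated after timeout */
}
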
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However, since
         * there should be little memory pressure at the moment, those
         * slabs have not been freed.  What we want to see is the slab
         * size decrease gradually as it becomes clear they will not be
         * needed.  This should be achievable in less than a minute; if
         * it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no single test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, free_mem;
        int rc = 0;

        free_mem = nr_free_pages() * PAGE_SIZE;
        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed free memory */
                        if (size * alloc * SPLAT_KMEM_THREADS > free_mem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                                SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out-of-memory events.  This is very tough on
 * the system, so it is possible the test app may get OOM-killed.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc;
        int rc;

        size = 1024*1024;
        alloc = ((4 * num_physpages * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
                                          SPLAT_KMEM_TEST11_NAME, size,
                                          alloc, 60);

        return rc;
}

splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void)
{
        return SPLAT_SUBSYSTEM_KMEM;
}