/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#ifdef _LP64
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif /* _LP64 */

#define SPLAT_KMEM_TEST12_ID		0x010c
#define SPLAT_KMEM_TEST12_NAME		"vmem_size"
#define SPLAT_KMEM_TEST12_DESC		"Memory zone test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

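/*
 * Validate kmem_alloc().  A batch of SPLAT_KMEM_ALLOC_COUNT buffers is
 * allocated at doubling sizes from PAGE_SIZE up to 32 * PAGE_SIZE and
 * then freed.  Any failed allocation fails the test with -ENOMEM.
 */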
static int
splat_kmem_test1(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        /* We are intentionally going to push kmem_alloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

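/*
 * Validate kmem_zalloc().  Identical to the kmem_alloc test above, but
 * every returned buffer is additionally scanned byte-by-byte to verify
 * it was zero filled.
 */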
static int
splat_kmem_test2(struct file *file, void *arg)
{
        void *ptr[SPLAT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        /* We are intentionally going to push kmem_zalloc to its max
         * allocation size, so suppress the console warnings for now */
        kmem_set_warning(0);

        while ((!rc) && (size <= (PAGE_SIZE * 32))) {
                count = 0;

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled; skip any
                 * allocations which failed above */
                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_KMEM_ALLOC_COUNT);
                if (count != SPLAT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        kmem_set_warning(1);

        return rc;
}

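/*
 * Validate vmem_alloc().  The same doubling-size strategy as the kmem
 * tests above, but against virtual memory and extending up to
 * 1024 * PAGE_SIZE per buffer.
 */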
static int
splat_kmem_test3(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

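/*
 * Validate vmem_zalloc().  As above, plus a byte-by-byte check that
 * every returned buffer was zero filled.
 */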
static int
splat_kmem_test4(struct file *file, void *arg)
{
        void *ptr[SPLAT_VMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
                count = 0;

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        ptr[i] = vmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each buffer has been zero filled; skip any
                 * allocations which failed above */
                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                                                     "%d-byte allocation was "
                                                     "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                vmem_free(ptr[i], size);

                splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
                             "%d byte allocations, %d/%d successful\n",
                             size, count, SPLAT_VMEM_ALLOC_COUNT);
                if (count != SPLAT_VMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		20 /* percent */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

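/*
 * Test state is tracked with three structures: kmem_cache_data_t is
 * the object stored in the cache under test, kmem_cache_thread_t holds
 * the per-thread state for the concurrent tests, and kmem_cache_priv_t
 * is the shared private data passed to the cache callbacks.  The
 * trailing zero-length arrays let the object and thread lists be sized
 * at allocation time.
 */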
typedef struct kmem_cache_data {
        unsigned long kcd_magic;
        int kcd_flag;
        char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
        kmem_cache_t *kct_cache;
        spinlock_t kct_lock;
        int kct_id;
        int kct_kcd_count;
        kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        spinlock_t kcp_lock;
        wait_queue_head_t kcp_ctl_waitq;
        wait_queue_head_t kcp_thr_waitq;
        int kcp_flags;
        int kcp_kct_count;
        kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
        int kcp_size;
        int kcp_align;
        int kcp_count;
        int kcp_alloc;
        int kcp_rc;
        int kcp_kcd_count;
        kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

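/*
 * Allocate and initialize the shared private data.  The trailing
 * kcp_kcd[] array is sized for 'count' object pointers; the threaded
 * tests pass a count of zero because they track their objects in the
 * per-thread kct_kcd[] arrays instead.
 */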
static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
                                int size, int align, int alloc, int count)
{
        kmem_cache_priv_t *kcp;

        kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kcp)
                return NULL;

        kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
        kcp->kcp_file = file;
        kcp->kcp_cache = NULL;
        spin_lock_init(&kcp->kcp_lock);
        init_waitqueue_head(&kcp->kcp_ctl_waitq);
        init_waitqueue_head(&kcp->kcp_thr_waitq);
        kcp->kcp_flags = 0;
        kcp->kcp_kct_count = -1;
        kcp->kcp_size = size;
        kcp->kcp_align = align;
        kcp->kcp_count = 0;
        kcp->kcp_alloc = alloc;
        kcp->kcp_rc = 0;
        kcp->kcp_kcd_count = count;

        return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
        vmem_free(kcp, sizeof(kmem_cache_priv_t) +
                  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
        kmem_cache_thread_t *kct;

        ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
        kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
                          count * sizeof(kmem_cache_data_t *), KM_SLEEP);
        if (!kct)
                return NULL;

        spin_lock_init(&kct->kct_lock);
        kct->kct_cache = NULL;
        kct->kct_id = id;
        kct->kct_kcd_count = count;

        return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
        vmem_free(kct, sizeof(kmem_cache_thread_t) +
                  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

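/*
 * Cache constructor/destructor pair.  The constructor stamps each
 * object with the magic value from the private data and fills the
 * payload with 0xaa; the destructor clears the magic and fills with
 * 0xbb.  The kcp_count field tracks the ctor/dtor balance so the tests
 * can verify a destructor ran for every constructed object.
 */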
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = kcp->kcp_magic;
                kcd->kcd_flag = 1;
                memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count++;
        }

        return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

        if (kcd && kcp) {
                kcd->kcd_magic = 0;
                kcd->kcd_flag = 0;
                memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
                kcp->kcp_count--;
        }

        return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        kmem_cache_thread_t *kct;
        int i, j, count;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
        count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

        /* Objects directly attached to the kcp */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < kcp->kcp_kcd_count; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;

                        if ((--count) == 0)
                                break;
                }
        }
        spin_unlock(&kcp->kcp_lock);

        /* No threads containing objects to consider */
        if (kcp->kcp_kct_count == -1)
                return;

        /* Objects attached to a kct thread */
        for (i = 0; i < kcp->kcp_kct_count; i++) {
                spin_lock(&kcp->kcp_lock);
                kct = kcp->kcp_kct[i];
                spin_unlock(&kcp->kcp_lock);
                if (!kct)
                        continue;

                spin_lock(&kct->kct_lock);
                count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

                for (j = 0; j < kct->kct_kcd_count; j++) {
                        if (kct->kct_kcd[j]) {
                                kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
                                kct->kct_kcd[j] = NULL;

                                if ((--count) == 0)
                                        break;
                        }
                }
                spin_unlock(&kct->kct_lock);
        }

        return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_kct_count == threads);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
        int rc;

        spin_lock(&kcp->kcp_lock);
        rc = (kcp->kcp_flags & flags);
        spin_unlock(&kcp->kcp_lock);

        return rc;
}

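/*
 * Worker thread for the concurrent cache tests.  Each thread claims an
 * id, allocates its per-thread state, then waits on kcp_thr_waitq until
 * the control thread signals KCP_FLAG_READY.  Once released it
 * allocates and frees kcp_alloc objects from the shared cache, and the
 * last thread to finish wakes the control thread via kcp_ctl_waitq.
 */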
static void
splat_kmem_cache_test_thread(void *arg)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
        kmem_cache_thread_t *kct;
        int rc = 0, id, i;
        void *obj;

        ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

        /* Assign thread ids */
        spin_lock(&kcp->kcp_lock);
        if (kcp->kcp_kct_count == -1)
                kcp->kcp_kct_count = 0;

        id = kcp->kcp_kct_count;
        kcp->kcp_kct_count++;
        spin_unlock(&kcp->kcp_lock);

        kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
        if (!kct) {
                rc = -ENOMEM;
                goto out;
        }

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kct[id] = kct;
        spin_unlock(&kcp->kcp_lock);

        /* Wait for all threads to have started and report they are ready */
        if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
                wake_up(&kcp->kcp_ctl_waitq);

        wait_event(kcp->kcp_thr_waitq,
                   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

        /*
         * Updates to kct->kct_kcd[] are performed under a spin_lock so
         * they may safely run concurrent with the reclaim function.  If
         * we are not in a low memory situation we have one lock per
         * thread so they are not expected to be contended.
         */
        for (i = 0; i < kct->kct_kcd_count; i++) {
                obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kct->kct_lock);
                kct->kct_kcd[i] = obj;
                spin_unlock(&kct->kct_lock);
        }

        for (i = 0; i < kct->kct_kcd_count; i++) {
                spin_lock(&kct->kct_lock);
                if (kct->kct_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
                        kct->kct_kcd[i] = NULL;
                }
                spin_unlock(&kct->kct_lock);
        }
out:
        spin_lock(&kcp->kcp_lock);
        if (kct) {
                splat_kmem_cache_test_kct_free(kct);
                kcp->kcp_kct[id] = kct = NULL;
        }

        if (!kcp->kcp_rc)
                kcp->kcp_rc = rc;

        if ((--kcp->kcp_kct_count) == 0)
                wake_up(&kcp->kcp_ctl_waitq);

        spin_unlock(&kcp->kcp_lock);

        thread_exit();
}

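/*
 * Single object cache sanity test.  A cache is created with the given
 * size, alignment, and flags, one object is allocated, and the
 * constructor results are verified.  The cache is then destroyed and
 * kcp_count is checked to confirm a destructor ran for every
 * constructed object.
 */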
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
                      int size, int align, int flags)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int rc = 0, max;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_kcd[0] = NULL;
        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
                                  kcp->kcp_size, kcp->kcp_align,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, flags);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name,
                             "Unable to create '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_free;
        }

        kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
        if (!kcd) {
                splat_vprint(file, name,
                             "Unable to allocate from '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }
        spin_lock(&kcp->kcp_lock);
        kcp->kcp_kcd[0] = kcd;
        spin_unlock(&kcp->kcp_lock);

        if (!kcp->kcp_kcd[0]->kcd_flag) {
                splat_vprint(file, name,
                             "Failed to run constructor for '%s'\n",
                             SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
                splat_vprint(file, name,
                             "Failed to pass private data to constructor "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp->kcp_count;
        spin_lock(&kcp->kcp_lock);
        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
        kcp->kcp_kcd[0] = NULL;
        spin_unlock(&kcp->kcp_lock);

        /* Destroy the entire cache, which forces the destructors to
         * run; we can then verify one was called for every object */
        kmem_cache_destroy(kcp->kcp_cache);
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                             "Failed to run destructor on all slab objects "
                             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
                     "Successfully ran ctors/dtors for %d elements in '%s'\n",
                     max, SPLAT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcp->kcp_kcd[0]) {
                spin_lock(&kcp->kcp_lock);
                kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
                kcp->kcp_kcd[0] = NULL;
                spin_unlock(&kcp->kcp_lock);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

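/*
 * Concurrent cache test driver.  SPLAT_KMEM_THREADS worker threads are
 * spawned against a shared cache, released simultaneously for maximum
 * concurrency, and the total wall-clock time is compared against
 * max_time; exceeding it returns -ETIME.
 */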
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
                             int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
                       SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
                kmem_cache_create(cache_name, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        start = current_kernel_time();

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                                    splat_kmem_cache_test_thread,
                                    kcp, 0, &p0, TS_RUN, minclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /* Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency. */
        wait_event(kcp->kcp_ctl_waitq,
                   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        stop = current_kernel_time();
        delta = timespec_sub(stop, start);

        splat_vprint(file, name,
                     "%-22s %2ld.%09ld\t"
                     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
                     kcp->kcp_cache->skc_name,
                     delta.tv_sec, delta.tv_nsec,
                     (unsigned long)kcp->kcp_cache->skc_slab_total,
                     (unsigned long)kcp->kcp_cache->skc_slab_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS /
                                     SPL_KMEM_CACHE_OBJ_PER_SLAB),
                     (unsigned long)kcp->kcp_cache->skc_obj_total,
                     (unsigned long)kcp->kcp_cache->skc_obj_max,
                     (unsigned long)(kcp->kcp_alloc *
                                     SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
        return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, 0);
        if (rc)
                return rc;

        rc = splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_KMEM);
        if (rc)
                return rc;

        return splat_kmem_cache_test(file, arg, name, 128*1024, 0, KMC_VMEM);
}

/* Validate cache object alignment at every power-of-two alignment
 * from SPL_KMEM_CACHE_ALIGN up to PAGE_SIZE */
static int
splat_kmem_test7(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST7_NAME;
        int i, rc = 0;

        for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
                rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
                if (rc)
                        return rc;
        }

        return rc;
}

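/*
 * Slab reaping test.  SPLAT_KMEM_OBJ_COUNT objects are allocated from
 * a cache with a registered reclaim callback and left outstanding.
 * kmem_cache_reap_now() is then called once per second for up to 60
 * seconds; each pass lets the reclaim callback free a percentage of
 * the objects, and the test passes once skc_obj_total reaches zero.
 */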
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
                                              256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  splat_kmem_cache_test_reclaim,
                                  kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        /* Request the slab cache free any objects it can.  For a few
         * reasons this may not immediately result in more free memory
         * even if objects are freed.  First, due to fragmentation we
         * may not be able to reclaim any slabs.  Second, even if we do
         * fully clear some slabs we will not want to immediately reclaim
         * all of them because we may contend with cache allocs and
         * thrash.  What we want to see is the slab size decrease more
         * gradually as it becomes clear they will not be needed.  This
         * should be achievable in less than a minute; if it takes longer
         * than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total,
                             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

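/*
 * Slab aging test.  A large number of objects are allocated and then
 * immediately freed back to the cache.  With no reclaim callback and
 * no memory pressure, the now-empty slabs should be released gradually
 * by normal cache aging within a 60 second window.
 */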
static int
splat_kmem_test9(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_data_t *kcd;
        int i, j, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
                                              256, 0, 0, count);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        kcp->kcp_cache =
                kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
                                  splat_kmem_cache_test_constructor,
                                  splat_kmem_cache_test_destructor,
                                  NULL, kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_kmem_cache_test_kcp_free(kcp);
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        for (i = 0; i < count; i++) {
                kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                spin_lock(&kcp->kcp_lock);
                kcp->kcp_kcd[i] = kcd;
                spin_unlock(&kcp->kcp_lock);
                if (!kcd) {
                        splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                                     "Unable to allocate from '%s'\n",
                                     SPLAT_KMEM_CACHE_NAME);
                }
        }

        spin_lock(&kcp->kcp_lock);
        for (i = 0; i < count; i++)
                if (kcp->kcp_kcd[i])
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
        spin_unlock(&kcp->kcp_lock);

        /* We have allocated a large number of objects, thus creating a
         * large number of slabs, and then freed them all.  However,
         * since there should be little memory pressure at the moment
         * those slabs have not been freed.  What we want to see is the
         * slab size decrease gradually as it becomes clear they will
         * not be needed.  This should be achievable in less than a
         * minute; if it takes longer than this something has gone wrong.
         */
        for (i = 0; i < 60; i++) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
                             SPLAT_KMEM_CACHE_NAME, kcp->kcp_count,
                             (unsigned)kcp->kcp_cache->skc_slab_alloc,
                             (unsigned)kcp->kcp_cache->skc_slab_total,
                             (unsigned)kcp->kcp_cache->skc_obj_alloc,
                             (unsigned)kcp->kcp_cache->skc_obj_total);

                for_each_online_cpu(j)
                        splat_print(file, "%u/%u ",
                                    kcp->kcp_cache->skc_mag[j]->skm_avail,
                                    kcp->kcp_cache->skc_mag[j]->skm_size);

                splat_print(file, "%s\n", "");

                if (kcp->kcp_cache->skc_obj_total == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        }

        if (kcp->kcp_cache->skc_obj_total == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Successfully created %d objects "
                             "in cache %s and reclaimed them\n",
                             count, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
                             "Failed to reclaim %u/%d objects from cache %s\n",
                             (unsigned)kcp->kcp_cache->skc_obj_total, count,
                             SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        kmem_cache_destroy(kcp->kcp_cache);
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking
 * and concurrent cache performance.  If any one test takes longer than
 * 5 seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
        uint64_t size, alloc, rc = 0;

        for (size = 16; size <= 1024*1024; size *= 2) {

                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
                             "time (sec)\tslabs \tobjs \thash\n");
                splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
                             " \ttot/max/calc\ttot/max/calc\n");

                for (alloc = 1; alloc <= 1024; alloc *= 2) {

                        /* Skip tests which exceed available memory.  We
                         * leverage availrmem here for some extra testing */
                        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
                                continue;

                        rc = splat_kmem_cache_thread_test(file, arg,
                             SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
                        if (rc)
                                break;
                }
        }

        return rc;
}

#ifdef _LP64
/*
 * This test creates N threads with a shared kmem cache which
 * overcommits memory by 4x.  This makes it impossible for the slab to
 * satisfy the thread requirements without having its reclaim hook run,
 * which will free objects back for use.  This behavior is triggered by
 * the Linux VM detecting a low memory condition on the node and
 * invoking the shrinkers.  This should allow all the threads to
 * complete while avoiding deadlock and, for the most part, out of
 * memory events.  This is very tough on the system so it is possible
 * the test app may get oom'ed.  This particular test has proven
 * troublesome on 32-bit archs with limited virtual address space, so
 * it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
        uint64_t size, alloc, rc;

        size = 256*1024;
        alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
                     "time (sec)\tslabs \tobjs \thash\n");
        splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
                     " \ttot/max/calc\ttot/max/calc\n");

        rc = splat_kmem_cache_thread_test(file, arg,
             SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

        return rc;
}
#endif /* _LP64 */

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocating a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
        size_t alloc1, free1, total1;
        size_t alloc2, free2, total2;
        int size = 8*1024*1024;
        void *ptr;

        alloc1 = vmem_size(NULL, VMEM_ALLOC);
        free1 = vmem_size(NULL, VMEM_FREE);
        total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc1,
                     (unsigned long)free1, (unsigned long)total1);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
        ptr = vmem_alloc(size, KM_SLEEP);
        if (!ptr) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                             "Failed to alloc %d bytes\n", size);
                return -ENOMEM;
        }

        alloc2 = vmem_size(NULL, VMEM_ALLOC);
        free2 = vmem_size(NULL, VMEM_FREE);
        total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
                     "free=%lu total=%lu\n", (unsigned long)alloc2,
                     (unsigned long)free2, (unsigned long)total2);

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
        vmem_free(ptr, size);
        if (alloc2 < (alloc1 + size - (size / 100)) ||
            alloc2 > (alloc1 + size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
                             (unsigned long)alloc2, (unsigned long)alloc1, size);
                return -ERANGE;
        }

        if (free2 < (free1 - size - (size / 100)) ||
            free2 > (free1 - size + (size / 100))) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
                             (unsigned long)free2, (unsigned long)free1, size);
                return -ERANGE;
        }

        if (total1 != total2) {
                splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
                             "VMEM_ALLOC | VMEM_FREE not constant: "
                             "%lu != %lu\n", (unsigned long)total2,
                             (unsigned long)total1);
                return -ERANGE;
        }

        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
                     (long)abs(alloc1 + (long)size - alloc2), size);
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
                     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
                     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
                     (long)abs((free1 - (long)size) - free2), size);

        return 0;
}

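/*
 * Register the kmem subsystem and each of its tests with the splat
 * framework.  The tests are normally driven from user space with the
 * splat utility; something along the lines of 'splat -t kmem:all'
 * (exact flags depend on the installed splat version) should exercise
 * everything registered here.
 */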
splat_subsystem_t *
splat_kmem_init(void)
{
        splat_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
        strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
                        SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
                        SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
                        SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
                        SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
                        SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
                        SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
                        SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
                        SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
                        SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
                        SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#ifdef _LP64
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
                        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif /* _LP64 */
        SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
                        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);

        return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#ifdef _LP64
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif /* _LP64 */
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}

int
splat_kmem_id(void) {
        return SPLAT_SUBSYSTEM_KMEM;
}