]> git.proxmox.com Git - mirror_zfs.git/blob - modules/splat/splat-kmem.c
Make the splat load message caps just for consistency
[mirror_zfs.git] / modules / splat / splat-kmem.c
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 #include "splat-internal.h"
28
29 #define SPLAT_SUBSYSTEM_KMEM 0x0100
30 #define SPLAT_KMEM_NAME "kmem"
31 #define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"
32
33 #define SPLAT_KMEM_TEST1_ID 0x0101
34 #define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
35 #define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
36
37 #define SPLAT_KMEM_TEST2_ID 0x0102
38 #define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
39 #define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
40
41 #define SPLAT_KMEM_TEST3_ID 0x0103
42 #define SPLAT_KMEM_TEST3_NAME "vmem_alloc"
43 #define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)"
44
45 #define SPLAT_KMEM_TEST4_ID 0x0104
46 #define SPLAT_KMEM_TEST4_NAME "vmem_zalloc"
47 #define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)"
48
49 #define SPLAT_KMEM_TEST5_ID 0x0105
50 #define SPLAT_KMEM_TEST5_NAME "kmem_cache1"
51 #define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)"
52
53 #define SPLAT_KMEM_TEST6_ID 0x0106
54 #define SPLAT_KMEM_TEST6_NAME "kmem_cache2"
55 #define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)"
56
57 #define SPLAT_KMEM_TEST7_ID 0x0107
58 #define SPLAT_KMEM_TEST7_NAME "kmem_reap"
59 #define SPLAT_KMEM_TEST7_DESC "Slab reaping test"
60
61 #define SPLAT_KMEM_TEST8_ID 0x0108
62 #define SPLAT_KMEM_TEST8_NAME "kmem_lock"
63 #define SPLAT_KMEM_TEST8_DESC "Slab locking test"
64
65 #define SPLAT_KMEM_ALLOC_COUNT 10
66 #define SPLAT_VMEM_ALLOC_COUNT 10
67
68
69 /* XXX - This test may fail under tight memory conditions */
70 static int
71 splat_kmem_test1(struct file *file, void *arg)
72 {
73 void *ptr[SPLAT_KMEM_ALLOC_COUNT];
74 int size = PAGE_SIZE;
75 int i, count, rc = 0;
76
77 /* We are intentionally going to push kmem_alloc to its max
78 * allocation size, so suppress the console warnings for now */
79 kmem_set_warning(0);
80
81 while ((!rc) && (size <= (PAGE_SIZE * 32))) {
82 count = 0;
83
84 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
85 ptr[i] = kmem_alloc(size, KM_SLEEP);
86 if (ptr[i])
87 count++;
88 }
89
90 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
91 if (ptr[i])
92 kmem_free(ptr[i], size);
93
94 splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
95 "%d byte allocations, %d/%d successful\n",
96 size, count, SPLAT_KMEM_ALLOC_COUNT);
97 if (count != SPLAT_KMEM_ALLOC_COUNT)
98 rc = -ENOMEM;
99
100 size *= 2;
101 }
102
103 kmem_set_warning(1);
104
105 return rc;
106 }
107
108 static int
109 splat_kmem_test2(struct file *file, void *arg)
110 {
111 void *ptr[SPLAT_KMEM_ALLOC_COUNT];
112 int size = PAGE_SIZE;
113 int i, j, count, rc = 0;
114
115 /* We are intentionally going to push kmem_alloc to its max
116 * allocation size, so suppress the console warnings for now */
117 kmem_set_warning(0);
118
119 while ((!rc) && (size <= (PAGE_SIZE * 32))) {
120 count = 0;
121
122 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
123 ptr[i] = kmem_zalloc(size, KM_SLEEP);
124 if (ptr[i])
125 count++;
126 }
127
128 /* Ensure buffer has been zero filled */
129 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
130 for (j = 0; j < size; j++) {
131 if (((char *)ptr[i])[j] != '\0') {
132 splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
133 "%d-byte allocation was "
134 "not zeroed\n", size);
135 rc = -EFAULT;
136 }
137 }
138 }
139
140 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
141 if (ptr[i])
142 kmem_free(ptr[i], size);
143
144 splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
145 "%d byte allocations, %d/%d successful\n",
146 size, count, SPLAT_KMEM_ALLOC_COUNT);
147 if (count != SPLAT_KMEM_ALLOC_COUNT)
148 rc = -ENOMEM;
149
150 size *= 2;
151 }
152
153 kmem_set_warning(1);
154
155 return rc;
156 }
157
158 static int
159 splat_kmem_test3(struct file *file, void *arg)
160 {
161 void *ptr[SPLAT_VMEM_ALLOC_COUNT];
162 int size = PAGE_SIZE;
163 int i, count, rc = 0;
164
165 while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
166 count = 0;
167
168 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
169 ptr[i] = vmem_alloc(size, KM_SLEEP);
170 if (ptr[i])
171 count++;
172 }
173
174 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
175 if (ptr[i])
176 vmem_free(ptr[i], size);
177
178 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
179 "%d byte allocations, %d/%d successful\n",
180 size, count, SPLAT_VMEM_ALLOC_COUNT);
181 if (count != SPLAT_VMEM_ALLOC_COUNT)
182 rc = -ENOMEM;
183
184 size *= 2;
185 }
186
187 return rc;
188 }
189
190 static int
191 splat_kmem_test4(struct file *file, void *arg)
192 {
193 void *ptr[SPLAT_VMEM_ALLOC_COUNT];
194 int size = PAGE_SIZE;
195 int i, j, count, rc = 0;
196
197 while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
198 count = 0;
199
200 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
201 ptr[i] = vmem_zalloc(size, KM_SLEEP);
202 if (ptr[i])
203 count++;
204 }
205
206 /* Ensure buffer has been zero filled */
207 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
208 for (j = 0; j < size; j++) {
209 if (((char *)ptr[i])[j] != '\0') {
210 splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
211 "%d-byte allocation was "
212 "not zeroed\n", size);
213 rc = -EFAULT;
214 }
215 }
216 }
217
218 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
219 if (ptr[i])
220 vmem_free(ptr[i], size);
221
222 splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
223 "%d byte allocations, %d/%d successful\n",
224 size, count, SPLAT_VMEM_ALLOC_COUNT);
225 if (count != SPLAT_VMEM_ALLOC_COUNT)
226 rc = -ENOMEM;
227
228 size *= 2;
229 }
230
231 return rc;
232 }
233
234 #define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL
235 #define SPLAT_KMEM_CACHE_NAME "kmem_test"
236 #define SPLAT_KMEM_OBJ_COUNT 128
237 #define SPLAT_KMEM_OBJ_RECLAIM 16
238
/*
 * Per-object payload used by the slab cache tests to verify the
 * constructor/destructor callbacks ran.  kcd_buf is a variable-length
 * tail sized by the cache object size (kcp_size).
 */
typedef struct kmem_cache_data {
	unsigned long kcd_magic;	/* Copied from private data by the ctor */
	int kcd_flag;			/* Set to 1 by the ctor, 0 by the dtor */
	char kcd_buf[0];		/* Pattern-filled trailing buffer */
} kmem_cache_data_t;
244
/*
 * Shared state handed to the cache callbacks (as private data) and to
 * the test8 worker threads.  kcp_lock guards kcp_threads and kcp_rc
 * once multiple threads are running (see splat_kmem_test8_thread()).
 */
typedef struct kmem_cache_priv {
	unsigned long kcp_magic;	/* SPLAT_KMEM_TEST_MAGIC sanity value */
	struct file *kcp_file;		/* Handle used for splat_vprint() output */
	kmem_cache_t *kcp_cache;	/* Cache under test */
	kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT]; /* Objects tracked for reclaim (test7) */
	spinlock_t kcp_lock;		/* Guards kcp_threads / kcp_rc in test8 */
	wait_queue_head_t kcp_waitq;	/* test8: signaled by the last exiting thread */
	int kcp_size;			/* Object size for the cache */
	int kcp_count;			/* Live object count (ctor calls minus dtor calls) */
	int kcp_threads;		/* test8: number of threads still running */
	int kcp_alloc;			/* test8: allocations per thread */
	int kcp_rc;			/* First non-zero failure code from a thread */
} kmem_cache_priv_t;
258
259 static int
260 splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
261 {
262 kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
263 kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
264
265 if (kcd && kcp) {
266 kcd->kcd_magic = kcp->kcp_magic;
267 kcd->kcd_flag = 1;
268 memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
269 kcp->kcp_count++;
270 }
271
272 return 0;
273 }
274
275 static void
276 splat_kmem_cache_test_destructor(void *ptr, void *priv)
277 {
278 kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
279 kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
280
281 if (kcd && kcp) {
282 kcd->kcd_magic = 0;
283 kcd->kcd_flag = 0;
284 memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
285 kcp->kcp_count--;
286 }
287
288 return;
289 }
290
291 static int
292 splat_kmem_cache_size_test(struct file *file, void *arg,
293 char *name, int size, int flags)
294 {
295 kmem_cache_t *cache = NULL;
296 kmem_cache_data_t *kcd = NULL;
297 kmem_cache_priv_t kcp;
298 int rc = 0, max;
299
300 kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
301 kcp.kcp_file = file;
302 kcp.kcp_size = size;
303 kcp.kcp_count = 0;
304 kcp.kcp_rc = 0;
305
306 cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
307 splat_kmem_cache_test_constructor,
308 splat_kmem_cache_test_destructor,
309 NULL, &kcp, NULL, flags);
310 if (!cache) {
311 splat_vprint(file, name,
312 "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
313 return -ENOMEM;
314 }
315
316 kcd = kmem_cache_alloc(cache, KM_SLEEP);
317 if (!kcd) {
318 splat_vprint(file, name,
319 "Unable to allocate from '%s'\n",
320 SPLAT_KMEM_CACHE_NAME);
321 rc = -EINVAL;
322 goto out_free;
323 }
324
325 if (!kcd->kcd_flag) {
326 splat_vprint(file, name,
327 "Failed to run contructor for '%s'\n",
328 SPLAT_KMEM_CACHE_NAME);
329 rc = -EINVAL;
330 goto out_free;
331 }
332
333 if (kcd->kcd_magic != kcp.kcp_magic) {
334 splat_vprint(file, name,
335 "Failed to pass private data to constructor "
336 "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
337 rc = -EINVAL;
338 goto out_free;
339 }
340
341 max = kcp.kcp_count;
342 kmem_cache_free(cache, kcd);
343
344 /* Destroy the entire cache which will force destructors to
345 * run and we can verify one was called for every object */
346 kmem_cache_destroy(cache);
347 if (kcp.kcp_count) {
348 splat_vprint(file, name,
349 "Failed to run destructor on all slab objects "
350 "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
351 rc = -EINVAL;
352 }
353
354 splat_vprint(file, name,
355 "Successfully ran ctors/dtors for %d elements in '%s'\n",
356 max, SPLAT_KMEM_CACHE_NAME);
357
358 return rc;
359
360 out_free:
361 if (kcd)
362 kmem_cache_free(cache, kcd);
363
364 kmem_cache_destroy(cache);
365 return rc;
366 }
367
368 /* Validate small object cache behavior for dynamic/kmem/vmem caches */
369 static int
370 splat_kmem_test5(struct file *file, void *arg)
371 {
372 char *name = SPLAT_KMEM_TEST5_NAME;
373 int rc;
374
375 rc = splat_kmem_cache_size_test(file, arg, name, 128, 0);
376 if (rc)
377 return rc;
378
379 rc = splat_kmem_cache_size_test(file, arg, name, 128, KMC_KMEM);
380 if (rc)
381 return rc;
382
383 return splat_kmem_cache_size_test(file, arg, name, 128, KMC_VMEM);
384 }
385
386 /* Validate large object cache behavior for dynamic/kmem/vmem caches */
387 static int
388 splat_kmem_test6(struct file *file, void *arg)
389 {
390 char *name = SPLAT_KMEM_TEST6_NAME;
391 int rc;
392
393 rc = splat_kmem_cache_size_test(file, arg, name, 128 * 1024, 0);
394 if (rc)
395 return rc;
396
397 rc = splat_kmem_cache_size_test(file, arg, name, 128 * 1024, KMC_KMEM);
398 if (rc)
399 return rc;
400
401 return splat_kmem_cache_size_test(file, arg, name, 128 * 1028, KMC_VMEM);
402 }
403
/*
 * Cache reclaim callback registered by test7.  Frees up to
 * SPLAT_KMEM_OBJ_RECLAIM of the objects tracked in kcp_kcd[], simulating
 * how a consumer releases cached objects under memory pressure.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	int i, count;

	/* Never report reclaiming more objects than are currently live */
	count = min(SPLAT_KMEM_OBJ_RECLAIM, kcp->kcp_count);
	splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST7_NAME,
		     "Reaping %d objects from '%s'\n", count,
		     SPLAT_KMEM_CACHE_NAME);

	/* NOTE(review): if count were 0 here, --count would never reach 0
	 * and every tracked object would be freed.  test7 ASSERTs
	 * kcp_count > 0 before reaping; confirm this callback cannot be
	 * invoked with an empty cache from any other path. */
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;

			if (--count == 0)
				break;
		}
	}

	return;
}
427
/*
 * Test 7: slab reaping.  Creates a cache with a reclaim callback, fills
 * it with up to SPLAT_KMEM_OBJ_COUNT objects, then repeatedly calls
 * kmem_cache_reap_now() (once per second, up to 60 tries) until the
 * cache reports zero total objects.  Returns 0 if the cache drained,
 * -ENOMEM if it did not within the time limit or could not be created.
 */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	kmem_cache_t *cache;
	kmem_cache_priv_t kcp;
	int i, rc = 0;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp.kcp_file = file;
	kcp.kcp_size = 256;
	kcp.kcp_count = 0;
	kcp.kcp_rc = 0;

	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  &kcp, NULL, 0);
	if (!cache) {
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	kcp.kcp_cache = cache;

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		/* All allocations need not succeed */
		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, KM_SLEEP);
		if (!kcp.kcp_kcd[i]) {
			splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* The reclaim callback assumes at least one live object exists */
	ASSERT(kcp.kcp_count > 0);

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some
	 * slabs we will not want to immediately reclaim all of them because
	 * we may contend with cache allocs and thrash.  What we want to see
	 * is the slab size decrease more gradually as it becomes clear they
	 * will not be needed.  This should be achievable in less than a
	 * minute; if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(cache);
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			     "%s cache objects %d, slabs %u/%u objs %u/%u\n",
			     SPLAT_KMEM_CACHE_NAME, kcp.kcp_count,
			     (unsigned)cache->skc_slab_alloc,
			     (unsigned)cache->skc_slab_total,
			     (unsigned)cache->skc_obj_alloc,
			     (unsigned)cache->skc_obj_total);

		/* Fully drained - the reap succeeded */
		if (cache->skc_obj_total == 0)
			break;

		/* Sleep one second between reap attempts */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)cache->skc_obj_total, SPLAT_KMEM_OBJ_COUNT,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp.kcp_kcd[i])
			kmem_cache_free(cache, kcp.kcp_kcd[i]);

	kmem_cache_destroy(cache);

	return rc;
}
515
/*
 * Worker thread body for test8.  Allocates kcp_alloc objects from the
 * shared cache, frees them all, records the first failure code in
 * kcp_rc, then decrements kcp_threads under kcp_lock and wakes the
 * waiter when it is the last thread to finish.  Exits via thread_exit().
 */
static void
splat_kmem_test8_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	int count = kcp->kcp_alloc, rc = 0, i;
	void **objs;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Scratch array remembering which objects must be freed below */
	objs = vmem_zalloc(count * sizeof(void *), KM_SLEEP);
	if (!objs) {
		splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to alloc objp array for cache '%s'\n",
			     kcp->kcp_cache->skc_name);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < count; i++) {
		objs[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (!objs[i]) {
			splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to allocate from cache '%s'\n",
				     kcp->kcp_cache->skc_name);
			rc = -ENOMEM;
			break;
		}
	}

	/* Free whatever was successfully allocated (zalloc'd array means
	 * untouched slots are NULL and safely skipped) */
	for (i = 0; i < count; i++)
		if (objs[i])
			kmem_cache_free(kcp->kcp_cache, objs[i]);

	vmem_free(objs, count * sizeof(void *));
out:
	/* Record the first failure and signal completion; kcp_lock guards
	 * both kcp_rc and the kcp_threads countdown */
	spin_lock(&kcp->kcp_lock);
	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if (--kcp->kcp_threads == 0)
		wake_up(&kcp->kcp_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}
562
563 static int
564 splat_kmem_test8_count(kmem_cache_priv_t *kcp, int threads)
565 {
566 int ret;
567
568 spin_lock(&kcp->kcp_lock);
569 ret = (kcp->kcp_threads == threads);
570 spin_unlock(&kcp->kcp_lock);
571
572 return ret;
573 }
574
/* This test will always pass and is simply here so I can easily
 * eyeball the slab cache locking overhead to ensure it is reasonable.
 */
static int
splat_kmem_test8_sc(struct file *file, void *arg, int size, int count)
{
	kmem_cache_priv_t kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, j, rc = 0, threads = 32;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp.kcp_file = file;

	/* Column headers for the per-iteration timing lines printed below */
	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	/* Each pass doubles the per-thread allocation count up to 'count' */
	for (i = 1; i <= count; i *= 2) {
		kcp.kcp_size = size;
		kcp.kcp_count = 0;
		kcp.kcp_threads = 0;
		kcp.kcp_alloc = i;
		kcp.kcp_rc = 0;
		spin_lock_init(&kcp.kcp_lock);
		init_waitqueue_head(&kcp.kcp_waitq);

		/* Unique cache name per size/count combination */
		(void)snprintf(cache_name, 32, "%s-%d-%d",
			       SPLAT_KMEM_CACHE_NAME, size, i);
		kcp.kcp_cache = kmem_cache_create(cache_name, kcp.kcp_size, 0,
					  splat_kmem_cache_test_constructor,
					  splat_kmem_cache_test_destructor,
					  NULL, &kcp, NULL, 0);
		if (!kcp.kcp_cache) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to create '%s' cache\n",
				     SPLAT_KMEM_CACHE_NAME);
			rc = -ENOMEM;
			break;
		}

		start = current_kernel_time();

		/* Launch the workers; each successfully created thread is
		 * counted in kcp_threads so we know how many to wait for */
		for (j = 0; j < threads; j++) {
			thr = thread_create(NULL, 0, splat_kmem_test8_thread,
					    &kcp, 0, &p0, TS_RUN, minclsyspri);
			if (thr == NULL) {
				rc = -ESRCH;
				break;
			}
			spin_lock(&kcp.kcp_lock);
			kcp.kcp_threads++;
			spin_unlock(&kcp.kcp_lock);
		}

		/* Sleep until the thread sets kcp.kcp_threads == 0 */
		wait_event(kcp.kcp_waitq, splat_kmem_test8_count(&kcp, 0));
		stop = current_kernel_time();
		delta = timespec_sub(stop, start);

		/* Report elapsed time plus slab/object totals, maxima, and
		 * the calculated expected values for comparison */
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "%-22s %2ld.%09ld\t"
			     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
			     kcp.kcp_cache->skc_name,
			     delta.tv_sec, delta.tv_nsec,
			     (unsigned long)kcp.kcp_cache->skc_slab_total,
			     (unsigned long)kcp.kcp_cache->skc_slab_max,
			     (unsigned long)(kcp.kcp_alloc * threads /
					     SPL_KMEM_CACHE_OBJ_PER_SLAB),
			     (unsigned long)kcp.kcp_cache->skc_obj_total,
			     (unsigned long)kcp.kcp_cache->skc_obj_max,
			     (unsigned long)(kcp.kcp_alloc * threads));

		kmem_cache_destroy(kcp.kcp_cache);

		/* Propagate the first per-thread failure, if any */
		if (!rc && kcp.kcp_rc)
			rc = kcp.kcp_rc;

		if (rc)
			break;
	}

	return rc;
}
660
/*
 * Slab locking overhead sweep: run the timing helper for object sizes
 * from 16 bytes to 1MiB in 4x multiples, 256 objects each.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
	int size, rc = 0;

	for (size = 16; size <= 1024 * 1024 && rc == 0; size *= 4)
		rc = splat_kmem_test8_sc(file, arg, size, 256);

	return rc;
}
676
/*
 * Allocate and register the kmem test subsystem with all eight tests.
 * Returns the new subsystem descriptor, or NULL on allocation failure.
 * Ownership passes to the caller; release with splat_kmem_fini().
 */
splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	/* NOTE(review): strncpy() does not NUL-terminate when the source is
	 * >= the limit - confirm desc.name/desc.desc consumers treat these
	 * as bounded buffers, or that the names are always shorter */
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);

	return sub;
}
713
/*
 * Unregister every kmem test (in reverse registration order) and free
 * the subsystem descriptor allocated by splat_kmem_init().
 */
void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}
729
730 int
731 splat_kmem_id(void) {
732 return SPLAT_SUBSYSTEM_KMEM;
733 }