/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/
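
/*
 * Usage note: these in-kernel tests are normally driven from user space
 * via the splat utility, e.g. "splat -t kmem:all" or
 * "splat -t kmem:kmem_alloc" (assuming the usual subsystem:test syntax).
 */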

#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#ifdef _LP64
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif /* _LP64 */

#define SPLAT_KMEM_TEST12_ID		0x010c
#define SPLAT_KMEM_TEST12_NAME		"vmem_size"
#define SPLAT_KMEM_TEST12_DESC		"Memory zone test"

#define SPLAT_KMEM_TEST13_ID		0x010d
#define SPLAT_KMEM_TEST13_NAME		"slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC		"Slab direct memory reclaim test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP | KM_NODEBUG);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 32))) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP | KM_NODEBUG);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
						     "%d-byte allocation was "
						     "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			     "%d byte allocations, %d/%d successful\n",
			     size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		20 /* percent */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

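/*
 * kcd_buf above (and kct_kcd[]/kcp_kcd[] below) are zero-length trailing
 * arrays; each structure is deliberately allocated oversized, e.g.
 * vmem_zalloc(sizeof(kmem_cache_priv_t) +
 *     count * sizeof(kmem_cache_data_t *), KM_SLEEP),
 * so the trailing array provides 'count' usable slots.
 */
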
typedef struct kmem_cache_thread {
	kmem_cache_t *kct_cache;
	spinlock_t kct_lock;
	int kct_id;
	int kct_kcd_count;
	kmem_cache_data_t *kct_kcd[0];
} kmem_cache_thread_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
	int kcp_kcd_count;
	kmem_cache_data_t *kcp_kcd[0];
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
				int size, int align, int alloc, int count)
{
	kmem_cache_priv_t *kcp;

	kcp = vmem_zalloc(sizeof(kmem_cache_priv_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;
	kcp->kcp_kcd_count = count;

	return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	vmem_free(kcp, sizeof(kmem_cache_priv_t) +
		  kcp->kcp_kcd_count * sizeof(kmem_cache_data_t *));
}

static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(int id, int count)
{
	kmem_cache_thread_t *kct;

	ASSERTF(id < SPLAT_KMEM_THREADS, "id=%d\n", id);
	kct = vmem_zalloc(sizeof(kmem_cache_thread_t) +
			  count * sizeof(kmem_cache_data_t *), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_cache = NULL;
	kct->kct_id = id;
	kct->kct_kcd_count = count;

	return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_thread_t *kct)
{
	vmem_free(kct, sizeof(kmem_cache_thread_t) +
		  kct->kct_kcd_count * sizeof(kmem_cache_data_t *));
}

static void
splat_kmem_cache_test_debug(struct file *file, char *name,
			    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name,
		     "%s cache objects %d, slabs %u/%u objs %u/%u mags ",
		     kcp->kcp_cache->skc_name, kcp->kcp_count,
		     (unsigned)kcp->kcp_cache->skc_slab_alloc,
		     (unsigned)kcp->kcp_cache->skc_slab_total,
		     (unsigned)kcp->kcp_cache->skc_obj_alloc,
		     (unsigned)kcp->kcp_cache->skc_obj_total);

	for_each_online_cpu(j)
		splat_print(file, "%u/%u ",
			    kcp->kcp_cache->skc_mag[j]->skm_avail,
			    kcp->kcp_cache->skc_mag[j]->skm_size);

	splat_print(file, "%s\n", "");
}

static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	int i, j, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);
	count = kcp->kcp_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

	/* Objects directly attached to the kcp */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < kcp->kcp_kcd_count; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;

			if ((--count) == 0)
				break;
		}
	}
	spin_unlock(&kcp->kcp_lock);

	/* No threads containing objects to consider */
	if (kcp->kcp_kct_count == -1)
		return;

	/* Objects attached to a kct thread */
	for (i = 0; i < kcp->kcp_kct_count; i++) {
		spin_lock(&kcp->kcp_lock);
		kct = kcp->kcp_kct[i];
		if (!kct) {
			spin_unlock(&kcp->kcp_lock);
			continue;
		}

		spin_lock(&kct->kct_lock);
		count = kct->kct_kcd_count * SPLAT_KMEM_OBJ_RECLAIM / 100;

		for (j = 0; j < kct->kct_kcd_count; j++) {
			if (kct->kct_kcd[j]) {
				kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[j]);
				kct->kct_kcd[j] = NULL;

				if ((--count) == 0)
					break;
			}
		}
		spin_unlock(&kct->kct_lock);
		spin_unlock(&kcp->kcp_lock);
	}

	return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id, i;
	void *obj;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(id, kcp->kcp_alloc);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/*
	 * Updates to kct->kct_kcd[] are performed under a spin_lock so
	 * they may safely run concurrently with the reclaim function.
	 * If we are not in a low memory situation we have one lock per
	 * thread so the locks are not expected to be contended.
	 */
	for (i = 0; i < kct->kct_kcd_count; i++) {
		obj = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kct->kct_lock);
		kct->kct_kcd[i] = obj;
		spin_unlock(&kct->kct_lock);
	}

	for (i = 0; i < kct->kct_kcd_count; i++) {
		spin_lock(&kct->kct_lock);
		if (kct->kct_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kct->kct_kcd[i]);
			kct->kct_kcd[i] = NULL;
		}
		spin_unlock(&kct->kct_lock);
	}
out:
	spin_lock(&kcp->kcp_lock);
	if (kct) {
		splat_kmem_cache_test_kct_free(kct);
		kcp->kcp_kct[id] = kct = NULL;
	}

	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}

static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int rc = 0, max;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0, 1);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_kcd[0] = NULL;
	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
				  kcp->kcp_size, kcp->kcp_align,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, flags);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name,
			     "Unable to create '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
		goto out_free;
	}

	kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
	if (!kcd) {
		splat_vprint(file, name,
			     "Unable to allocate from '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kcd[0] = kcd;
	spin_unlock(&kcp->kcp_lock);

	if (!kcp->kcp_kcd[0]->kcd_flag) {
		splat_vprint(file, name,
			     "Failed to run constructor for '%s'\n",
			     SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcp->kcp_kcd[0]->kcd_magic != kcp->kcp_magic) {
		splat_vprint(file, name,
			     "Failed to pass private data to constructor "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp->kcp_count;
	spin_lock(&kcp->kcp_lock);
	kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
	kcp->kcp_kcd[0] = NULL;
	spin_unlock(&kcp->kcp_lock);

	/* Destroy the entire cache, which forces the destructors to run,
	 * so we can verify one was called for every object. */
	kmem_cache_destroy(kcp->kcp_cache);
	if (kcp->kcp_count) {
		splat_vprint(file, name,
			     "Failed to run destructor on all slab objects "
			     "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
		     "Successfully ran ctors/dtors for %d elements in '%s'\n",
		     max, SPLAT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcp->kcp_kcd[0]) {
		spin_lock(&kcp->kcp_lock);
		kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[0]);
		kcp->kcp_kcd[0] = NULL;
		spin_unlock(&kcp->kcp_lock);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	start = current_kernel_time();

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, minclsyspri);
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all threads have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	stop = current_kernel_time();
	delta = timespec_sub(stop, start);

	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}

/* Validate small object cache behavior for dynamic/kmem/vmem caches */
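/* (KMC_KMEM and KMC_VMEM force kmem- and vmem-backed slabs respectively,
 * while a flags value of 0 lets the cache choose its own backing store.) */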
static int
splat_kmem_test5(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST5_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 128, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 128, 0, KMC_VMEM);
}

/* Validate large object cache behavior for dynamic/kmem/vmem caches */
static int
splat_kmem_test6(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST6_NAME;
	int rc;

	rc = splat_kmem_cache_test(file, arg, name, 256*1024, 0, 0);
	if (rc)
		return rc;

	rc = splat_kmem_cache_test(file, arg, name, 64*1024, 0, KMC_KMEM);
	if (rc)
		return rc;

	return splat_kmem_cache_test(file, arg, name, 1024*1024, 0, KMC_VMEM);
}

/* Validate object alignment behavior for caches */
static int
splat_kmem_test7(struct file *file, void *arg)
{
	char *name = SPLAT_KMEM_TEST7_NAME;
	int i, rc;

	for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
		rc = splat_kmem_cache_test(file, arg, name, 157, i, 0);
		if (rc)
			return rc;
	}

	return rc;
}

static int
splat_kmem_test8(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
					      256, 0, 0, SPLAT_KMEM_OBJ_COUNT);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Request the slab cache free any objects it can.  For a few reasons
	 * this may not immediately result in more free memory even if objects
	 * are freed.  First off, due to fragmentation we may not be able to
	 * reclaim any slabs.  Secondly, even if we do fully clear some
	 * slabs, we will not want to immediately reclaim all of them because
	 * we may contend with cache allocs and thrash.  What we want to see
	 * is the slab size decrease more gradually as it becomes clear they
	 * will not be needed.  This should be achievable in less than a
	 * minute; if it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		kmem_cache_reap_now(kcp->kcp_cache);
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total,
			     SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	/* Cleanup our mess (for failure case of time expiring) */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

static int
splat_kmem_test9(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
					      256, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  NULL, kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	/* We have allocated a large number of objects, thus creating a
	 * large number of slabs, and then freed them all.  However, since
	 * there should be little memory pressure at the moment those
	 * slabs have not been freed.  What we want to see is the slab
	 * size decrease gradually as it becomes clear they will not be
	 * needed.  This should be achievable in less than a minute; if
	 * it takes longer than this something has gone wrong.
	 */
	for (i = 0; i < 60; i++) {
		splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);

		if (kcp->kcp_cache->skc_obj_total == 0)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (kcp->kcp_cache->skc_obj_total == 0) {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Successfully created %d objects "
			     "in cache %s and reclaimed them\n",
			     count, SPLAT_KMEM_CACHE_NAME);
	} else {
		splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
			     "Failed to reclaim %u/%d objects from cache %s\n",
			     (unsigned)kcp->kcp_cache->skc_obj_total, count,
			     SPLAT_KMEM_CACHE_NAME);
		rc = -ENOMEM;
	}

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

/*
 * This test creates N threads with a shared kmem cache.  They then all
 * concurrently allocate and free from the cache to stress the locking and
 * concurrent cache performance.  If any one test takes longer than 5
 * seconds to complete it is treated as a failure and may indicate a
 * performance regression.  On my test system no one test takes more
 * than 1 second to complete, so a 5x slowdown is likely a problem.
 */
static int
splat_kmem_test10(struct file *file, void *arg)
{
	uint64_t size, alloc, rc = 0;

	for (size = 16; size <= 1024*1024; size *= 2) {

		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
			     "time (sec)\tslabs \tobjs \thash\n");
		splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
			     " \ttot/max/calc\ttot/max/calc\n");

		for (alloc = 1; alloc <= 1024; alloc *= 2) {

			/* Skip tests which exceed available memory.  We
			 * leverage availrmem here for some extra testing. */
			if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
				continue;

			rc = splat_kmem_cache_thread_test(file, arg,
			     SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
			if (rc)
				break;
		}
	}

	return rc;
}

#ifdef _LP64
/*
 * This test creates N threads with a shared kmem cache which overcommits
 * memory by 4x.  This makes it impossible for the slab to satisfy the
 * thread requirements without having its reclaim hook run, which will
 * free objects back for use.  This behavior is triggered by the Linux VM
 * detecting a low memory condition on the node and invoking the shrinkers.
 * This should allow all the threads to complete while avoiding deadlock
 * and, for the most part, out of memory events.  This is very tough on the
 * system so it is possible the test app may get oom'ed.  This particular
 * test has proven troublesome on 32-bit archs with limited virtual
 * address space, so it is only run on 64-bit systems.
 */
static int
splat_kmem_test11(struct file *file, void *arg)
{
	uint64_t size, alloc, rc;

	size = 256*1024;
	alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;

	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
		     "time (sec)\tslabs \tobjs \thash\n");
	splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
		     " \ttot/max/calc\ttot/max/calc\n");

	rc = splat_kmem_cache_thread_test(file, arg,
	     SPLAT_KMEM_TEST11_NAME, size, alloc, 60);

	return rc;
}
#endif /* _LP64 */

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that the vmem_size() values were updated properly within
 * a fairly small tolerance.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
	size_t alloc1, free1, total1;
	size_t alloc2, free2, total2;
	int size = 8*1024*1024;
	void *ptr;

	alloc1 = vmem_size(NULL, VMEM_ALLOC);
	free1 = vmem_size(NULL, VMEM_FREE);
	total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc1,
		     (unsigned long)free1, (unsigned long)total1);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
	ptr = vmem_alloc(size, KM_SLEEP);
	if (!ptr) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
			     "Failed to alloc %d bytes\n", size);
		return -ENOMEM;
	}

	alloc2 = vmem_size(NULL, VMEM_ALLOC);
	free2 = vmem_size(NULL, VMEM_FREE);
	total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
		     "free=%lu total=%lu\n", (unsigned long)alloc2,
		     (unsigned long)free2, (unsigned long)total2);

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
	vmem_free(ptr, size);
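	/* The size/100 terms below implement the +/- 1% tolerance: each
	 * observed delta must fall within one percent of the expected
	 * 'size' byte change, while the total must be exactly unchanged. */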
	if (alloc2 < (alloc1 + size - (size / 100)) ||
	    alloc2 > (alloc1 + size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
			     (unsigned long)alloc2, (unsigned long)alloc1, size);
		return -ERANGE;
	}

	if (free2 < (free1 - size - (size / 100)) ||
	    free2 > (free1 - size + (size / 100))) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
			     (unsigned long)free2, (unsigned long)free1, size);
		return -ERANGE;
	}

	if (total1 != total2) {
		splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
			     "VMEM_ALLOC | VMEM_FREE not constant: "
			     "%lu != %lu\n", (unsigned long)total2,
			     (unsigned long)total1);
		return -ERANGE;
	}

	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
		     (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
		     (long)abs(alloc1 + (long)size - alloc2), size);
	splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
		     "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
		     (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
		     (long)abs((free1 - (long)size) - free2), size);

	return 0;
}

typedef struct dummy_page {
	struct list_head dp_list;
	char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
} dummy_page_t;
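
/* Each dummy_page_t occupies exactly one page; allocating these in a
 * loop below applies steadily increasing memory pressure. */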

/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects, thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
	kmem_cache_priv_t *kcp;
	kmem_cache_data_t *kcd;
	dummy_page_t *dp;
	struct list_head list;
	struct timespec start, delta = { 0, 0 };
	int size, count, slabs, fails = 0;
	int i, rc = 0, max_time = 10;

	size = 128 * 1024;
	count = ((physmem * PAGE_SIZE) / 4 / size);
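	/* i.e. enough 128K objects to fill roughly 1/4 of physical memory */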

	kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
					      size, 0, 0, count);
	if (!kcp) {
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	kcp->kcp_cache =
		kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_kmem_cache_test_kcp_free(kcp);
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		spin_lock(&kcp->kcp_lock);
		kcp->kcp_kcd[i] = kcd;
		spin_unlock(&kcp->kcp_lock);
		if (!kcd) {
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Unable to allocate from '%s'\n",
				     SPLAT_KMEM_CACHE_NAME);
		}
	}

	i = 0;
	slabs = kcp->kcp_cache->skc_slab_total;
	INIT_LIST_HEAD(&list);
	start = current_kernel_time();

	while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

		if ((i % 10000) == 0)
			splat_kmem_cache_test_debug(
				file, SPLAT_KMEM_TEST13_NAME, kcp);

		delta = timespec_sub(current_kernel_time(), start);
		if (delta.tv_sec >= max_time) {
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed to reclaim 3/4 of cache in %ds, "
				     "%u/%u slabs remain\n", max_time,
				     (unsigned)kcp->kcp_cache->skc_slab_total,
				     slabs);
			rc = -ETIME;
			break;
		}

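		/* __GFP_NORETRY lets the page allocation fail quickly under
		 * pressure rather than retrying or triggering the OOM
		 * killer; failures are counted and tolerated. */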
		dp = (dummy_page_t *)__get_free_page(GFP_KERNEL | __GFP_NORETRY);
		if (!dp) {
			fails++;
			splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
				     "Failed (%d) to allocate page with %u "
				     "slabs still in the cache\n", fails,
				     (unsigned)kcp->kcp_cache->skc_slab_total);
			continue;
		}

		list_add(&dp->dp_list, &list);
		i++;
	}

	if (rc == 0)
		splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
			     "Successfully created %u slabs and with %d alloc "
			     "failures reclaimed 3/4 of them in %d.%03ds\n",
			     slabs, fails,
			     (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

	/* Release memory pressure pages */
	while (!list_empty(&list)) {
		dp = list_entry(list.next, dummy_page_t, dp_list);
		list_del_init(&dp->dp_list);
		free_page((unsigned long)dp);
	}

	/* Release remaining kmem cache objects */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < count; i++)
		if (kcp->kcp_kcd[i])
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
	spin_unlock(&kcp->kcp_lock);

	kmem_cache_destroy(kcp->kcp_cache);
	splat_kmem_cache_test_kcp_free(kcp);

	return rc;
}

splat_subsystem_t *
splat_kmem_init(void)
{
	splat_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
	strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = SPLAT_SUBSYSTEM_KMEM;

	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
			SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
			SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
			SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
			SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
			SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
			SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
			SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
			SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
			SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
			SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
#ifdef _LP64
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
			SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif /* _LP64 */
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
			SPLAT_KMEM_TEST12_ID, splat_kmem_test12);
	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
			SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

	return sub;
}

void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#ifdef _LP64
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif /* _LP64 */
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}

int
splat_kmem_id(void) {
	return SPLAT_SUBSYSTEM_KMEM;
}