]>
Commit | Line | Data |
---|---|---|
716154c5 BB |
1 | /*****************************************************************************\ |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. | |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
715f6251 | 6 | * UCRL-CODE-235197 |
7 | * | |
716154c5 | 8 | * This file is part of the SPL, Solaris Porting Layer. |
3d6af2dd | 9 | * For details, see <http://zfsonlinux.org/>. |
716154c5 BB |
10 | * |
11 | * The SPL is free software; you can redistribute it and/or modify it | |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
715f6251 | 15 | * |
716154c5 | 16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT |
715f6251 | 17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
716154c5 BB |
22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. |
23 | ***************************************************************************** | |
24 | * Solaris Porting LAyer Tests (SPLAT) Kmem Tests. | |
25 | \*****************************************************************************/ | |
715f6251 | 26 | |
df870a69 | 27 | #include <sys/kmem.h> |
10946b02 AX |
28 | #include <sys/kmem_cache.h> |
29 | #include <sys/vmem.h> | |
30 | #include <sys/random.h> | |
df870a69 | 31 | #include <sys/thread.h> |
10946b02 | 32 | #include <sys/vmsystm.h> |
7c50328b | 33 | #include "splat-internal.h" |
f1ca4da6 | 34 | |
7c50328b | 35 | #define SPLAT_KMEM_NAME "kmem" |
36 | #define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests" | |
f1ca4da6 | 37 | |
7c50328b | 38 | #define SPLAT_KMEM_TEST1_ID 0x0101 |
39 | #define SPLAT_KMEM_TEST1_NAME "kmem_alloc" | |
40 | #define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)" | |
f1ca4da6 | 41 | |
7c50328b | 42 | #define SPLAT_KMEM_TEST2_ID 0x0102 |
43 | #define SPLAT_KMEM_TEST2_NAME "kmem_zalloc" | |
44 | #define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)" | |
f1ca4da6 | 45 | |
7c50328b | 46 | #define SPLAT_KMEM_TEST3_ID 0x0103 |
2fb9b26a | 47 | #define SPLAT_KMEM_TEST3_NAME "vmem_alloc" |
48 | #define SPLAT_KMEM_TEST3_DESC "Memory allocation test (vmem_alloc)" | |
f1ca4da6 | 49 | |
7c50328b | 50 | #define SPLAT_KMEM_TEST4_ID 0x0104 |
2fb9b26a | 51 | #define SPLAT_KMEM_TEST4_NAME "vmem_zalloc" |
52 | #define SPLAT_KMEM_TEST4_DESC "Memory allocation test (vmem_zalloc)" | |
f1ca4da6 | 53 | |
79b31f36 | 54 | #define SPLAT_KMEM_TEST5_ID 0x0105 |
ea3e6ca9 | 55 | #define SPLAT_KMEM_TEST5_NAME "slab_small" |
2fb9b26a | 56 | #define SPLAT_KMEM_TEST5_DESC "Slab ctor/dtor test (small)" |
57 | ||
58 | #define SPLAT_KMEM_TEST6_ID 0x0106 | |
ea3e6ca9 | 59 | #define SPLAT_KMEM_TEST6_NAME "slab_large" |
2fb9b26a | 60 | #define SPLAT_KMEM_TEST6_DESC "Slab ctor/dtor test (large)" |
61 | ||
62 | #define SPLAT_KMEM_TEST7_ID 0x0107 | |
ea3e6ca9 BB |
63 | #define SPLAT_KMEM_TEST7_NAME "slab_align" |
64 | #define SPLAT_KMEM_TEST7_DESC "Slab alignment test" | |
79b31f36 | 65 | |
44b8f176 | 66 | #define SPLAT_KMEM_TEST8_ID 0x0108 |
ea3e6ca9 BB |
67 | #define SPLAT_KMEM_TEST8_NAME "slab_reap" |
68 | #define SPLAT_KMEM_TEST8_DESC "Slab reaping test" | |
44b8f176 | 69 | |
48e0606a | 70 | #define SPLAT_KMEM_TEST9_ID 0x0109 |
ea3e6ca9 BB |
71 | #define SPLAT_KMEM_TEST9_NAME "slab_age" |
72 | #define SPLAT_KMEM_TEST9_DESC "Slab aging test" | |
73 | ||
74 | #define SPLAT_KMEM_TEST10_ID 0x010a | |
75 | #define SPLAT_KMEM_TEST10_NAME "slab_lock" | |
76 | #define SPLAT_KMEM_TEST10_DESC "Slab locking test" | |
77 | ||
11124863 | 78 | #if 0 |
ea3e6ca9 BB |
79 | #define SPLAT_KMEM_TEST11_ID 0x010b |
80 | #define SPLAT_KMEM_TEST11_NAME "slab_overcommit" | |
81 | #define SPLAT_KMEM_TEST11_DESC "Slab memory overcommit test" | |
11124863 | 82 | #endif |
48e0606a | 83 | |
a9a7a01c PS |
84 | #define SPLAT_KMEM_TEST13_ID 0x010d |
85 | #define SPLAT_KMEM_TEST13_NAME "slab_reclaim" | |
86 | #define SPLAT_KMEM_TEST13_DESC "Slab direct memory reclaim test" | |
87 | ||
7c50328b | 88 | #define SPLAT_KMEM_ALLOC_COUNT 10 |
79b31f36 | 89 | #define SPLAT_VMEM_ALLOC_COUNT 10 |
90 | ||
44b8f176 | 91 | |
f1ca4da6 | 92 | static int |
7c50328b | 93 | splat_kmem_test1(struct file *file, void *arg) |
f1ca4da6 | 94 | { |
7c50328b | 95 | void *ptr[SPLAT_KMEM_ALLOC_COUNT]; |
f1ca4da6 | 96 | int size = PAGE_SIZE; |
97 | int i, count, rc = 0; | |
98 | ||
10946b02 | 99 | while ((!rc) && (size <= spl_kmem_alloc_warn)) { |
f1ca4da6 | 100 | count = 0; |
101 | ||
7c50328b | 102 | for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) { |
10946b02 | 103 | ptr[i] = kmem_alloc(size, KM_SLEEP); |
f1ca4da6 | 104 | if (ptr[i]) |
105 | count++; | |
106 | } | |
107 | ||
7c50328b | 108 | for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) |
f1ca4da6 | 109 | if (ptr[i]) |
110 | kmem_free(ptr[i], size); | |
111 | ||
7c50328b | 112 | splat_vprint(file, SPLAT_KMEM_TEST1_NAME, |
ea3e6ca9 BB |
113 | "%d byte allocations, %d/%d successful\n", |
114 | size, count, SPLAT_KMEM_ALLOC_COUNT); | |
7c50328b | 115 | if (count != SPLAT_KMEM_ALLOC_COUNT) |
f1ca4da6 | 116 | rc = -ENOMEM; |
117 | ||
118 | size *= 2; | |
119 | } | |
120 | ||
121 | return rc; | |
122 | } | |
123 | ||
124 | static int | |
7c50328b | 125 | splat_kmem_test2(struct file *file, void *arg) |
f1ca4da6 | 126 | { |
7c50328b | 127 | void *ptr[SPLAT_KMEM_ALLOC_COUNT]; |
f1ca4da6 | 128 | int size = PAGE_SIZE; |
129 | int i, j, count, rc = 0; | |
130 | ||
10946b02 | 131 | while ((!rc) && (size <= spl_kmem_alloc_warn)) { |
f1ca4da6 | 132 | count = 0; |
133 | ||
7c50328b | 134 | for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) { |
10946b02 | 135 | ptr[i] = kmem_zalloc(size, KM_SLEEP); |
f1ca4da6 | 136 | if (ptr[i]) |
137 | count++; | |
138 | } | |
139 | ||
140 | /* Ensure buffer has been zero filled */ | |
7c50328b | 141 | for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) { |
f1ca4da6 | 142 | for (j = 0; j < size; j++) { |
143 | if (((char *)ptr[i])[j] != '\0') { | |
5198ea0e | 144 | splat_vprint(file,SPLAT_KMEM_TEST2_NAME, |
ea3e6ca9 BB |
145 | "%d-byte allocation was " |
146 | "not zeroed\n", size); | |
f1ca4da6 | 147 | rc = -EFAULT; |
148 | } | |
149 | } | |
150 | } | |
151 | ||
7c50328b | 152 | for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) |
f1ca4da6 | 153 | if (ptr[i]) |
154 | kmem_free(ptr[i], size); | |
155 | ||
7c50328b | 156 | splat_vprint(file, SPLAT_KMEM_TEST2_NAME, |
ea3e6ca9 BB |
157 | "%d byte allocations, %d/%d successful\n", |
158 | size, count, SPLAT_KMEM_ALLOC_COUNT); | |
7c50328b | 159 | if (count != SPLAT_KMEM_ALLOC_COUNT) |
f1ca4da6 | 160 | rc = -ENOMEM; |
161 | ||
162 | size *= 2; | |
163 | } | |
164 | ||
165 | return rc; | |
166 | } | |
167 | ||
2fb9b26a | 168 | static int |
169 | splat_kmem_test3(struct file *file, void *arg) | |
170 | { | |
171 | void *ptr[SPLAT_VMEM_ALLOC_COUNT]; | |
172 | int size = PAGE_SIZE; | |
173 | int i, count, rc = 0; | |
174 | ||
10946b02 AX |
175 | /* |
176 | * Test up to 4x the maximum kmem_alloc() size to ensure both | |
177 | * the kmem_alloc() and vmem_alloc() call paths are used. | |
178 | */ | |
179 | while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) { | |
2fb9b26a | 180 | count = 0; |
181 | ||
182 | for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) { | |
183 | ptr[i] = vmem_alloc(size, KM_SLEEP); | |
184 | if (ptr[i]) | |
185 | count++; | |
186 | } | |
187 | ||
188 | for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) | |
189 | if (ptr[i]) | |
190 | vmem_free(ptr[i], size); | |
191 | ||
192 | splat_vprint(file, SPLAT_KMEM_TEST3_NAME, | |
ea3e6ca9 BB |
193 | "%d byte allocations, %d/%d successful\n", |
194 | size, count, SPLAT_VMEM_ALLOC_COUNT); | |
2fb9b26a | 195 | if (count != SPLAT_VMEM_ALLOC_COUNT) |
196 | rc = -ENOMEM; | |
197 | ||
198 | size *= 2; | |
199 | } | |
200 | ||
201 | return rc; | |
202 | } | |
203 | ||
204 | static int | |
205 | splat_kmem_test4(struct file *file, void *arg) | |
206 | { | |
207 | void *ptr[SPLAT_VMEM_ALLOC_COUNT]; | |
208 | int size = PAGE_SIZE; | |
209 | int i, j, count, rc = 0; | |
210 | ||
10946b02 AX |
211 | /* |
212 | * Test up to 4x the maximum kmem_zalloc() size to ensure both | |
213 | * the kmem_zalloc() and vmem_zalloc() call paths are used. | |
214 | */ | |
215 | while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) { | |
2fb9b26a | 216 | count = 0; |
217 | ||
218 | for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) { | |
219 | ptr[i] = vmem_zalloc(size, KM_SLEEP); | |
220 | if (ptr[i]) | |
221 | count++; | |
222 | } | |
223 | ||
224 | /* Ensure buffer has been zero filled */ | |
225 | for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) { | |
226 | for (j = 0; j < size; j++) { | |
227 | if (((char *)ptr[i])[j] != '\0') { | |
228 | splat_vprint(file, SPLAT_KMEM_TEST4_NAME, | |
ea3e6ca9 BB |
229 | "%d-byte allocation was " |
230 | "not zeroed\n", size); | |
2fb9b26a | 231 | rc = -EFAULT; |
232 | } | |
233 | } | |
234 | } | |
235 | ||
236 | for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) | |
237 | if (ptr[i]) | |
238 | vmem_free(ptr[i], size); | |
239 | ||
240 | splat_vprint(file, SPLAT_KMEM_TEST4_NAME, | |
ea3e6ca9 BB |
241 | "%d byte allocations, %d/%d successful\n", |
242 | size, count, SPLAT_VMEM_ALLOC_COUNT); | |
2fb9b26a | 243 | if (count != SPLAT_VMEM_ALLOC_COUNT) |
244 | rc = -ENOMEM; | |
245 | ||
246 | size *= 2; | |
247 | } | |
248 | ||
249 | return rc; | |
250 | } | |
251 | ||
7c50328b | 252 | #define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL |
253 | #define SPLAT_KMEM_CACHE_NAME "kmem_test" | |
ea3e6ca9 | 254 | #define SPLAT_KMEM_OBJ_COUNT 1024 |
9e4fb5c2 | 255 | #define SPLAT_KMEM_OBJ_RECLAIM 32 /* objects */ |
ea3e6ca9 BB |
256 | #define SPLAT_KMEM_THREADS 32 |
257 | ||
258 | #define KCP_FLAG_READY 0x01 | |
f1ca4da6 | 259 | |
/*
 * Per-object payload allocated from the cache under test.  The magic
 * and flag fields are set by the constructor and cleared by the
 * destructor so the tests can verify both hooks ran; kcd_buf is a
 * variable-length tail sized by kcp_size at cache creation time.
 */
typedef struct kmem_cache_data {
	unsigned long kcd_magic;	/* Copy of kcp_magic for sanity */
	struct list_head kcd_node;	/* Linkage on a kct_list */
	int kcd_flag;			/* 1 after ctor, 0 after dtor */
	char kcd_buf[0];		/* Variable-length fill area */
} kmem_cache_data_t;

/*
 * Per-worker-thread state for the concurrent cache tests; one entry
 * is registered in kcp_kct[] for each running worker.
 */
typedef struct kmem_cache_thread {
	spinlock_t kct_lock;		/* Protects kct_list */
	int kct_id;			/* Index into kcp_kct[] */
	struct list_head kct_list;	/* Objects owned by this thread */
} kmem_cache_thread_t;

/*
 * Shared state for a single cache test run, allocated by
 * splat_kmem_cache_test_kcp_alloc() and passed as private data to the
 * cache constructor/destructor/reclaim callbacks.
 */
typedef struct kmem_cache_priv {
	unsigned long kcp_magic;	/* SPLAT_KMEM_TEST_MAGIC */
	struct file *kcp_file;		/* Handle for splat_vprint() */
	kmem_cache_t *kcp_cache;	/* Cache under test */
	spinlock_t kcp_lock;		/* Protects the fields below */
	wait_queue_head_t kcp_ctl_waitq;	/* Control thread sleeps here */
	wait_queue_head_t kcp_thr_waitq;	/* Worker threads sleep here */
	int kcp_flags;			/* KCP_FLAG_READY */
	int kcp_kct_count;		/* Active workers (-1 = none yet) */
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];	/* Worker slots */
	int kcp_size;			/* Object size for the cache */
	int kcp_align;			/* Object alignment for the cache */
	int kcp_count;			/* Constructed-but-not-destroyed objs */
	int kcp_alloc;			/* Allocations per worker thread */
	int kcp_rc;			/* First error reported by a worker */
} kmem_cache_priv_t;
289 | ||
ea3e6ca9 BB |
290 | static kmem_cache_priv_t * |
291 | splat_kmem_cache_test_kcp_alloc(struct file *file, char *name, | |
efcd0ca3 | 292 | int size, int align, int alloc) |
ea3e6ca9 BB |
293 | { |
294 | kmem_cache_priv_t *kcp; | |
295 | ||
efcd0ca3 | 296 | kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP); |
ea3e6ca9 BB |
297 | if (!kcp) |
298 | return NULL; | |
299 | ||
300 | kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC; | |
301 | kcp->kcp_file = file; | |
302 | kcp->kcp_cache = NULL; | |
303 | spin_lock_init(&kcp->kcp_lock); | |
304 | init_waitqueue_head(&kcp->kcp_ctl_waitq); | |
305 | init_waitqueue_head(&kcp->kcp_thr_waitq); | |
306 | kcp->kcp_flags = 0; | |
307 | kcp->kcp_kct_count = -1; | |
308 | kcp->kcp_size = size; | |
309 | kcp->kcp_align = align; | |
310 | kcp->kcp_count = 0; | |
311 | kcp->kcp_alloc = alloc; | |
312 | kcp->kcp_rc = 0; | |
ea3e6ca9 BB |
313 | |
314 | return kcp; | |
315 | } | |
316 | ||
/*
 * Release the shared test state allocated by
 * splat_kmem_cache_test_kcp_alloc().
 */
static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	kmem_free(kcp, sizeof(kmem_cache_priv_t));
}
322 | ||
/*
 * Allocate per-thread state for worker 'id' and publish it in
 * kcp->kcp_kct[] under kcp_lock so other code (e.g. the reclaim
 * callback) can find it.  Returns NULL on allocation failure.
 */
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
	kmem_cache_thread_t *kct;

	/* The slot for this id must exist and be empty */
	ASSERT3S(id, <, SPLAT_KMEM_THREADS);
	ASSERT(kcp->kcp_kct[id] == NULL);

	kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_id = id;
	INIT_LIST_HEAD(&kct->kct_list);

	/* Publish only after the structure is fully initialized */
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	return kct;
}
345 | ||
/*
 * Unregister the worker state from kcp->kcp_kct[] and free it.  The
 * caller is expected to have already drained kct_list.
 */
static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[kct->kct_id] = NULL;
	spin_unlock(&kcp->kcp_lock);

	kmem_free(kct, sizeof(kmem_cache_thread_t));
}
356 | ||
/*
 * Return every object on the worker's private list to the cache.
 * kct_lock is dropped around each kmem_cache_free() call; NOTE(review):
 * presumably because the free path may block or re-enter test code
 * that takes the same lock — confirm against kmem_cache_free().
 */
static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
			       kmem_cache_thread_t *kct)
{
	kmem_cache_data_t *kcd;

	spin_lock(&kct->kct_lock);
	while (!list_empty(&kct->kct_list)) {
		/* Detach the head object under the lock... */
		kcd = list_entry(kct->kct_list.next,
				 kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		spin_unlock(&kct->kct_lock);

		/* ...and free it with the lock dropped */
		kmem_cache_free(kcp->kcp_cache, kcd);

		spin_lock(&kct->kct_lock);
	}
	spin_unlock(&kct->kct_lock);
}
376 | ||
377 | static int | |
378 | splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp, | |
379 | kmem_cache_thread_t *kct, int count) | |
ea3e6ca9 | 380 | { |
efcd0ca3 BB |
381 | kmem_cache_data_t *kcd; |
382 | int i; | |
383 | ||
384 | for (i = 0; i < count; i++) { | |
385 | kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP); | |
386 | if (kcd == NULL) { | |
387 | splat_kmem_cache_test_kcd_free(kcp, kct); | |
388 | return -ENOMEM; | |
389 | } | |
390 | ||
391 | spin_lock(&kct->kct_lock); | |
392 | list_add_tail(&kcd->kcd_node, &kct->kct_list); | |
393 | spin_unlock(&kct->kct_lock); | |
394 | } | |
395 | ||
396 | return 0; | |
ea3e6ca9 BB |
397 | } |
398 | ||
/*
 * Emit a one-line summary of the current cache state: outstanding
 * object count, slab/object allocated-vs-total counters for SPL
 * managed (KMC_KMEM/KMC_VMEM) caches, and per-cpu magazine fill
 * levels when magazines are enabled.
 */
static void
splat_kmem_cache_test_debug(struct file *file, char *name,
			    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name, "%s cache objects %d",
		     kcp->kcp_cache->skc_name, kcp->kcp_count);

	/* Slab accounting only applies to SPL managed caches */
	if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
			     (unsigned)kcp->kcp_cache->skc_slab_alloc,
			     (unsigned)kcp->kcp_cache->skc_slab_total,
			     (unsigned)kcp->kcp_cache->skc_obj_alloc,
			     (unsigned)kcp->kcp_cache->skc_obj_total);

		if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
			splat_vprint(file, name, "%s", "mags");

			/* avail/size of each online cpu's magazine */
			for_each_online_cpu(j)
				splat_print(file, "%u/%u ",
					    kcp->kcp_cache->skc_mag[j]->skm_avail,
					    kcp->kcp_cache->skc_mag[j]->skm_size);
		}
	}

	splat_print(file, "%s\n", "");
}
427 | ||
f1ca4da6 | 428 | static int |
2fb9b26a | 429 | splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags) |
f1ca4da6 | 430 | { |
f1ca4da6 | 431 | kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv; |
2fb9b26a | 432 | kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr; |
f1ca4da6 | 433 | |
0498e6c5 | 434 | if (kcd && kcp) { |
435 | kcd->kcd_magic = kcp->kcp_magic; | |
efcd0ca3 | 436 | INIT_LIST_HEAD(&kcd->kcd_node); |
2fb9b26a | 437 | kcd->kcd_flag = 1; |
0498e6c5 | 438 | memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd)); |
439 | kcp->kcp_count++; | |
f1ca4da6 | 440 | } |
441 | ||
442 | return 0; | |
443 | } | |
444 | ||
445 | static void | |
2fb9b26a | 446 | splat_kmem_cache_test_destructor(void *ptr, void *priv) |
f1ca4da6 | 447 | { |
f1ca4da6 | 448 | kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv; |
2fb9b26a | 449 | kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr; |
f1ca4da6 | 450 | |
0498e6c5 | 451 | if (kcd && kcp) { |
452 | kcd->kcd_magic = 0; | |
2fb9b26a | 453 | kcd->kcd_flag = 0; |
0498e6c5 | 454 | memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd)); |
455 | kcp->kcp_count--; | |
f1ca4da6 | 456 | } |
457 | ||
458 | return; | |
459 | } | |
460 | ||
/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	kmem_cache_data_t *kcd;
	LIST_HEAD(reclaim);
	int i, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* For each kct thread reclaim some objects */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		kct = kcp->kcp_kct[i];
		if (!kct)
			continue;

		/* kcp_lock is dropped while working on this thread's
		 * list; NOTE(review): kct could in principle be freed
		 * during this window — confirm callers prevent that. */
		spin_unlock(&kcp->kcp_lock);
		spin_lock(&kct->kct_lock);

		/* Move up to SPLAT_KMEM_OBJ_RECLAIM objects onto a
		 * private list so they can be freed without the lock */
		count = SPLAT_KMEM_OBJ_RECLAIM;
		while (count > 0 && !list_empty(&kct->kct_list)) {
			kcd = list_entry(kct->kct_list.next,
					 kmem_cache_data_t, kcd_node);
			list_del(&kcd->kcd_node);
			list_add(&kcd->kcd_node, &reclaim);
			count--;
		}

		spin_unlock(&kct->kct_lock);
		spin_lock(&kcp->kcp_lock);
	}
	spin_unlock(&kcp->kcp_lock);

	/* Freed outside the spin lock */
	while (!list_empty(&reclaim)) {
		kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		kmem_cache_free(kcp->kcp_cache, kcd);
	}

	return;
}
510 | ||
511 | static int | |
512 | splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads) | |
513 | { | |
514 | int rc; | |
515 | ||
516 | spin_lock(&kcp->kcp_lock); | |
517 | rc = (kcp->kcp_kct_count == threads); | |
518 | spin_unlock(&kcp->kcp_lock); | |
519 | ||
520 | return rc; | |
521 | } | |
522 | ||
523 | static int | |
524 | splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags) | |
525 | { | |
526 | int rc; | |
527 | ||
528 | spin_lock(&kcp->kcp_lock); | |
529 | rc = (kcp->kcp_flags & flags); | |
530 | spin_unlock(&kcp->kcp_lock); | |
531 | ||
532 | return rc; | |
533 | } | |
534 | ||
/*
 * Body of each worker thread: claim an id, register per-thread state,
 * wait for the control thread to release all workers, then allocate
 * and free kcp_alloc objects.  The last thread to start wakes the
 * control thread, and the last thread to finish wakes it again.
 */
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(kcp, id);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	/* Wait for all threads to have started and report they are ready */
	/* NOTE(review): kcp_kct_count is read here without kcp_lock —
	 * presumably benign for this test, confirm. */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
		   splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/* Create and destroy objects */
	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
	splat_kmem_cache_test_kcd_free(kcp, kct);
out:
	if (kct)
		splat_kmem_cache_test_kct_free(kcp, kct);

	/* Record the first error and wake the control thread once the
	 * last worker has completed */
	spin_lock(&kcp->kcp_lock);
	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}
584 | ||
/*
 * Create a cache with the given size/align/flags, allocate several
 * slabs worth of objects from it, verify the constructor and private
 * data plumbing, then free everything and confirm the destructor ran
 * for every object.  Returns 0 on success or a negative errno.
 */
static int
splat_kmem_cache_test(struct file *file, void *arg, char *name,
		      int size, int align, int flags)
{
	kmem_cache_priv_t *kcp = NULL;
	kmem_cache_data_t **kcd = NULL;
	int i, rc = 0, objs = 0;

	splat_vprint(file, name,
	    "Testing size=%d, align=%d, flags=0x%04x\n",
	    size, align, flags);

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return (-ENOMEM);
	}

	/* kcp is passed as private data so the ctor/dtor can find it */
	kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	    kcp->kcp_size, kcp->kcp_align,
	    splat_kmem_cache_test_constructor,
	    splat_kmem_cache_test_destructor,
	    NULL, kcp, NULL, flags);
	if (kcp->kcp_cache == NULL) {
		splat_vprint(file, name, "Unable to create "
		    "name='%s', size=%d, align=%d, flags=0x%x\n",
		    SPLAT_KMEM_CACHE_NAME, size, align, flags);
		rc = -ENOMEM;
		goto out_free;
	}

	/*
	 * Allocate several slabs worth of objects to verify functionality.
	 * However, on 32-bit systems with limited address space constrain
	 * it to a single slab for the purposes of this test.
	 */
#ifdef _LP64
	objs = SPL_KMEM_CACHE_OBJ_PER_SLAB * 4;
#else
	objs = 1;
#endif
	kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
	if (kcd == NULL) {
		splat_vprint(file, name, "Unable to allocate pointers "
		    "for %d objects\n", objs);
		rc = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < objs; i++) {
		kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd[i] == NULL) {
			splat_vprint(file, name, "Unable to allocate "
			    "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		/* The constructor must have marked the object */
		if (!kcd[i]->kcd_flag) {
			splat_vprint(file, name, "Failed to run constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}

		/* The constructor must have received kcp as private data */
		if (kcd[i]->kcd_magic != kcp->kcp_magic) {
			splat_vprint(file, name,
			    "Failed to pass private data to constructor "
			    "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	for (i = 0; i < objs; i++) {
		kmem_cache_free(kcp->kcp_cache, kcd[i]);

		/* Destructors are run for every kmem_cache_free() */
		/* NOTE(review): kcd[i] is read after being returned to
		 * the cache; presumably safe because the object is not
		 * recycled here — confirm.  Also note that taking the
		 * out_free path below would free kcd[i] a second time. */
		if (kcd[i]->kcd_flag) {
			splat_vprint(file, name,
			    "Failed to run destructor for '%s'\n",
			    SPLAT_KMEM_CACHE_NAME);
			rc = -EINVAL;
			goto out_free;
		}
	}

	if (kcp->kcp_count) {
		splat_vprint(file, name,
		    "Failed to run destructor on all slab objects for '%s'\n",
		    SPLAT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);
	splat_vprint(file, name,
	    "Success ran alloc'd/free'd %d objects of size %d\n",
	    objs, size);

	return (rc);

out_free:
	/* Error path: release objects, the pointer array, the cache,
	 * and the shared test state in that order */
	if (kcd) {
		for (i = 0; i < objs; i++) {
			if (kcd[i] != NULL)
				kmem_cache_free(kcp->kcp_cache, kcd[i]);
		}

		kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
	}

	if (kcp->kcp_cache)
		kmem_cache_destroy(kcp->kcp_cache);

	splat_kmem_cache_test_kcp_free(kcp);

	return (rc);
}
706 | ||
/*
 * Create a cache with a reclaim callback and exercise it with
 * SPLAT_KMEM_THREADS concurrent worker threads, each allocating and
 * freeing 'alloc' objects of 'size' bytes.  Fails with -ETIME if the
 * run takes max_time seconds or longer, otherwise returns the first
 * error reported by a worker (or 0).
 */
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
			     int size, int alloc, int max_time)
{
	kmem_cache_priv_t *kcp;
	kthread_t *thr;
	struct timespec start, stop, delta;
	char cache_name[32];
	int i, rc = 0;

	kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
	if (!kcp) {
		splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
		return -ENOMEM;
	}

	(void)snprintf(cache_name, 32, "%s-%d-%d",
		       SPLAT_KMEM_CACHE_NAME, size, alloc);
	kcp->kcp_cache =
		kmem_cache_create(cache_name, kcp->kcp_size, 0,
				  splat_kmem_cache_test_constructor,
				  splat_kmem_cache_test_destructor,
				  splat_kmem_cache_test_reclaim,
				  kcp, NULL, 0);
	if (!kcp->kcp_cache) {
		splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
		rc = -ENOMEM;
		goto out_kcp;
	}

	getnstimeofday(&start);

	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		thr = thread_create(NULL, 0,
				    splat_kmem_cache_test_thread,
				    kcp, 0, &p0, TS_RUN, defclsyspri);
		/* NOTE(review): workers already created are not waited
		 * for on this failure path — confirm acceptable. */
		if (thr == NULL) {
			rc = -ESRCH;
			goto out_cache;
		}
	}

	/* Sleep until all threads have started, then set the ready
	 * flag and wake them all up for maximum concurrency. */
	wait_event(kcp->kcp_ctl_waitq,
		   splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_flags |= KCP_FLAG_READY;
	spin_unlock(&kcp->kcp_lock);
	wake_up_all(&kcp->kcp_thr_waitq);

	/* Sleep until all thread have finished */
	wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

	getnstimeofday(&stop);
	delta = timespec_sub(stop, start);

	/* Report elapsed time plus slab/object totals and maxima next
	 * to the theoretical worst case demand */
	splat_vprint(file, name,
		     "%-22s %2ld.%09ld\t"
		     "%lu/%lu/%lu\t%lu/%lu/%lu\n",
		     kcp->kcp_cache->skc_name,
		     delta.tv_sec, delta.tv_nsec,
		     (unsigned long)kcp->kcp_cache->skc_slab_total,
		     (unsigned long)kcp->kcp_cache->skc_slab_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS /
				     SPL_KMEM_CACHE_OBJ_PER_SLAB),
		     (unsigned long)kcp->kcp_cache->skc_obj_total,
		     (unsigned long)kcp->kcp_cache->skc_obj_max,
		     (unsigned long)(kcp->kcp_alloc *
				     SPLAT_KMEM_THREADS));

	if (delta.tv_sec >= max_time)
		rc = -ETIME;

	if (!rc && kcp->kcp_rc)
		rc = kcp->kcp_rc;

out_cache:
	kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
	splat_kmem_cache_test_kcp_free(kcp);
	return rc;
}
792 | ||
a1502d76 | 793 | /* Validate small object cache behavior for dynamic/kmem/vmem caches */ |
2fb9b26a | 794 | static int |
795 | splat_kmem_test5(struct file *file, void *arg) | |
796 | { | |
a1502d76 | 797 | char *name = SPLAT_KMEM_TEST5_NAME; |
10946b02 | 798 | int i, rc = 0; |
a1502d76 | 799 | |
10946b02 AX |
800 | /* Randomly pick small object sizes and alignments. */ |
801 | for (i = 0; i < 100; i++) { | |
802 | int size, align, flags = 0; | |
803 | uint32_t rnd; | |
804 | ||
805 | /* Evenly distribute tests over all value cache types */ | |
806 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
807 | switch (rnd & 0x03) { | |
808 | default: | |
809 | case 0x00: | |
810 | flags = 0; | |
811 | break; | |
812 | case 0x01: | |
813 | flags = KMC_KMEM; | |
814 | break; | |
815 | case 0x02: | |
816 | flags = KMC_VMEM; | |
817 | break; | |
818 | case 0x03: | |
819 | flags = KMC_SLAB; | |
820 | break; | |
821 | } | |
a1502d76 | 822 | |
10946b02 AX |
823 | /* The following flags are set with a 1/10 chance */ |
824 | flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0); | |
825 | flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0); | |
80093b6f | 826 | |
10946b02 AX |
827 | /* 32b - PAGE_SIZE */ |
828 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
829 | size = MAX(rnd % (PAGE_SIZE + 1), 32); | |
80093b6f | 830 | |
10946b02 AX |
831 | /* 2^N where (3 <= N <= PAGE_SHIFT) */ |
832 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
833 | align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1))); | |
80093b6f | 834 | |
10946b02 AX |
835 | rc = splat_kmem_cache_test(file, arg, name, size, align, flags); |
836 | if (rc) | |
837 | return (rc); | |
838 | } | |
80093b6f | 839 | |
10946b02 | 840 | return (rc); |
2fb9b26a | 841 | } |
842 | ||
efcd0ca3 BB |
843 | /* |
844 | * Validate large object cache behavior for dynamic/kmem/vmem caches | |
845 | */ | |
2fb9b26a | 846 | static int |
847 | splat_kmem_test6(struct file *file, void *arg) | |
848 | { | |
a1502d76 | 849 | char *name = SPLAT_KMEM_TEST6_NAME; |
10946b02 AX |
850 | int i, max_size, rc = 0; |
851 | ||
852 | /* Randomly pick large object sizes and alignments. */ | |
853 | for (i = 0; i < 100; i++) { | |
854 | int size, align, flags = 0; | |
855 | uint32_t rnd; | |
856 | ||
857 | /* Evenly distribute tests over all value cache types */ | |
858 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
859 | switch (rnd & 0x03) { | |
860 | default: | |
861 | case 0x00: | |
862 | flags = 0; | |
863 | max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; | |
864 | break; | |
865 | case 0x01: | |
866 | flags = KMC_KMEM; | |
867 | max_size = (SPL_MAX_ORDER_NR_PAGES - 2) * PAGE_SIZE; | |
868 | break; | |
869 | case 0x02: | |
870 | flags = KMC_VMEM; | |
871 | max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; | |
872 | break; | |
873 | case 0x03: | |
874 | flags = KMC_SLAB; | |
875 | max_size = SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE; | |
876 | break; | |
877 | } | |
a1502d76 | 878 | |
10946b02 AX |
879 | /* The following flags are set with a 1/10 chance */ |
880 | flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0); | |
881 | flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0); | |
80093b6f | 882 | |
10946b02 AX |
883 | /* PAGE_SIZE - max_size */ |
884 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
885 | size = MAX(rnd % (max_size + 1), PAGE_SIZE), | |
80093b6f | 886 | |
10946b02 AX |
887 | /* 2^N where (3 <= N <= PAGE_SHIFT) */ |
888 | get_random_bytes((void *)&rnd, sizeof (uint32_t)); | |
889 | align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1))); | |
80093b6f | 890 | |
10946b02 AX |
891 | rc = splat_kmem_cache_test(file, arg, name, size, align, flags); |
892 | if (rc) | |
893 | return (rc); | |
894 | } | |
80093b6f | 895 | |
10946b02 | 896 | return (rc); |
2fb9b26a | 897 | } |
898 | ||
efcd0ca3 BB |
899 | /* |
900 | * Validate object alignment cache behavior for caches | |
901 | */ | |
ea3e6ca9 BB |
902 | static int |
903 | splat_kmem_test7(struct file *file, void *arg) | |
f1ca4da6 | 904 | { |
ea3e6ca9 | 905 | char *name = SPLAT_KMEM_TEST7_NAME; |
10946b02 | 906 | int max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2; |
ea3e6ca9 | 907 | int i, rc; |
2fb9b26a | 908 | |
8b45dda2 | 909 | for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) { |
10946b02 AX |
910 | uint32_t size; |
911 | ||
912 | get_random_bytes((void *)&size, sizeof (uint32_t)); | |
913 | size = MAX(size % (max_size + 1), 32); | |
914 | ||
915 | rc = splat_kmem_cache_test(file, arg, name, size, i, 0); | |
ea3e6ca9 BB |
916 | if (rc) |
917 | return rc; | |
80093b6f | 918 | |
10946b02 | 919 | rc = splat_kmem_cache_test(file, arg, name, size, i, |
80093b6f AX |
920 | KMC_OFFSLAB); |
921 | if (rc) | |
922 | return rc; | |
f1ca4da6 | 923 | } |
924 | ||
ea3e6ca9 | 925 | return rc; |
f1ca4da6 | 926 | } |
927 | ||
/*
 * Validate kmem_cache_reap() by requesting the slab cache free any objects
 * it can.  For a few reasons this may not immediately result in more free
 * memory even if objects are freed.  First off, due to fragmentation we
 * may not be able to reclaim any slabs.  Secondly, even if we do we fully
 * clear some slabs we will not want to immediately reclaim all of them
 * because we may contend with cache allocations and thrash.  What we want
 * to see is the slab size decrease more gradually as it becomes clear they
 * will not be needed.  This should be achievable in less than a minute.
 * If it takes longer than this something has gone wrong.
 */
static int
splat_kmem_test8(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        unsigned int spl_kmem_cache_expire_old;
        int i, rc = 0;

        /* Enable cache aging just for this test if it is disabled;
         * the original module setting is restored at 'out' below. */
        spl_kmem_cache_expire_old = spl_kmem_cache_expire;
        spl_kmem_cache_expire = KMC_EXPIRE_AGE;

        /* Private test state: 256-byte objects, no alignment/flags. */
        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
            256, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                    "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        /* Cache registered with a reclaim callback so reaping frees objects. */
        kcp->kcp_cache =
            kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
            splat_kmem_cache_test_constructor,
            splat_kmem_cache_test_destructor,
            splat_kmem_cache_test_reclaim,
            kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                    "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        /* Populate the cache so there is something to reclaim. */
        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
                    "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        /* Force reclaim every 1/10 a second for 60 seconds. */
        for (i = 0; i < 600; i++) {
                kmem_cache_reap_now(kcp->kcp_cache);
                splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);

                /* All objects reclaimed — success, stop early. */
                if (kcp->kcp_count == 0)
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ / 10);
        }

        if (kcp->kcp_count == 0) {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                    "Successfully created %d objects "
                    "in cache %s and reclaimed them\n",
                    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
        } else {
                splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
                    "Failed to reclaim %u/%d objects from cache %s\n",
                    (unsigned)kcp->kcp_count,
                    SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
        }

        /* Cleanup our mess (for failure case of time expiring) */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        /* Restore the module-wide expiration policy saved above. */
        spl_kmem_cache_expire = spl_kmem_cache_expire_old;

        return rc;
}
1026 | ||
efcd0ca3 BB |
1027 | /* Test cache aging, we have allocated a large number of objects thus |
1028 | * creating a large number of slabs and then free'd them all. However, | |
1029 | * since there should be little memory pressure at the moment those | |
1030 | * slabs have not been freed. What we want to see is the slab size | |
1031 | * decrease gradually as it becomes clear they will not be be needed. | |
1032 | * This should be achievable in less than minute. If it takes longer | |
1033 | * than this something has gone wrong. | |
1034 | */ | |
ea3e6ca9 BB |
1035 | static int |
1036 | splat_kmem_test9(struct file *file, void *arg) | |
44b8f176 | 1037 | { |
ea3e6ca9 | 1038 | kmem_cache_priv_t *kcp; |
efcd0ca3 | 1039 | kmem_cache_thread_t *kct; |
0936c344 | 1040 | unsigned int spl_kmem_cache_expire_old; |
a9a7a01c | 1041 | int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128; |
ea3e6ca9 | 1042 | |
0936c344 BB |
1043 | /* Enable cache aging just for this test if it is disabled */ |
1044 | spl_kmem_cache_expire_old = spl_kmem_cache_expire; | |
1045 | spl_kmem_cache_expire = KMC_EXPIRE_AGE; | |
1046 | ||
ea3e6ca9 | 1047 | kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME, |
efcd0ca3 | 1048 | 256, 0, 0); |
ea3e6ca9 BB |
1049 | if (!kcp) { |
1050 | splat_vprint(file, SPLAT_KMEM_TEST9_NAME, | |
1051 | "Unable to create '%s'\n", "kcp"); | |
efcd0ca3 BB |
1052 | rc = -ENOMEM; |
1053 | goto out; | |
ea3e6ca9 | 1054 | } |
44b8f176 | 1055 | |
ea3e6ca9 BB |
1056 | kcp->kcp_cache = |
1057 | kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0, | |
1058 | splat_kmem_cache_test_constructor, | |
1059 | splat_kmem_cache_test_destructor, | |
1060 | NULL, kcp, NULL, 0); | |
1061 | if (!kcp->kcp_cache) { | |
ea3e6ca9 BB |
1062 | splat_vprint(file, SPLAT_KMEM_TEST9_NAME, |
1063 | "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME); | |
efcd0ca3 BB |
1064 | rc = -ENOMEM; |
1065 | goto out_kcp; | |
44b8f176 | 1066 | } |
1067 | ||
efcd0ca3 BB |
1068 | kct = splat_kmem_cache_test_kct_alloc(kcp, 0); |
1069 | if (!kct) { | |
1070 | splat_vprint(file, SPLAT_KMEM_TEST8_NAME, | |
1071 | "Unable to create '%s'\n", "kct"); | |
1072 | rc = -ENOMEM; | |
1073 | goto out_cache; | |
44b8f176 | 1074 | } |
1075 | ||
efcd0ca3 BB |
1076 | rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count); |
1077 | if (rc) { | |
1078 | splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to " | |
1079 | "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME); | |
1080 | goto out_kct; | |
1081 | } | |
1082 | ||
1083 | splat_kmem_cache_test_kcd_free(kcp, kct); | |
e9d7a2be | 1084 | |
ea3e6ca9 | 1085 | for (i = 0; i < 60; i++) { |
a9a7a01c | 1086 | splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp); |
ea3e6ca9 | 1087 | |
9e4fb5c2 | 1088 | if (kcp->kcp_count == 0) |
ea3e6ca9 | 1089 | break; |
44b8f176 | 1090 | |
ea3e6ca9 BB |
1091 | set_current_state(TASK_INTERRUPTIBLE); |
1092 | schedule_timeout(HZ); | |
1093 | } | |
44b8f176 | 1094 | |
9e4fb5c2 | 1095 | if (kcp->kcp_count == 0) { |
ea3e6ca9 BB |
1096 | splat_vprint(file, SPLAT_KMEM_TEST9_NAME, |
1097 | "Successfully created %d objects " | |
1098 | "in cache %s and reclaimed them\n", | |
1099 | count, SPLAT_KMEM_CACHE_NAME); | |
1100 | } else { | |
1101 | splat_vprint(file, SPLAT_KMEM_TEST9_NAME, | |
1102 | "Failed to reclaim %u/%d objects from cache %s\n", | |
9e4fb5c2 | 1103 | (unsigned)kcp->kcp_count, count, |
ea3e6ca9 BB |
1104 | SPLAT_KMEM_CACHE_NAME); |
1105 | rc = -ENOMEM; | |
1106 | } | |
1107 | ||
efcd0ca3 BB |
1108 | out_kct: |
1109 | splat_kmem_cache_test_kct_free(kcp, kct); | |
1110 | out_cache: | |
ea3e6ca9 | 1111 | kmem_cache_destroy(kcp->kcp_cache); |
efcd0ca3 | 1112 | out_kcp: |
ea3e6ca9 | 1113 | splat_kmem_cache_test_kcp_free(kcp); |
efcd0ca3 | 1114 | out: |
0936c344 BB |
1115 | spl_kmem_cache_expire = spl_kmem_cache_expire_old; |
1116 | ||
ea3e6ca9 | 1117 | return rc; |
44b8f176 | 1118 | } |
1119 | ||
ea3e6ca9 BB |
1120 | /* |
1121 | * This test creates N threads with a shared kmem cache. They then all | |
1122 | * concurrently allocate and free from the cache to stress the locking and | |
1123 | * concurrent cache performance. If any one test takes longer than 5 | |
1124 | * seconds to complete it is treated as a failure and may indicate a | |
1125 | * performance regression. On my test system no one test takes more | |
1126 | * than 1 second to complete so a 5x slowdown likely a problem. | |
44b8f176 | 1127 | */ |
1128 | static int | |
ea3e6ca9 | 1129 | splat_kmem_test10(struct file *file, void *arg) |
44b8f176 | 1130 | { |
e11d6c5f | 1131 | uint64_t size, alloc, rc = 0; |
44b8f176 | 1132 | |
efcd0ca3 | 1133 | for (size = 32; size <= 1024*1024; size *= 2) { |
44b8f176 | 1134 | |
ea3e6ca9 BB |
1135 | splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name", |
1136 | "time (sec)\tslabs \tobjs \thash\n"); | |
1137 | splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "", | |
1138 | " \ttot/max/calc\ttot/max/calc\n"); | |
44b8f176 | 1139 | |
ea3e6ca9 | 1140 | for (alloc = 1; alloc <= 1024; alloc *= 2) { |
44b8f176 | 1141 | |
10946b02 AX |
1142 | /* Skip tests which exceed 1/2 of physical memory. */ |
1143 | if (size * alloc * SPLAT_KMEM_THREADS > physmem / 2) | |
ea3e6ca9 | 1144 | continue; |
7ea1cbf5 | 1145 | |
ea3e6ca9 | 1146 | rc = splat_kmem_cache_thread_test(file, arg, |
10a4be0f | 1147 | SPLAT_KMEM_TEST10_NAME, size, alloc, 5); |
ea3e6ca9 BB |
1148 | if (rc) |
1149 | break; | |
1150 | } | |
44b8f176 | 1151 | } |
1152 | ||
7ea1cbf5 | 1153 | return rc; |
44b8f176 | 1154 | } |
1155 | ||
11124863 | 1156 | #if 0 |
ea3e6ca9 BB |
1157 | /* |
1158 | * This test creates N threads with a shared kmem cache which overcommits | |
1159 | * memory by 4x. This makes it impossible for the slab to satify the | |
1160 | * thread requirements without having its reclaim hook run which will | |
1161 | * free objects back for use. This behavior is triggered by the linum VM | |
1162 | * detecting a low memory condition on the node and invoking the shrinkers. | |
1163 | * This should allow all the threads to complete while avoiding deadlock | |
1164 | * and for the most part out of memory events. This is very tough on the | |
4e5691fa BB |
1165 | * system so it is possible the test app may get oom'ed. This particular |
1166 | * test has proven troublesome on 32-bit archs with limited virtual | |
1167 | * address space so it only run on 64-bit systems. | |
ea3e6ca9 | 1168 | */ |
fece7c99 | 1169 | static int |
ea3e6ca9 | 1170 | splat_kmem_test11(struct file *file, void *arg) |
fece7c99 | 1171 | { |
ea3e6ca9 | 1172 | uint64_t size, alloc, rc; |
fece7c99 | 1173 | |
efcd0ca3 | 1174 | size = 8 * 1024; |
e11d6c5f | 1175 | alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS; |
fece7c99 | 1176 | |
e11d6c5f | 1177 | splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name", |
ea3e6ca9 | 1178 | "time (sec)\tslabs \tobjs \thash\n"); |
e11d6c5f | 1179 | splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "", |
ea3e6ca9 | 1180 | " \ttot/max/calc\ttot/max/calc\n"); |
48e0606a | 1181 | |
ea3e6ca9 | 1182 | rc = splat_kmem_cache_thread_test(file, arg, |
10a4be0f | 1183 | SPLAT_KMEM_TEST11_NAME, size, alloc, 60); |
48e0606a BB |
1184 | |
1185 | return rc; | |
1186 | } | |
11124863 | 1187 | #endif |
48e0606a | 1188 | |
/*
 * Exactly one page of memory-pressure ballast: a list linkage followed by
 * padding that fills the remainder of a PAGE_SIZE allocation (each one is
 * obtained via __get_free_page() in splat_kmem_test13 below).
 */
typedef struct dummy_page {
        struct list_head dp_list;       /* linkage on the local pressure list */
        char dp_pad[PAGE_SIZE - sizeof(struct list_head)];  /* fill to a page */
} dummy_page_t;
1192 | } dummy_page_t; | |
1193 | ||
/*
 * This test is designed to verify that direct reclaim is functioning as
 * expected.  We allocate a large number of objects thus creating a large
 * number of slabs.  We then apply memory pressure and expect that the
 * direct reclaim path can easily recover those slabs.  The registered
 * reclaim function will free the objects and the slab shrinker will call
 * it repeatedly until at least a single slab can be freed.
 *
 * Note it may not be possible to reclaim every last slab via direct reclaim
 * without a failure because the shrinker_rwsem may be contended.  For this
 * reason, quickly reclaiming 3/4 of the slabs is considered a success.
 *
 * This should all be possible within 10 seconds.  For reference, on a
 * system with 2G of memory this test takes roughly 0.2 seconds to run.
 * It may take longer on larger memory systems but should still easily
 * complete in the allotted 10 seconds.
 */
static int
splat_kmem_test13(struct file *file, void *arg)
{
        kmem_cache_priv_t *kcp;
        kmem_cache_thread_t *kct;
        dummy_page_t *dp;
        struct list_head list;          /* locally held pressure pages */
        struct timespec start, stop, delta = { 0, 0 };
        int size, count, slabs, fails = 0;
        int i, rc = 0, max_time = 10;   /* wall-clock budget in seconds */

        /* 128K objects; enough of them to consume 1/4 of physical memory. */
        size = 128 * 1024;
        count = ((physmem * PAGE_SIZE) / 4 / size);

        kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
            size, 0, 0);
        if (!kcp) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                    "Unable to create '%s'\n", "kcp");
                rc = -ENOMEM;
                goto out;
        }

        /* Reclaim callback registered so the shrinker can free objects. */
        kcp->kcp_cache =
            kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
            splat_kmem_cache_test_constructor,
            splat_kmem_cache_test_destructor,
            splat_kmem_cache_test_reclaim,
            kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                    "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
                rc = -ENOMEM;
                goto out_kcp;
        }

        kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
        if (!kct) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                    "Unable to create '%s'\n", "kct");
                rc = -ENOMEM;
                goto out_cache;
        }

        /* Fill the cache, creating many slabs for reclaim to recover. */
        rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
        if (rc) {
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
                    "allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                goto out_kct;
        }

        i = 0;
        slabs = kcp->kcp_cache->skc_slab_total;
        INIT_LIST_HEAD(&list);
        getnstimeofday(&start);

        /* Apply memory pressure until 3/4 of the slabs have been reclaimed
         * (i.e. at most slabs/4 remain) or the time budget expires. */
        while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {

                /* Periodic progress output only, every 10000 pages. */
                if ((i % 10000) == 0)
                        splat_kmem_cache_test_debug(
                            file, SPLAT_KMEM_TEST13_NAME, kcp);

                getnstimeofday(&stop);
                delta = timespec_sub(stop, start);
                if (delta.tv_sec >= max_time) {
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                            "Failed to reclaim 3/4 of cache in %ds, "
                            "%u/%u slabs remain\n", max_time,
                            (unsigned)kcp->kcp_cache->skc_slab_total,
                            slabs);
                        rc = -ETIME;
                        break;
                }

                /* One more page of pressure; a failed allocation is itself
                 * pressure, so count it and keep going. */
                dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
                if (!dp) {
                        fails++;
                        splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                            "Failed (%d) to allocate page with %u "
                            "slabs still in the cache\n", fails,
                            (unsigned)kcp->kcp_cache->skc_slab_total);
                        continue;
                }

                list_add(&dp->dp_list, &list);
                i++;
        }

        if (rc == 0)
                splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
                    "Successfully created %u slabs and with %d alloc "
                    "failures reclaimed 3/4 of them in %d.%03ds\n",
                    slabs, fails,
                    (int)delta.tv_sec, (int)delta.tv_nsec / 1000000);

        /* Release memory pressure pages */
        while (!list_empty(&list)) {
                dp = list_entry(list.next, dummy_page_t, dp_list);
                list_del_init(&dp->dp_list);
                free_page((unsigned long)dp);
        }

        /* Release remaining kmem cache objects */
        splat_kmem_cache_test_kcd_free(kcp, kct);
out_kct:
        splat_kmem_cache_test_kct_free(kcp, kct);
out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);
out:
        return rc;
}
1325 | ||
7c50328b | 1326 | splat_subsystem_t * |
1327 | splat_kmem_init(void) | |
f1ca4da6 | 1328 | { |
ea3e6ca9 | 1329 | splat_subsystem_t *sub; |
f1ca4da6 | 1330 | |
ea3e6ca9 BB |
1331 | sub = kmalloc(sizeof(*sub), GFP_KERNEL); |
1332 | if (sub == NULL) | |
1333 | return NULL; | |
f1ca4da6 | 1334 | |
ea3e6ca9 BB |
1335 | memset(sub, 0, sizeof(*sub)); |
1336 | strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE); | |
7c50328b | 1337 | strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE); |
ea3e6ca9 | 1338 | INIT_LIST_HEAD(&sub->subsystem_list); |
f1ca4da6 | 1339 | INIT_LIST_HEAD(&sub->test_list); |
ea3e6ca9 BB |
1340 | spin_lock_init(&sub->test_lock); |
1341 | sub->desc.id = SPLAT_SUBSYSTEM_KMEM; | |
1342 | ||
1343 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC, | |
1344 | SPLAT_KMEM_TEST1_ID, splat_kmem_test1); | |
1345 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC, | |
1346 | SPLAT_KMEM_TEST2_ID, splat_kmem_test2); | |
1347 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC, | |
1348 | SPLAT_KMEM_TEST3_ID, splat_kmem_test3); | |
1349 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC, | |
1350 | SPLAT_KMEM_TEST4_ID, splat_kmem_test4); | |
1351 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC, | |
1352 | SPLAT_KMEM_TEST5_ID, splat_kmem_test5); | |
1353 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC, | |
1354 | SPLAT_KMEM_TEST6_ID, splat_kmem_test6); | |
1355 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC, | |
1356 | SPLAT_KMEM_TEST7_ID, splat_kmem_test7); | |
1357 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC, | |
1358 | SPLAT_KMEM_TEST8_ID, splat_kmem_test8); | |
1359 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC, | |
1360 | SPLAT_KMEM_TEST9_ID, splat_kmem_test9); | |
1361 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC, | |
1362 | SPLAT_KMEM_TEST10_ID, splat_kmem_test10); | |
11124863 | 1363 | #if 0 |
ea3e6ca9 BB |
1364 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC, |
1365 | SPLAT_KMEM_TEST11_ID, splat_kmem_test11); | |
11124863 | 1366 | #endif |
a9a7a01c PS |
1367 | SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC, |
1368 | SPLAT_KMEM_TEST13_ID, splat_kmem_test13); | |
ea3e6ca9 BB |
1369 | |
1370 | return sub; | |
f1ca4da6 | 1371 | } |
1372 | ||
/*
 * Unregister every kmem test (in reverse registration order, mirroring
 * splat_kmem_init) and free the subsystem descriptor.
 */
void
splat_kmem_fini(splat_subsystem_t *sub)
{
        ASSERT(sub);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
#if 0
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST10_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST9_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST8_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
        SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

        kfree(sub);
}
1394 | ||
1395 | int | |
7c50328b | 1396 | splat_kmem_id(void) { |
ea3e6ca9 | 1397 | return SPLAT_SUBSYSTEM_KMEM; |
f1ca4da6 | 1398 | } |