]> git.proxmox.com Git - mirror_spl.git/blob - modules/splat/splat-kmem.c
9491a081a9770a51ed624ea54734121846275801
[mirror_spl.git] / modules / splat / splat-kmem.c
1 #include "splat-internal.h"
2
3 #define SPLAT_SUBSYSTEM_KMEM 0x0100
4 #define SPLAT_KMEM_NAME "kmem"
5 #define SPLAT_KMEM_DESC "Kernel Malloc/Slab Tests"
6
7 #define SPLAT_KMEM_TEST1_ID 0x0101
8 #define SPLAT_KMEM_TEST1_NAME "kmem_alloc"
9 #define SPLAT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"
10
11 #define SPLAT_KMEM_TEST2_ID 0x0102
12 #define SPLAT_KMEM_TEST2_NAME "kmem_zalloc"
13 #define SPLAT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"
14
15 #define SPLAT_KMEM_TEST3_ID 0x0103
16 #define SPLAT_KMEM_TEST3_NAME "slab_alloc"
17 #define SPLAT_KMEM_TEST3_DESC "Slab constructor/destructor test"
18
19 #define SPLAT_KMEM_TEST4_ID 0x0104
20 #define SPLAT_KMEM_TEST4_NAME "slab_reap"
21 #define SPLAT_KMEM_TEST4_DESC "Slab reaping test"
22
23 #define SPLAT_KMEM_TEST5_ID 0x0105
24 #define SPLAT_KMEM_TEST5_NAME "vmem_alloc"
25 #define SPLAT_KMEM_TEST5_DESC "Memory allocation test (vmem_alloc)"
26
27 #define SPLAT_KMEM_ALLOC_COUNT 10
28 #define SPLAT_VMEM_ALLOC_COUNT 10
29
30 /* XXX - This test may fail under tight memory conditions */
31 static int
32 splat_kmem_test1(struct file *file, void *arg)
33 {
34 void *ptr[SPLAT_KMEM_ALLOC_COUNT];
35 int size = PAGE_SIZE;
36 int i, count, rc = 0;
37
38 /* We are intentionally going to push kmem_alloc to its max
39 * allocation size, so suppress the console warnings for now */
40 kmem_set_warning(0);
41
42 while ((!rc) && (size <= (PAGE_SIZE * 32))) {
43 count = 0;
44
45 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
46 ptr[i] = kmem_alloc(size, KM_SLEEP);
47 if (ptr[i])
48 count++;
49 }
50
51 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
52 if (ptr[i])
53 kmem_free(ptr[i], size);
54
55 splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
56 "%d byte allocations, %d/%d successful\n",
57 size, count, SPLAT_KMEM_ALLOC_COUNT);
58 if (count != SPLAT_KMEM_ALLOC_COUNT)
59 rc = -ENOMEM;
60
61 size *= 2;
62 }
63
64 kmem_set_warning(1);
65
66 return rc;
67 }
68
69 static int
70 splat_kmem_test2(struct file *file, void *arg)
71 {
72 void *ptr[SPLAT_KMEM_ALLOC_COUNT];
73 int size = PAGE_SIZE;
74 int i, j, count, rc = 0;
75
76 /* We are intentionally going to push kmem_alloc to its max
77 * allocation size, so suppress the console warnings for now */
78 kmem_set_warning(0);
79
80 while ((!rc) && (size <= (PAGE_SIZE * 32))) {
81 count = 0;
82
83 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
84 ptr[i] = kmem_zalloc(size, KM_SLEEP);
85 if (ptr[i])
86 count++;
87 }
88
89 /* Ensure buffer has been zero filled */
90 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
91 for (j = 0; j < size; j++) {
92 if (((char *)ptr[i])[j] != '\0') {
93 splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
94 "%d-byte allocation was "
95 "not zeroed\n", size);
96 rc = -EFAULT;
97 }
98 }
99 }
100
101 for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
102 if (ptr[i])
103 kmem_free(ptr[i], size);
104
105 splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
106 "%d byte allocations, %d/%d successful\n",
107 size, count, SPLAT_KMEM_ALLOC_COUNT);
108 if (count != SPLAT_KMEM_ALLOC_COUNT)
109 rc = -ENOMEM;
110
111 size *= 2;
112 }
113
114 kmem_set_warning(1);
115
116 return rc;
117 }
118
119 #define SPLAT_KMEM_TEST_MAGIC 0x004488CCUL
120 #define SPLAT_KMEM_CACHE_NAME "kmem_test"
121 #define SPLAT_KMEM_CACHE_SIZE 256
122 #define SPLAT_KMEM_OBJ_COUNT 128
123 #define SPLAT_KMEM_OBJ_RECLAIM 64
124
/* Per-object payload for the slab tests; the constructor and destructor
 * below stamp and clear these fields so cache behavior can be verified. */
typedef struct kmem_cache_data {
	char kcd_buf[SPLAT_KMEM_CACHE_SIZE];	/* pattern-filled by ctor (0xaa) / dtor (0xbb) */
	unsigned long kcd_magic;		/* copied from kcp_magic by the constructor */
	int kcd_flag;				/* 1 after construction, 0 after destruction */
} kmem_cache_data_t;

/* Private state shared between a test and its cache callbacks. */
typedef struct kmem_cache_priv {
	unsigned long kcp_magic;	/* set to SPLAT_KMEM_TEST_MAGIC by the tests */
	struct file *kcp_file;		/* handle for splat_vprint() logging */
	kmem_cache_t *kcp_cache;	/* cache under test (set by test4 for reclaim) */
	kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];	/* outstanding objects */
	int kcp_count;			/* constructed-minus-destroyed object count */
	int kcp_rc;			/* initialized to 0; not otherwise used in this file */
} kmem_cache_priv_t;
139
140 static int
141 splat_kmem_test34_constructor(void *ptr, void *priv, int flags)
142 {
143 kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
144 kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
145
146 if (kcd) {
147 memset(kcd->kcd_buf, 0xaa, SPLAT_KMEM_CACHE_SIZE);
148 kcd->kcd_flag = 1;
149
150 if (kcp) {
151 kcd->kcd_magic = kcp->kcp_magic;
152 kcp->kcp_count++;
153 }
154 }
155
156 return 0;
157 }
158
159 static void
160 splat_kmem_test34_destructor(void *ptr, void *priv)
161 {
162 kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
163 kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
164
165 if (kcd) {
166 memset(kcd->kcd_buf, 0xbb, SPLAT_KMEM_CACHE_SIZE);
167 kcd->kcd_flag = 0;
168
169 if (kcp)
170 kcp->kcp_count--;
171 }
172
173 return;
174 }
175
176 static int
177 splat_kmem_test3(struct file *file, void *arg)
178 {
179 kmem_cache_t *cache = NULL;
180 kmem_cache_data_t *kcd = NULL;
181 kmem_cache_priv_t kcp;
182 int rc = 0, max;
183
184 kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
185 kcp.kcp_file = file;
186 kcp.kcp_count = 0;
187 kcp.kcp_rc = 0;
188
189 cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
190 splat_kmem_test34_constructor,
191 splat_kmem_test34_destructor,
192 NULL, &kcp, NULL, 0);
193 if (!cache) {
194 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
195 "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
196 return -ENOMEM;
197 }
198
199 kcd = kmem_cache_alloc(cache, 0);
200 if (!kcd) {
201 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
202 "Unable to allocate from '%s'\n",
203 SPLAT_KMEM_CACHE_NAME);
204 rc = -EINVAL;
205 goto out_free;
206 }
207
208 if (!kcd->kcd_flag) {
209 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
210 "Failed to run contructor for '%s'\n",
211 SPLAT_KMEM_CACHE_NAME);
212 rc = -EINVAL;
213 goto out_free;
214 }
215
216 if (kcd->kcd_magic != kcp.kcp_magic) {
217 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
218 "Failed to pass private data to constructor "
219 "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
220 rc = -EINVAL;
221 goto out_free;
222 }
223
224 max = kcp.kcp_count;
225
226 /* Destructor's run lazily so it hard to check correctness here.
227 * We assume if it doesn't crash the free worked properly */
228 kmem_cache_free(cache, kcd);
229
230 /* Destroy the entire cache which will force destructors to
231 * run and we can verify one was called for every object */
232 kmem_cache_destroy(cache);
233 if (kcp.kcp_count) {
234 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
235 "Failed to run destructor on all slab objects "
236 "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
237 rc = -EINVAL;
238 }
239
240 splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
241 "%d allocated/destroyed objects for '%s'\n",
242 max, SPLAT_KMEM_CACHE_NAME);
243
244 return rc;
245
246 out_free:
247 if (kcd)
248 kmem_cache_free(cache, kcd);
249
250 kmem_cache_destroy(cache);
251 return rc;
252 }
253
254 static void
255 splat_kmem_test4_reclaim(void *priv)
256 {
257 kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
258 int i;
259
260 splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST4_NAME,
261 "Reaping %d objects from '%s'\n",
262 SPLAT_KMEM_OBJ_RECLAIM, SPLAT_KMEM_CACHE_NAME);
263 for (i = 0; i < SPLAT_KMEM_OBJ_RECLAIM; i++) {
264 if (kcp->kcp_kcd[i]) {
265 kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
266 kcp->kcp_kcd[i] = NULL;
267 }
268 }
269
270 return;
271 }
272
/*
 * Slab reaping test: fill a cache with SPLAT_KMEM_OBJ_COUNT objects,
 * trigger the shrinker so the reclaim callback frees the first
 * SPLAT_KMEM_OBJ_RECLAIM of them, then check the surviving object
 * count lands within +/-10% of the expected ratio.  Returns 0 on
 * success or a negative errno.
 */
static int
splat_kmem_test4(struct file *file, void *arg)
{
	kmem_cache_t *cache;
	kmem_cache_priv_t kcp;
	int i, rc = 0, max, reclaim_percent, target_percent;

	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp.kcp_file = file;
	kcp.kcp_count = 0;
	kcp.kcp_rc = 0;

	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
	                          sizeof(kmem_cache_data_t), 0,
	                          splat_kmem_test34_constructor,
	                          splat_kmem_test34_destructor,
	                          splat_kmem_test4_reclaim, &kcp, NULL, 0);
	if (!cache) {
		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
	                   "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	/* The reclaim callback needs the cache handle to free objects. */
	kcp.kcp_cache = cache;

	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
		/* All allocations need not succeed */
		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
		if (!kcp.kcp_kcd[i]) {
			splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
			           "Unable to allocate from '%s'\n",
			           SPLAT_KMEM_CACHE_NAME);
		}
	}

	/* Snapshot the constructed-object count before reclaim; it is
	 * also the divisor below, hence the positivity assertion. */
	max = kcp.kcp_count;
	ASSERT(max > 0);

	/* Force shrinker to run */
	kmem_reap();

	/* Reclaim reclaimed objects, this ensure the destructors are run */
	kmem_cache_reap_now(cache);

	/* Compare the surviving fraction against the expected fraction
	 * ((COUNT - RECLAIM) / COUNT), allowing a 10-point band either
	 * way since reclaim behavior is not fully deterministic. */
	reclaim_percent = ((kcp.kcp_count * 100) / max);
	target_percent = (((SPLAT_KMEM_OBJ_COUNT - SPLAT_KMEM_OBJ_RECLAIM) * 100) /
	                  SPLAT_KMEM_OBJ_COUNT);
	splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
	           "%d%% (%d/%d) of previous size, target of "
	           "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
	           max, target_percent - 10, target_percent + 10,
	           SPLAT_KMEM_CACHE_NAME);
	if ((reclaim_percent < target_percent - 10) ||
	    (reclaim_percent > target_percent + 10))
		rc = -EINVAL;

	/* Cleanup our mess (the reclaim callback NULLed what it freed) */
	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
		if (kcp.kcp_kcd[i])
			kmem_cache_free(cache, kcp.kcp_kcd[i]);

	kmem_cache_destroy(cache);

	return rc;
}
338
339 static int
340 splat_kmem_test5(struct file *file, void *arg)
341 {
342 void *ptr[SPLAT_VMEM_ALLOC_COUNT];
343 int size = PAGE_SIZE;
344 int i, count, rc = 0;
345
346 while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
347 count = 0;
348
349 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
350 ptr[i] = vmem_alloc(size, KM_SLEEP);
351 if (ptr[i])
352 count++;
353 }
354
355 for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
356 if (ptr[i])
357 vmem_free(ptr[i], size);
358
359 splat_vprint(file, SPLAT_KMEM_TEST5_NAME,
360 "%d byte allocations, %d/%d successful\n",
361 size, count, SPLAT_VMEM_ALLOC_COUNT);
362 if (count != SPLAT_VMEM_ALLOC_COUNT)
363 rc = -ENOMEM;
364
365 size *= 2;
366 }
367
368 return rc;
369 }
370
371 splat_subsystem_t *
372 splat_kmem_init(void)
373 {
374 splat_subsystem_t *sub;
375
376 sub = kmalloc(sizeof(*sub), GFP_KERNEL);
377 if (sub == NULL)
378 return NULL;
379
380 memset(sub, 0, sizeof(*sub));
381 strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
382 strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
383 INIT_LIST_HEAD(&sub->subsystem_list);
384 INIT_LIST_HEAD(&sub->test_list);
385 spin_lock_init(&sub->test_lock);
386 sub->desc.id = SPLAT_SUBSYSTEM_KMEM;
387
388 SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
389 SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
390 SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
391 SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
392 SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
393 SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
394 SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
395 SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
396 SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
397 SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
398
399 return sub;
400 }
401
/*
 * Tear down the kmem test subsystem: deregister the tests in reverse
 * registration order, then free the descriptor allocated by
 * splat_kmem_init().
 */
void
splat_kmem_fini(splat_subsystem_t *sub)
{
	ASSERT(sub);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST2_ID);
	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST1_ID);

	kfree(sub);
}
414
415 int
416 splat_kmem_id(void) {
417 return SPLAT_SUBSYSTEM_KMEM;
418 }