#include <splat-ctl.h>

#define KZT_SUBSYSTEM_KMEM 0x0100
#define KZT_KMEM_NAME "kmem"
#define KZT_KMEM_DESC "Kernel Malloc/Slab Tests"

#define KZT_KMEM_TEST1_ID 0x0101
#define KZT_KMEM_TEST1_NAME "kmem_alloc"
#define KZT_KMEM_TEST1_DESC "Memory allocation test (kmem_alloc)"

#define KZT_KMEM_TEST2_ID 0x0102
#define KZT_KMEM_TEST2_NAME "kmem_zalloc"
#define KZT_KMEM_TEST2_DESC "Memory allocation test (kmem_zalloc)"

#define KZT_KMEM_TEST3_ID 0x0103
#define KZT_KMEM_TEST3_NAME "slab_alloc"
#define KZT_KMEM_TEST3_DESC "Slab constructor/destructor test"

#define KZT_KMEM_TEST4_ID 0x0104
#define KZT_KMEM_TEST4_NAME "slab_reap"
#define KZT_KMEM_TEST4_DESC "Slab reaping test"

#define KZT_KMEM_ALLOC_COUNT 10
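/* Test 1: kmem_alloc() - allocate KZT_KMEM_ALLOC_COUNT buffers at each
 * power-of-two size from PAGE_SIZE through 8 * PAGE_SIZE, free them, and
 * fail with -ENOMEM if any allocation did not succeed. */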
/* XXX - This test may fail under tight memory conditions */
static int
kzt_kmem_test1(struct file *file, void *arg)
{
        void *ptr[KZT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, count, rc = 0;

        while ((!rc) && (size < (PAGE_SIZE * 16))) {
                count = 0;

                for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_alloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                kzt_vprint(file, KZT_KMEM_TEST1_NAME,
                           "%d byte allocations, %d/%d successful\n",
                           size, count, KZT_KMEM_ALLOC_COUNT);
                if (count != KZT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

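/* Test 2: kmem_zalloc() - same allocation pattern as test 1, but also
 * verify that every byte of each buffer was zero filled. */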
static int
kzt_kmem_test2(struct file *file, void *arg)
{
        void *ptr[KZT_KMEM_ALLOC_COUNT];
        int size = PAGE_SIZE;
        int i, j, count, rc = 0;

        while ((!rc) && (size < (PAGE_SIZE * 16))) {
                count = 0;

                for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
                        ptr[i] = kmem_zalloc(size, KM_SLEEP);
                        if (ptr[i])
                                count++;
                }

                /* Ensure each allocated buffer has been zero filled */
                for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
                        /* Skip any allocation which failed */
                        if (!ptr[i])
                                continue;

                        for (j = 0; j < size; j++) {
                                if (((char *)ptr[i])[j] != '\0') {
                                        kzt_vprint(file, KZT_KMEM_TEST2_NAME,
                                                   "%d-byte allocation was "
                                                   "not zeroed\n", size);
                                        rc = -EFAULT;
                                }
                        }
                }

                for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
                        if (ptr[i])
                                kmem_free(ptr[i], size);

                kzt_vprint(file, KZT_KMEM_TEST2_NAME,
                           "%d byte allocations, %d/%d successful\n",
                           size, count, KZT_KMEM_ALLOC_COUNT);
                if (count != KZT_KMEM_ALLOC_COUNT)
                        rc = -ENOMEM;

                size *= 2;
        }

        return rc;
}

#define KZT_KMEM_TEST_MAGIC 0x004488CCUL
#define KZT_KMEM_CACHE_NAME "kmem_test"
#define KZT_KMEM_CACHE_SIZE 256
#define KZT_KMEM_OBJ_COUNT 128
#define KZT_KMEM_OBJ_RECLAIM 64

typedef struct kmem_cache_data {
        char kcd_buf[KZT_KMEM_CACHE_SIZE];
        unsigned long kcd_magic;
        int kcd_flag;
} kmem_cache_data_t;

typedef struct kmem_cache_priv {
        unsigned long kcp_magic;
        struct file *kcp_file;
        kmem_cache_t *kcp_cache;
        kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
        int kcp_count;
        int kcp_rc;
} kmem_cache_priv_t;

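/* Shared constructor/destructor pair for tests 3 and 4.  The constructor
 * patterns each object with 0xaa, sets the in-use flag, stamps the object
 * with the cache's private magic value, and bumps the live object count;
 * the destructor reverses this so the tests can verify both hooks ran. */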
static int
kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
{
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;

        if (kcd) {
                memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
                kcd->kcd_flag = 1;

                if (kcp) {
                        kcd->kcd_magic = kcp->kcp_magic;
                        kcp->kcp_count++;
                }
        }

        return 0;
}

static void
kzt_kmem_test34_destructor(void *ptr, void *priv)
{
        kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;

        if (kcd) {
                memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
                kcd->kcd_flag = 0;

                if (kcp)
                        kcp->kcp_count--;
        }

        return;
}

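/* Test 3: slab_alloc - create a cache, allocate a single object, and check
 * that the constructor ran and received the private data.  Destroying the
 * cache must then run the destructor for every constructed object, which is
 * verified by the live object count returning to zero. */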
static int
kzt_kmem_test3(struct file *file, void *arg)
{
        kmem_cache_t *cache = NULL;
        kmem_cache_data_t *kcd = NULL;
        kmem_cache_priv_t kcp;
        int rc = 0, max;

        kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
        kcp.kcp_file = file;
        kcp.kcp_count = 0;
        kcp.kcp_rc = 0;

        cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
                                  kzt_kmem_test34_constructor,
                                  kzt_kmem_test34_destructor,
                                  NULL, &kcp, NULL, 0);
        if (!cache) {
                kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                           "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        kcd = kmem_cache_alloc(cache, 0);
        if (!kcd) {
                kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                           "Unable to allocate from '%s'\n",
                           KZT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (!kcd->kcd_flag) {
                kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                           "Failed to run constructor for '%s'\n",
                           KZT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        if (kcd->kcd_magic != kcp.kcp_magic) {
                kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                           "Failed to pass private data to constructor "
                           "for '%s'\n", KZT_KMEM_CACHE_NAME);
                rc = -EINVAL;
                goto out_free;
        }

        max = kcp.kcp_count;

        /* Destructors run lazily, so it is hard to check correctness here.
         * We assume the free worked properly if it does not crash. */
        kmem_cache_free(cache, kcd);

        /* Destroy the entire cache, which forces the destructors to run
         * so we can verify one was called for every object. */
        kmem_cache_destroy(cache);
        if (kcp.kcp_count) {
                kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                           "Failed to run destructor on all slab objects "
                           "for '%s'\n", KZT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }

        kzt_vprint(file, KZT_KMEM_TEST3_NAME,
                   "%d allocated/destroyed objects for '%s'\n",
                   max, KZT_KMEM_CACHE_NAME);

        return rc;

out_free:
        if (kcd)
                kmem_cache_free(cache, kcd);

        kmem_cache_destroy(cache);
        return rc;
}

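/* Reclaim callback registered with the cache in test 4: frees the first
 * KZT_KMEM_OBJ_RECLAIM objects when the cache is asked to shrink. */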
static void
kzt_kmem_test4_reclaim(void *priv)
{
        kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
        int i;

        kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
                   "Reaping %d objects from '%s'\n",
                   KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
        for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
                if (kcp->kcp_kcd[i]) {
                        kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
                        kcp->kcp_kcd[i] = NULL;
                }
        }

        return;
}

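/* Test 4: slab_reap - fill the cache with KZT_KMEM_OBJ_COUNT objects,
 * trigger a reap so the reclaim callback frees the first
 * KZT_KMEM_OBJ_RECLAIM of them, and check that the surviving object
 * count lands within 10% of the expected target. */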
static int
kzt_kmem_test4(struct file *file, void *arg)
{
        kmem_cache_t *cache;
        kmem_cache_priv_t kcp;
        int i, rc = 0, max, reclaim_percent, target_percent;

        kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
        kcp.kcp_file = file;
        kcp.kcp_count = 0;
        kcp.kcp_rc = 0;

        cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
                                  sizeof(kmem_cache_data_t), 0,
                                  kzt_kmem_test34_constructor,
                                  kzt_kmem_test34_destructor,
                                  kzt_kmem_test4_reclaim, &kcp, NULL, 0);
        if (!cache) {
                kzt_vprint(file, KZT_KMEM_TEST4_NAME,
                           "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
                return -ENOMEM;
        }

        kcp.kcp_cache = cache;

        for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
                /* Not every allocation needs to succeed */
                kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
                if (!kcp.kcp_kcd[i]) {
                        kzt_vprint(file, KZT_KMEM_TEST4_NAME,
                                   "Unable to allocate from '%s'\n",
                                   KZT_KMEM_CACHE_NAME);
                }
        }

        max = kcp.kcp_count;

        /* Force the shrinker to run */
        kmem_reap();

        /* Reap the reclaimed objects; this ensures the destructors are run */
        kmem_cache_reap_now(cache);

        reclaim_percent = ((kcp.kcp_count * 100) / max);
        target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
                          KZT_KMEM_OBJ_COUNT);
        kzt_vprint(file, KZT_KMEM_TEST4_NAME,
                   "%d%% (%d/%d) of previous size, target of "
                   "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
                   max, target_percent - 10, target_percent + 10,
                   KZT_KMEM_CACHE_NAME);
        if ((reclaim_percent < target_percent - 10) ||
            (reclaim_percent > target_percent + 10))
                rc = -EINVAL;

        /* Clean up our mess */
        for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
                if (kcp.kcp_kcd[i])
                        kmem_cache_free(cache, kcp.kcp_kcd[i]);

        kmem_cache_destroy(cache);

        return rc;
}

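/* Register the kmem subsystem and its four tests with the splat framework. */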
kzt_subsystem_t *
kzt_kmem_init(void)
{
        kzt_subsystem_t *sub;

        sub = kmalloc(sizeof(*sub), GFP_KERNEL);
        if (sub == NULL)
                return NULL;

        memset(sub, 0, sizeof(*sub));
        strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
        strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
        INIT_LIST_HEAD(&sub->subsystem_list);
        INIT_LIST_HEAD(&sub->test_list);
        spin_lock_init(&sub->test_lock);
        sub->desc.id = KZT_SUBSYSTEM_KMEM;

        KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
                      KZT_KMEM_TEST1_ID, kzt_kmem_test1);
        KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
                      KZT_KMEM_TEST2_ID, kzt_kmem_test2);
        KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
                      KZT_KMEM_TEST3_ID, kzt_kmem_test3);
        KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
                      KZT_KMEM_TEST4_ID, kzt_kmem_test4);

        return sub;
}

void
kzt_kmem_fini(kzt_subsystem_t *sub)
{
        ASSERT(sub);
        KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
        KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
        KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
        KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);

        kfree(sub);
}

int
kzt_kmem_id(void)
{
        return KZT_SUBSYSTEM_KMEM;
}