src/splat/splat-kmem.c (mirror_spl.git)

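/*
 * Kernel memory (kmem) tests for the splat test framework: basic
 * kmem_alloc()/kmem_zalloc() allocation checks plus slab cache
 * constructor/destructor and reclaim (reaping) tests.
 */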
#include <sys/zfs_context.h>
#include <sys/splat-ctl.h>

#define KZT_SUBSYSTEM_KMEM		0x0100
#define KZT_KMEM_NAME			"kmem"
#define KZT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define KZT_KMEM_TEST1_ID		0x0101
#define KZT_KMEM_TEST1_NAME		"kmem_alloc"
#define KZT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define KZT_KMEM_TEST2_ID		0x0102
#define KZT_KMEM_TEST2_NAME		"kmem_zalloc"
#define KZT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define KZT_KMEM_TEST3_ID		0x0103
#define KZT_KMEM_TEST3_NAME		"slab_alloc"
#define KZT_KMEM_TEST3_DESC		"Slab constructor/destructor test"

#define KZT_KMEM_TEST4_ID		0x0104
#define KZT_KMEM_TEST4_NAME		"slab_reap"
#define KZT_KMEM_TEST4_DESC		"Slab reaping test"

#define KZT_KMEM_ALLOC_COUNT		10
/* XXX - This test may fail under tight memory conditions */
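/*
 * Test 1 (kmem_alloc): for each power-of-two size from PAGE_SIZE up to
 * 8 * PAGE_SIZE, perform KZT_KMEM_ALLOC_COUNT allocations, free them,
 * and fail with -ENOMEM unless every allocation succeeded.
 */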
static int
kzt_kmem_test1(struct file *file, void *arg)
{
	void *ptr[KZT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size < (PAGE_SIZE * 16))) {
		count = 0;

		for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		kzt_vprint(file, KZT_KMEM_TEST1_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, KZT_KMEM_ALLOC_COUNT);
		if (count != KZT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

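/*
 * Test 2 (kmem_zalloc): same allocation pattern as test 1, but uses
 * kmem_zalloc() and additionally verifies that every byte of each
 * returned buffer is zero.
 */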
static int
kzt_kmem_test2(struct file *file, void *arg)
{
	void *ptr[KZT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size < (PAGE_SIZE * 16))) {
		count = 0;

		for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure each successful allocation was zero filled; skip
		 * failed allocations to avoid dereferencing a NULL pointer */
		for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++) {
			if (!ptr[i])
				continue;

			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					kzt_vprint(file, KZT_KMEM_TEST2_NAME,
					    "%d-byte allocation was "
					    "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < KZT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		kzt_vprint(file, KZT_KMEM_TEST2_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, KZT_KMEM_ALLOC_COUNT);
		if (count != KZT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

#define KZT_KMEM_TEST_MAGIC		0x004488CCUL
#define KZT_KMEM_CACHE_NAME		"kmem_test"
#define KZT_KMEM_CACHE_SIZE		256
#define KZT_KMEM_OBJ_COUNT		128
#define KZT_KMEM_OBJ_RECLAIM		64

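/*
 * Private data shared by the slab tests: kmem_cache_data_t is the cached
 * object itself, and kmem_cache_priv_t tracks the cache, the objects
 * allocated from it, and a live-object count maintained by the
 * constructor/destructor callbacks.
 */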
typedef struct kmem_cache_data {
	char kcd_buf[KZT_KMEM_CACHE_SIZE];
	unsigned long kcd_magic;
	int kcd_flag;
} kmem_cache_data_t;

typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	kmem_cache_data_t *kcp_kcd[KZT_KMEM_OBJ_COUNT];
	int kcp_count;
	int kcp_rc;
} kmem_cache_priv_t;

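/*
 * Constructor/destructor pair shared by tests 3 and 4.  The constructor
 * fills the object's buffer with 0xaa, sets kcd_flag, copies the magic
 * value from the private data, and increments the live-object count; the
 * destructor reverses this and decrements the count.
 */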
static int
kzt_kmem_test34_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;

	if (kcd) {
		memset(kcd->kcd_buf, 0xaa, KZT_KMEM_CACHE_SIZE);
		kcd->kcd_flag = 1;

		if (kcp) {
			kcd->kcd_magic = kcp->kcp_magic;
			kcp->kcp_count++;
		}
	}

	return 0;
}

static void
kzt_kmem_test34_destructor(void *ptr, void *priv)
{
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;

	if (kcd) {
		memset(kcd->kcd_buf, 0xbb, KZT_KMEM_CACHE_SIZE);
		kcd->kcd_flag = 0;

		if (kcp)
			kcp->kcp_count--;
	}

	return;
}

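/*
 * Test 3 (slab_alloc): create a cache, allocate a single object, and
 * verify the constructor ran and received the private data.  The object
 * is then freed and the cache destroyed, at which point the live-object
 * count must have returned to zero, proving a destructor ran for every
 * constructed object.
 */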
static int
kzt_kmem_test3(struct file *file, void *arg)
{
	kmem_cache_t *cache = NULL;
	kmem_cache_data_t *kcd = NULL;
	kmem_cache_priv_t kcp;
	int rc = 0, max;

	kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
	kcp.kcp_file = file;
	kcp.kcp_count = 0;
	kcp.kcp_rc = 0;

	cache = kmem_cache_create(KZT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
	    kzt_kmem_test34_constructor,
	    kzt_kmem_test34_destructor,
	    NULL, &kcp, NULL, 0);
	if (!cache) {
		kzt_vprint(file, KZT_KMEM_TEST3_NAME,
		    "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	kcd = kmem_cache_alloc(cache, 0);
	if (!kcd) {
		kzt_vprint(file, KZT_KMEM_TEST3_NAME,
		    "Unable to allocate from '%s'\n",
		    KZT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (!kcd->kcd_flag) {
		kzt_vprint(file, KZT_KMEM_TEST3_NAME,
193 "Failed to run contructor for '%s'\n",
		    KZT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	if (kcd->kcd_magic != kcp.kcp_magic) {
		kzt_vprint(file, KZT_KMEM_TEST3_NAME,
		    "Failed to pass private data to constructor "
		    "for '%s'\n", KZT_KMEM_CACHE_NAME);
		rc = -EINVAL;
		goto out_free;
	}

	max = kcp.kcp_count;

	/* Destructors run lazily, so it is hard to verify correctness here.
	 * We assume the free worked properly if it does not crash. */
	kmem_cache_free(cache, kcd);

	/* Destroy the entire cache, which forces the destructors to run
	 * so we can verify one was called for every object. */
	kmem_cache_destroy(cache);
	if (kcp.kcp_count) {
		kzt_vprint(file, KZT_KMEM_TEST3_NAME,
		    "Failed to run destructor on all slab objects "
		    "for '%s'\n", KZT_KMEM_CACHE_NAME);
		rc = -EINVAL;
	}

	kzt_vprint(file, KZT_KMEM_TEST3_NAME,
	    "%d allocated/destroyed objects for '%s'\n",
	    max, KZT_KMEM_CACHE_NAME);

	return rc;

out_free:
	if (kcd)
		kmem_cache_free(cache, kcd);
out_destroy:
	kmem_cache_destroy(cache);
	return rc;
}

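/*
 * Reclaim callback for test 4: when the cache is asked to shrink, free
 * the first KZT_KMEM_OBJ_RECLAIM objects still held by the test.
 */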
static void
kzt_kmem_test4_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	int i;

	kzt_vprint(kcp->kcp_file, KZT_KMEM_TEST4_NAME,
	    "Reaping %d objects from '%s'\n",
	    KZT_KMEM_OBJ_RECLAIM, KZT_KMEM_CACHE_NAME);
	for (i = 0; i < KZT_KMEM_OBJ_RECLAIM; i++) {
		if (kcp->kcp_kcd[i]) {
			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
			kcp->kcp_kcd[i] = NULL;
		}
	}

	return;
}

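/*
 * Test 4 (slab_reap): allocate KZT_KMEM_OBJ_COUNT objects from a cache
 * registered with the reclaim callback above, then force a reap and
 * check that the number of surviving objects is within +/-10% of the
 * expected (KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) target.
 */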
static int
kzt_kmem_test4(struct file *file, void *arg)
{
	kmem_cache_t *cache;
	kmem_cache_priv_t kcp;
	int i, rc = 0, max, reclaim_percent, target_percent;

	kcp.kcp_magic = KZT_KMEM_TEST_MAGIC;
	kcp.kcp_file = file;
	kcp.kcp_count = 0;
	kcp.kcp_rc = 0;

	cache = kmem_cache_create(KZT_KMEM_CACHE_NAME,
	    sizeof(kmem_cache_data_t), 0,
	    kzt_kmem_test34_constructor,
	    kzt_kmem_test34_destructor,
	    kzt_kmem_test4_reclaim, &kcp, NULL, 0);
	if (!cache) {
		kzt_vprint(file, KZT_KMEM_TEST4_NAME,
		    "Unable to create '%s'\n", KZT_KMEM_CACHE_NAME);
		return -ENOMEM;
	}

	kcp.kcp_cache = cache;

	for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++) {
		/* Not all allocations need to succeed */
		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
		if (!kcp.kcp_kcd[i]) {
			kzt_vprint(file, KZT_KMEM_TEST4_NAME,
			    "Unable to allocate from '%s'\n",
			    KZT_KMEM_CACHE_NAME);
		}
	}

	max = kcp.kcp_count;

	/* Force shrinker to run */
	kmem_reap();

	/* Reap the reclaimed objects; this ensures the destructors are run */
	kmem_cache_reap_now(cache);

	reclaim_percent = ((kcp.kcp_count * 100) / max);
	target_percent = (((KZT_KMEM_OBJ_COUNT - KZT_KMEM_OBJ_RECLAIM) * 100) /
	    KZT_KMEM_OBJ_COUNT);
	kzt_vprint(file, KZT_KMEM_TEST4_NAME,
	    "%d%% (%d/%d) of previous size, target of "
	    "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
	    max, target_percent - 10, target_percent + 10,
	    KZT_KMEM_CACHE_NAME);
	if ((reclaim_percent < target_percent - 10) ||
	    (reclaim_percent > target_percent + 10))
		rc = -EINVAL;

	/* Cleanup our mess */
	for (i = 0; i < KZT_KMEM_OBJ_COUNT; i++)
		if (kcp.kcp_kcd[i])
			kmem_cache_free(cache, kcp.kcp_kcd[i]);

	kmem_cache_destroy(cache);

	return rc;
}

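/*
 * Subsystem registration: allocate and initialize the kmem subsystem
 * descriptor and register the four tests above with the splat framework.
 */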
kzt_subsystem_t *
kzt_kmem_init(void)
{
	kzt_subsystem_t *sub;

	sub = kmalloc(sizeof(*sub), GFP_KERNEL);
	if (sub == NULL)
		return NULL;

	memset(sub, 0, sizeof(*sub));
	strncpy(sub->desc.name, KZT_KMEM_NAME, KZT_NAME_SIZE);
	strncpy(sub->desc.desc, KZT_KMEM_DESC, KZT_DESC_SIZE);
	INIT_LIST_HEAD(&sub->subsystem_list);
	INIT_LIST_HEAD(&sub->test_list);
	spin_lock_init(&sub->test_lock);
	sub->desc.id = KZT_SUBSYSTEM_KMEM;

	KZT_TEST_INIT(sub, KZT_KMEM_TEST1_NAME, KZT_KMEM_TEST1_DESC,
	    KZT_KMEM_TEST1_ID, kzt_kmem_test1);
	KZT_TEST_INIT(sub, KZT_KMEM_TEST2_NAME, KZT_KMEM_TEST2_DESC,
	    KZT_KMEM_TEST2_ID, kzt_kmem_test2);
	KZT_TEST_INIT(sub, KZT_KMEM_TEST3_NAME, KZT_KMEM_TEST3_DESC,
	    KZT_KMEM_TEST3_ID, kzt_kmem_test3);
	KZT_TEST_INIT(sub, KZT_KMEM_TEST4_NAME, KZT_KMEM_TEST4_DESC,
	    KZT_KMEM_TEST4_ID, kzt_kmem_test4);

	return sub;
}

void
kzt_kmem_fini(kzt_subsystem_t *sub)
{
	ASSERT(sub);
	KZT_TEST_FINI(sub, KZT_KMEM_TEST4_ID);
	KZT_TEST_FINI(sub, KZT_KMEM_TEST3_ID);
	KZT_TEST_FINI(sub, KZT_KMEM_TEST2_ID);
	KZT_TEST_FINI(sub, KZT_KMEM_TEST1_ID);

	kfree(sub);
}

int
kzt_kmem_id(void)
{
	return KZT_SUBSYSTEM_KMEM;
}