]>
Commit | Line | Data |
---|---|---|
715f6251 | 1 | /* |
2 | * This file is part of the SPL: Solaris Porting Layer. | |
3 | * | |
4 | * Copyright (c) 2008 Lawrence Livermore National Security, LLC. | |
5 | * Produced at Lawrence Livermore National Laboratory | |
6 | * Written by: | |
7 | * Brian Behlendorf <behlendorf1@llnl.gov>, | |
8 | * Herb Wartens <wartens2@llnl.gov>, | |
9 | * Jim Garlick <garlick@llnl.gov> | |
10 | * UCRL-CODE-235197 | |
11 | * | |
12 | * This is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This is distributed in the hope that it will be useful, but WITHOUT | |
18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
20 | * for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
25 | */ | |
26 | ||
09b414e8 | 27 | #ifndef _SPL_KMEM_H |
28 | #define _SPL_KMEM_H | |
f1ca4da6 | 29 | |
30 | #ifdef __cplusplus | |
31 | extern "C" { | |
32 | #endif | |
33 | ||
f1ca4da6 | 34 | #undef DEBUG_KMEM_UNIMPLEMENTED |
ff449ac4 | 35 | #undef DEBUG_KMEM_TRACKING /* Per-allocation memory tracking */ |
f1ca4da6 | 36 | |
f1b59d26 | 37 | #include <linux/module.h> |
f1ca4da6 | 38 | #include <linux/slab.h> |
79b31f36 | 39 | #include <linux/vmalloc.h> |
f1ca4da6 | 40 | #include <linux/mm.h> |
41 | #include <linux/spinlock.h> | |
d6a26c6a | 42 | #include <linux/rwsem.h> |
43 | #include <linux/hash.h> | |
44 | #include <linux/ctype.h> | |
57d86234 | 45 | #include <sys/types.h> |
937879f1 | 46 | #include <sys/debug.h> |
/*
 * Memory allocation interfaces
 *
 * Solaris KM_* allocation flags mapped onto their closest Linux GFP_*
 * equivalents so callers can keep Solaris semantics unchanged.
 */
#define KM_SLEEP	GFP_KERNEL		/* Allocation may block */
#define KM_NOSLEEP	GFP_ATOMIC		/* Allocation must not block */
#undef  KM_PANIC				/* No linux analog */
#define KM_PUSHPAGE	(KM_SLEEP | __GFP_HIGH)	/* May dip into reserves */
#define KM_VMFLAGS	GFP_LEVEL_MASK		/* Flag bits valid for vmem */
#define KM_FLAGS	__GFP_BITS_MASK		/* All recognized flag bits */
56 | ||
/*
 * Used internally, the kernel does not need to support this flag.
 * On kernels lacking __GFP_ZERO we define a private bit; the vmem
 * allocation macros below strip it before calling __vmalloc() and
 * honor it manually with memset().
 */
#ifndef __GFP_ZERO
#define __GFP_ZERO	0x8000
#endif
63 | ||
#ifdef DEBUG_KMEM
/* Running totals and high-water marks maintained by the allocation
 * macros below; definitions live in the implementation file. */
extern atomic64_t kmem_alloc_used;	/* Bytes currently kmem-allocated */
extern unsigned long kmem_alloc_max;	/* Historic kmem allocation peak */
extern atomic64_t vmem_alloc_used;	/* Bytes currently vmem-allocated */
extern unsigned long vmem_alloc_max;	/* Historic vmem allocation peak */
extern int kmem_warning_flag;		/* Non-zero enables large-alloc warnings */
f1ca4da6 | 70 | |
#ifdef DEBUG_KMEM_TRACKING
/* XXX - Not to surprisingly with debugging enabled the xmem_locks are very
 * highly contended particularly on xfree().  If we want to run with this
 * detailed debugging enabled for anything other than debugging we need to
 * minimize the contention by moving to a lock per xmem_table entry model.
 */
#define KMEM_HASH_BITS		10
#define KMEM_TABLE_SIZE		(1 << KMEM_HASH_BITS)

/* Hash table, global list, and lock covering every live kmem allocation */
extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS		10
#define VMEM_TABLE_SIZE		(1 << VMEM_HASH_BITS)

/* Hash table, global list, and lock covering every live vmem allocation */
extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;
90 | ||
/* Per-allocation tracking record: hashed by address for fast lookup on
 * free, and chained on a global list so leaks can be enumerated. */
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;
99 | ||
100 | static __inline__ kmem_debug_t * | |
13cdca65 | 101 | __kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr) |
d6a26c6a | 102 | { |
103 | struct hlist_head *head; | |
104 | struct hlist_node *node; | |
105 | struct kmem_debug *p; | |
106 | unsigned long flags; | |
107 | ||
13cdca65 | 108 | spin_lock_irqsave(lock, flags); |
109 | head = &table[hash_ptr(addr, bits)]; | |
d6a26c6a | 110 | hlist_for_each_entry_rcu(p, node, head, kd_hlist) { |
111 | if (p->kd_addr == addr) { | |
112 | hlist_del_init(&p->kd_hlist); | |
113 | list_del_init(&p->kd_list); | |
13cdca65 | 114 | spin_unlock_irqrestore(lock, flags); |
d6a26c6a | 115 | return p; |
116 | } | |
117 | } | |
118 | ||
13cdca65 | 119 | spin_unlock_irqrestore(lock, flags); |
d6a26c6a | 120 | return NULL; |
121 | } | |
122 | ||
/*
 * Tracking version of __kmem_alloc(): in addition to calling the
 * underlying 'allocator', a kmem_debug_t record is created and linked
 * into kmem_table/kmem_list so outstanding allocations can be reported.
 * Evaluates to the allocated pointer, or NULL on failure.
 * NOTE: 'size' and 'flags' are evaluated more than once; do not pass
 * expressions with side effects.
 */
#define __kmem_alloc(size, flags, allocator, args...)                         \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
	if (_dptr_ == NULL) {                                                 \
		/* Cast sizeof (a size_t) to int to match the %d */           \
		/* conversion; an uncast size_t here is undefined */          \
		/* behavior on LP64 targets. */                               \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "kmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		/* Marked unlikely because we should never be doing this, */  \
		/* we tolerate to up 2 pages but a single page is best. */    \
		if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)  \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "    \
				       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",    \
				       (int)(size), (int)(flags),             \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		                                                              \
		_ptr_ = (void *)allocator((size), (flags), ## args);          \
		if (_ptr_ == NULL) {                                          \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "kmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		} else {                                                      \
			atomic64_add((size), &kmem_alloc_used);               \
			if (unlikely(atomic64_read(&kmem_alloc_used) >        \
				     kmem_alloc_max))                         \
				kmem_alloc_max =                              \
					atomic64_read(&kmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&kmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &kmem_list);          \
			spin_unlock_irqrestore(&kmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})
180 | ||
/*
 * Tracking version of kmem_free(): look up and unlink the debug record
 * created by __kmem_alloc(), verify the caller-supplied size matches
 * the recorded one, update accounting, then poison (0x5a) and free
 * both the record and the buffer.
 * NOTE: 'ptr' and 'size' are evaluated more than once.
 */
#define kmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size),      \
		_dptr_->kd_func, _dptr_->kd_line); /* Size must match */      \
	atomic64_sub((size), &kmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),   \
		       kmem_alloc_max);                                       \
	                                                                      \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	kfree(ptr);                                                           \
})
202 | ||
/*
 * Tracking version of __vmem_alloc(): virtually contiguous allocation
 * via __vmalloc() with a kmem_debug_t record linked into
 * vmem_table/vmem_list.  Only sleeping allocations are legal (vmalloc
 * may block), which the ASSERT enforces.  __GFP_ZERO is stripped for
 * the __vmalloc() call and honored manually with memset().
 * NOTE: 'size' and 'flags' are evaluated more than once.
 */
#define __vmem_alloc(size, flags)                                             \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	ASSERT((flags) & KM_SLEEP);                                           \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
	if (_dptr_ == NULL) {                                                 \
		/* Cast sizeof (a size_t) to int to match the %d */           \
		/* conversion; an uncast size_t here is undefined */          \
		/* behavior on LP64 targets. */                               \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "vmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		_ptr_ = (void *)__vmalloc((size), (((flags) |                 \
				__GFP_HIGHMEM) & ~__GFP_ZERO),                \
				PAGE_KERNEL);                                 \
		if (_ptr_ == NULL) {                                          \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "vmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		} else {                                                      \
			if (flags & __GFP_ZERO)                               \
				memset(_ptr_, 0, (size));                     \
			                                                      \
			atomic64_add((size), &vmem_alloc_used);               \
			if (unlikely(atomic64_read(&vmem_alloc_used) >        \
				     vmem_alloc_max))                         \
				vmem_alloc_max =                              \
					atomic64_read(&vmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&vmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &vmem_list);          \
			spin_unlock_irqrestore(&vmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})
258 | ||
/*
 * Tracking version of vmem_free(): unlink the debug record installed
 * by __vmem_alloc(), verify the recorded size, update accounting, then
 * poison (0x5a) and release both the record and the buffer via vfree().
 * NOTE: 'ptr' and 'size' are evaluated more than once.
 */
#define vmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size),      \
		_dptr_->kd_func, _dptr_->kd_line); /* Size must match */      \
	atomic64_sub((size), &vmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),   \
		       vmem_alloc_max);                                       \
	                                                                      \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	vfree(ptr);                                                           \
})
f1ca4da6 | 280 | |
ff449ac4 | 281 | #else /* DEBUG_KMEM_TRACKING */ |
282 | ||
/*
 * Lightweight (non-tracking) kmem accounting: only the used/max
 * counters are maintained; no per-allocation records are kept.
 * Evaluates to the allocated pointer, or NULL on failure.
 * NOTE: 'size' and 'flags' are evaluated more than once.
 */
#define __kmem_alloc(size, flags, allocator, args...)                         \
({	void *_ptr_ = NULL;                                                   \
	                                                                      \
	/* Marked unlikely because we should never be doing this, */          \
	/* we tolerate to up 2 pages but a single page is best. */            \
	if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)          \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "            \
			       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",            \
			       (int)(size), (int)(flags),                     \
			       atomic64_read(&kmem_alloc_used),               \
			       kmem_alloc_max);                               \
	                                                                      \
	_ptr_ = (void *)allocator((size), (flags), ## args);                  \
	if (_ptr_ == NULL) {                                                  \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "kmem_alloc(%d, 0x%x) failed (%ld/"            \
			       "%ld)\n", (int)(size), (int)(flags),           \
			       atomic64_read(&kmem_alloc_used),               \
			       kmem_alloc_max);                               \
	} else {                                                              \
		atomic64_add((size), &kmem_alloc_used);                       \
		if (unlikely(atomic64_read(&kmem_alloc_used) >                \
			     kmem_alloc_max))                                 \
			kmem_alloc_max =                                      \
				atomic64_read(&kmem_alloc_used);              \
		                                                              \
		__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(%d, 0x%x) = %p "   \
			       "(%ld/%ld)\n", (int)(size), (int)(flags),      \
			       _ptr_, atomic64_read(&kmem_alloc_used),        \
			       kmem_alloc_max);                               \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})
317 | ||
/*
 * Non-tracking free: adjust the accounting counter, poison the buffer
 * (0x5a) to help catch use-after-free, then release it.
 * NOTE: 'ptr' and 'size' are evaluated more than once.
 */
#define kmem_free(ptr, size)                                                  \
({                                                                            \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	atomic64_sub((size), &kmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),   \
		       kmem_alloc_max);                                       \
	memset(ptr, 0x5a, (size));                                            \
	kfree(ptr);                                                           \
})
329 | ||
/*
 * Non-tracking vmem allocation via __vmalloc().  Only sleeping
 * (KM_SLEEP) allocations are legal since vmalloc may block.
 * __GFP_ZERO is stripped for the call and honored manually with
 * memset().  NOTE: 'size' and 'flags' are evaluated more than once.
 */
#define __vmem_alloc(size, flags)                                             \
({	void *_ptr_ = NULL;                                                   \
	                                                                      \
	ASSERT((flags) & KM_SLEEP);                                           \
	                                                                      \
	_ptr_ = (void *)__vmalloc((size), (((flags) |                         \
			__GFP_HIGHMEM) & ~__GFP_ZERO), PAGE_KERNEL);          \
	if (_ptr_ == NULL) {                                                  \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "vmem_alloc(%d, 0x%x) failed (%ld/"            \
			       "%ld)\n", (int)(size), (int)(flags),           \
			       atomic64_read(&vmem_alloc_used),               \
			       vmem_alloc_max);                               \
	} else {                                                              \
		if (flags & __GFP_ZERO)                                       \
			memset(_ptr_, 0, (size));                             \
		                                                              \
		atomic64_add((size), &vmem_alloc_used);                       \
		if (unlikely(atomic64_read(&vmem_alloc_used) >                \
			     vmem_alloc_max))                                 \
			vmem_alloc_max =                                      \
				atomic64_read(&vmem_alloc_used);              \
		                                                              \
		__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("                  \
			       "%d, 0x%x) = %p (%ld/%ld)\n",                  \
			       (int)(size), (int)(flags), _ptr_,              \
			       atomic64_read(&vmem_alloc_used),               \
			       vmem_alloc_max);                               \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})
362 | ||
/*
 * Non-tracking vmem free: adjust the accounting counter, poison the
 * buffer (0x5a), then release it with vfree().
 * NOTE: 'ptr' and 'size' are evaluated more than once.
 */
#define vmem_free(ptr, size)                                                  \
({                                                                            \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	atomic64_sub((size), &vmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),   \
		       vmem_alloc_max);                                       \
	memset(ptr, 0x5a, (size));                                            \
	vfree(ptr);                                                           \
})
374 | ||
375 | #endif /* DEBUG_KMEM_TRACKING */ | |
376 | ||
/* Public allocation entry points, implemented in terms of the debug
 * __kmem_alloc()/__vmem_alloc() helpers above. */
#define kmem_alloc(size, flags)		__kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)	__kmem_alloc((size), (flags), kzalloc)

#ifdef HAVE_KMALLOC_NODE
#define kmem_alloc_node(size, flags, node)                                    \
	__kmem_alloc((size), (flags), kmalloc_node, node)
#else
/* No NUMA-aware kmalloc on this kernel; ignore 'node' and fall back */
#define kmem_alloc_node(size, flags, node)                                    \
	__kmem_alloc((size), (flags), kmalloc)
#endif

#define vmem_alloc(size, flags)		__vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)	__vmem_alloc((size), ((flags) | __GFP_ZERO))
390 | ||
c6dc93d6 | 391 | #else /* DEBUG_KMEM */ |
f1ca4da6 | 392 | |
/* Production (non-DEBUG_KMEM) variants: thin pass-throughs to the
 * native Linux allocators with no accounting or tracking overhead. */
#define kmem_alloc(size, flags)		kmalloc((size), (flags))
#define kmem_zalloc(size, flags)	kzalloc((size), (flags))
#define kmem_free(ptr, size)		kfree(ptr)

#ifdef HAVE_KMALLOC_NODE
#define kmem_alloc_node(size, flags, node)                                    \
	kmalloc_node((size), (flags), (node))
#else
/* No NUMA-aware kmalloc on this kernel; ignore 'node' and fall back */
#define kmem_alloc_node(size, flags, node)                                    \
	kmalloc((size), (flags))
#endif

#define vmem_alloc(size, flags)		__vmalloc((size), ((flags) |          \
					__GFP_HIGHMEM), PAGE_KERNEL)
/* Zeroing variant: __vmalloc() is not passed __GFP_ZERO here, so the
 * buffer is cleared manually on success. */
#define vmem_zalloc(size, flags)                                              \
({                                                                            \
	void *_ptr_ = __vmalloc((size),((flags)|__GFP_HIGHMEM),PAGE_KERNEL);  \
	if (_ptr_)                                                            \
		memset(_ptr_, 0, (size));                                     \
	_ptr_;                                                                \
})
#define vmem_free(ptr, size)		vfree(ptr)
79b31f36 | 415 | |
f1ca4da6 | 416 | #endif /* DEBUG_KMEM */ |
417 | ||
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris interface not yet ported; the #error turns any use into a
 * compile-time failure rather than silent misbehavior. */
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
425 | ||
/*
 * Slab allocation interfaces
 *
 * KMC_* flags select the backing store and behavior of an SPL slab
 * cache; several Solaris options are accepted but not implemented.
 */
#define KMC_NOTOUCH	0x00000001
#define KMC_NODEBUG	0x00000002	/* Default behavior */
#define KMC_NOMAGAZINE	0x00000004	/* XXX: No disable support available */
#define KMC_NOHASH	0x00000008	/* XXX: No hash available */
#define KMC_QCACHE	0x00000010	/* XXX: Unsupported */
#define KMC_KMEM	0x00000100	/* Use kmem cache */
#define KMC_VMEM	0x00000200	/* Use vmem cache */
#define KMC_OFFSLAB	0x00000400	/* Objects not on slab */

#define KMC_REAP_CHUNK	256
#define KMC_DEFAULT_SEEKS	DEFAULT_SEEKS
440 | ||
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris kmem interfaces with no Linux implementation yet; every stub
 * triggers a hard compile error if this section is enabled and used. */
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
470 | ||
/* XXX - Hook used by arc.c to adjust its memory footprint.  It may
 * later reflect the active debug level; for the moment it is safe to
 * always report "not debugging" (0).
 */
static __inline__ int
kmem_debugging(void)
{
	return (0);
}
480 | ||
/* Toggle the large-allocation warning (presumably drives
 * kmem_warning_flag above -- confirm against the implementation) */
extern int kmem_set_warning(int flag);

/* Global registry of all created slab caches and its guarding rwsem */
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

/* Sanity magic values embedded in each structure for corruption checks */
#define SKM_MAGIC			0x2e2e2e2e	/* Magazine */
#define SKO_MAGIC			0x20202020	/* Object */
#define SKS_MAGIC			0x22222222	/* Slab */
#define SKC_MAGIC			0x2c2c2c2c	/* Cache */

/* Presumed default for skc_delay (slab reclaim interval) -- confirm */
#define SPL_KMEM_CACHE_DELAY		5
#define SPL_KMEM_CACHE_OBJ_PER_SLAB	32

/* Solaris-style cache callbacks: ctor(obj, priv, flags),
 * dtor(obj, priv), reclaim(priv) */
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);
497 | ||
/* Per-CPU magazine: a small stash of object pointers used to satisfy
 * allocations/frees without taking the cache lock.
 * NOTE: skm_objs uses a C89-style zero-length array; a C99 flexible
 * array member would be the modern equivalent. */
typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	unsigned long		skm_age;	/* Last cache access */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;

/* Per-object header linking each object back to its owning slab */
typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

/* A slab: a contiguous region carved into sks_objs objects, tracked on
 * its cache's complete/partial lists */
typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;
523 | ||
/* A slab cache: per-CPU magazines front a collection of slabs whose
 * objects pass through the ctor/dtor callbacks.  Statistics fields are
 * protected by skc_lock. */
typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	*skc_mag[NR_CPUS]; /* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	spl_kmem_reclaim_t	skc_reclaim;	/* Reclaimator */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	uint32_t		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	uint32_t		skc_delay;	/* slab reclaim interval */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list;/* Completely alloc'ed */
	struct list_head	skc_partial_list; /* Partially alloc'ed */
	spinlock_t		skc_lock;	/* Cache lock */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create;/* Slab creates */
	uint64_t		skc_slab_destroy;/* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	uint64_t		skc_obj_max;	/* Obj max historic */
} spl_kmem_cache_t;
/* Solaris-compatible name for the cache type */
#define kmem_cache_t		spl_kmem_cache_t
2fb9b26a | 556 | |
/* Create a slab cache of 'size'-byte objects; ctor/dtor/reclaim are
 * optional callbacks receiving 'priv'.  'vmp' is unused (see skc_vmp)
 * and 'flags' takes KMC_* bits.  Returns the new cache. */
extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
        spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
        void *priv, void *vmp, int flags);

extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

/* Subsystem init/teardown, called at module load/unload */
int spl_kmem_init(void);
void spl_kmem_fini(void);

/* Solaris-compatible names mapped onto the SPL implementations */
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
#define kmem_reap()			spl_kmem_reap()
/* True when 'ptr' falls inside the vmalloc arena, i.e. was vmem allocated */
#define kmem_virt(ptr)			(((ptr) >= (void *)VMALLOC_START) && \
					 ((ptr) < (void *)VMALLOC_END))
f1ca4da6 | 580 | |
581 | #ifdef __cplusplus | |
582 | } | |
583 | #endif | |
584 | ||
09b414e8 | 585 | #endif /* _SPL_KMEM_H */ |