/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *  UCRL-CODE-235197
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#undef DEBUG_KMEM_UNIMPLEMENTED
#undef DEBUG_KMEM_TRACKING              /* Per-allocation memory tracking */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <asm/atomic_compat.h>
#include <sys/types.h>
#include <sys/debug.h>

/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK

/*
 * Used internally, the kernel does not need to support this flag
 */
#ifndef __GFP_ZERO
#define __GFP_ZERO      0x8000
#endif
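
/*
 * Illustrative note (not part of the original header): the Solaris-style
 * KM_* flags above map directly onto Linux GFP_* flags.  KM_SLEEP
 * (GFP_KERNEL) may block while memory is reclaimed and is only safe in
 * process context; KM_NOSLEEP (GFP_ATOMIC) never sleeps but may fail, so
 * callers in atomic or interrupt context must check for NULL.  A minimal
 * sketch, assuming a hypothetical foo_t type:
 *
 *      foo_t *fp = kmem_zalloc(sizeof (foo_t), KM_SLEEP);
 *
 *      foo_t *ip = kmem_alloc(sizeof (foo_t), KM_NOSLEEP);
 *      if (ip == NULL)
 *              return (-ENOMEM);
 */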

#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;
extern int kmem_warning_flag;

#ifdef DEBUG_KMEM_TRACKING
/* XXX - Not too surprisingly, with debugging enabled the xmem_locks are
 * very highly contended, particularly on xfree().  If we want to run with
 * this detailed debugging enabled for anything other than debugging, we
 * need to minimize the contention by moving to a lock-per-xmem_table-entry
 * model.
 */
#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}
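
/*
 * Illustrative sketch (not part of the original header): with
 * DEBUG_KMEM_TRACKING enabled every outstanding allocation is described by
 * a kmem_debug_t that is hashed by address in kmem_table and linked on
 * kmem_list, so a leak report can simply walk kmem_list under kmem_lock:
 *
 *      kmem_debug_t *kd;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&kmem_lock, flags);
 *      list_for_each_entry(kd, &kmem_list, kd_list)
 *              printk(KERN_WARNING "leaked %zu bytes at %p (%s:%d)\n",
 *                     kd->kd_size, kd->kd_addr, kd->kd_func, kd->kd_line);
 *      spin_unlock_irqrestore(&kmem_lock, flags);
 */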

#define __kmem_alloc(size, flags, allocator, args...) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
 \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "kmem_alloc(%d, 0x%x) debug failed\n", \
                               sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                /* Marked unlikely because we should never be doing this; */ \
                /* we tolerate up to 2 pages but a single page is best.   */ \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
 \
                _ptr_ = (void *)allocator((size), (flags), ## args); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "kmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } else { \
                        atomic64_add((size), &kmem_alloc_used); \
                        if (unlikely(atomic64_read(&kmem_alloc_used) > \
                            kmem_alloc_max)) \
                                kmem_alloc_max = \
                                        atomic64_read(&kmem_alloc_used); \
 \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&kmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                            &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &kmem_list); \
                        spin_unlock_irqrestore(&kmem_lock, _flags_); \
 \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } \
        } \
 \
        _ptr_; \
})

#define kmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
 \
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
            "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
            _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
            kmem_alloc_max); \
 \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
 \
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})

#define __vmem_alloc(size, flags) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
 \
        ASSERT((flags) & KM_SLEEP); \
 \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "vmem_alloc(%d, 0x%x) debug failed\n", \
                               sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                _ptr_ = (void *)__vmalloc((size), (((flags) | \
                                __GFP_HIGHMEM) & ~__GFP_ZERO), \
                                PAGE_KERNEL); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "vmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } else { \
                        if (flags & __GFP_ZERO) \
                                memset(_ptr_, 0, (size)); \
 \
                        atomic64_add((size), &vmem_alloc_used); \
                        if (unlikely(atomic64_read(&vmem_alloc_used) > \
                            vmem_alloc_max)) \
                                vmem_alloc_max = \
                                        atomic64_read(&vmem_alloc_used); \
 \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&vmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                            &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &vmem_list); \
                        spin_unlock_irqrestore(&vmem_lock, _flags_); \
 \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } \
        } \
 \
        _ptr_; \
})

#define vmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
 \
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
            "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
            _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
            vmem_alloc_max); \
 \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
 \
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})

#else /* DEBUG_KMEM_TRACKING */

#define __kmem_alloc(size, flags, allocator, args...) \
({      void *_ptr_ = NULL; \
 \
        /* Marked unlikely because we should never be doing this; */ \
        /* we tolerate up to 2 pages but a single page is best.   */ \
        if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                               "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                               (int)(size), (int)(flags), \
                               atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
 \
        _ptr_ = (void *)allocator((size), (flags), ## args); \
        if (_ptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "kmem_alloc(%d, 0x%x) failed (%ld/" \
                               "%ld)\n", (int)(size), (int)(flags), \
                               atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
        } else { \
                atomic64_add((size), &kmem_alloc_used); \
                if (unlikely(atomic64_read(&kmem_alloc_used) > \
                    kmem_alloc_max)) \
                        kmem_alloc_max = \
                                atomic64_read(&kmem_alloc_used); \
 \
                __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(%d, 0x%x) = %p " \
                               "(%ld/%ld)\n", (int)(size), (int)(flags), \
                               _ptr_, atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
        } \
 \
        _ptr_; \
})

#define kmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
 \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
            kmem_alloc_max); \
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})

#define __vmem_alloc(size, flags) \
({      void *_ptr_ = NULL; \
 \
        ASSERT((flags) & KM_SLEEP); \
 \
        _ptr_ = (void *)__vmalloc((size), (((flags) | \
                    __GFP_HIGHMEM) & ~__GFP_ZERO), PAGE_KERNEL); \
        if (_ptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "vmem_alloc(%d, 0x%x) failed (%ld/" \
                               "%ld)\n", (int)(size), (int)(flags), \
                               atomic64_read(&vmem_alloc_used), \
                               vmem_alloc_max); \
        } else { \
                if (flags & __GFP_ZERO) \
                        memset(_ptr_, 0, (size)); \
 \
                atomic64_add((size), &vmem_alloc_used); \
                if (unlikely(atomic64_read(&vmem_alloc_used) > \
                    vmem_alloc_max)) \
                        vmem_alloc_max = \
                                atomic64_read(&vmem_alloc_used); \
 \
                __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                               "%d, 0x%x) = %p (%ld/%ld)\n", \
                               (int)(size), (int)(flags), _ptr_, \
                               atomic64_read(&vmem_alloc_used), \
                               vmem_alloc_max); \
        } \
 \
        _ptr_; \
})

#define vmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
 \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
            vmem_alloc_max); \
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})

#endif /* DEBUG_KMEM_TRACKING */

#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)

#ifdef HAVE_KMALLOC_NODE
#define kmem_alloc_node(size, flags, node) \
        __kmem_alloc((size), (flags), kmalloc_node, node)
#else
#define kmem_alloc_node(size, flags, node) \
        __kmem_alloc((size), (flags), kmalloc)
#endif

#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | __GFP_ZERO))

#else /* DEBUG_KMEM */

#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size)            kfree(ptr)

#ifdef HAVE_KMALLOC_NODE
#define kmem_alloc_node(size, flags, node) \
        kmalloc_node((size), (flags), (node))
#else
#define kmem_alloc_node(size, flags, node) \
        kmalloc((size), (flags))
#endif

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) | \
                                            __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags) \
({ \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM), PAGE_KERNEL); \
        if (_ptr_) \
                memset(_ptr_, 0, (size)); \
        _ptr_; \
})
#define vmem_free(ptr, size)            vfree(ptr)

#endif /* DEBUG_KMEM */
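
/*
 * Illustrative usage sketch (not part of the original header).
 * kmem_alloc()/kmem_zalloc() are backed by kmalloc()/kzalloc() and are
 * meant for small, physically contiguous buffers (with DEBUG_KMEM enabled
 * a warning is logged for requests larger than two pages), while
 * vmem_alloc()/vmem_zalloc() are backed by __vmalloc() and suit larger,
 * virtually contiguous buffers.  The Solaris-style interfaces pass the
 * size explicitly, so each buffer must be released with the matching
 * free routine and the same size:
 *
 *      char *small = kmem_zalloc(512, KM_SLEEP);
 *      char *large = vmem_alloc(128 * 1024, KM_SLEEP);
 *      ...
 *      if (small != NULL)
 *              kmem_free(small, 512);
 *      if (large != NULL)
 *              vmem_free(large, 128 * 1024);
 */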

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#define KMC_NOTOUCH             0x00000001
#define KMC_NODEBUG             0x00000002 /* Default behavior */
#define KMC_NOMAGAZINE          0x00000004 /* XXX: No disable support available */
#define KMC_NOHASH              0x00000008 /* XXX: No hash available */
#define KMC_QCACHE              0x00000010 /* XXX: Unsupported */
#define KMC_KMEM                0x00000100 /* Use kmem cache */
#define KMC_VMEM                0x00000200 /* Use vmem cache */
#define KMC_OFFSLAB             0x00000400 /* Objects not on slab */

#define KMC_REAP_CHUNK          256
#define KMC_DEFAULT_SEEKS       DEFAULT_SEEKS

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint.  We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels.  For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

extern int kmem_set_warning(int flag);

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define SKM_MAGIC                       0x2e2e2e2e
#define SKO_MAGIC                       0x20202020
#define SKS_MAGIC                       0x22222222
#define SKC_MAGIC                       0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY            5
#define SPL_KMEM_CACHE_OBJ_PER_SLAB     32

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);
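
/*
 * Illustrative sketch (not part of the original header): a constructor is
 * called with the object, the private pointer given at cache creation time
 * and the KM_* flags; a destructor receives the object and private pointer;
 * a reclaim callback receives only the private pointer.  A minimal example,
 * assuming a hypothetical foo_t object type:
 *
 *      static int
 *      foo_ctor(void *obj, void *priv, int kmflags)
 *      {
 *              foo_t *fp = obj;
 *
 *              memset(fp, 0, sizeof (foo_t));
 *              return (0);
 *      }
 *
 *      static void
 *      foo_dtor(void *obj, void *priv)
 *      {
 *              foo_t *fp = obj;
 *
 *              ASSERT(fp != NULL);
 *      }
 */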

typedef struct spl_kmem_magazine {
        uint32_t                skm_magic;      /* Sanity magic */
        uint32_t                skm_avail;      /* Available objects */
        uint32_t                skm_size;       /* Magazine size */
        uint32_t                skm_refill;     /* Batch refill size */
        unsigned long           skm_age;        /* Last cache access */
        void                    *skm_objs[0];   /* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
        uint32_t                sko_magic;      /* Sanity magic */
        void                    *sko_addr;      /* Buffer address */
        struct spl_kmem_slab    *sko_slab;      /* Owned by slab */
        struct list_head        sko_list;       /* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
        uint32_t                sks_magic;      /* Sanity magic */
        uint32_t                sks_objs;       /* Objects per slab */
        struct spl_kmem_cache   *sks_cache;     /* Owned by cache */
        struct list_head        sks_list;       /* Slab list linkage */
        struct list_head        sks_free_list;  /* Free object list */
        unsigned long           sks_age;        /* Last modify jiffie */
        uint32_t                sks_ref;        /* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_cache {
        uint32_t                skc_magic;      /* Sanity magic */
        uint32_t                skc_name_size;  /* Name length */
        char                    *skc_name;      /* Name string */
        spl_kmem_magazine_t     *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
        uint32_t                skc_mag_size;   /* Magazine size */
        uint32_t                skc_mag_refill; /* Magazine refill count */
        spl_kmem_ctor_t         skc_ctor;       /* Constructor */
        spl_kmem_dtor_t         skc_dtor;       /* Destructor */
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaim callback */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
        uint32_t                skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_slab_objs;  /* Objects per slab */
        uint32_t                skc_slab_size;  /* Slab size */
        uint32_t                skc_delay;      /* Slab reclaim interval */
        struct list_head        skc_list;       /* List of caches linkage */
        struct list_head        skc_complete_list; /* Completely alloc'ed */
        struct list_head        skc_partial_list;  /* Partially alloc'ed */
        spinlock_t              skc_lock;       /* Cache lock */
        uint64_t                skc_slab_fail;  /* Slab alloc failures */
        uint64_t                skc_slab_create;  /* Slab creates */
        uint64_t                skc_slab_destroy; /* Slab destroys */
        uint64_t                skc_slab_total; /* Slab total current */
        uint64_t                skc_slab_alloc; /* Slab alloc current */
        uint64_t                skc_slab_max;   /* Slab max historic */
        uint64_t                skc_obj_total;  /* Obj total current */
        uint64_t                skc_obj_alloc;  /* Obj alloc current */
        uint64_t                skc_obj_max;    /* Obj max historic */
} spl_kmem_cache_t;
#define kmem_cache_t            spl_kmem_cache_t

extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags);

extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

int spl_kmem_init(void);
void spl_kmem_fini(void);

#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags) \
        spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)        spl_kmem_cache_reap_now(skc)
#define kmem_reap()                     spl_kmem_reap()
#define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) < (void *)VMALLOC_END))
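
/*
 * Illustrative cache lifecycle sketch (not part of the original header),
 * reusing the hypothetical foo_t, foo_ctor() and foo_dtor() from the
 * earlier comment.  Flags such as KMC_KMEM or KMC_VMEM select the backing
 * store; 0 is used here for the default behavior:
 *
 *      spl_kmem_cache_t *foo_cache;
 *      foo_t *fp;
 *
 *      foo_cache = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *                                    foo_ctor, foo_dtor, NULL,
 *                                    NULL, NULL, 0);
 *
 *      fp = kmem_cache_alloc(foo_cache, KM_SLEEP);
 *      ...
 *      kmem_cache_free(foo_cache, fp);
 *      kmem_cache_destroy(foo_cache);
 *
 * kmem_virt() simply tests whether a pointer falls inside the vmalloc
 * address range, which lets callers distinguish vmalloc()-backed buffers
 * from kmalloc()-backed ones.
 */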

#ifdef __cplusplus
}
#endif

#endif /* _SPL_KMEM_H */