/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#undef DEBUG_KMEM_UNIMPLEMENTED
#undef DEBUG_KMEM_TRACKING      /* Per-allocation memory tracking */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/types.h>
#include <sys/debug.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK

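/*
 * Usage sketch (illustrative only; the buffer name and size below are
 * hypothetical, not part of this interface): callers which may sleep pass
 * KM_SLEEP, while atomic or interrupt context passes KM_NOSLEEP.  The
 * kmem_free() call takes the original allocation size so the DEBUG_KMEM
 * accounting defined below can stay balanced.
 *
 *      void *buf = kmem_zalloc(128, KM_SLEEP);
 *      if (buf != NULL) {
 *              ... use buf ...
 *              kmem_free(buf, 128);
 *      }
 */
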
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;
extern int kmem_warning_flag;

#ifdef DEBUG_KMEM_TRACKING
/* XXX - Not too surprisingly, with debugging enabled the xmem_locks are
 * highly contended, particularly on xfree().  If we want to run with this
 * detailed tracking enabled for anything other than debugging, we need to
 * minimize the contention by moving to a lock per xmem_table entry model.
 */
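/*
 * A per-entry lock model as suggested above might look roughly like the
 * sketch below (illustrative only, not implemented here; the bucket type
 * and field names are hypothetical):
 *
 *      typedef struct kmem_bucket {
 *              spinlock_t              kb_lock;
 *              struct hlist_head       kb_head;
 *      } kmem_bucket_t;
 *
 *      extern kmem_bucket_t kmem_table[KMEM_TABLE_SIZE];
 */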
#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}

#define __kmem_alloc(size, flags, allocator) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "kmem_alloc(%d, 0x%x) debug failed\n", \
                               sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                /* Marked unlikely because we should never be doing this; */ \
                /* we tolerate up to 2 pages, but a single page is best.  */ \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                \
                _ptr_ = (void *)allocator((size), (flags)); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "kmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } else { \
                        atomic64_add((size), &kmem_alloc_used); \
                        if (unlikely(atomic64_read(&kmem_alloc_used) > \
                            kmem_alloc_max)) \
                                kmem_alloc_max = \
                                        atomic64_read(&kmem_alloc_used); \
                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&kmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                                &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &kmem_list); \
                        spin_unlock_irqrestore(&kmem_lock, _flags_); \
                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } \
        } \
        \
        _ptr_; \
})

#define kmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
        \
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
                       kmem_alloc_max); \
        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
        \
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})

#define __vmem_alloc(size, flags) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
        \
        ASSERT((flags) & KM_SLEEP); \
        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "vmem_alloc(%d, 0x%x) debug failed\n", \
                               sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                _ptr_ = (void *)__vmalloc((size), (((flags) | \
                                __GFP_HIGHMEM) & ~__GFP_ZERO), \
                                PAGE_KERNEL); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "vmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } else { \
                        if (flags & __GFP_ZERO) \
                                memset(_ptr_, 0, (size)); \
                        \
                        atomic64_add((size), &vmem_alloc_used); \
                        if (unlikely(atomic64_read(&vmem_alloc_used) > \
                            vmem_alloc_max)) \
                                vmem_alloc_max = \
                                        atomic64_read(&vmem_alloc_used); \
                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&vmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                                &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &vmem_list); \
                        spin_unlock_irqrestore(&vmem_lock, _flags_); \
                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } \
        } \
        \
        _ptr_; \
})

#define vmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
        \
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
                       vmem_alloc_max); \
        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
        \
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})

#else /* DEBUG_KMEM_TRACKING */

#define __kmem_alloc(size, flags, allocator) \
({      void *_ptr_ = NULL; \
        \
        /* Marked unlikely because we should never be doing this; */ \
        /* we tolerate up to 2 pages, but a single page is best.  */ \
        if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                               "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                               (int)(size), (int)(flags), \
                               atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
        \
        _ptr_ = (void *)allocator((size), (flags)); \
        if (_ptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "kmem_alloc(%d, 0x%x) failed (%ld/" \
                               "%ld)\n", (int)(size), (int)(flags), \
                               atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
        } else { \
                atomic64_add((size), &kmem_alloc_used); \
                if (unlikely(atomic64_read(&kmem_alloc_used) > \
                    kmem_alloc_max)) \
                        kmem_alloc_max = \
                                atomic64_read(&kmem_alloc_used); \
                \
                __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(%d, 0x%x) = %p " \
                               "(%ld/%ld)\n", (int)(size), (int)(flags), \
                               _ptr_, atomic64_read(&kmem_alloc_used), \
                               kmem_alloc_max); \
        } \
        \
        _ptr_; \
})

#define kmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
                       kmem_alloc_max); \
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})

#define __vmem_alloc(size, flags) \
({      void *_ptr_ = NULL; \
        \
        ASSERT((flags) & KM_SLEEP); \
        \
        _ptr_ = (void *)__vmalloc((size), (((flags) | \
                        __GFP_HIGHMEM) & ~__GFP_ZERO), PAGE_KERNEL); \
        if (_ptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "vmem_alloc(%d, 0x%x) failed (%ld/" \
                               "%ld)\n", (int)(size), (int)(flags), \
                               atomic64_read(&vmem_alloc_used), \
                               vmem_alloc_max); \
        } else { \
                if (flags & __GFP_ZERO) \
                        memset(_ptr_, 0, (size)); \
                \
                atomic64_add((size), &vmem_alloc_used); \
                if (unlikely(atomic64_read(&vmem_alloc_used) > \
                    vmem_alloc_max)) \
                        vmem_alloc_max = \
                                atomic64_read(&vmem_alloc_used); \
                \
                __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                               "%d, 0x%x) = %p (%ld/%ld)\n", \
                               (int)(size), (int)(flags), _ptr_, \
                               atomic64_read(&vmem_alloc_used), \
                               vmem_alloc_max); \
        } \
        \
        _ptr_; \
})

#define vmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
                       vmem_alloc_max); \
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})

#endif /* DEBUG_KMEM_TRACKING */

#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)

#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | __GFP_ZERO))

#else /* DEBUG_KMEM */

#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size)            kfree(ptr)

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) | \
                                            __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags) \
({ \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM), PAGE_KERNEL); \
        if (_ptr_) \
                memset(_ptr_, 0, (size)); \
        _ptr_; \
})
#define vmem_free(ptr, size)            vfree(ptr)

#endif /* DEBUG_KMEM */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#define KMC_NOTOUCH                     0x00000001
#define KMC_NODEBUG                     0x00000002 /* Default behavior */
#define KMC_NOMAGAZINE                  0x00000004 /* XXX: No disable support available */
#define KMC_NOHASH                      0x00000008 /* XXX: No hash available */
#define KMC_QCACHE                      0x00000010 /* XXX: Unsupported */
#define KMC_KMEM                        0x00000100 /* Use kmem cache */
#define KMC_VMEM                        0x00000200 /* Use vmem cache */
#define KMC_OFFSLAB                     0x00000400 /* Objects not on slab */

#define KMC_REAP_CHUNK                  256
#define KMC_DEFAULT_SEEKS               DEFAULT_SEEKS

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint.  We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels.  For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

extern int kmem_set_warning(int flag);

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define SKM_MAGIC                       0x2e2e2e2e
#define SKO_MAGIC                       0x20202020
#define SKS_MAGIC                       0x22222222
#define SKC_MAGIC                       0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY            5
#define SPL_KMEM_CACHE_OBJ_PER_SLAB     32

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

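/*
 * Constructor/destructor sketch (illustrative only; my_obj_t, my_ctor()
 * and my_dtor() are hypothetical names): the first argument is the object
 * buffer, the second is the private pointer supplied at cache creation,
 * and, following the Solaris convention, the constructor's third argument
 * carries the KM_* allocation flags and returning 0 indicates success.
 *
 *      typedef struct my_obj { int mo_state; } my_obj_t;
 *
 *      static int
 *      my_ctor(void *obj, void *priv, int kmflags)
 *      {
 *              my_obj_t *mo = obj;
 *              mo->mo_state = 0;
 *              return 0;
 *      }
 *
 *      static void
 *      my_dtor(void *obj, void *priv)
 *      {
 *      }
 */
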
typedef struct spl_kmem_magazine {
        uint32_t                skm_magic;      /* Sanity magic */
        uint32_t                skm_avail;      /* Available objects */
        uint32_t                skm_size;       /* Magazine size */
        uint32_t                skm_refill;     /* Batch refill size */
        unsigned long           skm_age;        /* Last cache access */
        void                    *skm_objs[0];   /* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
        uint32_t                sko_magic;      /* Sanity magic */
        void                    *sko_addr;      /* Buffer address */
        struct spl_kmem_slab    *sko_slab;      /* Owned by slab */
        struct list_head        sko_list;       /* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
        uint32_t                sks_magic;      /* Sanity magic */
        uint32_t                sks_objs;       /* Objects per slab */
        struct spl_kmem_cache   *sks_cache;     /* Owned by cache */
        struct list_head        sks_list;       /* Slab list linkage */
        struct list_head        sks_free_list;  /* Free object list */
        unsigned long           sks_age;        /* Last modify jiffie */
        uint32_t                sks_ref;        /* Ref count of used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_cache {
        uint32_t                skc_magic;      /* Sanity magic */
        uint32_t                skc_name_size;  /* Name length */
        char                    *skc_name;      /* Name string */
        spl_kmem_magazine_t     *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
        uint32_t                skc_mag_size;   /* Magazine size */
        uint32_t                skc_mag_refill; /* Magazine refill count */
        spl_kmem_ctor_t         skc_ctor;       /* Constructor */
        spl_kmem_dtor_t         skc_dtor;       /* Destructor */
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaim callback */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
        uint32_t                skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_slab_objs;  /* Objects per slab */
        uint32_t                skc_slab_size;  /* Slab size */
        uint32_t                skc_delay;      /* Slab reclaim interval */
        struct list_head        skc_list;       /* List of caches linkage */
        struct list_head        skc_complete_list; /* Completely alloc'ed */
        struct list_head        skc_partial_list;  /* Partially alloc'ed */
        spinlock_t              skc_lock;       /* Cache lock */
        uint64_t                skc_slab_fail;  /* Slab alloc failures */
        uint64_t                skc_slab_create;   /* Slab creates */
        uint64_t                skc_slab_destroy;  /* Slab destroys */
        uint64_t                skc_slab_total; /* Slab total current */
        uint64_t                skc_slab_alloc; /* Slab alloc current */
        uint64_t                skc_slab_max;   /* Slab max historic */
        uint64_t                skc_obj_total;  /* Obj total current */
        uint64_t                skc_obj_alloc;  /* Obj alloc current */
        uint64_t                skc_obj_max;    /* Obj max historic */
} spl_kmem_cache_t;

extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
        spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
        void *priv, void *vmp, int flags);

extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

int spl_kmem_init(void);
void spl_kmem_fini(void);

#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags) \
        spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)        spl_kmem_cache_reap_now(skc)
#define kmem_reap()                     spl_kmem_reap()
#define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) <  (void *)VMALLOC_END))
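
/*
 * Cache lifecycle sketch using the Solaris-compatible wrappers above
 * (illustrative only; "my_cache", my_obj_t, my_ctor and my_dtor are
 * hypothetical names).  The trailing arguments are the reclaim callback,
 * private data, vmem arena, and KMC_* flags, all left at their defaults
 * here:
 *
 *      spl_kmem_cache_t *skc;
 *
 *      skc = kmem_cache_create("my_cache", sizeof(my_obj_t), 0,
 *                              my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *      if (skc != NULL) {
 *              my_obj_t *obj = kmem_cache_alloc(skc, KM_SLEEP);
 *              if (obj != NULL)
 *                      kmem_cache_free(skc, obj);
 *              kmem_cache_destroy(skc);
 *      }
 */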

#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
        kmem_cache_create(name, size, align, flags, ctor, dtor)
#else
#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
        kmem_cache_create(name, size, align, flags, ctor)
#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */

#ifdef __cplusplus
}
#endif

#endif /* _SPL_KMEM_H */