]>
Commit | Line | Data |
---|---|---|
715f6251 BB |
1 | /* |
2 | * This file is part of the SPL: Solaris Porting Layer. | |
3 | * | |
4 | * Copyright (c) 2008 Lawrence Livermore National Security, LLC. | |
5 | * Produced at Lawrence Livermore National Laboratory | |
6 | * Written by: | |
7 | * Brian Behlendorf <behlendorf1@llnl.gov>, | |
8 | * Herb Wartens <wartens2@llnl.gov>, | |
9 | * Jim Garlick <garlick@llnl.gov> | |
10 | * UCRL-CODE-235197 | |
11 | * | |
12 | * This is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This is distributed in the hope that it will be useful, but WITHOUT | |
18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
20 | * for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
25 | */ | |
26 | ||
09b414e8 BB |
27 | #ifndef _SPL_KMEM_H |
28 | #define _SPL_KMEM_H | |
f1ca4da6 BB |
29 | |
30 | #ifdef __cplusplus | |
31 | extern "C" { | |
32 | #endif | |
33 | ||
f1ca4da6 BB |
34 | #undef DEBUG_KMEM_UNIMPLEMENTED |
35 | ||
f1b59d26 | 36 | #include <linux/module.h> |
f1ca4da6 | 37 | #include <linux/slab.h> |
79b31f36 | 38 | #include <linux/vmalloc.h> |
f1ca4da6 BB |
39 | #include <linux/mm.h> |
40 | #include <linux/spinlock.h> | |
d6a26c6a BB |
41 | #include <linux/rwsem.h> |
42 | #include <linux/hash.h> | |
43 | #include <linux/ctype.h> | |
57d86234 | 44 | #include <sys/types.h> |
937879f1 | 45 | #include <sys/debug.h> |
/*
 * Memory allocation interfaces
 *
 * Map the Solaris KM_* allocation flags onto Linux GFP equivalents.
 * NOTE(review): KM_NOSLEEP maps to GFP_ATOMIC, which also draws from
 * emergency reserves -- confirm callers expect atomic-context semantics.
 */
#define KM_SLEEP	GFP_KERNEL
#define KM_NOSLEEP	GFP_ATOMIC
#undef  KM_PANIC			/* No linux analog */
#define KM_PUSHPAGE	(KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS	GFP_LEVEL_MASK
#define KM_FLAGS	__GFP_BITS_MASK

#ifdef DEBUG_KMEM
/* Running totals and high-water marks for tracked allocations */
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;

extern int kmem_warning_flag;		/* Enables large-allocation warnings */
extern atomic64_t kmem_cache_alloc_failed;

/* XXX - Not too surprisingly, with debugging enabled the xmem_locks are
 * very highly contended, particularly on xfree().  If we want to run with
 * this detailed debugging enabled for anything other than debugging we
 * need to minimize the contention by moving to a lock per xmem_table
 * entry model.
 */
#define KMEM_HASH_BITS		10
#define KMEM_TABLE_SIZE		(1 << KMEM_HASH_BITS)

/* Hash table, list, and lock tracking outstanding kmem allocations */
extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS		10
#define VMEM_TABLE_SIZE		(1 << VMEM_HASH_BITS)

/* Hash table, list, and lock tracking outstanding vmem allocations */
extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

/* Per-allocation tracking record kept in the kmem/vmem hash tables */
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

/*
 * Remove and return the kmem_debug_t tracking node for 'addr' from the
 * given hash table, or NULL if no entry exists.  Shared by kmem_free()
 * and vmem_free(); 'lock'/'table'/'bits' select either the kmem or the
 * vmem tracking structures.  The node is unlinked from both the hash
 * chain and the global allocation list while holding 'lock' with
 * interrupts disabled.
 */
static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock,struct hlist_head *table,int bits,void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	head = &table[hash_ptr(addr, bits)];
	hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			return p;
		}
	}

	spin_unlock_irqrestore(lock, flags);
	return NULL;
}

/*
 * Debug wrapper for the kmem allocators.  A kmem_debug_t tracking node
 * is allocated first and, on success, the real allocation is performed
 * with 'allocator'.  The node records the address, size, function, and
 * line of the allocation and is linked into kmem_table/kmem_list so
 * outstanding allocations can be reported.  Also maintains the
 * kmem_alloc_used / kmem_alloc_max accounting.  Evaluates to the
 * allocated pointer, or NULL on failure (including failure to allocate
 * the tracking node itself).
 *
 * Fix: sizeof(kmem_debug_t) is size_t; passing it to a %d conversion
 * without a cast is undefined behavior, so it is now cast to int.
 */
#define __kmem_alloc(size, flags, allocator)                                  \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
	if (_dptr_ == NULL) {                                                 \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "kmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		/* Marked unlikely because we should never be doing this, */  \
		/* we tolerate to up 2 pages but a single page is best.   */  \
		if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)  \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "    \
				       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",    \
				       (int)(size), (int)(flags),             \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		                                                              \
		_ptr_ = (void *)allocator((size), (flags));                   \
		if (_ptr_ == NULL) {                                          \
			/* Real allocation failed; drop the tracking node */  \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "kmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		} else {                                                      \
			atomic64_add((size), &kmem_alloc_used);               \
			if (unlikely(atomic64_read(&kmem_alloc_used) >        \
				     kmem_alloc_max))                         \
				kmem_alloc_max =                              \
					atomic64_read(&kmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&kmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &kmem_list);          \
			spin_unlock_irqrestore(&kmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})

#define kmem_alloc(size, flags)		__kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)	__kmem_alloc((size), (flags), kzalloc)

/*
 * Debug wrapper for kmem_free(): looks up and validates the tracking
 * node recorded by __kmem_alloc(), updates the accounting, poisons both
 * the tracking node and the buffer with 0x5a to catch use-after-free,
 * then frees them.
 *
 * Fix: kd_size and (size) are size_t-typed; passing them to the %d
 * conversions in ASSERTF without a cast is undefined behavior, so they
 * are now cast to int.
 */
#define kmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,         \
		(int)(size), _dptr_->kd_func, _dptr_->kd_line);               \
	atomic64_sub((size), &kmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),   \
		       kmem_alloc_max);                                       \
	                                                                      \
	/* Poison the tracking node and buffer to catch stale use */          \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	kfree(ptr);                                                           \
})
/*
 * Debug wrapper for vmem_alloc(): like __kmem_alloc() but backed by
 * __vmalloc() and tracked in vmem_table/vmem_list with the
 * vmem_alloc_used / vmem_alloc_max accounting.  Sleeping allocations
 * only (ASSERTs KM_SLEEP).  __GFP_ZERO is stripped from the
 * __vmalloc() call and honored by an explicit memset() instead.
 *
 * Fix: sizeof(kmem_debug_t) is size_t; passing it to a %d conversion
 * without a cast is undefined behavior, so it is now cast to int.
 */
#define __vmem_alloc(size, flags)                                             \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	ASSERT((flags) & KM_SLEEP);                                           \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
	if (_dptr_ == NULL) {                                                 \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "vmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		_ptr_ = (void *)__vmalloc((size), (((flags) |                 \
				__GFP_HIGHMEM) & ~__GFP_ZERO),                \
				PAGE_KERNEL);                                 \
		if (_ptr_ == NULL) {                                          \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "vmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		} else {                                                      \
			/* Zero explicitly since __GFP_ZERO was masked off */ \
			if (flags & __GFP_ZERO)                               \
				memset(_ptr_, 0, (size));                     \
			                                                      \
			atomic64_add((size), &vmem_alloc_used);               \
			if (unlikely(atomic64_read(&vmem_alloc_used) >        \
				     vmem_alloc_max))                         \
				vmem_alloc_max =                              \
					atomic64_read(&vmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&vmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &vmem_list);          \
			spin_unlock_irqrestore(&vmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})

#define vmem_alloc(size, flags)		__vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)	__vmem_alloc((size), ((flags) |       \
						__GFP_ZERO))

/*
 * Debug wrapper for vmem_free(): looks up and validates the tracking
 * node recorded by __vmem_alloc(), updates the accounting, poisons the
 * tracking node and the buffer with 0x5a, then releases them (the
 * buffer via vfree(), matching __vmalloc()).
 *
 * Fix: kd_size and (size) are size_t-typed; passing them to the %d
 * conversions in ASSERTF without a cast is undefined behavior, so they
 * are now cast to int.
 */
#define vmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,         \
		(int)(size), _dptr_->kd_func, _dptr_->kd_line);               \
	atomic64_sub((size), &vmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),   \
		       vmem_alloc_max);                                       \
	                                                                      \
	/* Poison the tracking node and buffer to catch stale use */          \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	vfree(ptr);                                                           \
})

#else /* DEBUG_KMEM */

/* With allocation tracking disabled, map directly onto the native
 * Linux allocators; the size argument of the free routines is unused */
#define kmem_alloc(size, flags)		kmalloc((size), (flags))
#define kmem_zalloc(size, flags)	kzalloc((size), (flags))
#define kmem_free(ptr, size)		kfree(ptr)

#define vmem_alloc(size, flags)		__vmalloc((size), ((flags) | \
					__GFP_HIGHMEM), PAGE_KERNEL)
/* Zeroed with an explicit memset(); presumably __GFP_ZERO cannot be
 * relied upon with __vmalloc() here -- confirm before simplifying */
#define vmem_zalloc(size, flags)					\
({									\
	void *_ptr_ = __vmalloc((size),((flags)|__GFP_HIGHMEM),PAGE_KERNEL); \
	if (_ptr_)							\
		memset(_ptr_, 0, (size));				\
	_ptr_;								\
})
#define vmem_free(ptr, size)		vfree(ptr)

#endif /* DEBUG_KMEM */

/* Solaris interface with no Linux implementation yet; building with
 * DEBUG_KMEM_UNIMPLEMENTED defined intentionally fails here. */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#undef KMC_NOTOUCH			/* XXX: Unsupported */
#define KMC_NODEBUG			0x00000000 /* Default behavior */
#define KMC_NOMAGAZINE			/* XXX: Unsupported */
#define KMC_NOHASH			/* XXX: Unsupported */
#define KMC_QCACHE			/* XXX: Unsupported */

/* KMC_REAP_CHUNK: presumably the number of objects reclaimed per reap
 * pass -- confirm against the implementation in spl-kmem.c */
#define KMC_REAP_CHUNK			256
#define KMC_DEFAULT_SEEKS		DEFAULT_SEEKS

/* Remaining Solaris kmem interfaces without Linux implementations;
 * each stub fails the build if DEBUG_KMEM_UNIMPLEMENTED is defined. */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
349 | ||
/* XXX - Used by arc.c to adjust its memory footprint.  This hook could
 * later report an actual debug level; for now callers always see 0,
 * which is safe. */
static __inline__ int
kmem_debugging(void)
{
	return (0);
}
359 | ||
/* Toggle kmem warning output; see kmem_warning_flag above */
extern int kmem_set_warning(int flag);


/* Sanity magic values stamped into each slab allocator structure */
#define SKM_MAGIC			0x2e2e2e2e	/* Magazine */
#define SKO_MAGIC			0x20202020	/* Object */
#define SKS_MAGIC			0x22222222	/* Slab */
#define SKC_MAGIC			0x2c2c2c2c	/* Cache */

/* Per-cache hash table of in-use objects */
#define SPL_KMEM_CACHE_HASH_BITS	12
#define SPL_KMEM_CACHE_HASH_ELTS	(1 << SPL_KMEM_CACHE_HASH_BITS)
#define SPL_KMEM_CACHE_HASH_SIZE	(sizeof(struct hlist_head) * \
					SPL_KMEM_CACHE_HASH_ELTS)

/* Slab reclaim interval -- units presumably seconds, TODO confirm */
#define SPL_KMEM_CACHE_DELAY		5
#define SPL_KMEM_CACHE_OBJ_PER_SLAB	32

/* Constructor/destructor/reclaim callbacks registered with a cache */
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

/* Per-CPU warm-object cache; skm_objs is a flexible-length array of
 * cached object pointers (see skc_mag[] below) */
typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	unsigned long		skm_age;	/* Last cache access */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;

/* Per-object header linking an object to its slab and to either the
 * free list or the used-object hash */
typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	uint32_t		sko_flags;	/* Per object flags */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
	struct hlist_node	sko_hlist;	/* Used object hash linkage */
} spl_kmem_obj_t;

/* A slab: a group of objects carved from one allocation, owned by a
 * cache and tracked on its partial/complete lists */
typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;

/* An object cache: configuration, callbacks, per-CPU magazines, slab
 * lists, the used-object hash, and statistics */
typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	*skc_mag[NR_CPUS]; /* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	spl_kmem_reclaim_t	skc_reclaim;	/* Reclaimator */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	uint32_t		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_chunk_size;	/* sizeof(*obj) + alignment */
	uint32_t		skc_slab_size;	/* slab size */
	uint32_t		skc_max_chunks;	/* max chunks per slab */
	uint32_t		skc_delay;	/* slab reclaim interval */
	uint32_t		skc_hash_bits;	/* Hash table bits */
	uint32_t		skc_hash_size;	/* Hash table size */
	uint32_t		skc_hash_elts;	/* Hash table elements */
	struct hlist_head	*skc_hash;	/* Hash table address */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list;/* Completely alloc'ed */
	struct list_head	skc_partial_list;  /* Partially alloc'ed */
	spinlock_t		skc_lock;	/* Cache lock */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create;/* Slab creates */
	uint64_t		skc_slab_destroy;/* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_hash_depth;	/* Lazy hash depth */
	uint64_t		skc_hash_count;	/* Hash entries current */
} spl_kmem_cache_t;
446 | ||
/* Public slab cache API implemented in spl-kmem.c */
extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
        spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
        void *priv, void *vmp, int flags);

extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

/* Module init/teardown for the kmem subsystem */
int spl_kmem_init(void);
void spl_kmem_fini(void);

/* Solaris-compatible names mapped onto the SPL implementations */
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
#define kmem_reap()			spl_kmem_reap()

/* Compatibility wrapper for kernels where kmem_cache_create() lost its
 * dtor argument; HAVE_KMEM_CACHE_CREATE_DTOR presumably comes from
 * configure.  NOTE(review): kmem_cache_create is redefined above as a
 * 9-argument macro, so these 6-argument expansions look inconsistent --
 * confirm the intended expansion target. */
#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
        kmem_cache_create(name, size, align, flags, ctor, dtor)
#else
#define __kmem_cache_create(name, size, align, flags, ctor, dtor) \
        kmem_cache_create(name, size, align, flags, ctor)
#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */
476 | ||
f1ca4da6 BB |
477 | #ifdef __cplusplus |
478 | } | |
479 | #endif | |
480 | ||
09b414e8 | 481 | #endif /* _SPL_KMEM_H */ |