#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#define DEBUG_KMEM
#undef DEBUG_KMEM_UNIMPLEMENTED

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/debug.h>

/*
 * Memory allocation interfaces
 */
#define KM_SLEEP	GFP_KERNEL
#define KM_NOSLEEP	GFP_ATOMIC
#undef  KM_PANIC	/* No linux analog */
#define KM_PUSHPAGE	(GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS	GFP_LEVEL_MASK
#define KM_FLAGS	__GFP_BITS_MASK

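/*
 * Illustrative only: a minimal sketch of how these flag mappings are
 * meant to be used (my_data_t is a hypothetical type).  KM_SLEEP maps
 * to GFP_KERNEL and may block, so it is for process context only;
 * KM_NOSLEEP maps to GFP_ATOMIC for atomic/interrupt context and the
 * allocation may fail.
 *
 *   my_data_t *dp = kmem_alloc(sizeof(my_data_t), KM_SLEEP);
 *
 *   my_data_t *ip = kmem_alloc(sizeof(my_data_t), KM_NOSLEEP);
 *   if (ip == NULL)
 *           return (ENOMEM);    // KM_NOSLEEP callers must check
 */
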
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;

extern int kmem_warning_flag;
extern atomic64_t kmem_cache_alloc_failed;

#define KMEM_HASH_BITS		10
#define KMEM_TABLE_SIZE		(1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS		10
#define VMEM_TABLE_SIZE		(1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	head = &table[hash_ptr(addr, bits)];
	hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			return p;
		}
	}

	spin_unlock_irqrestore(lock, flags);
	return NULL;
}
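
/*
 * Conceptual sketch (not part of the API): the debug build pairs every
 * allocation with a kmem_debug_t record hashed by the returned pointer,
 * and every free must find and remove that record:
 *
 *   dbg = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
 *   ASSERT(dbg != NULL);            // catches frees of untracked pointers
 *   ASSERT(dbg->kd_size == size);   // catches size-mismatched frees
 */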

#define __kmem_alloc(size, flags, allocator)                                  \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));     \
	if (_dptr_ == NULL) {                                                 \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "kmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		/* Marked unlikely because we should never be doing this; */  \
		/* we tolerate up to 2 pages but a single page is best.   */  \
		if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)  \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "    \
				       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",    \
				       (int)(size), (int)(flags),             \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		                                                              \
		_ptr_ = (void *)allocator((size), (flags));                   \
		if (_ptr_ == NULL) {                                          \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "kmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		} else {                                                      \
			atomic64_add((size), &kmem_alloc_used);               \
			if (unlikely(atomic64_read(&kmem_alloc_used) >        \
				     kmem_alloc_max))                         \
				kmem_alloc_max =                              \
					atomic64_read(&kmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&kmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &kmem_list);          \
			spin_unlock_irqrestore(&kmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&kmem_alloc_used),       \
				       kmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})

#define kmem_alloc(size, flags)		__kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)	__kmem_alloc((size), (flags), kzalloc)

#define kmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,         \
		(int)(size), _dptr_->kd_func, _dptr_->kd_line); /* Size must match */\
	atomic64_sub((size), &kmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),   \
		       kmem_alloc_max);                                       \
	                                                                      \
	/* Poison the tracking record and the buffer to catch stale use */    \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	kfree(ptr);                                                           \
})
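
/*
 * Illustrative only (my_buf is hypothetical): unlike kfree(), the
 * Solaris-style kmem_free() takes the original allocation size, which
 * the debug build cross-checks against the recorded kd_size.
 *
 *   char *my_buf = kmem_zalloc(128, KM_SLEEP);   // zeroed, may sleep
 *   if (my_buf != NULL) {
 *           ...
 *           kmem_free(my_buf, 128);              // size must match
 *   }
 */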

#define __vmem_alloc(size, flags)                                             \
({	void *_ptr_ = NULL;                                                   \
	kmem_debug_t *_dptr_;                                                 \
	unsigned long _flags_;                                                \
	                                                                      \
	ASSERT((flags) & KM_SLEEP);                                           \
	                                                                      \
	_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
	if (_dptr_ == NULL) {                                                 \
		__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
			       "vmem_alloc(%d, 0x%x) debug failed\n",         \
			       (int)sizeof(kmem_debug_t), (int)(flags));      \
	} else {                                                              \
		_ptr_ = (void *)__vmalloc((size), (((flags) |                 \
				__GFP_HIGHMEM) & ~__GFP_ZERO),                \
				PAGE_KERNEL);                                 \
		if (_ptr_ == NULL) {                                          \
			kfree(_dptr_);                                        \
			__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
				       "vmem_alloc(%d, 0x%x) failed (%ld/"    \
				       "%ld)\n", (int)(size), (int)(flags),   \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		} else {                                                      \
			if ((flags) & __GFP_ZERO)                             \
				memset(_ptr_, 0, (size));                     \
			                                                      \
			atomic64_add((size), &vmem_alloc_used);               \
			if (unlikely(atomic64_read(&vmem_alloc_used) >        \
				     vmem_alloc_max))                         \
				vmem_alloc_max =                              \
					atomic64_read(&vmem_alloc_used);      \
			                                                      \
			INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
			INIT_LIST_HEAD(&_dptr_->kd_list);                     \
			_dptr_->kd_addr = _ptr_;                              \
			_dptr_->kd_size = (size);                             \
			_dptr_->kd_func = __FUNCTION__;                       \
			_dptr_->kd_line = __LINE__;                           \
			spin_lock_irqsave(&vmem_lock, _flags_);               \
			hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
				&vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
			list_add_tail(&_dptr_->kd_list, &vmem_list);          \
			spin_unlock_irqrestore(&vmem_lock, _flags_);          \
			                                                      \
			__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("          \
				       "%d, 0x%x) = %p (%ld/%ld)\n",          \
				       (int)(size), (int)(flags), _ptr_,      \
				       atomic64_read(&vmem_alloc_used),       \
				       vmem_alloc_max);                       \
		}                                                             \
	}                                                                     \
	                                                                      \
	_ptr_;                                                                \
})

#define vmem_alloc(size, flags)		__vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)	__vmem_alloc((size), ((flags) |       \
					    __GFP_ZERO))

#define vmem_free(ptr, size)                                                  \
({                                                                            \
	kmem_debug_t *_dptr_;                                                 \
	ASSERT((ptr) || (size > 0));                                          \
	                                                                      \
	_dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
	ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */          \
	ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
		"kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,         \
		(int)(size), _dptr_->kd_func, _dptr_->kd_line); /* Size must match */\
	atomic64_sub((size), &vmem_alloc_used);                               \
	__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n",       \
		       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),   \
		       vmem_alloc_max);                                       \
	                                                                      \
	/* Poison the tracking record and the buffer to catch stale use */    \
	memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
	kfree(_dptr_);                                                        \
	                                                                      \
	memset(ptr, 0x5a, (size));                                            \
	vfree(ptr);                                                           \
})
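
/*
 * Illustrative only (my_table is hypothetical): vmem_alloc() is backed
 * by __vmalloc(), so it suits large, virtually contiguous buffers and
 * must be allowed to sleep (the debug build ASSERTs KM_SLEEP).  Note
 * that __GFP_ZERO is masked off before calling __vmalloc() and zeroing
 * is instead done with an explicit memset() in the debug path.
 *
 *   uint64_t *my_table = vmem_zalloc(1024 * 1024, KM_SLEEP);
 *   if (my_table != NULL) {
 *           ...
 *           vmem_free(my_table, 1024 * 1024);
 *   }
 */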

#else

#define kmem_alloc(size, flags)		kmalloc((size), (flags))
#define kmem_zalloc(size, flags)	kzalloc((size), (flags))
#define kmem_free(ptr, size)                                                  \
({                                                                            \
	ASSERT((ptr) || (size > 0));                                          \
	kfree(ptr);                                                           \
})

#define vmem_alloc(size, flags)		__vmalloc((size), ((flags) |          \
					    __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags)	__vmalloc((size), ((flags) |          \
					    __GFP_HIGHMEM | __GFP_ZERO),      \
					    PAGE_KERNEL)
#define vmem_free(ptr, size)                                                  \
({                                                                            \
	ASSERT((ptr) || (size > 0));                                          \
	vfree(ptr);                                                           \
})

#endif /* DEBUG_KMEM */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH			/* No linux analog */
#define KMC_NODEBUG	0x00000000	/* Default behavior */
#define KMC_NOMAGAZINE			/* No linux analog */
#define KMC_NOHASH			/* No linux analog */
#define KMC_QCACHE			/* No linux analog */

#define KMC_REAP_CHUNK		256
#define KMC_DEFAULT_SEEKS	DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/* No linux analog
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint.  We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels.  For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
	return 0;
}

typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

extern int kmem_set_warning(int flag);

extern kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
		    kmem_constructor_t constructor,
		    kmem_destructor_t destructor,
		    kmem_reclaim_t reclaim,
		    void *priv, void *vmp, int flags);

extern int __kmem_cache_destroy(kmem_cache_t *cache);
extern void *__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags);
extern void __kmem_reap(void);

int kmem_init(void);
void kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
	__kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)	__kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)	__kmem_cache_alloc(cache, flags)
/* Expands to the identically named linux function; macros do not recurse */
#define kmem_cache_free(cache, ptr)	kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)	kmem_cache_shrink(cache)
#define kmem_reap()			__kmem_reap()
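
/*
 * Illustrative only (my_obj_t and my_obj_ctor are hypothetical): a
 * minimal slab cache lifecycle through the Solaris-style interface.
 * The constructor signature matches kmem_constructor_t above.
 *
 *   static int
 *   my_obj_ctor(void *buf, void *priv, int kmflags)
 *   {
 *           memset(buf, 0, sizeof(my_obj_t));
 *           return 0;
 *   }
 *
 *   kmem_cache_t *cache = kmem_cache_create("my_cache",
 *       sizeof(my_obj_t), 0, my_obj_ctor, NULL, NULL,
 *       NULL, NULL, KMC_NODEBUG);
 *
 *   my_obj_t *obj = kmem_cache_alloc(cache, KM_SLEEP);
 *   ...
 *   kmem_cache_free(cache, obj);
 *   kmem_cache_destroy(cache);
 */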

#ifdef __cplusplus
}
#endif

#endif /* _SPL_KMEM_H */