#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#define DEBUG_KMEM
#undef DEBUG_KMEM_UNIMPLEMENTED

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/debug.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK

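/*
 * Illustrative note (not part of the original source): callers use the
 * Solaris-style KM_* flags above, which are plain aliases for Linux GFP
 * flags. For example, with DEBUG_KMEM disabled:
 *
 *      buf = kmem_alloc(len, KM_SLEEP);        expands to kmalloc(len, GFP_KERNEL)
 *      buf = kmem_alloc(len, KM_NOSLEEP);      expands to kmalloc(len, GFP_ATOMIC)
 *
 * so KM_SLEEP callers may block for memory while KM_NOSLEEP callers may not,
 * matching the GFP_KERNEL / GFP_ATOMIC semantics.
 */
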
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;

extern int kmem_warning_flag;
extern atomic64_t kmem_cache_alloc_failed;

#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

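/*
 * Remove and return the kmem_debug_t record tracking 'addr' from the given
 * allocation hash table, or NULL if the address is not tracked. The caller
 * takes ownership of the returned record and is responsible for freeing it.
 */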
static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}

#define __kmem_alloc(size, flags, allocator) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                    "kmem_alloc(%d, 0x%x) debug failed\n", \
                    sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                /* Marked unlikely because we should never be doing this, */ \
                /* we tolerate up to 2 pages but a single page is best. */ \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                            "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                            (int)(size), (int)(flags), \
                            atomic64_read(&kmem_alloc_used), \
                            kmem_alloc_max); \
                \
                _ptr_ = (void *)allocator((size), (flags)); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                            "kmem_alloc(%d, 0x%x) failed (%ld/" \
                            "%ld)\n", (int)(size), (int)(flags), \
                            atomic64_read(&kmem_alloc_used), \
                            kmem_alloc_max); \
                } else { \
                        atomic64_add((size), &kmem_alloc_used); \
                        if (unlikely(atomic64_read(&kmem_alloc_used) > \
                            kmem_alloc_max)) \
                                kmem_alloc_max = \
                                    atomic64_read(&kmem_alloc_used); \
                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&kmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                            &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &kmem_list); \
                        spin_unlock_irqrestore(&kmem_lock, _flags_); \
                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(" \
                            "%d, 0x%x) = %p (%ld/%ld)\n", \
                            (int)(size), (int)(flags), _ptr_, \
                            atomic64_read(&kmem_alloc_used), \
                            kmem_alloc_max); \
                } \
        } \
        \
        _ptr_; \
})

#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)

#define kmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
        \
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
            "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
            _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
            kmem_alloc_max); \
        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
        \
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})
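
/*
 * Usage sketch (illustrative, not part of the original source): as in the
 * Solaris kmem API, the caller must pass the original allocation size back
 * to kmem_free(); with DEBUG_KMEM enabled the ASSERTF() above checks it
 * against the tracked kd_size. 'my_type_t' is a hypothetical caller type
 * used only for illustration.
 *
 *      my_type_t *mp = kmem_zalloc(sizeof(my_type_t), KM_SLEEP);
 *      if (mp == NULL)
 *              return (ENOMEM);
 *      ...
 *      kmem_free(mp, sizeof(my_type_t));
 */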

#define __vmem_alloc(size, flags) \
({      void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
        \
        ASSERT((flags) & KM_SLEEP); \
        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                    "vmem_alloc(%d, 0x%x) debug failed\n", \
                    sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                _ptr_ = (void *)__vmalloc((size), (((flags) | \
                    __GFP_HIGHMEM) & ~__GFP_ZERO), \
                    PAGE_KERNEL); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                            "vmem_alloc(%d, 0x%x) failed (%ld/" \
                            "%ld)\n", (int)(size), (int)(flags), \
                            atomic64_read(&vmem_alloc_used), \
                            vmem_alloc_max); \
                } else { \
                        if (flags & __GFP_ZERO) \
                                memset(_ptr_, 0, (size)); \
                        \
                        atomic64_add((size), &vmem_alloc_used); \
                        if (unlikely(atomic64_read(&vmem_alloc_used) > \
                            vmem_alloc_max)) \
                                vmem_alloc_max = \
                                    atomic64_read(&vmem_alloc_used); \
                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&vmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                            &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &vmem_list); \
                        spin_unlock_irqrestore(&vmem_lock, _flags_); \
                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                            "%d, 0x%x) = %p (%ld/%ld)\n", \
                            (int)(size), (int)(flags), _ptr_, \
                            atomic64_read(&vmem_alloc_used), \
                            vmem_alloc_max); \
                } \
        } \
        \
        _ptr_; \
})

#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | \
                                        __GFP_ZERO))

#define vmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
        \
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
            "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
            _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
            (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
            vmem_alloc_max); \
        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
        \
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})

#else

#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        kfree(ptr); \
})

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) | \
                                        __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags)        __vmalloc((size), ((flags) | \
                                        __GFP_HIGHMEM | __GFP_ZERO), \
                                        PAGE_KERNEL)
#define vmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        vfree(ptr); \
})

#endif /* DEBUG_KMEM */


#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH                     /* No linux analog */
#define KMC_NODEBUG     0x00000000      /* Default behavior */
#define KMC_NOMAGAZINE                  /* No linux analog */
#define KMC_NOHASH                      /* No linux analog */
#define KMC_QCACHE                      /* No linux analog */

#define KMC_REAP_CHUNK                  256
#define KMC_DEFAULT_SEEKS               DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/* No linux analog
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint. We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels. For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

extern int kmem_set_warning(int flag);

extern kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags);

extern int __kmem_cache_destroy(kmem_cache_t *cache);
extern void *__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags);
extern void __kmem_reap(void);

int kmem_init(void);
void kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)       __kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)  __kmem_cache_alloc(cache, flags)
#define kmem_cache_free(cache, ptr)     kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)      kmem_cache_shrink(cache)
#define kmem_reap()                     __kmem_reap()
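
/*
 * Usage sketch (illustrative, not part of the original source): a
 * Solaris-style slab cache built on the wrappers above. 'my_obj_t',
 * 'my_ctor' and 'my_dtor' are hypothetical names used only for
 * illustration; the constructor and destructor signatures follow
 * kmem_constructor_t and kmem_destructor_t.
 *
 *      static int
 *      my_ctor(void *buf, void *priv, int kmflags)
 *      {
 *              memset(buf, 0, sizeof(my_obj_t));
 *              return 0;
 *      }
 *
 *      static void
 *      my_dtor(void *buf, void *priv)
 *      {
 *      }
 *
 *      kmem_cache_t *cache = kmem_cache_create("my_cache", sizeof(my_obj_t),
 *          0, my_ctor, my_dtor, NULL, NULL, NULL, KMC_NODEBUG);
 *      my_obj_t *obj = kmem_cache_alloc(cache, KM_SLEEP);
 *      ...
 *      kmem_cache_free(cache, obj);
 *      kmem_cache_destroy(cache);
 */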

#ifdef __cplusplus
}
#endif

#endif /* _SPL_KMEM_H */