#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef __cplusplus
extern "C" {
#endif

#define DEBUG_KMEM
#undef DEBUG_KMEM_UNIMPLEMENTED

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/debug.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK
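
/*
 * Note on flag choice (illustrative, not normative): KM_SLEEP maps to
 * GFP_KERNEL and may block, so it is only appropriate in process context;
 * KM_NOSLEEP maps to GFP_ATOMIC and is the option for interrupt or other
 * atomic context, e.g.:
 *
 *      buf = kmem_alloc(len, in_interrupt() ? KM_NOSLEEP : KM_SLEEP);
 */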

#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;
extern int kmem_warning_flag;

#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

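/*
 * Look up the kmem_debug_t tracking a given allocation address in the
 * supplied hash table, unlink it from both the hash chain and the global
 * allocation list, and return it to the caller (or NULL if the address was
 * never tracked).  Shared by kmem_free() and vmem_free() below.
 */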
static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}

#define __kmem_alloc(size, flags, allocator) \
({ void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
\
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "kmem_alloc(%d, 0x%x) debug failed\n", \
                               (int)sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                /* Marked unlikely because we should never be doing this; */ \
                /* we tolerate up to 2 pages but a single page is best. */ \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
\
                _ptr_ = (void *)allocator((size), (flags)); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "kmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } else { \
                        atomic64_add((size), &kmem_alloc_used); \
                        if (unlikely(atomic64_read(&kmem_alloc_used) > \
                                     kmem_alloc_max)) \
                                kmem_alloc_max = \
                                        atomic64_read(&kmem_alloc_used); \
\
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&kmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                                &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &kmem_list); \
                        spin_unlock_irqrestore(&kmem_lock, _flags_); \
\
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max); \
                } \
        } \
\
        _ptr_; \
})

#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)

#define kmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
\
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
                "kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size, \
                (int)(size), _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &kmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
                       kmem_alloc_max); \
\
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
\
        memset(ptr, 0x5a, (size)); \
        kfree(ptr); \
})
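
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * kmem_free() requires the original allocation size, so callers typically
 * keep it alongside the pointer.
 *
 *      size_t len = sizeof(struct my_record);   // my_record is a placeholder
 *      struct my_record *rp = kmem_zalloc(len, KM_SLEEP);
 *      if (rp == NULL)
 *              return (-ENOMEM);
 *      ...
 *      kmem_free(rp, len);
 *
 * Under DEBUG_KMEM a mismatched size will trip the ASSERTF() above.
 */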

#define __vmem_alloc(size, flags) \
({ void *_ptr_ = NULL; \
        kmem_debug_t *_dptr_; \
        unsigned long _flags_; \
\
        ASSERT((flags) & KM_SLEEP); \
\
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
        if (_dptr_ == NULL) { \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                               "vmem_alloc(%d, 0x%x) debug failed\n", \
                               (int)sizeof(kmem_debug_t), (int)(flags)); \
        } else { \
                _ptr_ = (void *)__vmalloc((size), (((flags) | \
                                          __GFP_HIGHMEM) & ~__GFP_ZERO), \
                                          PAGE_KERNEL); \
                if (_ptr_ == NULL) { \
                        kfree(_dptr_); \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
                                       "vmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } else { \
                        if (flags & __GFP_ZERO) \
                                memset(_ptr_, 0, (size)); \
\
                        atomic64_add((size), &vmem_alloc_used); \
                        if (unlikely(atomic64_read(&vmem_alloc_used) > \
                                     vmem_alloc_max)) \
                                vmem_alloc_max = \
                                        atomic64_read(&vmem_alloc_used); \
\
                        INIT_HLIST_NODE(&_dptr_->kd_hlist); \
                        INIT_LIST_HEAD(&_dptr_->kd_list); \
                        _dptr_->kd_addr = _ptr_; \
                        _dptr_->kd_size = (size); \
                        _dptr_->kd_func = __FUNCTION__; \
                        _dptr_->kd_line = __LINE__; \
                        spin_lock_irqsave(&vmem_lock, _flags_); \
                        hlist_add_head_rcu(&_dptr_->kd_hlist, \
                                &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]); \
                        list_add_tail(&_dptr_->kd_list, &vmem_list); \
                        spin_unlock_irqrestore(&vmem_lock, _flags_); \
\
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
                                       "%d, 0x%x) = %p (%ld/%ld)\n", \
                                       (int)(size), (int)(flags), _ptr_, \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max); \
                } \
        } \
\
        _ptr_; \
})

#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | \
                                                     __GFP_ZERO))

#define vmem_free(ptr, size) \
({ \
        kmem_debug_t *_dptr_; \
        ASSERT((ptr) || (size > 0)); \
\
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); \
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */ \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
                "kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size, \
                (int)(size), _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
        atomic64_sub((size), &vmem_alloc_used); \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
                       vmem_alloc_max); \
\
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
        kfree(_dptr_); \
\
        memset(ptr, 0x5a, (size)); \
        vfree(ptr); \
})
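
/*
 * Illustrative sketch (hypothetical caller): vmem_alloc() is backed by
 * __vmalloc() and may sleep (note the ASSERT on KM_SLEEP above), so it is
 * intended for large, virtually contiguous buffers allocated in process
 * context.
 *
 *      void *tbl = vmem_zalloc(nentries * sizeof(entry_t), KM_SLEEP);
 *      if (tbl == NULL)
 *              return (-ENOMEM);
 *      ...
 *      vmem_free(tbl, nentries * sizeof(entry_t));
 *
 * nentries and entry_t are placeholders, not defined by this header.
 */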

#else

#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        kfree(ptr); \
})

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) | \
                                                  __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags)        __vmalloc((size), ((flags) | \
                                                  __GFP_HIGHMEM | __GFP_ZERO), \
                                                  PAGE_KERNEL)
#define vmem_free(ptr, size) \
({ \
        ASSERT((ptr) || (size > 0)); \
        vfree(ptr); \
})

#endif /* DEBUG_KMEM */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH                     /* No linux analog */
#define KMC_NODEBUG     0x00000000      /* Default behavior */
#define KMC_NOMAGAZINE                  /* No linux analog */
#define KMC_NOHASH                      /* No linux analog */
#define KMC_QCACHE                      /* No linux analog */

#define KMC_REAP_CHUNK                  256
#define KMC_DEFAULT_SEEKS               DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/* No linux analog
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint. We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels. For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

extern int kmem_set_warning(int flag);
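
/*
 * Illustrative usage (assumption: the flag gates kmem_warning_flag, which
 * enables the "large kmem_alloc" warnings above): a consumer that
 * deliberately makes multi-page allocations might call kmem_set_warning(0)
 * during setup to silence those warnings and kmem_set_warning(1) to restore
 * them.
 */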

extern kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags);

extern int __kmem_cache_destroy(kmem_cache_t *cache);

extern void __kmem_reap(void);

int kmem_init(void);
void kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)       __kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)  kmem_cache_alloc(cache, flags)
#define kmem_cache_free(cache, ptr)     kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)      kmem_cache_shrink(cache)
#define kmem_reap()                     __kmem_reap()
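
/*
 * Illustrative slab cache usage (hypothetical consumer; obj_t, obj_ctor and
 * obj_dtor are placeholders, not part of this header).  The constructor and
 * destructor follow the Solaris-style signatures typedef'd above.
 *
 *      static int
 *      obj_ctor(void *buf, void *priv, int kmflags)
 *      {
 *              memset(buf, 0, sizeof(obj_t));
 *              return (0);
 *      }
 *
 *      static void
 *      obj_dtor(void *buf, void *priv)
 *      {
 *      }
 *
 *      kmem_cache_t *cp = kmem_cache_create("obj_cache", sizeof(obj_t), 0,
 *                                           obj_ctor, obj_dtor, NULL,
 *                                           NULL, NULL, 0);
 *      obj_t *op = kmem_cache_alloc(cp, KM_SLEEP);
 *      ...
 *      kmem_cache_free(cp, op);
 *      kmem_cache_destroy(cp);
 */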

#ifdef __cplusplus
}
#endif

#endif /* _SPL_KMEM_H */