/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#ifdef  __cplusplus
extern "C" {
#endif

#undef DEBUG_KMEM_UNIMPLEMENTED

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/debug.h>

/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK
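
/*
 * Illustrative usage (added commentary, not part of the original header):
 * callers ported from Solaris pass a KM_* flag and must later free with the
 * matching size.  KM_SLEEP maps to GFP_KERNEL and may block; KM_NOSLEEP maps
 * to GFP_ATOMIC and may fail under memory pressure.  The names foo_t and fp
 * below are hypothetical.
 *
 *     foo_t *fp = kmem_alloc(sizeof (foo_t), KM_SLEEP);
 *     if (fp == NULL)
 *             return (ENOMEM);
 *     ...
 *     kmem_free(fp, sizeof (foo_t));
 */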

#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;

extern int kmem_warning_flag;
extern atomic64_t kmem_cache_alloc_failed;

/* XXX - Not surprisingly, with debugging enabled the xmem_locks are very
 * highly contended, particularly on xfree().  If we want to run with this
 * detailed debugging enabled for anything other than debugging we need to
 * minimize the contention by moving to a lock-per-xmem_table-entry model.
 */
#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;

#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}
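
/*
 * Summary of the DEBUG_KMEM tracking scheme (added commentary, derived from
 * the macros below): every kmem_alloc()/vmem_alloc() allocates a companion
 * kmem_debug_t record, fills in the caller's pointer, size, function and
 * line, and links it into both a hash table (kmem_table/vmem_table, keyed by
 * the returned address) and a global list (kmem_list/vmem_list).  The
 * matching kmem_free()/vmem_free() uses __kmem_del_init() above to look the
 * record up by pointer, unlink it, verify the freed size against kd_size,
 * and update the *_alloc_used accounting.
 */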

#define __kmem_alloc(size, flags, allocator)                                  \
({      void *_ptr_ = NULL;                                                   \
        kmem_debug_t *_dptr_;                                                 \
        unsigned long _flags_;                                                \
                                                                              \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
        if (_dptr_ == NULL) {                                                 \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
                               "kmem_alloc(%d, 0x%x) debug failed\n",         \
                               (int)sizeof(kmem_debug_t), (int)(flags));      \
        } else {                                                              \
                /* Marked unlikely because we should never be doing this; */  \
                /* we tolerate up to 2 pages but a single page is best.   */  \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)  \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "    \
                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",    \
                                       (int)(size), (int)(flags),             \
                                       atomic64_read(&kmem_alloc_used),       \
                                       kmem_alloc_max);                       \
                                                                              \
                _ptr_ = (void *)allocator((size), (flags));                   \
                if (_ptr_ == NULL) {                                          \
                        kfree(_dptr_);                                        \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
                                       "kmem_alloc(%d, 0x%x) failed (%ld/"    \
                                       "%ld)\n", (int)(size), (int)(flags),   \
                                       atomic64_read(&kmem_alloc_used),       \
                                       kmem_alloc_max);                       \
                } else {                                                      \
                        atomic64_add((size), &kmem_alloc_used);               \
                        if (unlikely(atomic64_read(&kmem_alloc_used) >        \
                                     kmem_alloc_max))                         \
                                kmem_alloc_max =                              \
                                        atomic64_read(&kmem_alloc_used);      \
                                                                              \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
                        INIT_LIST_HEAD(&_dptr_->kd_list);                     \
                        _dptr_->kd_addr = _ptr_;                              \
                        _dptr_->kd_size = (size);                             \
                        _dptr_->kd_func = __FUNCTION__;                       \
                        _dptr_->kd_line = __LINE__;                           \
                        spin_lock_irqsave(&kmem_lock, _flags_);               \
                        hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
                                &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]);\
                        list_add_tail(&_dptr_->kd_list, &kmem_list);          \
                        spin_unlock_irqrestore(&kmem_lock, _flags_);          \
                                                                              \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("          \
                                       "%d, 0x%x) = %p (%ld/%ld)\n",          \
                                       (int)(size), (int)(flags), _ptr_,      \
                                       atomic64_read(&kmem_alloc_used),       \
                                       kmem_alloc_max);                       \
                }                                                             \
        }                                                                     \
                                                                              \
        _ptr_;                                                                \
})

#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)

#define kmem_free(ptr, size)                                                  \
({                                                                            \
        kmem_debug_t *_dptr_;                                                 \
        ASSERT((ptr) || (size > 0));                                          \
                                                                              \
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */          \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size),      \
                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */      \
        atomic64_sub((size), &kmem_alloc_used);                               \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n",       \
                       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),   \
                       kmem_alloc_max);                                       \
                                                                              \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
        kfree(_dptr_);                                                        \
                                                                              \
        memset(ptr, 0x5a, (size));                                            \
        kfree(ptr);                                                           \
})

#define __vmem_alloc(size, flags)                                             \
({      void *_ptr_ = NULL;                                                   \
        kmem_debug_t *_dptr_;                                                 \
        unsigned long _flags_;                                                \
                                                                              \
        ASSERT((flags) & KM_SLEEP);                                           \
                                                                              \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));      \
        if (_dptr_ == NULL) {                                                 \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "                  \
                               "vmem_alloc(%d, 0x%x) debug failed\n",         \
                               (int)sizeof(kmem_debug_t), (int)(flags));      \
        } else {                                                              \
                _ptr_ = (void *)__vmalloc((size), (((flags) |                 \
                                          __GFP_HIGHMEM) & ~__GFP_ZERO),      \
                                          PAGE_KERNEL);                       \
                if (_ptr_ == NULL) {                                          \
                        kfree(_dptr_);                                        \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "          \
                                       "vmem_alloc(%d, 0x%x) failed (%ld/"    \
                                       "%ld)\n", (int)(size), (int)(flags),   \
                                       atomic64_read(&vmem_alloc_used),       \
                                       vmem_alloc_max);                       \
                } else {                                                      \
                        if (flags & __GFP_ZERO)                               \
                                memset(_ptr_, 0, (size));                     \
                                                                              \
                        atomic64_add((size), &vmem_alloc_used);               \
                        if (unlikely(atomic64_read(&vmem_alloc_used) >        \
                                     vmem_alloc_max))                         \
                                vmem_alloc_max =                              \
                                        atomic64_read(&vmem_alloc_used);      \
                                                                              \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist);                   \
                        INIT_LIST_HEAD(&_dptr_->kd_list);                     \
                        _dptr_->kd_addr = _ptr_;                              \
                        _dptr_->kd_size = (size);                             \
                        _dptr_->kd_func = __FUNCTION__;                       \
                        _dptr_->kd_line = __LINE__;                           \
                        spin_lock_irqsave(&vmem_lock, _flags_);               \
                        hlist_add_head_rcu(&_dptr_->kd_hlist,                 \
                                &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
                        list_add_tail(&_dptr_->kd_list, &vmem_list);          \
                        spin_unlock_irqrestore(&vmem_lock, _flags_);          \
                                                                              \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("          \
                                       "%d, 0x%x) = %p (%ld/%ld)\n",          \
                                       (int)(size), (int)(flags), _ptr_,      \
                                       atomic64_read(&vmem_alloc_used),       \
                                       vmem_alloc_max);                       \
                }                                                             \
        }                                                                     \
                                                                              \
        _ptr_;                                                                \
})

#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) |       \
                                                     __GFP_ZERO))

#define vmem_free(ptr, size)                                                  \
({                                                                            \
        kmem_debug_t *_dptr_;                                                 \
        ASSERT((ptr) || (size > 0));                                          \
                                                                              \
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */          \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "      \
                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size),      \
                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */      \
        atomic64_sub((size), &vmem_alloc_used);                               \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n",       \
                       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),   \
                       vmem_alloc_max);                                       \
                                                                              \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                           \
        kfree(_dptr_);                                                        \
                                                                              \
        memset(ptr, 0x5a, (size));                                            \
        vfree(ptr);                                                           \
})
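
/*
 * Illustrative usage (added commentary): vmem_alloc() is backed by
 * __vmalloc() and, in debug builds, asserts that KM_SLEEP was passed, so it
 * must not be called from atomic context.  The name buf below is
 * hypothetical.
 *
 *     void *buf = vmem_alloc(128 * 1024, KM_SLEEP);
 *     if (buf == NULL)
 *             return (ENOMEM);
 *     ...
 *     vmem_free(buf, 128 * 1024);
 */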

#else /* DEBUG_KMEM */

#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size)            kfree(ptr)

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) |          \
                                                  __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags)                                              \
({                                                                            \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM), PAGE_KERNEL); \
        if (_ptr_)                                                            \
                memset(_ptr_, 0, (size));                                     \
        _ptr_;                                                                \
})
#define vmem_free(ptr, size)            vfree(ptr)

#endif /* DEBUG_KMEM */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH                     /* No linux analog */
#define KMC_NODEBUG     0x00000000      /* Default behavior */
#define KMC_NOMAGAZINE                  /* No linux analog */
#define KMC_NOHASH                      /* No linux analog */
#define KMC_QCACHE                      /* No linux analog */

#define KMC_REAP_CHUNK  256
#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/* No linux analog
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */

#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */

/* XXX - Used by arc.c to adjust its memory footprint.  We may want
 *       to use this hook in the future to adjust behavior based on
 *       debug levels.  For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

extern int kmem_set_warning(int flag);

extern kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags);

extern int __kmem_cache_destroy(kmem_cache_t *cache);
extern void *__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags);
extern void __kmem_reap(void);

int kmem_init(void);
void kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)       __kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)  __kmem_cache_alloc(cache, flags)
/* Self-referential on purpose: a function-like macro is not re-expanded
 * within its own expansion, so this resolves to the native Linux
 * kmem_cache_free(). */
#define kmem_cache_free(cache, ptr)     kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)      kmem_cache_shrink(cache)
#define kmem_reap()                     __kmem_reap()
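
/*
 * Illustrative cache lifecycle (added commentary; my_obj_t, my_cache and the
 * constructor/destructor below are hypothetical names, sketched from the
 * kmem_constructor_t/kmem_destructor_t signatures and the
 * __kmem_cache_create() prototype above):
 *
 *     static int
 *     my_obj_ctor(void *buf, void *priv, int kmflags)
 *     {
 *             memset(buf, 0, sizeof (my_obj_t));
 *             return (0);
 *     }
 *
 *     static void
 *     my_obj_dtor(void *buf, void *priv)
 *     {
 *     }
 *
 *     kmem_cache_t *my_cache = kmem_cache_create("my_cache",
 *         sizeof (my_obj_t), 0, my_obj_ctor, my_obj_dtor, NULL,
 *         NULL, NULL, KMC_NODEBUG);
 *     my_obj_t *obj = kmem_cache_alloc(my_cache, KM_SLEEP);
 *     ...
 *     kmem_cache_free(my_cache, obj);
 *     kmem_cache_destroy(my_cache);
 */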

#ifdef  __cplusplus
}
#endif

#endif  /* _SPL_KMEM_H */