/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#undef DEBUG_KMEM_UNIMPLEMENTED

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <sys/debug.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE     (GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long vmem_alloc_max;

extern int kmem_warning_flag;
extern atomic64_t kmem_cache_alloc_failed;
/* XXX - Not too surprisingly, with debugging enabled the xmem_locks are
 * very highly contended, particularly on xfree().  If we want to run with
 * this detailed debugging enabled for anything other than debugging we
 * need to minimize the contention by moving to a lock per xmem_table
 * entry model.
 */
#define KMEM_HASH_BITS          10
#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)

extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;
#define VMEM_HASH_BITS          10
#define VMEM_TABLE_SIZE         (1 << VMEM_HASH_BITS)

extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;
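
/*
 * Tracked addresses are distributed across the tables above with
 * hash_ptr() from <linux/hash.h>; a minimal sketch of the bucket
 * computation used by the alloc/free paths below:
 *
 *      struct hlist_head *head;
 *
 *      head = &kmem_table[hash_ptr(addr, KMEM_HASH_BITS)];
 */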
typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;
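
/*
 * With DEBUG_KMEM enabled every allocation carries one of these records,
 * linked into both the per-address hash table (kd_hlist) and the global
 * allocation list (kd_list).  A hypothetical leak report could walk the
 * global list at module unload, for example:
 *
 *      kmem_debug_t *kd;
 *
 *      list_for_each_entry(kd, &kmem_list, kd_list)
 *              printk(KERN_WARNING "leaked %zu bytes at %p (%s:%d)\n",
 *                     kd->kd_size, kd->kd_addr, kd->kd_func, kd->kd_line);
 */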
static __inline__ kmem_debug_t *
__kmem_del_init(spinlock_t *lock, struct hlist_head *table,
                int bits, void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        return p;
                }
        }

        spin_unlock_irqrestore(lock, flags);
        return NULL;
}
#define __kmem_alloc(size, flags, allocator)                            \
({      void *_ptr_ = NULL;                                             \
        kmem_debug_t *_dptr_;                                           \
        unsigned long _flags_;                                          \
                                                                        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));\
        if (_dptr_ == NULL) {                                           \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "            \
                               "kmem_alloc(%d, 0x%x) debug failed\n",   \
                               (int)sizeof(kmem_debug_t), (int)(flags));\
        } else {                                                        \
                /* Marked unlikely because we should never be doing  */ \
                /* this; we tolerate up to 2 pages but a single page */ \
                /* is best.                                           */ \
                if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
                                       (int)(size), (int)(flags),       \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max);                 \
                                                                        \
                _ptr_ = (void *)allocator((size), (flags));             \
                if (_ptr_ == NULL) {                                    \
                        kfree(_dptr_);                                  \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "    \
                                       "kmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max);                 \
                } else {                                                \
                        atomic64_add((size), &kmem_alloc_used);         \
                        if (unlikely(atomic64_read(&kmem_alloc_used) >  \
                            kmem_alloc_max))                            \
                                kmem_alloc_max =                        \
                                        atomic64_read(&kmem_alloc_used);\
                                                                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist);             \
                        INIT_LIST_HEAD(&_dptr_->kd_list);               \
                        _dptr_->kd_addr = _ptr_;                        \
                        _dptr_->kd_size = (size);                       \
                        _dptr_->kd_func = __FUNCTION__;                 \
                        _dptr_->kd_line = __LINE__;                     \
                        spin_lock_irqsave(&kmem_lock, _flags_);         \
                        hlist_add_head_rcu(&_dptr_->kd_hlist,           \
                            &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]);\
                        list_add_tail(&_dptr_->kd_list, &kmem_list);    \
                        spin_unlock_irqrestore(&kmem_lock, _flags_);    \
                                                                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("    \
                                       "%d, 0x%x) = %p (%ld/%ld)\n",    \
                                       (int)(size), (int)(flags), _ptr_,\
                                       atomic64_read(&kmem_alloc_used), \
                                       kmem_alloc_max);                 \
                }                                                       \
        }                                                               \
                                                                        \
        _ptr_;                                                          \
})
#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)
#define kmem_free(ptr, size)                                            \
({                                                                      \
        kmem_debug_t *_dptr_;                                           \
        ASSERT((ptr) || (size > 0));                                    \
                                                                        \
        _dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */    \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "\
                "kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,   \
                (int)(size), _dptr_->kd_func, _dptr_->kd_line);         \
        atomic64_sub((size), &kmem_alloc_used);                         \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&kmem_alloc_used),\
                       kmem_alloc_max);                                 \
                                                                        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); /* Sanity check */  \
        kfree(_dptr_);                                                  \
                                                                        \
        memset(ptr, 0x5a, (size)); /* Sanity check */                   \
        kfree(ptr);                                                     \
})
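
/*
 * Callers must pass the original allocation size back to kmem_free();
 * the ASSERTF above catches any mismatch.  An illustrative pairing
 * (struct my_node is hypothetical):
 *
 *      struct my_node *np;
 *
 *      np = kmem_alloc(sizeof(struct my_node), KM_SLEEP);
 *      ...
 *      kmem_free(np, sizeof(struct my_node));
 */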
#define __vmem_alloc(size, flags)                                       \
({      void *_ptr_ = NULL;                                             \
        kmem_debug_t *_dptr_;                                           \
        unsigned long _flags_;                                          \
                                                                        \
        ASSERT((flags) & KM_SLEEP);                                     \
                                                                        \
        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags));\
        if (_dptr_ == NULL) {                                           \
                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "            \
                               "vmem_alloc(%d, 0x%x) debug failed\n",   \
                               (int)sizeof(kmem_debug_t), (int)(flags));\
        } else {                                                        \
                _ptr_ = (void *)__vmalloc((size), (((flags) |           \
                                          __GFP_HIGHMEM) & ~__GFP_ZERO),\
                                          PAGE_KERNEL);                 \
                if (_ptr_ == NULL) {                                    \
                        kfree(_dptr_);                                  \
                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "    \
                                       "vmem_alloc(%d, 0x%x) failed (%ld/" \
                                       "%ld)\n", (int)(size), (int)(flags), \
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max);                 \
                } else {                                                \
                        if (flags & __GFP_ZERO)                         \
                                memset(_ptr_, 0, (size));               \
                                                                        \
                        atomic64_add((size), &vmem_alloc_used);         \
                        if (unlikely(atomic64_read(&vmem_alloc_used) >  \
                            vmem_alloc_max))                            \
                                vmem_alloc_max =                        \
                                        atomic64_read(&vmem_alloc_used);\
                                                                        \
                        INIT_HLIST_NODE(&_dptr_->kd_hlist);             \
                        INIT_LIST_HEAD(&_dptr_->kd_list);               \
                        _dptr_->kd_addr = _ptr_;                        \
                        _dptr_->kd_size = (size);                       \
                        _dptr_->kd_func = __FUNCTION__;                 \
                        _dptr_->kd_line = __LINE__;                     \
                        spin_lock_irqsave(&vmem_lock, _flags_);         \
                        hlist_add_head_rcu(&_dptr_->kd_hlist,           \
                            &vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
                        list_add_tail(&_dptr_->kd_list, &vmem_list);    \
                        spin_unlock_irqrestore(&vmem_lock, _flags_);    \
                                                                        \
                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc("    \
                                       "%d, 0x%x) = %p (%ld/%ld)\n",    \
                                       (int)(size), (int)(flags), _ptr_,\
                                       atomic64_read(&vmem_alloc_used), \
                                       vmem_alloc_max);                 \
                }                                                       \
        }                                                               \
                                                                        \
        _ptr_;                                                          \
})
#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | \
                                                     __GFP_ZERO))
#define vmem_free(ptr, size)                                            \
({                                                                      \
        kmem_debug_t *_dptr_;                                           \
        ASSERT((ptr) || (size > 0));                                    \
                                                                        \
        _dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
        ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */    \
        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), "\
                "kd_func = %s, kd_line = %d\n", (int)_dptr_->kd_size,   \
                (int)(size), _dptr_->kd_func, _dptr_->kd_line);         \
        atomic64_sub((size), &vmem_alloc_used);                         \
        __CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
                       (ptr), (int)(size), atomic64_read(&vmem_alloc_used),\
                       vmem_alloc_max);                                 \
                                                                        \
        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); /* Sanity check */  \
        kfree(_dptr_);                                                  \
                                                                        \
        memset(ptr, 0x5a, (size)); /* Sanity check */                   \
        vfree(ptr);                                                     \
})
#else /* DEBUG_KMEM */
#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size)            kfree(ptr)

#define vmem_alloc(size, flags)         __vmalloc((size), ((flags) |   \
                                            __GFP_HIGHMEM), PAGE_KERNEL)
#define vmem_zalloc(size, flags)                                        \
({                                                                      \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM),      \
                                PAGE_KERNEL);                           \
        if (_ptr_)                                                      \
                memset(_ptr_, 0, (size));                               \
        _ptr_;                                                          \
})
#define vmem_free(ptr, size)            vfree(ptr)
#endif /* DEBUG_KMEM */
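
/*
 * vmem_alloc() is backed by __vmalloc() and is intended for large,
 * virtually contiguous allocations; it may sleep, so it must be called
 * from process context with KM_SLEEP (the DEBUG_KMEM build asserts
 * this).  A sketch, with the size chosen arbitrarily for illustration:
 *
 *      size_t size = 16 * PAGE_SIZE;
 *      void *buf;
 *
 *      buf = vmem_alloc(size, KM_SLEEP);
 *      ...
 *      vmem_free(buf, size);
 */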
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH                     /* No linux analog */
#define KMC_NODEBUG     0x00000000      /* Default behavior */
#define KMC_NOMAGAZINE                  /* No linux analog */
#define KMC_NOHASH                      /* No linux analog */
#define KMC_QCACHE                      /* No linux analog */

#define KMC_REAP_CHUNK  256
#define KMC_DEFAULT_SEEKS DEFAULT_SEEKS
/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/*
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/* XXX - Used by arc.c to adjust its memory footprint.  We may want
 *       to use this hook in the future to adjust behavior based on
 *       debug levels.  For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}
typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

extern int kmem_set_warning(int flag);
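
/*
 * A hypothetical constructor/destructor pair matching the typedefs
 * above: the first argument is the object being initialized, the second
 * is the private pointer registered at cache creation, and the int
 * carries KM_* flags:
 *
 *      static int
 *      my_ctor(void *obj, void *priv, int kmflags)
 *      {
 *              memset(obj, 0, sizeof(struct my_obj));
 *              return 0;
 *      }
 *
 *      static void
 *      my_dtor(void *obj, void *priv)
 *      {
 *      }
 */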
extern kmem_cache_t *
__kmem_cache_create(char *name, size_t size, size_t align,
                    kmem_constructor_t constructor,
                    kmem_destructor_t destructor,
                    kmem_reclaim_t reclaim,
                    void *priv, void *vmp, int flags);
extern int __kmem_cache_destroy(kmem_cache_t *cache);
extern void *__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags);
extern void __kmem_reap(void);
int kmem_init(void);
void kmem_fini(void);
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)       __kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)  __kmem_cache_alloc(cache, flags)
/* Intentionally self-referential: a function-like macro is not expanded
 * recursively, so this resolves to the kernel's own kmem_cache_free(). */
#define kmem_cache_free(cache, ptr)     kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)      kmem_cache_shrink(cache)
#define kmem_reap()                     __kmem_reap()
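
/*
 * A sketch of the full cache lifecycle through the wrappers above (the
 * cache name, struct my_obj, and my_ctor/my_dtor from the hypothetical
 * example earlier are illustrative):
 *
 *      kmem_cache_t *cache;
 *      struct my_obj *obj;
 *
 *      cache = kmem_cache_create("my_obj_cache", sizeof(struct my_obj), 0,
 *                                my_ctor, my_dtor, NULL, NULL, NULL,
 *                                KMC_NODEBUG);
 *      obj = kmem_cache_alloc(cache, KM_SLEEP);
 *      ...
 *      kmem_cache_free(cache, obj);
 *      kmem_cache_destroy(cache);
 */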
#endif  /* _SPL_KMEM_H */