9 #undef DEBUG_KMEM_UNIMPLEMENTED
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/spinlock.h>
16 #include <sys/debug.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL      /* allocation may sleep */
#define KM_NOSLEEP      GFP_ATOMIC      /* must not sleep (atomic context) */
#undef  KM_PANIC                        /* No linux analog */
/* NOTE(review): was (GFP_KERNEL | GFP_HIGH); GFP_HIGH is not a Linux
 * symbol -- the flag for "may use emergency pools" is __GFP_HIGH. */
#define KM_PUSHPAGE     (GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK
28 extern atomic64_t kmem_alloc_used
;
29 extern unsigned long kmem_alloc_max
;
30 extern atomic64_t vmem_alloc_used
;
31 extern unsigned long vmem_alloc_max
;
32 extern int kmem_warning_flag
;
/*
 * Debug wrapper over kmalloc()/kzalloc(): warns about suspiciously large
 * allocations (> 4 pages) when kmem_warning_flag is set, logs failures,
 * and maintains total/peak byte accounting.  GCC statement expression;
 * evaluates to the allocated pointer, or NULL on failure.
 */
#define __kmem_alloc(size, flags, allocator)                            \
({      void *_ptr_;                                                    \
                                                                        \
        /* Marked unlikely because we should never be doing this */     \
        if (unlikely((size) > (PAGE_SIZE * 4)) && kmem_warning_flag)    \
                printk("spl: Warning kmem_alloc(%d, 0x%x) large alloc at %s:%d "\
                       "(%ld/%ld)\n", (int)(size), (int)(flags),        \
                       __FILE__, __LINE__,                              \
                       atomic64_read(&kmem_alloc_used), kmem_alloc_max);\
                                                                        \
        _ptr_ = (void *)allocator((size), (flags));                     \
        if (_ptr_ == NULL) {                                            \
                printk("spl: Warning kmem_alloc(%d, 0x%x) failed at %s:%d "\
                       "(%ld/%ld)\n", (int)(size), (int)(flags),        \
                       __FILE__, __LINE__,                              \
                       atomic64_read(&kmem_alloc_used), kmem_alloc_max);\
        } else {                                                        \
                atomic64_add((size), &kmem_alloc_used);                 \
                if (unlikely(atomic64_read(&kmem_alloc_used) >          \
                             kmem_alloc_max))                           \
                        kmem_alloc_max =                                \
                                atomic64_read(&kmem_alloc_used);        \
        }                                                               \
                                                                        \
        _ptr_;                                                          \
})
/* Allocation entry points; kmem_zalloc() additionally zeroes the buffer */
#define kmem_alloc(size, flags)         __kmem_alloc((size), (flags), kmalloc)
#define kmem_zalloc(size, flags)        __kmem_alloc((size), (flags), kzalloc)
/*
 * Debug free: poison the buffer (0x5a) to expose use-after-free, update
 * the accounting counter, then release it with kfree().  The original
 * extraction lost the kfree() call -- without it every free would leak.
 */
#define kmem_free(ptr, size)                                            \
({                                                                      \
        ASSERT((ptr) || (size > 0));                                    \
        atomic64_sub((size), &kmem_alloc_used);                         \
        memset(ptr, 0x5a, (size)); /* Poison */                         \
        kfree(ptr);                                                     \
})
/*
 * Debug wrapper over __vmalloc(): only sleeping allocations (KM_SLEEP)
 * are legal.  __GFP_ZERO is masked out of the __vmalloc() call and
 * honoured manually with memset() below; failures are logged and the
 * vmem accounting counters maintained.  Evaluates to the pointer or NULL.
 */
#define __vmem_alloc(size, flags)                                       \
({      void *_ptr_;                                                    \
                                                                        \
        ASSERT(flags & KM_SLEEP);                                       \
                                                                        \
        _ptr_ = (void *)__vmalloc((size), (((flags) |                   \
                __GFP_HIGHMEM) & ~__GFP_ZERO), PAGE_KERNEL);            \
        if (_ptr_ == NULL) {                                            \
                printk("spl: Warning vmem_alloc(%d, 0x%x) failed at %s:%d "\
                       "(%ld/%ld)\n", (int)(size), (int)(flags),        \
                       __FILE__, __LINE__,                              \
                       atomic64_read(&vmem_alloc_used), vmem_alloc_max);\
        } else {                                                        \
                if (flags & __GFP_ZERO)                                 \
                        memset(_ptr_, 0, (size));                       \
                                                                        \
                atomic64_add((size), &vmem_alloc_used);                 \
                if (unlikely(atomic64_read(&vmem_alloc_used) >          \
                             vmem_alloc_max))                           \
                        vmem_alloc_max =                                \
                                atomic64_read(&vmem_alloc_used);        \
        }                                                               \
                                                                        \
        _ptr_;                                                          \
})
/* vmem entry points; vmem_zalloc() requests zeroed memory via __GFP_ZERO */
#define vmem_alloc(size, flags)         __vmem_alloc((size), (flags))
#define vmem_zalloc(size, flags)        __vmem_alloc((size), ((flags) | \
                                                __GFP_ZERO))
/*
 * Debug free for vmalloc'd memory: poison (0x5a), update accounting,
 * then release with vfree() (the call lost in extraction -- without it
 * every vmem_free() would leak the mapping).
 */
#define vmem_free(ptr, size)                                            \
({                                                                      \
        ASSERT((ptr) || (size > 0));                                    \
        atomic64_sub((size), &vmem_alloc_used);                         \
        memset(ptr, 0x5a, (size)); /* Poison */                         \
        vfree(ptr);                                                     \
})
/*
 * Non-debug variants: thin pass-throughs with no accounting or poisoning.
 * NOTE(review): the DEBUG_KMEM "#else" line that separated these from the
 * debug variants above appears to have been lost; without it the kmem_*
 * macros are redefined.  Confirm against the upstream header.
 */
#define kmem_alloc(size, flags)         kmalloc((size), (flags))
#define kmem_zalloc(size, flags)        kzalloc((size), (flags))
#define kmem_free(ptr, size)                                            \
({                                                                      \
        ASSERT((ptr) || (size > 0));                                    \
        kfree(ptr);                                                     \
})
116 #define vmem_alloc(size, flags) __vmalloc((size), ((flags) | \
117 __GFP_HIGHMEM), PAGE_KERNEL)
118 #define vmem_zalloc(size, flags) __vmalloc((size), ((flags) | \
119 __GFP_HIGHMEM | __GFP_ZERO) \
121 #define vmem_free(ptr, size) \
123 ASSERT((ptr) || (size > 0)); \
127 #endif /* DEBUG_KMEM */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris kmem_alloc_tryhard(): no Linux implementation yet.  The #error
 * only fires if DEBUG_KMEM_UNIMPLEMENTED is defined (it is #undef'd at
 * the top of this header), making any attempted use fail loudly. */
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/*
 * Slab allocation interfaces
 */
#undef  KMC_NOTOUCH                     /* No linux analog */
#define KMC_NODEBUG     0x00000000      /* Default behavior */
#define KMC_NOMAGAZINE                  /* No linux analog */
#define KMC_NOHASH                      /* No linux analog */
#define KMC_QCACHE                      /* No linux analog */

#define KMC_REAP_CHUNK                  256
#define KMC_DEFAULT_SEEKS               DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 */

/* Unused
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris kmem interfaces with no Linux implementation.  Each stub holds
 * a #error so any build defining DEBUG_KMEM_UNIMPLEMENTED (it is #undef'd
 * at the top of this header) fails at the point of use. */
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/* XXX - Used by arc.c to adjust its memory footprint. We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels. For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}
/* Solaris-style slab cache callback signatures: constructor(obj, priv,
 * kmflags), destructor(obj, priv), reclaim(priv). */
typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

/* Set the large-allocation warning behavior; presumably drives
 * kmem_warning_flag above -- confirm against the implementation. */
extern int kmem_set_warning(int flag);
205 extern kmem_cache_t
*
206 __kmem_cache_create(char *name
, size_t size
, size_t align
,
207 kmem_constructor_t constructor
,
208 kmem_destructor_t destructor
,
209 kmem_reclaim_t reclaim
,
210 void *priv
, void *vmp
, int flags
);
213 extern __kmem_cache_destroy(kmem_cache_t
*cache
);
216 extern __kmem_reap(void);
219 void kmem_fini(void);
/*
 * Map the Solaris slab cache API onto the SPL/Linux implementations.
 * The kmem_cache_alloc/kmem_cache_free identity defines are deliberate:
 * Linux already provides functions of those names with compatible
 * signatures, and a function-like macro is not expanded recursively,
 * so each simply passes through to the real function.
 */
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)       __kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)  kmem_cache_alloc(cache, flags)
#define kmem_cache_free(cache, ptr)     kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)      kmem_cache_shrink(cache)
#define kmem_reap()                     __kmem_reap()
233 #endif /* _SPL_KMEM_H */