9 #undef DEBUG_KMEM_UNIMPLEMENTED
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/spinlock.h>
/*
 * Memory allocation interfaces
 *
 * Map the Solaris KM_* allocation flags onto Linux GFP_* flags.
 */
#define KM_SLEEP	GFP_KERNEL	/* allocation may block */
#define KM_NOSLEEP	GFP_ATOMIC	/* allocation must not block */
#undef  KM_PANIC			/* No linux analog */
/* __GFP_HIGH (not "GFP_HIGH", which does not exist) marks a high-priority
 * request that may dip into emergency reserves */
#define KM_PUSHPAGE	(GFP_KERNEL | __GFP_HIGH)
#define KM_VMFLAGS	GFP_LEVEL_MASK
#define KM_FLAGS	__GFP_BITS_MASK
27 extern atomic64_t kmem_alloc_used
;
28 extern unsigned long kmem_alloc_max
;
29 extern atomic64_t vmem_alloc_used
;
30 extern unsigned long vmem_alloc_max
;
31 extern int kmem_warning_flag
;
/*
 * Allocate memory through 'allocator' (kmalloc/kzalloc), tracking the total
 * outstanding bytes in kmem_alloc_used and its high-water mark in
 * kmem_alloc_max.  Implemented as a GCC statement expression so the macro
 * evaluates to the allocated pointer (NULL on failure).
 */
#define __kmem_alloc(size, flags, allocator)                                  \
({	void *_ptr_;                                                          \
                                                                              \
	/* Marked unlikely because we should never be doing this */           \
	if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)          \
		printk("Warning: kmem_alloc(%d, 0x%x) large alloc at %s:%d "  \
		       "(%ld/%ld)\n", (int)(size), (int)(flags),              \
		       __FILE__, __LINE__,                                    \
		       atomic64_read(&kmem_alloc_used), kmem_alloc_max);      \
                                                                              \
	_ptr_ = (void *)allocator((size), (flags));                           \
	if (_ptr_ == NULL) {                                                  \
		printk("Warning: kmem_alloc(%d, 0x%x) failed at %s:%d "       \
		       "(%ld/%ld)\n", (int)(size), (int)(flags),              \
		       __FILE__, __LINE__,                                    \
		       atomic64_read(&kmem_alloc_used), kmem_alloc_max);      \
	} else {                                                              \
		atomic64_add((size), &kmem_alloc_used);                       \
		if (unlikely(atomic64_read(&kmem_alloc_used)>kmem_alloc_max)) \
			kmem_alloc_max = atomic64_read(&kmem_alloc_used);     \
	}                                                                     \
                                                                              \
	_ptr_;                                                                \
})
/* Debug-accounting wrappers around kmalloc()/kzalloc() */
#define kmem_alloc(size, flags)		__kmem_alloc(size, flags, kmalloc)
#define kmem_zalloc(size, flags)	__kmem_alloc(size, flags, kzalloc)
/*
 * Free memory obtained from kmem_alloc()/kmem_zalloc().  The caller must
 * pass the original allocation size so the accounting stays balanced.
 * The buffer is poisoned before release to help catch use-after-free bugs.
 */
#define kmem_free(ptr, size)                                                  \
({                                                                            \
	BUG_ON(!(ptr) || (size) < 0);                                         \
	atomic64_sub((size), &kmem_alloc_used);                               \
	memset(ptr, 0x5a, (size)); /* Poison */                               \
	kfree(ptr);                                                           \
})
/*
 * Allocate virtually-contiguous memory via vmalloc(), tracking outstanding
 * bytes in vmem_alloc_used and its high-water mark in vmem_alloc_max.
 * Only KM_SLEEP is accepted because vmalloc() may always sleep.
 * Evaluates to the allocated pointer (NULL on failure).
 */
#define __vmem_alloc(size, flags)                                             \
({	void *_ptr_;                                                          \
                                                                              \
	BUG_ON(flags != KM_SLEEP);                                            \
                                                                              \
	_ptr_ = (void *)vmalloc((size));                                      \
	if (_ptr_ == NULL) {                                                  \
		printk("Warning: vmem_alloc(%d, 0x%x) failed at %s:%d "       \
		       "(%ld/%ld)\n", (int)(size), (int)(flags),              \
		       __FILE__, __LINE__,                                    \
		       atomic64_read(&vmem_alloc_used), vmem_alloc_max);      \
	} else {                                                              \
		atomic64_add((size), &vmem_alloc_used);                       \
		if (unlikely(atomic64_read(&vmem_alloc_used)>vmem_alloc_max)) \
			vmem_alloc_max = atomic64_read(&vmem_alloc_used);     \
	}                                                                     \
                                                                              \
	_ptr_;                                                                \
})
/* Public vmalloc-based allocator; accounting handled by __vmem_alloc() */
#define vmem_alloc(size, flags)		__vmem_alloc(size, flags)
/*
 * Free memory obtained from vmem_alloc(), keeping the usage counter
 * balanced; the buffer is poisoned before release to catch stale users.
 */
#define vmem_free(ptr, size)                                                  \
({                                                                            \
	BUG_ON(!(ptr) || (size) < 0);                                         \
	atomic64_sub((size), &vmem_alloc_used);                               \
	memset(ptr, 0x5a, (size)); /* Poison */                               \
	vfree(ptr);                                                           \
})
101 #define kmem_alloc(size, flags) kmalloc(size, flags)
102 #define kmem_zalloc(size, flags) kzalloc(size, flags)
103 #define kmem_free(ptr, size) \
105 BUG_ON(!(ptr) || (size) < 0); \
109 #define vmem_alloc(size, flags) vmalloc(size)
110 #define vmem_free(ptr, size) \
112 BUG_ON(!(ptr) || (size) < 0); \
116 #endif /* DEBUG_KMEM */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris-style "try hard" allocator; no Linux analog has been written,
 * so enabling this section deliberately fails the build. */
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/*
 * Slab allocation interfaces
 *
 * Map the Solaris KMC_* cache-creation flags onto Linux; most have no
 * Linux analog and are left undefined or defined empty.
 */
#undef  KMC_NOTOUCH			/* No linux analog */
#define KMC_NODEBUG	0x00000000	/* Default behavior */
#define KMC_NOMAGAZINE			/* No linux analog */
#define KMC_NOHASH			/* No linux analog */
#define KMC_QCACHE			/* No linux analog */

#define KMC_REAP_CHUNK		256
#define KMC_DEFAULT_SEEKS	DEFAULT_SEEKS

/* Defined by linux slab.h
 * typedef struct kmem_cache_s kmem_cache_t;
 *
 * extern int kmem_ready;
 * extern pgcnt_t kmem_reapahead;
 */
#ifdef DEBUG_KMEM_UNIMPLEMENTED
/* Solaris kmem entry points with no Linux implementation yet; each stub
 * intentionally breaks the build if this section is ever enabled. */
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/* XXX - Used by arc.c to adjust its memory footprint. We may want
 * to use this hook in the future to adjust behavior based on
 * debug levels. For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
	return 0;
}
/* Slab cache constructor/destructor/reclaim callback signatures */
typedef int (*kmem_constructor_t)(void *, void *, int);
typedef void (*kmem_destructor_t)(void *, void *);
typedef void (*kmem_reclaim_t)(void *);

/* Toggle large-allocation warnings; presumably drives kmem_warning_flag
 * above — confirm against the definition in the spl kmem implementation */
extern int kmem_set_warning(int flag);
194 extern kmem_cache_t
*
195 __kmem_cache_create(char *name
, size_t size
, size_t align
,
196 kmem_constructor_t constructor
,
197 kmem_destructor_t destructor
,
198 kmem_reclaim_t reclaim
,
199 void *priv
, void *vmp
, int flags
);
202 extern __kmem_cache_destroy(kmem_cache_t
*cache
);
205 extern __kmem_reap(void);
208 void kmem_fini(void);
/*
 * Map the Solaris slab API onto the helpers above.  kmem_cache_alloc and
 * kmem_cache_free expand to the identically-named Linux functions; this is
 * safe because the preprocessor never re-expands a macro name inside its
 * own expansion (C11 6.10.3.4).
 */
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(cache)	__kmem_cache_destroy(cache)
#define kmem_cache_alloc(cache, flags)	kmem_cache_alloc(cache, flags)
#define kmem_cache_free(cache, ptr)	kmem_cache_free(cache, ptr)
#define kmem_cache_reap_now(cache)	kmem_cache_shrink(cache)
#define kmem_reap()			__kmem_reap()
222 #endif /* _SPL_KMEM_H */