/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#undef DEBUG_KMEM_UNIMPLEMENTED
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <asm/atomic_compat.h>
#include <sys/types.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP                        (GFP_KERNEL | __GFP_NOFAIL)
#define KM_NOSLEEP                      GFP_ATOMIC
#undef  KM_PANIC                        /* No linux analog */
#define KM_PUSHPAGE                     (KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS                      GFP_LEVEL_MASK
#define KM_FLAGS                        __GFP_BITS_MASK
/*
 * Used internally, the kernel does not need to support this flag
 */
#ifndef __GFP_ZERO
# define __GFP_ZERO                     0x8000
#endif
#ifdef DEBUG_KMEM
extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;
# define kmem_alloc(size, flags)        __kmem_alloc((size), (flags), 0, 0)
# define kmem_zalloc(size, flags)       __kmem_alloc((size), ((flags) | \
                                                     __GFP_ZERO), 0, 0)

/* The node alloc functions are only used by the SPL code itself */
# ifdef HAVE_KMALLOC_NODE
#  define kmem_alloc_node(size, flags, node) \
          __kmem_alloc((size), (flags), 1, (node))
# else
#  define kmem_alloc_node(size, flags, node) \
          __kmem_alloc((size), (flags), 0, 0)
# endif

# define vmem_zalloc(size, flags)       vmem_alloc((size), ((flags) | \
                                                   __GFP_ZERO))
# ifdef DEBUG_KMEM_TRACKING

extern void *kmem_alloc_track(size_t size, int flags, const char *func,
                              int line, int node_alloc, int node);
extern void kmem_free_track(void *ptr, size_t size);
extern void *vmem_alloc_track(size_t size, int flags, const char *func,
                              int line);
extern void vmem_free_track(void *ptr, size_t size);

#  define __kmem_alloc(size, flags, na, node)  kmem_alloc_track((size),     \
                                                     (flags), __FUNCTION__, \
                                                     __LINE__, (na), (node))
#  define kmem_free(ptr, size)          kmem_free_track((ptr), (size))
#  define vmem_alloc(size, flags)       vmem_alloc_track((size),            \
                                                     (flags), __FUNCTION__, \
                                                     __LINE__)
#  define vmem_free(ptr, size)          vmem_free_track((ptr), (size))
# else /* DEBUG_KMEM_TRACKING */

extern void *kmem_alloc_debug(size_t size, int flags, const char *func,
                              int line, int node_alloc, int node);
extern void kmem_free_debug(void *ptr, size_t size);
extern void *vmem_alloc_debug(size_t size, int flags, const char *func,
                              int line);
extern void vmem_free_debug(void *ptr, size_t size);

#  define __kmem_alloc(size, flags, na, node)  kmem_alloc_debug((size),     \
                                                     (flags), __FUNCTION__, \
                                                     __LINE__, (na), (node))
#  define kmem_free(ptr, size)          kmem_free_debug((ptr), (size))
#  define vmem_alloc(size, flags)       vmem_alloc_debug((size),            \
                                                     (flags), __FUNCTION__, \
                                                     __LINE__)
#  define vmem_free(ptr, size)          vmem_free_debug((ptr), (size))

# endif /* DEBUG_KMEM_TRACKING */

#else /* DEBUG_KMEM */
# define kmem_alloc(size, flags)        kmalloc((size), (flags))
# define kmem_zalloc(size, flags)       kzalloc((size), (flags))
# define kmem_free(ptr, size)           ((void)(size), kfree(ptr))

# ifdef HAVE_KMALLOC_NODE
#  define kmem_alloc_node(size, flags, node) \
          kmalloc_node((size), (flags), (node))
# else
#  define kmem_alloc_node(size, flags, node) \
          kmalloc((size), (flags))
# endif

# define vmem_alloc(size, flags)        __vmalloc((size), ((flags) |        \
                                                  __GFP_HIGHMEM), PAGE_KERNEL)
# define vmem_zalloc(size, flags)                                           \
({                                                                          \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM),          \
                                PAGE_KERNEL);                               \
        if (_ptr_)                                                          \
                memset(_ptr_, 0, (size));                                   \
        _ptr_;                                                              \
})
# define vmem_free(ptr, size)           ((void)(size), vfree(ptr))

#endif /* DEBUG_KMEM */
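
/*
 * A minimal usage sketch of the allocation macros above; the buffer names
 * and sizes are hypothetical.  Note the Solaris-style convention that the
 * caller passes the allocation size to the free routines as well.
 *
 *      void *buf  = kmem_zalloc(128, KM_SLEEP);
 *      void *vbuf = vmem_alloc(4 * 1024 * 1024, KM_SLEEP);
 *
 *      if (buf != NULL)
 *              kmem_free(buf, 128);
 *      if (vbuf != NULL)
 *              vmem_free(vbuf, 4 * 1024 * 1024);
 */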
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void *
kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
{
#error "kmem_alloc_tryhard() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/*
 * Slab allocation interfaces
 */
enum {
        KMC_BIT_NOTOUCH         = 0,    /* Don't update ages */
        KMC_BIT_NODEBUG         = 1,    /* Default behavior */
        KMC_BIT_NOMAGAZINE      = 2,    /* XXX: Unsupported */
        KMC_BIT_NOHASH          = 3,    /* XXX: Unsupported */
        KMC_BIT_QCACHE          = 4,    /* XXX: Unsupported */
        KMC_BIT_KMEM            = 5,    /* Use kmem cache */
        KMC_BIT_VMEM            = 6,    /* Use vmem cache */
        KMC_BIT_OFFSLAB         = 7,    /* Objects not on slab */
        KMC_BIT_REAPING         = 16,   /* Reaping in progress */
        KMC_BIT_DESTROY         = 17,   /* Destroy in progress */
};
#define KMC_NOTOUCH             (1 << KMC_BIT_NOTOUCH)
#define KMC_NODEBUG             (1 << KMC_BIT_NODEBUG)
#define KMC_NOMAGAZINE          (1 << KMC_BIT_NOMAGAZINE)
#define KMC_NOHASH              (1 << KMC_BIT_NOHASH)
#define KMC_QCACHE              (1 << KMC_BIT_QCACHE)
#define KMC_KMEM                (1 << KMC_BIT_KMEM)
#define KMC_VMEM                (1 << KMC_BIT_VMEM)
#define KMC_OFFSLAB             (1 << KMC_BIT_OFFSLAB)
#define KMC_REAPING             (1 << KMC_BIT_REAPING)
#define KMC_DESTROY             (1 << KMC_BIT_DESTROY)

#define KMC_REAP_CHUNK          INT_MAX
#define KMC_DEFAULT_SEEKS       1
#ifdef DEBUG_KMEM_UNIMPLEMENTED
static __inline__ void kmem_init(void) {
#error "kmem_init() not implemented"
}

static __inline__ void kmem_thread_init(void) {
#error "kmem_thread_init() not implemented"
}

static __inline__ void kmem_mp_init(void) {
#error "kmem_mp_init() not implemented"
}

static __inline__ void kmem_reap_idspace(void) {
#error "kmem_reap_idspace() not implemented"
}

static __inline__ size_t kmem_avail(void) {
#error "kmem_avail() not implemented"
}

static __inline__ size_t kmem_maxavail(void) {
#error "kmem_maxavail() not implemented"
}

static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
#error "kmem_cache_stat() not implemented"
}
#endif /* DEBUG_KMEM_UNIMPLEMENTED */
/* XXX - Used by arc.c to adjust its memory footprint. We may want
 *       to use this hook in the future to adjust behavior based on
 *       debug levels. For now it's safe to always return 0.
 */
static __inline__ int
kmem_debugging(void)
{
        return 0;
}

extern int kmem_set_warning(int flag);
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;
#define SKM_MAGIC                       0x2e2e2e2e
#define SKO_MAGIC                       0x20202020
#define SKS_MAGIC                       0x22222222
#define SKC_MAGIC                       0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY            15      /* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP             0       /* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB     32      /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8       /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN            8       /* Default object alignment */
typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);
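
/*
 * A minimal sketch of a matching constructor/destructor pair.  The argument
 * meanings (object buffer, private data registered at cache creation,
 * allocation flags) are assumptions based on the conventional Solaris
 * kmem_cache constructor signature; my_obj_t, my_ctor and my_dtor are
 * hypothetical names.
 *
 *      typedef struct my_obj {
 *              int     mo_state;
 *      } my_obj_t;
 *
 *      static int
 *      my_ctor(void *obj, void *priv, int kmflags)
 *      {
 *              my_obj_t *mo = obj;
 *              mo->mo_state = 0;
 *              return 0;
 *      }
 *
 *      static void
 *      my_dtor(void *obj, void *priv)
 *      {
 *      }
 */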
typedef struct spl_kmem_magazine {
        uint32_t                skm_magic;      /* Sanity magic */
        uint32_t                skm_avail;      /* Available objects */
        uint32_t                skm_size;       /* Magazine size */
        uint32_t                skm_refill;     /* Batch refill size */
        struct spl_kmem_cache   *skm_cache;     /* Owned by cache */
        struct delayed_work     skm_work;       /* Magazine reclaim work */
        unsigned long           skm_age;        /* Last cache access */
        void                    *skm_objs[0];   /* Object pointers */
} spl_kmem_magazine_t;
typedef struct spl_kmem_obj {
        uint32_t                sko_magic;      /* Sanity magic */
        void                    *sko_addr;      /* Buffer address */
        struct spl_kmem_slab    *sko_slab;      /* Owned by slab */
        struct list_head        sko_list;       /* Free object list linkage */
} spl_kmem_obj_t;
typedef struct spl_kmem_slab {
        uint32_t                sks_magic;      /* Sanity magic */
        uint32_t                sks_objs;       /* Objects per slab */
        struct spl_kmem_cache   *sks_cache;     /* Owned by cache */
        struct list_head        sks_list;       /* Slab list linkage */
        struct list_head        sks_free_list;  /* Free object list */
        unsigned long           sks_age;        /* Last modified (jiffies) */
        uint32_t                sks_ref;        /* Ref count used objects */
} spl_kmem_slab_t;
typedef struct spl_kmem_cache {
        uint32_t                skc_magic;      /* Sanity magic */
        uint32_t                skc_name_size;  /* Name length */
        char                    *skc_name;      /* Name string */
        spl_kmem_magazine_t     *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
        uint32_t                skc_mag_size;   /* Magazine size */
        uint32_t                skc_mag_refill; /* Magazine refill count */
        spl_kmem_ctor_t         skc_ctor;       /* Constructor */
        spl_kmem_dtor_t         skc_dtor;       /* Destructor */
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaim callback */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
        unsigned long           skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_obj_align;  /* Object alignment */
        uint32_t                skc_slab_objs;  /* Objects per slab */
        uint32_t                skc_slab_size;  /* Slab size */
        uint32_t                skc_delay;      /* Slab reclaim interval */
        uint32_t                skc_reap;       /* Slab reclaim count */
        atomic_t                skc_ref;        /* Ref count callers */
        struct delayed_work     skc_work;       /* Slab reclaim work */
        struct list_head        skc_list;       /* List of caches linkage */
        struct list_head        skc_complete_list; /* Completely alloc'ed */
        struct list_head        skc_partial_list;  /* Partially alloc'ed */
        spinlock_t              skc_lock;       /* Cache lock */
        uint64_t                skc_slab_fail;  /* Slab alloc failures */
        uint64_t                skc_slab_create;   /* Slab creates */
        uint64_t                skc_slab_destroy;  /* Slab destroys */
        uint64_t                skc_slab_total; /* Slab total current */
        uint64_t                skc_slab_alloc; /* Slab alloc current */
        uint64_t                skc_slab_max;   /* Slab max historic */
        uint64_t                skc_obj_total;  /* Obj total current */
        uint64_t                skc_obj_alloc;  /* Obj alloc current */
        uint64_t                skc_obj_max;    /* Obj max historic */
} spl_kmem_cache_t;
#define kmem_cache_t            spl_kmem_cache_t
extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
                      spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
                      spl_kmem_reclaim_t reclaim,
                      void *priv, void *vmp, int flags);
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

int spl_kmem_init_kallsyms_lookup(void);
int spl_kmem_init(void);
void spl_kmem_fini(void);
#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
        spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)        spl_kmem_cache_reap_now(skc)
#define kmem_reap()                     spl_kmem_reap()
#define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) <  (void *)VMALLOC_END))
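
/*
 * A minimal cache lifecycle sketch using the Solaris-compatible wrappers
 * above.  The cache name, object type, and callbacks (my_obj_t, my_ctor,
 * my_dtor) are hypothetical; KMC_KMEM requests a kmem-backed cache per the
 * flag comments above.
 *
 *      kmem_cache_t *cache;
 *      my_obj_t *obj;
 *
 *      cache = kmem_cache_create("my_obj_cache", sizeof (my_obj_t), 0,
 *                                my_ctor, my_dtor, NULL, NULL, NULL,
 *                                KMC_KMEM);
 *      obj = kmem_cache_alloc(cache, KM_SLEEP);
 *      ...
 *      kmem_cache_free(cache, obj);
 *      kmem_cache_destroy(cache);
 */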
#endif  /* _SPL_KMEM_H */