/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_compat.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <asm/atomic.h>
#include <sys/types.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>

/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL
#define KM_NOSLEEP      GFP_ATOMIC
#undef  KM_PANIC                        /* No Linux analog */
#define KM_PUSHPAGE     (KM_SLEEP | __GFP_HIGH)
#define KM_VMFLAGS      GFP_LEVEL_MASK
#define KM_FLAGS        __GFP_BITS_MASK
#define KM_NODEBUG      __GFP_NOWARN
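
/*
 * A minimal usage sketch (illustrative, not part of the original header):
 * KM_SLEEP maps to GFP_KERNEL and may block, so it is only safe in process
 * context; KM_NOSLEEP maps to GFP_ATOMIC for atomic/interrupt context.  The
 * function and buffer names below are hypothetical.
 */
#if 0
static int
example_alloc(void)
{
        void *buf;

        /* Process context: KM_SLEEP may block until memory is available */
        buf = kmem_alloc(1024, KM_SLEEP);
        kmem_free(buf, 1024);

        /* Atomic context: KM_NOSLEEP may fail, so check for NULL */
        buf = kmem_alloc(1024, KM_NOSLEEP);
        if (buf == NULL)
                return (-ENOMEM);

        kmem_free(buf, 1024);
        return (0);
}
#endif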

/*
 * Used internally; the kernel does not need to support this flag
 */
#ifndef __GFP_ZERO
# define __GFP_ZERO     0x8000
#endif

/*
 * __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
 * early as 2.6.32.  To avoid this issue when it occurs in upstream kernels
 * we retry the allocation here as long as the flags allow the allocator to
 * sleep (__GFP_WAIT is set); GFP_ATOMIC allocations are never retried.  I
 * would prefer the caller handle the failure case cleanly, but we are
 * trying to emulate Solaris and those are not the Solaris semantics.
 */
static inline void *
kmalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        do {
                ptr = kmalloc(size, flags);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}

static inline void *
kzalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        do {
                ptr = kzalloc(size, flags);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}

#ifdef HAVE_KMALLOC_NODE
static inline void *
kmalloc_node_nofail(size_t size, gfp_t flags, int node)
{
        void *ptr;

        do {
                ptr = kmalloc_node(size, flags, node);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}
#endif /* HAVE_KMALLOC_NODE */

#ifdef DEBUG_KMEM
# ifdef HAVE_ATOMIC64_T

extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# define kmem_alloc_used_add(size)      atomic64_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)      atomic64_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()         atomic64_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)      atomic64_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)      atomic64_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)      atomic64_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()         atomic64_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)      atomic64_set(&vmem_alloc_used, size)

# else /* HAVE_ATOMIC64_T */

extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# define kmem_alloc_used_add(size)      atomic_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)      atomic_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()         atomic_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)      atomic_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)      atomic_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)      atomic_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()         atomic_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)      atomic_set(&vmem_alloc_used, size)

# endif /* HAVE_ATOMIC64_T */

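/*
 * Illustrative sketch (not part of the original header) of how the
 * accounting wrappers above are intended to be used: each tracked
 * allocation bumps the running total and updates the historic maximum.
 * The function below is hypothetical and only meaningful when DEBUG_KMEM
 * is defined; the real logic lives in the kmem_alloc_track/debug
 * implementations.
 */
#if 0
static void
example_account_alloc(size_t size)
{
        /* Bump the running total, then update the historic maximum */
        kmem_alloc_used_add(size);
        if (kmem_alloc_used_read() > kmem_alloc_max)
                kmem_alloc_max = kmem_alloc_used_read();
}
#endif
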
# define kmem_alloc(size, flags)        __kmem_alloc((size), (flags), 0, 0)
# define kmem_zalloc(size, flags)       __kmem_alloc((size), ((flags) | \
                                            __GFP_ZERO), 0, 0)

/* The node alloc functions are only used by the SPL code itself */
# ifdef HAVE_KMALLOC_NODE
# define kmem_alloc_node(size, flags, node) \
        __kmem_alloc((size), (flags), 1, node)
# else
# define kmem_alloc_node(size, flags, node) \
        __kmem_alloc((size), (flags), 0, 0)
# endif

# define vmem_zalloc(size, flags)       vmem_alloc((size), ((flags) | \
                                            __GFP_ZERO))

# ifdef DEBUG_KMEM_TRACKING

extern void *kmem_alloc_track(size_t size, int flags, const char *func,
    int line, int node_alloc, int node);
extern void kmem_free_track(void *ptr, size_t size);
extern void *vmem_alloc_track(size_t size, int flags, const char *func,
    int line);
extern void vmem_free_track(void *ptr, size_t size);

# define __kmem_alloc(size, flags, na, node)    kmem_alloc_track((size), \
                                                    (flags), __FUNCTION__, \
                                                    __LINE__, (na), (node))
# define kmem_free(ptr, size)                   kmem_free_track((ptr), (size))
# define vmem_alloc(size, flags)                vmem_alloc_track((size), \
                                                    (flags), __FUNCTION__, \
                                                    __LINE__)
# define vmem_free(ptr, size)                   vmem_free_track((ptr), (size))

# else /* DEBUG_KMEM_TRACKING */

extern void *kmem_alloc_debug(size_t size, int flags, const char *func,
    int line, int node_alloc, int node);
extern void kmem_free_debug(void *ptr, size_t size);
extern void *vmem_alloc_debug(size_t size, int flags, const char *func,
    int line);
extern void vmem_free_debug(void *ptr, size_t size);

# define __kmem_alloc(size, flags, na, node)    kmem_alloc_debug((size), \
                                                    (flags), __FUNCTION__, \
                                                    __LINE__, (na), (node))
# define kmem_free(ptr, size)                   kmem_free_debug((ptr), (size))
# define vmem_alloc(size, flags)                vmem_alloc_debug((size), \
                                                    (flags), __FUNCTION__, \
                                                    __LINE__)
# define vmem_free(ptr, size)                   vmem_free_debug((ptr), (size))

# endif /* DEBUG_KMEM_TRACKING */

#else /* DEBUG_KMEM */

# define kmem_alloc(size, flags)        kmalloc_nofail((size), (flags))
# define kmem_zalloc(size, flags)       kzalloc_nofail((size), (flags))
# define kmem_free(ptr, size)           ((void)(size), kfree(ptr))

# ifdef HAVE_KMALLOC_NODE
# define kmem_alloc_node(size, flags, node) \
        kmalloc_node_nofail((size), (flags), (node))
# else
# define kmem_alloc_node(size, flags, node) \
        kmalloc_nofail((size), (flags))
# endif

# define vmem_alloc(size, flags)        __vmalloc((size), ((flags) | \
                                            __GFP_HIGHMEM), PAGE_KERNEL)
# define vmem_zalloc(size, flags)                                       \
({                                                                      \
        void *_ptr_ = __vmalloc((size), ((flags) | __GFP_HIGHMEM),      \
            PAGE_KERNEL);                                               \
        if (_ptr_)                                                      \
                memset(_ptr_, 0, (size));                               \
        _ptr_;                                                          \
})
# define vmem_free(ptr, size)           ((void)(size), vfree(ptr))

#endif /* DEBUG_KMEM */
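
/*
 * Usage sketch (illustrative, not part of the original header): vmem_alloc()
 * is backed by vmalloc() and suits large, virtually contiguous buffers; the
 * caller must pass the original size back to vmem_free().  Names below are
 * hypothetical.
 */
#if 0
static void
example_vmem(void)
{
        size_t size = 16 * 1024 * 1024;
        void *buf;

        buf = vmem_zalloc(size, KM_SLEEP);      /* zeroed, may sleep */
        if (buf != NULL)
                vmem_free(buf, size);
}
#endif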

/*
 * Slab allocation interfaces
 */
enum {
        KMC_BIT_NOTOUCH         = 0,    /* Don't update ages */
        KMC_BIT_NODEBUG         = 1,    /* Default behavior */
        KMC_BIT_NOMAGAZINE      = 2,    /* XXX: Unsupported */
        KMC_BIT_NOHASH          = 3,    /* XXX: Unsupported */
        KMC_BIT_QCACHE          = 4,    /* XXX: Unsupported */
        KMC_BIT_KMEM            = 5,    /* Use kmem cache */
        KMC_BIT_VMEM            = 6,    /* Use vmem cache */
        KMC_BIT_OFFSLAB         = 7,    /* Objects not on slab */
        KMC_BIT_REAPING         = 16,   /* Reaping in progress */
        KMC_BIT_DESTROY         = 17,   /* Destroy in progress */
};

#define KMC_NOTOUCH             (1 << KMC_BIT_NOTOUCH)
#define KMC_NODEBUG             (1 << KMC_BIT_NODEBUG)
#define KMC_NOMAGAZINE          (1 << KMC_BIT_NOMAGAZINE)
#define KMC_NOHASH              (1 << KMC_BIT_NOHASH)
#define KMC_QCACHE              (1 << KMC_BIT_QCACHE)
#define KMC_KMEM                (1 << KMC_BIT_KMEM)
#define KMC_VMEM                (1 << KMC_BIT_VMEM)
#define KMC_OFFSLAB             (1 << KMC_BIT_OFFSLAB)
#define KMC_REAPING             (1 << KMC_BIT_REAPING)
#define KMC_DESTROY             (1 << KMC_BIT_DESTROY)

#define KMC_REAP_CHUNK          INT_MAX
#define KMC_DEFAULT_SEEKS       1

extern int kmem_debugging(void);
extern char *kmem_asprintf(const char *fmt, ...);
#define strfree(str)            kfree(str)
#define strdup(str)             kstrdup(str, GFP_KERNEL)
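
/*
 * Usage sketch (illustrative, not part of the original header):
 * kmem_asprintf() allocates a buffer just large enough for the formatted
 * string; the result must be released with strfree(), not kmem_free(),
 * since the caller never learns the allocation size.  The function name
 * below is hypothetical.
 */
#if 0
static void
example_asprintf(int unit)
{
        char *name;

        name = kmem_asprintf("spl-cache-%d", unit);
        if (name != NULL)
                strfree(name);
}
#endif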

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define SKM_MAGIC                       0x2e2e2e2e
#define SKO_MAGIC                       0x20202020
#define SKS_MAGIC                       0x22222222
#define SKC_MAGIC                       0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY            15      /* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP             0       /* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB     32      /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8       /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN            8       /* Default object alignment */

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

typedef struct spl_kmem_magazine {
        uint32_t                skm_magic;      /* Sanity magic */
        uint32_t                skm_avail;      /* Available objects */
        uint32_t                skm_size;       /* Magazine size */
        uint32_t                skm_refill;     /* Batch refill size */
        struct spl_kmem_cache   *skm_cache;     /* Owned by cache */
        struct delayed_work     skm_work;       /* Magazine reclaim work */
        unsigned long           skm_age;        /* Last cache access */
        void                    *skm_objs[0];   /* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
        uint32_t                sko_magic;      /* Sanity magic */
        void                    *sko_addr;      /* Buffer address */
        struct spl_kmem_slab    *sko_slab;      /* Owned by slab */
        struct list_head        sko_list;       /* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
        uint32_t                sks_magic;      /* Sanity magic */
        uint32_t                sks_objs;       /* Objects per slab */
        struct spl_kmem_cache   *sks_cache;     /* Owned by cache */
        struct list_head        sks_list;       /* Slab list linkage */
        struct list_head        sks_free_list;  /* Free object list */
        unsigned long           sks_age;        /* Last modify jiffy */
        uint32_t                sks_ref;        /* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_cache {
        uint32_t                skc_magic;      /* Sanity magic */
        uint32_t                skc_name_size;  /* Name length */
        char                    *skc_name;      /* Name string */
        spl_kmem_magazine_t     *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
        uint32_t                skc_mag_size;   /* Magazine size */
        uint32_t                skc_mag_refill; /* Magazine refill count */
        spl_kmem_ctor_t         skc_ctor;       /* Constructor */
        spl_kmem_dtor_t         skc_dtor;       /* Destructor */
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaim callback */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
        unsigned long           skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_obj_align;  /* Object alignment */
        uint32_t                skc_slab_objs;  /* Objects per slab */
        uint32_t                skc_slab_size;  /* Slab size */
        uint32_t                skc_delay;      /* Slab reclaim interval */
        uint32_t                skc_reap;       /* Slab reclaim count */
        atomic_t                skc_ref;        /* Ref count callers */
        struct delayed_work     skc_work;       /* Slab reclaim work */
        struct list_head        skc_list;       /* List of caches linkage */
        struct list_head        skc_complete_list; /* Completely alloc'ed */
        struct list_head        skc_partial_list;  /* Partially alloc'ed */
        spinlock_t              skc_lock;       /* Cache lock */
        uint64_t                skc_slab_fail;  /* Slab alloc failures */
        uint64_t                skc_slab_create; /* Slab creates */
        uint64_t                skc_slab_destroy; /* Slab destroys */
        uint64_t                skc_slab_total; /* Slab total current */
        uint64_t                skc_slab_alloc; /* Slab alloc current */
        uint64_t                skc_slab_max;   /* Slab max historic */
        uint64_t                skc_obj_total;  /* Obj total current */
        uint64_t                skc_obj_alloc;  /* Obj alloc current */
        uint64_t                skc_obj_max;    /* Obj max historic */
} spl_kmem_cache_t;
#define kmem_cache_t            spl_kmem_cache_t

extern spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags);

extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
extern void spl_kmem_reap(void);

int spl_kmem_init_kallsyms_lookup(void);
int spl_kmem_init(void);
void spl_kmem_fini(void);

#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags) \
	spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, flags)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)        spl_kmem_cache_reap_now(skc)
#define kmem_reap()                     spl_kmem_reap()
#define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                            ((ptr) < (void *)VMALLOC_END))
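
/*
 * Lifecycle sketch (illustrative, not part of the original header): create a
 * cache with the Solaris-style wrappers above, allocate and free an object,
 * then destroy the cache.  The object type, constructor, and function names
 * below are hypothetical.
 */
#if 0
typedef struct example_obj {
        int eo_state;
} example_obj_t;

static int
example_ctor(void *obj, void *priv, int flags)
{
        ((example_obj_t *)obj)->eo_state = 0;
        return (0);
}

static void
example_cache_lifecycle(void)
{
        kmem_cache_t *cache;
        example_obj_t *obj;

        /* No destructor, reclaim callback, private data, or vmem arena */
        cache = kmem_cache_create("example_cache", sizeof (example_obj_t),
            SPL_KMEM_CACHE_ALIGN, example_ctor, NULL, NULL, NULL, NULL, 0);

        obj = kmem_cache_alloc(cache, KM_SLEEP);
        kmem_cache_free(cache, obj);
        kmem_cache_destroy(cache);
}
#endif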

#endif /* _SPL_KMEM_H */