/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/ctype.h>
#include <asm/atomic.h>
#include <sys/types.h>
#include <sys/vmsystm.h>
#include <sys/kstat.h>

/*
 * Memory allocation interfaces
 */
#define KM_SLEEP        GFP_KERNEL              /* Can sleep, never fails */
#define KM_NOSLEEP      GFP_ATOMIC              /* Can not sleep, may fail */
#define KM_PUSHPAGE     (GFP_NOIO | __GFP_HIGH) /* Use reserved memory */
#define KM_NODEBUG      __GFP_NOWARN            /* Suppress warnings */
#define KM_FLAGS        __GFP_BITS_MASK
#define KM_VMFLAGS      GFP_LEVEL_MASK
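
/*
 * Illustrative example (not part of this header): Solaris-style callers
 * pass these KM_* flags where native Linux code would pass GFP_* flags.
 *
 *      buf = kmem_alloc(len, KM_SLEEP);        // may sleep, never fails
 *      buf = kmem_alloc(len, KM_NOSLEEP);      // atomic context, may be NULL
 *      buf = kmem_alloc(len, KM_PUSHPAGE);     // writeback/reclaim paths
 */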

/*
 * Used internally, the kernel does not need to support this flag
 */
#ifndef __GFP_ZERO
# define __GFP_ZERO     0x8000
#endif

/*
 * PF_NOFS is a per-process debug flag which is set in current->flags to
 * detect when a process is performing an unsafe allocation.  All tasks
 * with PF_NOFS set must strictly use KM_PUSHPAGE for allocations because
 * if they enter direct reclaim and initiate I/O they may deadlock.
 *
 * When debugging is disabled, any incorrect usage will be detected and
 * a call stack with a warning will be printed to the console.  The flags
 * will then be automatically corrected to allow for safe execution.  If
 * debugging is enabled this will be treated as a fatal condition.
 *
 * To avoid any risk of conflicting with the existing PF_ flags, the
 * PF_NOFS bit shadows the rarely used PF_MUTEX_TESTER bit.  Only when
 * CONFIG_RT_MUTEX_TESTER is not set, and we know this bit is unused,
 * will the PF_NOFS bit be valid.  Happily, most existing distributions
 * ship a kernel with CONFIG_RT_MUTEX_TESTER disabled.
 */
#if !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER)
# define PF_NOFS        PF_MUTEX_TESTER

static inline void
sanitize_flags(struct task_struct *p, gfp_t *flags)
{
        if (unlikely((p->flags & PF_NOFS) && (*flags & (__GFP_IO|__GFP_FS)))) {
# ifdef NDEBUG
                SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "Fixing allocation for "
                    "task %s (%d) which used GFP flags 0x%x with PF_NOFS set\n",
                    p->comm, p->pid, flags);
                spl_debug_dumpstack(p);
                *flags &= ~(__GFP_IO|__GFP_FS);
# else
                PANIC("FATAL allocation for task %s (%d) which used GFP "
                    "flags 0x%x with PF_NOFS set\n", p->comm, p->pid, flags);
# endif /* NDEBUG */
        }
}

#else
# define PF_NOFS                0x00000000
# define sanitize_flags(p, fl)  ((void)0)
#endif /* !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER) */
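
/*
 * Illustrative sketch (not part of this header): a task entering an
 * I/O-sensitive region marks itself with PF_NOFS, after which any
 * allocation that passes __GFP_IO or __GFP_FS is caught, and either
 * fixed or treated as fatal, by sanitize_flags() above.
 *
 *      current->flags |= PF_NOFS;
 *      ptr = kmem_alloc(size, KM_PUSHPAGE);    // safe: no __GFP_IO/__GFP_FS
 *      ptr = kmem_alloc(size, KM_SLEEP);       // unsafe: would be detected
 *      current->flags &= ~PF_NOFS;
 */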

/*
 * __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
 * early as 2.6.32.  To avoid this issue when it occurs in upstream kernels
 * we retry the allocation here as long as __GFP_WAIT is set (i.e. the
 * flags are not GFP_ATOMIC).  I would prefer the caller handle the failure
 * case cleanly, but we are trying to emulate Solaris and those are not
 * the Solaris semantics.
 */
static inline void *
kmalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        sanitize_flags(current, &flags);

        do {
                ptr = kmalloc(size, flags);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}
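
/*
 * Example (illustrative): because the loop above retries while
 * __GFP_WAIT is set, a KM_SLEEP allocation never returns NULL; only
 * KM_NOSLEEP (GFP_ATOMIC, which lacks __GFP_WAIT) callers must check.
 *
 *      void *a = kmalloc_nofail(sz, KM_SLEEP);         // never NULL
 *      void *b = kmalloc_nofail(sz, KM_NOSLEEP);       // may be NULL
 */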

static inline void *
kzalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        sanitize_flags(current, &flags);

        do {
                ptr = kzalloc(size, flags);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}

static inline void *
kmalloc_node_nofail(size_t size, gfp_t flags, int node)
{
#ifdef HAVE_KMALLOC_NODE
        void *ptr;

        sanitize_flags(current, &flags);

        do {
                ptr = kmalloc_node(size, flags, node);
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
#else
        return kmalloc_nofail(size, flags);
#endif /* HAVE_KMALLOC_NODE */
}

static inline void *
vmalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        sanitize_flags(current, &flags);

        /*
         * Retry failed __vmalloc() allocations once every second.  The
         * rationale for the delay is that the likely failure modes are:
         *
         * 1) The system has completely exhausted memory, in which case
         *    delaying 1 second for the memory reclaim to run is reasonable
         *    to avoid thrashing the system.
         * 2) The system has memory but has exhausted the small virtual
         *    address space available on 32-bit systems.  Retrying the
         *    allocation immediately will only result in spinning on the
         *    virtual address space lock.  It is better to delay a second
         *    and hope that another process will free some of the address
         *    space.  But the bottom line is there is not much we can
         *    actually do since we can never safely return a failure and
         *    honor the Solaris semantics.
         */
        do {
                ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
                if (unlikely((ptr == NULL) && (flags & __GFP_WAIT))) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(HZ);
                }
        } while (ptr == NULL && (flags & __GFP_WAIT));

        return ptr;
}

static inline void *
vzalloc_nofail(size_t size, gfp_t flags)
{
        void *ptr;

        ptr = vmalloc_nofail(size, flags);
        if (ptr)
                memset(ptr, 0, (size));

        return ptr;
}

#ifdef DEBUG_KMEM

/*
 * Memory accounting functions to be used only when DEBUG_KMEM is set.
 */
# ifdef HAVE_ATOMIC64_T

# define kmem_alloc_used_add(size)      atomic64_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)      atomic64_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()         atomic64_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)      atomic64_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)      atomic64_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)      atomic64_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()         atomic64_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)      atomic64_set(&vmem_alloc_used, size)

extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# else /* HAVE_ATOMIC64_T */

# define kmem_alloc_used_add(size)      atomic_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)      atomic_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()         atomic_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)      atomic_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)      atomic_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)      atomic_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()         atomic_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)      atomic_set(&vmem_alloc_used, size)

extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# endif /* HAVE_ATOMIC64_T */
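
/*
 * Sketch (illustrative; the real accounting lives in spl-kmem.c): an
 * allocation path is expected to pair the *_add() and *_sub() helpers
 * around the object lifetime, roughly as follows.
 *
 *      ptr = kmalloc_nofail(size, flags);
 *      if (ptr) {
 *              kmem_alloc_used_add(size);
 *              if (kmem_alloc_used_read() > kmem_alloc_max)
 *                      kmem_alloc_max = kmem_alloc_used_read();
 *      }
 *      ...
 *      kmem_alloc_used_sub(size);
 *      kfree(ptr);
 */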

# ifdef DEBUG_KMEM_TRACKING

/*
 * DEBUG_KMEM && DEBUG_KMEM_TRACKING
 *
 * The maximum level of memory debugging.  All memory will be accounted
 * for and each allocation will be explicitly tracked.  Any allocation
 * which is leaked will be reported on module unload and the exact location
 * where that memory was allocated will be reported.  This level of memory
 * tracking will have a significant impact on performance and should only
 * be enabled for debugging.  This feature may be enabled by passing
 * --enable-debug-kmem-tracking to configure.
 */

# define kmem_alloc(sz, fl)             kmem_alloc_track((sz), (fl), \
                                            __FUNCTION__, __LINE__, 0, 0)
# define kmem_zalloc(sz, fl)            kmem_alloc_track((sz), (fl)|__GFP_ZERO, \
                                            __FUNCTION__, __LINE__, 0, 0)
# define kmem_alloc_node(sz, fl, nd)    kmem_alloc_track((sz), (fl), \
                                            __FUNCTION__, __LINE__, 1, nd)
# define kmem_free(ptr, sz)             kmem_free_track((ptr), (sz))

# define vmem_alloc(sz, fl)             vmem_alloc_track((sz), (fl), \
                                            __FUNCTION__, __LINE__)
# define vmem_zalloc(sz, fl)            vmem_alloc_track((sz), (fl)|__GFP_ZERO, \
                                            __FUNCTION__, __LINE__)
# define vmem_free(ptr, sz)             vmem_free_track((ptr), (sz))

extern void *kmem_alloc_track(size_t, int, const char *, int, int, int);
extern void kmem_free_track(const void *, size_t);
extern void *vmem_alloc_track(size_t, int, const char *, int);
extern void vmem_free_track(const void *, size_t);
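
/*
 * Example: under DEBUG_KMEM_TRACKING a call such as
 *
 *      ptr = kmem_alloc(size, KM_SLEEP);
 *
 * expands to kmem_alloc_track((size), (KM_SLEEP), __FUNCTION__,
 * __LINE__, 0, 0), so every allocation records its call site and any
 * leak can be attributed to an exact function and line on unload.
 */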

# else /* DEBUG_KMEM_TRACKING */

/*
 * DEBUG_KMEM && !DEBUG_KMEM_TRACKING
 *
 * The default build will set DEBUG_KMEM.  This provides basic memory
 * accounting with little to no impact on performance.  When the module
 * is unloaded, if any memory was leaked the total number of leaked bytes
 * will be reported on the console.  To disable this basic accounting
 * pass the --disable-debug-kmem option to configure.
 */

# define kmem_alloc(sz, fl)             kmem_alloc_debug((sz), (fl), \
                                            __FUNCTION__, __LINE__, 0, 0)
# define kmem_zalloc(sz, fl)            kmem_alloc_debug((sz), (fl)|__GFP_ZERO, \
                                            __FUNCTION__, __LINE__, 0, 0)
# define kmem_alloc_node(sz, fl, nd)    kmem_alloc_debug((sz), (fl), \
                                            __FUNCTION__, __LINE__, 1, nd)
# define kmem_free(ptr, sz)             kmem_free_debug((ptr), (sz))

# define vmem_alloc(sz, fl)             vmem_alloc_debug((sz), (fl), \
                                            __FUNCTION__, __LINE__)
# define vmem_zalloc(sz, fl)            vmem_alloc_debug((sz), (fl)|__GFP_ZERO, \
                                            __FUNCTION__, __LINE__)
# define vmem_free(ptr, sz)             vmem_free_debug((ptr), (sz))

extern void *kmem_alloc_debug(size_t, int, const char *, int, int, int);
extern void kmem_free_debug(const void *, size_t);
extern void *vmem_alloc_debug(size_t, int, const char *, int);
extern void vmem_free_debug(const void *, size_t);

# endif /* DEBUG_KMEM_TRACKING */

#else /* DEBUG_KMEM */

/*
 * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING
 *
 * All debugging is disabled.  There will be no overhead even for
 * minimal memory accounting.  To enable basic accounting pass the
 * --enable-debug-kmem option to configure.
 */

# define kmem_alloc(sz, fl)             kmalloc_nofail((sz), (fl))
# define kmem_zalloc(sz, fl)            kzalloc_nofail((sz), (fl))
# define kmem_alloc_node(sz, fl, nd)    kmalloc_node_nofail((sz), (fl), (nd))
# define kmem_free(ptr, sz)             ((void)(sz), kfree(ptr))

# define vmem_alloc(sz, fl)             vmalloc_nofail((sz), (fl))
# define vmem_zalloc(sz, fl)            vzalloc_nofail((sz), (fl))
# define vmem_free(ptr, sz)             ((void)(sz), vfree(ptr))

#endif /* DEBUG_KMEM */
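
/*
 * Usage example (illustrative; struct foo is hypothetical): regardless
 * of the debug level selected above, callers use the same Solaris-style
 * interfaces and must pass the original size back to the free routine.
 *
 *      struct foo *fp = kmem_zalloc(sizeof (struct foo), KM_SLEEP);
 *      kmem_free(fp, sizeof (struct foo));
 *
 *      void *tab = vmem_alloc(1024 * 1024, KM_SLEEP);  // large buffer
 *      vmem_free(tab, 1024 * 1024);
 */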

extern int kmem_debugging(void);
extern char *kmem_vasprintf(const char *fmt, va_list ap);
extern char *kmem_asprintf(const char *fmt, ...);
extern char *strdup(const char *str);
extern void strfree(char *str);
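
/*
 * Example (illustrative; 'id' is hypothetical): strings returned by
 * kmem_vasprintf(), kmem_asprintf() and strdup() must be released
 * with strfree(), not kfree().
 *
 *      char *name = kmem_asprintf("cache-%d", id);
 *      ...
 *      strfree(name);
 */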

/*
 * Slab allocation interfaces.  The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space.  The virtual
 * slabs allow for good behavior when allocating large objects of identical
 * size.  This slab implementation also supports both constructors and
 * destructors which the Linux slab does not.
 */
enum {
        KMC_BIT_NOTOUCH     = 0,    /* Don't update ages */
        KMC_BIT_NODEBUG     = 1,    /* Default behavior */
        KMC_BIT_NOMAGAZINE  = 2,    /* XXX: Unsupported */
        KMC_BIT_NOHASH      = 3,    /* XXX: Unsupported */
        KMC_BIT_QCACHE      = 4,    /* XXX: Unsupported */
        KMC_BIT_KMEM        = 5,    /* Use kmem cache */
        KMC_BIT_VMEM        = 6,    /* Use vmem cache */
        KMC_BIT_OFFSLAB     = 7,    /* Objects not on slab */
        KMC_BIT_NOEMERGENCY = 8,    /* Disable emergency objects */
        KMC_BIT_GROWING     = 15,   /* Growing in progress */
        KMC_BIT_REAPING     = 16,   /* Reaping in progress */
        KMC_BIT_DESTROY     = 17,   /* Destroy in progress */
        KMC_BIT_TOTAL       = 18,   /* Proc handler helper bit */
        KMC_BIT_ALLOC       = 19,   /* Proc handler helper bit */
        KMC_BIT_MAX         = 20,   /* Proc handler helper bit */
};

/* kmem move callback return values */
typedef enum kmem_cbrc {
        KMEM_CBRC_YES       = 0,    /* Object moved */
        KMEM_CBRC_NO        = 1,    /* Object not moved */
        KMEM_CBRC_LATER     = 2,    /* Object not moved, try again later */
        KMEM_CBRC_DONT_NEED = 3,    /* Neither object is needed */
        KMEM_CBRC_DONT_KNOW = 4,    /* Object unknown */
} kmem_cbrc_t;
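
/*
 * Illustrative sketch of a move callback as registered with
 * spl_kmem_cache_set_move(); the foo_* names are hypothetical.
 *
 *      static kmem_cbrc_t
 *      foo_move(void *old, void *new, size_t size, void *priv)
 *      {
 *              if (foo_object_busy(old))
 *                      return (KMEM_CBRC_LATER);       // retry later
 *
 *              memcpy(new, old, size);                 // relocate object
 *              return (KMEM_CBRC_YES);                 // object moved
 *      }
 */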

#define KMC_NOTOUCH             (1 << KMC_BIT_NOTOUCH)
#define KMC_NODEBUG             (1 << KMC_BIT_NODEBUG)
#define KMC_NOMAGAZINE          (1 << KMC_BIT_NOMAGAZINE)
#define KMC_NOHASH              (1 << KMC_BIT_NOHASH)
#define KMC_QCACHE              (1 << KMC_BIT_QCACHE)
#define KMC_KMEM                (1 << KMC_BIT_KMEM)
#define KMC_VMEM                (1 << KMC_BIT_VMEM)
#define KMC_OFFSLAB             (1 << KMC_BIT_OFFSLAB)
#define KMC_NOEMERGENCY         (1 << KMC_BIT_NOEMERGENCY)
#define KMC_GROWING             (1 << KMC_BIT_GROWING)
#define KMC_REAPING             (1 << KMC_BIT_REAPING)
#define KMC_DESTROY             (1 << KMC_BIT_DESTROY)
#define KMC_TOTAL               (1 << KMC_BIT_TOTAL)
#define KMC_ALLOC               (1 << KMC_BIT_ALLOC)
#define KMC_MAX                 (1 << KMC_BIT_MAX)

#define KMC_REAP_CHUNK          INT_MAX
#define KMC_DEFAULT_SEEKS       1

extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define SKM_MAGIC                       0x2e2e2e2e
#define SKO_MAGIC                       0x20202020
#define SKS_MAGIC                       0x22222222
#define SKC_MAGIC                       0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY            15      /* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP             0       /* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB     16      /* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 8       /* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN            8       /* Default object alignment */

#define POINTER_IS_VALID(p)             0       /* Unimplemented */
#define POINTER_INVALIDATE(pp)                  /* Unimplemented */

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

typedef struct spl_kmem_magazine {
        uint32_t                skm_magic;      /* Sanity magic */
        uint32_t                skm_avail;      /* Available objects */
        uint32_t                skm_size;       /* Magazine size */
        uint32_t                skm_refill;     /* Batch refill size */
        struct spl_kmem_cache   *skm_cache;     /* Owned by cache */
        struct delayed_work     skm_work;       /* Magazine reclaim work */
        unsigned long           skm_age;        /* Last cache access */
        unsigned int            skm_cpu;        /* Owned by cpu */
        void                    *skm_objs[0];   /* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
        uint32_t                sko_magic;      /* Sanity magic */
        void                    *sko_addr;      /* Buffer address */
        struct spl_kmem_slab    *sko_slab;      /* Owned by slab */
        struct list_head        sko_list;       /* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
        uint32_t                sks_magic;      /* Sanity magic */
        uint32_t                sks_objs;       /* Objects per slab */
        struct spl_kmem_cache   *sks_cache;     /* Owned by cache */
        struct list_head        sks_list;       /* Slab list linkage */
        struct list_head        sks_free_list;  /* Free object list */
        unsigned long           sks_age;        /* Last modify jiffie */
        uint32_t                sks_ref;        /* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
        struct spl_kmem_cache   *ska_cache;     /* Owned by cache */
        int                     ska_flags;      /* Allocation flags */
        struct delayed_work     ska_work;       /* Allocation work */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
        void                    *ske_obj;       /* Buffer address */
        struct list_head        ske_list;       /* Emergency list linkage */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
        uint32_t                skc_magic;      /* Sanity magic */
        uint32_t                skc_name_size;  /* Name length */
        char                    *skc_name;      /* Name string */
        spl_kmem_magazine_t     *skc_mag[NR_CPUS]; /* Per-CPU warm cache */
        uint32_t                skc_mag_size;   /* Magazine size */
        uint32_t                skc_mag_refill; /* Magazine refill count */
        spl_kmem_ctor_t         skc_ctor;       /* Constructor */
        spl_kmem_dtor_t         skc_dtor;       /* Destructor */
        spl_kmem_reclaim_t      skc_reclaim;    /* Reclaimator */
        void                    *skc_private;   /* Private data */
        void                    *skc_vmp;       /* Unused */
        unsigned long           skc_flags;      /* Flags */
        uint32_t                skc_obj_size;   /* Object size */
        uint32_t                skc_obj_align;  /* Object alignment */
        uint32_t                skc_slab_objs;  /* Objects per slab */
        uint32_t                skc_slab_size;  /* Slab size */
        uint32_t                skc_delay;      /* Slab reclaim interval */
        uint32_t                skc_reap;       /* Slab reclaim count */
        atomic_t                skc_ref;        /* Ref count callers */
        struct delayed_work     skc_work;       /* Slab reclaim work */
        struct list_head        skc_list;       /* List of caches linkage */
        struct list_head        skc_complete_list; /* Completely alloc'ed */
        struct list_head        skc_partial_list; /* Partially alloc'ed */
        struct list_head        skc_emergency_list; /* Min sized objects */
        spinlock_t              skc_lock;       /* Cache lock */
        wait_queue_head_t       skc_waitq;      /* Allocation waiters */
        uint64_t                skc_slab_fail;  /* Slab alloc failures */
        uint64_t                skc_slab_create; /* Slab creates */
        uint64_t                skc_slab_destroy; /* Slab destroys */
        uint64_t                skc_slab_total; /* Slab total current */
        uint64_t                skc_slab_alloc; /* Slab alloc current */
        uint64_t                skc_slab_max;   /* Slab max historic */
        uint64_t                skc_obj_total;  /* Obj total current */
        uint64_t                skc_obj_alloc;  /* Obj alloc current */
        uint64_t                skc_obj_max;    /* Obj max historic */
        uint64_t                skc_obj_emergency; /* Obj emergency current */
        uint64_t                skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;

#define kmem_cache_t            spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
extern void spl_kmem_reap(void);

int spl_kmem_init_kallsyms_lookup(void);
int spl_kmem_init(void);
void spl_kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
    spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_set_move(skc, move)  spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc)         spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)    spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)       spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)        \
    spl_kmem_cache_reap_now(skc, skc->skc_reap)
#define kmem_reap()                     spl_kmem_reap()
#define kmem_virt(ptr)                  (((ptr) >= (void *)VMALLOC_START) && \
                                         ((ptr) <  (void *)VMALLOC_END))
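
/*
 * Illustrative cache lifecycle (foo_* names are hypothetical): create a
 * cache of fixed-size objects, allocate and free through it, then
 * destroy it.  Passing KMC_VMEM instead of 0 would back the slabs with
 * virtual memory, which suits the large identical objects described in
 * the slab comment above.
 *
 *      skc = kmem_cache_create("foo_cache", sizeof (foo_t), 0,
 *          foo_ctor, foo_dtor, NULL, NULL, NULL, 0);
 *      foo = kmem_cache_alloc(skc, KM_SLEEP);
 *      ...
 *      kmem_cache_free(skc, foo);
 *      kmem_cache_destroy(skc);
 */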

#endif /* _SPL_KMEM_H */