/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/
#ifndef _SPL_KMEM_H
#define _SPL_KMEM_H

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ctype.h>
#include <asm/atomic.h>
#include <sys/types.h>
#include <sys/vmsystm.h>
#include <sys/kstat.h>
#include <sys/taskq.h>
/*
 * Memory allocation interfaces
 */
#define KM_SLEEP	GFP_KERNEL	/* Can sleep, never fails */
#define KM_NOSLEEP	GFP_ATOMIC	/* Can not sleep, may fail */
#define KM_PUSHPAGE	(GFP_NOIO | __GFP_HIGH)	/* Use reserved memory */
#define KM_NODEBUG	__GFP_NOWARN	/* Suppress warnings */
#define KM_FLAGS	__GFP_BITS_MASK
#define KM_VMFLAGS	GFP_LEVEL_MASK
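
/*
 * Illustrative sketch (hypothetical caller, not part of this interface):
 * the KM_* flags map directly onto Linux GFP flags, so Solaris-style
 * allocation code can be written as shown below.  kmem_alloc() and
 * kmem_free() are defined later in this header; spl_example_alloc() is
 * an invented name.
 */
#if 0
static int
spl_example_alloc(void)
{
	void *buf;

	/* KM_SLEEP == GFP_KERNEL: may block, never returns NULL */
	buf = kmem_alloc(128, KM_SLEEP);
	/* ... use buf ... */
	kmem_free(buf, 128);

	/* KM_NOSLEEP == GFP_ATOMIC: never blocks, may return NULL */
	buf = kmem_alloc(128, KM_NOSLEEP);
	if (buf == NULL)
		return (-ENOMEM);
	kmem_free(buf, 128);

	return (0);
}
#endif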
/*
 * Used internally, the kernel does not need to support this flag
 */
#ifndef __GFP_ZERO
# define __GFP_ZERO			0x8000
#endif
/*
 * PF_NOFS is a per-process debug flag which is set in current->flags to
 * detect when a process is performing an unsafe allocation.  All tasks
 * with PF_NOFS set must strictly use KM_PUSHPAGE for allocations because
 * if they enter direct reclaim and initiate I/O they may deadlock.
 *
 * When debugging is disabled, any incorrect usage will be detected and
 * a call stack with a warning will be printed to the console.  The flags
 * will then be automatically corrected to allow for safe execution.  If
 * debugging is enabled this will be treated as a fatal condition.
 *
 * To avoid any risk of conflicting with the existing PF_ flags, the
 * PF_NOFS bit shadows the rarely used PF_MUTEX_TESTER bit.  Only when
 * CONFIG_RT_MUTEX_TESTER is not set, and we know this bit is unused,
 * will the PF_NOFS bit be valid.  Happily, most existing distributions
 * ship a kernel with CONFIG_RT_MUTEX_TESTER disabled.
 */
#if !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER)
# define PF_NOFS			PF_MUTEX_TESTER

static inline void
sanitize_flags(struct task_struct *p, gfp_t *flags)
{
	if (unlikely((p->flags & PF_NOFS) && (*flags & (__GFP_IO|__GFP_FS)))) {
#ifdef NDEBUG
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "Fixing allocation for "
		   "task %s (%d) which used GFP flags 0x%x with PF_NOFS set\n",
		    p->comm, p->pid, *flags);
		spl_debug_dumpstack(p);
		*flags &= ~(__GFP_IO|__GFP_FS);
#else
		PANIC("FATAL allocation for task %s (%d) which used GFP "
		    "flags 0x%x with PF_NOFS set\n", p->comm, p->pid, *flags);
#endif /* NDEBUG */
	}
}
#else
# define PF_NOFS			0x00000000
# define sanitize_flags(p, fl)		((void)0)
#endif /* !defined(CONFIG_RT_MUTEX_TESTER) && defined(PF_MUTEX_TESTER) */
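
/*
 * Illustrative sketch (hypothetical, not part of this interface): a task
 * which must not recurse into the filesystem or block layer during
 * reclaim sets PF_NOFS and then restricts itself to KM_PUSHPAGE
 * allocations.  spl_example_nofs_section() is an invented name.
 */
#if 0
static void
spl_example_nofs_section(void)
{
	void *buf;

	current->flags |= PF_NOFS;

	/* Safe: KM_PUSHPAGE never sets __GFP_IO or __GFP_FS */
	buf = kmem_alloc(512, KM_PUSHPAGE);
	if (buf != NULL)
		kmem_free(buf, 512);

	/* A KM_SLEEP allocation here would trip sanitize_flags() */

	current->flags &= ~PF_NOFS;
}
#endif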
/*
 * __GFP_NOFAIL looks like it will be removed from the kernel perhaps as
 * early as 2.6.32.  To avoid this issue when it occurs in upstream kernels
 * we retry the allocation here as long as it is not __GFP_WAIT (GFP_ATOMIC).
 * I would prefer the caller handle the failure case cleanly but we are
 * trying to emulate Solaris and those are not the Solaris semantics.
 */
static inline void *
kmalloc_nofail(size_t size, gfp_t flags)
{
	void *ptr;

	sanitize_flags(current, &flags);

	do {
		ptr = kmalloc(size, flags);
	} while (ptr == NULL && (flags & __GFP_WAIT));

	return ptr;
}

static inline void *
kzalloc_nofail(size_t size, gfp_t flags)
{
	void *ptr;

	sanitize_flags(current, &flags);

	do {
		ptr = kzalloc(size, flags);
	} while (ptr == NULL && (flags & __GFP_WAIT));

	return ptr;
}

static inline void *
kmalloc_node_nofail(size_t size, gfp_t flags, int node)
{
#ifdef HAVE_KMALLOC_NODE
	void *ptr;

	sanitize_flags(current, &flags);

	do {
		ptr = kmalloc_node(size, flags, node);
	} while (ptr == NULL && (flags & __GFP_WAIT));

	return ptr;
#else
	return kmalloc_nofail(size, flags);
#endif /* HAVE_KMALLOC_NODE */
}

static inline void *
vmalloc_nofail(size_t size, gfp_t flags)
{
	void *ptr;

	sanitize_flags(current, &flags);

	/*
	 * Retry failed __vmalloc() allocations once every second.  The
	 * rationale for the delay is that the likely failure modes are:
	 *
	 * 1) The system has completely exhausted memory, in which case
	 *    delaying 1 second for the memory reclaim to run is reasonable
	 *    to avoid thrashing the system.
	 * 2) The system has memory but has exhausted the small virtual
	 *    address space available on 32-bit systems.  Retrying the
	 *    allocation immediately will only result in spinning on the
	 *    virtual address space lock.  It is better to delay a second
	 *    and hope that another process will free some of the address
	 *    space.  But the bottom line is there is not much we can
	 *    actually do since we can never safely return a failure and
	 *    honor the Solaris semantics.
	 */
	do {
		ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
		if (unlikely((ptr == NULL) && (flags & __GFP_WAIT))) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
		}
	} while (ptr == NULL);

	return ptr;
}

static inline void *
vzalloc_nofail(size_t size, gfp_t flags)
{
	void *ptr;

	ptr = vmalloc_nofail(size, flags);
	if (ptr)
		memset(ptr, 0, (size));

	return ptr;
}

#ifdef DEBUG_KMEM

/*
 * Memory accounting functions to be used only when DEBUG_KMEM is set.
 */
# ifdef HAVE_ATOMIC64_T

# define kmem_alloc_used_add(size)	atomic64_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)	atomic64_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()		atomic64_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)	atomic64_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)	atomic64_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)	atomic64_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()		atomic64_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)	atomic64_set(&vmem_alloc_used, size)

extern atomic64_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic64_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# else /* HAVE_ATOMIC64_T */

# define kmem_alloc_used_add(size)	atomic_add(size, &kmem_alloc_used)
# define kmem_alloc_used_sub(size)	atomic_sub(size, &kmem_alloc_used)
# define kmem_alloc_used_read()		atomic_read(&kmem_alloc_used)
# define kmem_alloc_used_set(size)	atomic_set(&kmem_alloc_used, size)
# define vmem_alloc_used_add(size)	atomic_add(size, &vmem_alloc_used)
# define vmem_alloc_used_sub(size)	atomic_sub(size, &vmem_alloc_used)
# define vmem_alloc_used_read()		atomic_read(&vmem_alloc_used)
# define vmem_alloc_used_set(size)	atomic_set(&vmem_alloc_used, size)

extern atomic_t kmem_alloc_used;
extern unsigned long long kmem_alloc_max;
extern atomic_t vmem_alloc_used;
extern unsigned long long vmem_alloc_max;

# endif /* HAVE_ATOMIC64_T */

# ifdef DEBUG_KMEM_TRACKING
/*
 * DEBUG_KMEM && DEBUG_KMEM_TRACKING
 *
 * The maximum level of memory debugging.  All memory will be accounted
 * for and each allocation will be explicitly tracked.  Any allocation
 * which is leaked will be reported on module unload and the exact location
 * where that memory was allocated will be reported.  This level of memory
 * tracking will have a significant impact on performance and should only
 * be enabled for debugging.  This feature may be enabled by passing
 * --enable-debug-kmem-tracking to configure.
 */
# define kmem_alloc(sz, fl)		kmem_alloc_track((sz), (fl), \
					    __FUNCTION__, __LINE__, 0, 0)
# define kmem_zalloc(sz, fl)		kmem_alloc_track((sz), (fl)|__GFP_ZERO,\
					    __FUNCTION__, __LINE__, 0, 0)
# define kmem_alloc_node(sz, fl, nd)	kmem_alloc_track((sz), (fl), \
					    __FUNCTION__, __LINE__, 1, nd)
# define kmem_free(ptr, sz)		kmem_free_track((ptr), (sz))

# define vmem_alloc(sz, fl)		vmem_alloc_track((sz), (fl), \
					    __FUNCTION__, __LINE__)
# define vmem_zalloc(sz, fl)		vmem_alloc_track((sz), (fl)|__GFP_ZERO,\
					    __FUNCTION__, __LINE__)
# define vmem_free(ptr, sz)		vmem_free_track((ptr), (sz))

extern void *kmem_alloc_track(size_t, int, const char *, int, int, int);
extern void kmem_free_track(const void *, size_t);
extern void *vmem_alloc_track(size_t, int, const char *, int);
extern void vmem_free_track(const void *, size_t);

# else /* DEBUG_KMEM_TRACKING */
/*
 * DEBUG_KMEM && !DEBUG_KMEM_TRACKING
 *
 * The default build will set DEBUG_KMEM.  This provides basic memory
 * accounting with little to no impact on performance.  When the module
 * is unloaded, if any memory was leaked the total number of leaked bytes
 * will be reported on the console.  To disable this basic accounting
 * pass the --disable-debug-kmem option to configure.
 */
# define kmem_alloc(sz, fl)		kmem_alloc_debug((sz), (fl), \
					    __FUNCTION__, __LINE__, 0, 0)
# define kmem_zalloc(sz, fl)		kmem_alloc_debug((sz), (fl)|__GFP_ZERO,\
					    __FUNCTION__, __LINE__, 0, 0)
# define kmem_alloc_node(sz, fl, nd)	kmem_alloc_debug((sz), (fl), \
					    __FUNCTION__, __LINE__, 1, nd)
# define kmem_free(ptr, sz)		kmem_free_debug((ptr), (sz))

# define vmem_alloc(sz, fl)		vmem_alloc_debug((sz), (fl), \
					    __FUNCTION__, __LINE__)
# define vmem_zalloc(sz, fl)		vmem_alloc_debug((sz), (fl)|__GFP_ZERO,\
					    __FUNCTION__, __LINE__)
# define vmem_free(ptr, sz)		vmem_free_debug((ptr), (sz))

extern void *kmem_alloc_debug(size_t, int, const char *, int, int, int);
extern void kmem_free_debug(const void *, size_t);
extern void *vmem_alloc_debug(size_t, int, const char *, int);
extern void vmem_free_debug(const void *, size_t);

# endif /* DEBUG_KMEM_TRACKING */
#else /* DEBUG_KMEM */
/*
 * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING
 *
 * All debugging is disabled.  There will be no overhead even for
 * minimal memory accounting.  To enable basic accounting pass the
 * --enable-debug-kmem option to configure.
 */
# define kmem_alloc(sz, fl)		kmalloc_nofail((sz), (fl))
# define kmem_zalloc(sz, fl)		kzalloc_nofail((sz), (fl))
# define kmem_alloc_node(sz, fl, nd)	kmalloc_node_nofail((sz), (fl), (nd))
# define kmem_free(ptr, sz)		((void)(sz), kfree(ptr))

# define vmem_alloc(sz, fl)		vmalloc_nofail((sz), (fl))
# define vmem_zalloc(sz, fl)		vzalloc_nofail((sz), (fl))
# define vmem_free(ptr, sz)		((void)(sz), vfree(ptr))

#endif /* DEBUG_KMEM */
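
/*
 * Illustrative sketch (hypothetical, not part of this interface): unlike
 * the native Linux kfree()/vfree(), the Solaris-style kmem/vmem routines
 * require the caller to remember the allocation size and pass it back to
 * the matching free routine.  spl_example_kmem_usage() is an invented
 * name.
 */
#if 0
static void
spl_example_kmem_usage(void)
{
	size_t size = 1024;
	void *kbuf, *vbuf;

	kbuf = kmem_zalloc(size, KM_SLEEP);	/* physical memory, zeroed */
	vbuf = vmem_alloc(size, KM_SLEEP);	/* virtual address space */

	/* ... use the buffers ... */

	kmem_free(kbuf, size);			/* size must match alloc */
	vmem_free(vbuf, size);
}
#endif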

extern int kmem_debugging(void);
extern char *kmem_vasprintf(const char *fmt, va_list ap);
extern char *kmem_asprintf(const char *fmt, ...);
extern char *strdup(const char *str);
extern void strfree(char *str);

/*
 * Slab allocation interfaces.  The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space.  The virtual
 * slabs allow for good behavior when allocating large objects of identical
 * size.  This slab implementation also supports both constructors and
 * destructors which the Linux slab does not.
 */
enum {
	KMC_BIT_NOTOUCH		= 0,	/* Don't update ages */
	KMC_BIT_NODEBUG		= 1,	/* Default behavior */
	KMC_BIT_NOMAGAZINE	= 2,	/* XXX: Unsupported */
	KMC_BIT_NOHASH		= 3,	/* XXX: Unsupported */
	KMC_BIT_QCACHE		= 4,	/* XXX: Unsupported */
	KMC_BIT_KMEM		= 5,	/* Use kmem cache */
	KMC_BIT_VMEM		= 6,	/* Use vmem cache */
	KMC_BIT_OFFSLAB		= 7,	/* Objects not on slab */
	KMC_BIT_NOEMERGENCY	= 8,	/* Disable emergency objects */
	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
	KMC_BIT_GROWING		= 15,	/* Growing in progress */
	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
	KMC_BIT_DESTROY		= 17,	/* Destroy in progress */
	KMC_BIT_TOTAL		= 18,	/* Proc handler helper bit */
	KMC_BIT_ALLOC		= 19,	/* Proc handler helper bit */
	KMC_BIT_MAX		= 20,	/* Proc handler helper bit */
};

/* kmem move callback return values */
typedef enum kmem_cbrc {
	KMEM_CBRC_YES		= 0,	/* Object moved */
	KMEM_CBRC_NO		= 1,	/* Object not moved */
	KMEM_CBRC_LATER		= 2,	/* Object not moved, try again later */
	KMEM_CBRC_DONT_NEED	= 3,	/* Neither object is needed */
	KMEM_CBRC_DONT_KNOW	= 4,	/* Object unknown */
} kmem_cbrc_t;
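
/*
 * Illustrative sketch (hypothetical, not part of this interface): a move
 * callback, registered with spl_kmem_cache_set_move() declared below,
 * is asked to relocate an object from the old buffer to the new one and
 * reports the outcome with a kmem_cbrc_t value.  example_move() and
 * object_is_busy() are invented names.
 */
#if 0
static kmem_cbrc_t
example_move(void *old, void *new, size_t size, void *priv)
{
	/* An object currently in use cannot be moved right now */
	if (object_is_busy(old))	/* hypothetical helper */
		return (KMEM_CBRC_LATER);

	memcpy(new, old, size);
	return (KMEM_CBRC_YES);
}
#endif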

#define KMC_NOTOUCH		(1 << KMC_BIT_NOTOUCH)
#define KMC_NODEBUG		(1 << KMC_BIT_NODEBUG)
#define KMC_NOMAGAZINE		(1 << KMC_BIT_NOMAGAZINE)
#define KMC_NOHASH		(1 << KMC_BIT_NOHASH)
#define KMC_QCACHE		(1 << KMC_BIT_QCACHE)
#define KMC_KMEM		(1 << KMC_BIT_KMEM)
#define KMC_VMEM		(1 << KMC_BIT_VMEM)
#define KMC_OFFSLAB		(1 << KMC_BIT_OFFSLAB)
#define KMC_NOEMERGENCY		(1 << KMC_BIT_NOEMERGENCY)
#define KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
#define KMC_GROWING		(1 << KMC_BIT_GROWING)
#define KMC_REAPING		(1 << KMC_BIT_REAPING)
#define KMC_DESTROY		(1 << KMC_BIT_DESTROY)
#define KMC_TOTAL		(1 << KMC_BIT_TOTAL)
#define KMC_ALLOC		(1 << KMC_BIT_ALLOC)
#define KMC_MAX			(1 << KMC_BIT_MAX)

#define KMC_REAP_CHUNK		INT_MAX
#define KMC_DEFAULT_SEEKS	1

#define KMC_EXPIRE_AGE		0x1	/* Due to age */
#define KMC_EXPIRE_MEM		0x2	/* Due to low memory */

extern unsigned int spl_kmem_cache_expire;
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define SKM_MAGIC			0x2e2e2e2e
#define SKO_MAGIC			0x20202020
#define SKS_MAGIC			0x22222222
#define SKC_MAGIC			0x2c2c2c2c

#define SPL_KMEM_CACHE_DELAY		15	/* Minimum slab release age */
#define SPL_KMEM_CACHE_REAP		0	/* Default reap everything */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB	16	/* Target objects per slab */
#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN	8	/* Minimum objects per slab */
#define SPL_KMEM_CACHE_ALIGN		8	/* Default object alignment */

#define POINTER_IS_VALID(p)		0	/* Unimplemented */
#define POINTER_INVALIDATE(pp)			/* Unimplemented */

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	struct spl_kmem_cache	*skm_cache;	/* Owned by cache */
	unsigned long		skm_age;	/* Last cache access */
	unsigned int		skm_cpu;	/* Owned by cpu */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;

typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
	struct spl_kmem_cache	*ska_cache;	/* Owned by cache */
	int			ska_flags;	/* Allocation flags */
	taskq_ent_t		ska_tqe;	/* Task queue entry */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
	struct rb_node		ske_node;	/* Emergency tree linkage */
	void			*ske_obj;	/* Buffer address */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	*skc_mag[NR_CPUS]; /* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	spl_kmem_reclaim_t	skc_reclaim;	/* Reclaimator */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	unsigned long		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_obj_align;	/* Object alignment */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	uint32_t		skc_delay;	/* Slab reclaim interval */
	uint32_t		skc_reap;	/* Slab reclaim count */
	atomic_t		skc_ref;	/* Ref count callers */
	taskqid_t		skc_taskqid;	/* Slab reclaim task */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list; /* Completely alloc'ed */
	struct list_head	skc_partial_list;  /* Partially alloc'ed */
	struct rb_root		skc_emergency_tree; /* Min sized objects */
	spinlock_t		skc_lock;	/* Cache lock */
	wait_queue_head_t	skc_waitq;	/* Allocation waiters */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create; /* Slab creates */
	uint64_t		skc_slab_destroy; /* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_obj_deadlock;  /* Obj emergency deadlocks */
	uint64_t		skc_obj_emergency; /* Obj emergency current */
	uint64_t		skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;
#define kmem_cache_t		spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
extern void spl_kmem_reap(void);

int spl_kmem_init_kallsyms_lookup(void);
int spl_kmem_init(void);
void spl_kmem_fini(void);

#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
    spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
#define kmem_cache_set_move(skc, move)	spl_kmem_cache_set_move(skc, move)
#define kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define kmem_cache_reap_now(skc)	\
    spl_kmem_cache_reap_now(skc, skc->skc_reap)
#define kmem_reap()			spl_kmem_reap()
#define kmem_virt(ptr)			(((ptr) >= (void *)VMALLOC_START) && \
					 ((ptr) < (void *)VMALLOC_END))
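
/*
 * Illustrative sketch (hypothetical, not part of this interface):
 * creating a cache with a constructor/destructor pair and allocating
 * from it.  The example_obj type and all example_* names are invented
 * here for illustration only.
 */
#if 0
typedef struct example_obj {
	int eo_state;
} example_obj_t;

static int
example_ctor(void *obj, void *priv, int flags)
{
	((example_obj_t *)obj)->eo_state = 0;
	return (0);
}

static void
example_dtor(void *obj, void *priv)
{
	/* Release any resources acquired in the constructor */
}

static void
example_cache_usage(void)
{
	kmem_cache_t *cache;
	example_obj_t *obj;

	cache = kmem_cache_create("example_cache", sizeof (example_obj_t),
	    SPL_KMEM_CACHE_ALIGN, example_ctor, example_dtor, NULL, NULL,
	    NULL, KMC_KMEM);	/* KMC_KMEM: back with physical memory */

	obj = kmem_cache_alloc(cache, KM_SLEEP);
	/* ... use obj ... */
	kmem_cache_free(cache, obj);

	kmem_cache_destroy(cache);
}
#endif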

#endif /* _SPL_KMEM_H */