From b34b95635a99223b6bff5437fb389e9340dc7dcd Mon Sep 17 00:00:00 2001 From: Brian Behlendorf Date: Mon, 8 Dec 2014 13:35:51 -0500 Subject: [PATCH] Fix kmem cstyle issues Address all cstyle issues in the kmem, vmem, and kmem_cache source and headers. This will done to make it easier to review subsequent changes which will rework the kmem/vmem implementation. Signed-off-by: Brian Behlendorf --- include/sys/kmem.h | 98 ++++++++++----------- include/sys/kmem_cache.h | 121 +++++++++++++------------- include/sys/vmem.h | 68 +++++++-------- module/spl/spl-kmem-cache.c | 168 ++++++++++++++++++++---------------- module/spl/spl-kmem.c | 104 +++++++++++----------- module/spl/spl-vmem.c | 89 ++++++++++--------- 6 files changed, 335 insertions(+), 313 deletions(-) diff --git a/include/sys/kmem.h b/include/sys/kmem.h index ee25e4c..a9d94c9 100644 --- a/include/sys/kmem.h +++ b/include/sys/kmem.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . -\*****************************************************************************/ + */ #ifndef _SPL_KMEM_H #define _SPL_KMEM_H @@ -36,18 +36,18 @@ extern void strfree(char *str); /* * Memory allocation interfaces */ -#define KM_SLEEP GFP_KERNEL /* Can sleep, never fails */ -#define KM_NOSLEEP GFP_ATOMIC /* Can not sleep, may fail */ -#define KM_PUSHPAGE (GFP_NOIO | __GFP_HIGH) /* Use reserved memory */ -#define KM_NODEBUG __GFP_NOWARN /* Suppress warnings */ -#define KM_FLAGS __GFP_BITS_MASK -#define KM_VMFLAGS GFP_LEVEL_MASK +#define KM_SLEEP GFP_KERNEL /* Can sleep, never fails */ +#define KM_NOSLEEP GFP_ATOMIC /* Can not sleep, may fail */ +#define KM_PUSHPAGE (GFP_NOIO | __GFP_HIGH) /* Use reserved memory */ +#define KM_NODEBUG __GFP_NOWARN /* Suppress warnings */ +#define KM_FLAGS __GFP_BITS_MASK +#define KM_VMFLAGS GFP_LEVEL_MASK /* * Used internally, the kernel does not need to support this flag */ #ifndef __GFP_ZERO -# define __GFP_ZERO 0x8000 +#define __GFP_ZERO 0x8000 #endif /* @@ -66,7 +66,7 @@ kmalloc_nofail(size_t size, gfp_t flags) ptr = kmalloc(size, flags); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } static inline void * @@ -78,7 +78,7 @@ kzalloc_nofail(size_t size, gfp_t flags) ptr = kzalloc(size, flags); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } static inline void * @@ -90,7 +90,7 @@ kmalloc_node_nofail(size_t size, gfp_t flags, int node) ptr = kmalloc_node(size, flags, node); } while (ptr == NULL && (flags & __GFP_WAIT)); - return ptr; + return (ptr); } #ifdef DEBUG_KMEM @@ -98,29 +98,23 @@ kmalloc_node_nofail(size_t size, gfp_t flags, int node) /* * Memory accounting functions to be used only when DEBUG_KMEM is set. 
*/ -# ifdef HAVE_ATOMIC64_T - -# define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used) -# define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used) -# define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used) -# define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size) - +#ifdef HAVE_ATOMIC64_T +#define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used) +#define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used) +#define kmem_alloc_used_read() atomic64_read(&kmem_alloc_used) +#define kmem_alloc_used_set(size) atomic64_set(&kmem_alloc_used, size) extern atomic64_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; - -# else /* HAVE_ATOMIC64_T */ - -# define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used) -# define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used) -# define kmem_alloc_used_read() atomic_read(&kmem_alloc_used) -# define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size) - +#else /* HAVE_ATOMIC64_T */ +#define kmem_alloc_used_add(size) atomic_add(size, &kmem_alloc_used) +#define kmem_alloc_used_sub(size) atomic_sub(size, &kmem_alloc_used) +#define kmem_alloc_used_read() atomic_read(&kmem_alloc_used) +#define kmem_alloc_used_set(size) atomic_set(&kmem_alloc_used, size) extern atomic_t kmem_alloc_used; extern unsigned long long kmem_alloc_max; +#endif /* HAVE_ATOMIC64_T */ -# endif /* HAVE_ATOMIC64_T */ - -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING /* * DEBUG_KMEM && DEBUG_KMEM_TRACKING * @@ -132,18 +126,18 @@ extern unsigned long long kmem_alloc_max; * be enabled for debugging. This feature may be enabled by passing * --enable-debug-kmem-tracking to configure. */ -# define kmem_alloc(sz, fl) kmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_zalloc(sz, fl) kmem_alloc_track((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_alloc_node(sz, fl, nd) kmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__, 1, nd) -# define kmem_free(ptr, sz) kmem_free_track((ptr), (sz)) +#define kmem_alloc(sz, fl) kmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_zalloc(sz, fl) kmem_alloc_track((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_alloc_node(sz, fl, nd) kmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__, 1, nd) +#define kmem_free(ptr, sz) kmem_free_track((ptr), (sz)) extern void *kmem_alloc_track(size_t, int, const char *, int, int, int); extern void kmem_free_track(const void *, size_t); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ /* * DEBUG_KMEM && !DEBUG_KMEM_TRACKING * @@ -153,18 +147,18 @@ extern void kmem_free_track(const void *, size_t); * will be reported on the console. To disable this basic accounting * pass the --disable-debug-kmem option to configure. 
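Whichever accounting variant is compiled in, callers use kmem_alloc()/kmem_zalloc()/kmem_free() the same way, passing one of the KM_* flags from the table above. A minimal hedged sketch of a typical consumer; my_node_t and its helpers are hypothetical names used only to show the calling convention:

#include <sys/types.h>
#include <sys/kmem.h>

typedef struct my_node {                /* hypothetical consumer type */
        uint64_t mn_id;
        uint64_t mn_value;
} my_node_t;

static my_node_t *
my_node_create(uint64_t id)
{
        my_node_t *mn;

        /* KM_SLEEP may block but, per the flag table above, never fails. */
        mn = kmem_zalloc(sizeof (my_node_t), KM_SLEEP);
        mn->mn_id = id;

        return (mn);
}

static void
my_node_destroy(my_node_t *mn)
{
        /* The original allocation size must be passed back when freeing. */
        kmem_free(mn, sizeof (my_node_t));
}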
*/ -# define kmem_alloc(sz, fl) kmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_zalloc(sz, fl) kmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__, 0, 0) -# define kmem_alloc_node(sz, fl, nd) kmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__, 1, nd) -# define kmem_free(ptr, sz) kmem_free_debug((ptr), (sz)) +#define kmem_alloc(sz, fl) kmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_zalloc(sz, fl) kmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__, 0, 0) +#define kmem_alloc_node(sz, fl, nd) kmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__, 1, nd) +#define kmem_free(ptr, sz) kmem_free_debug((ptr), (sz)) extern void *kmem_alloc_debug(size_t, int, const char *, int, int, int); extern void kmem_free_debug(const void *, size_t); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #else /* DEBUG_KMEM */ /* * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING @@ -173,17 +167,17 @@ extern void kmem_free_debug(const void *, size_t); * minimal memory accounting. To enable basic accounting pass the * --enable-debug-kmem option to configure. */ -# define kmem_alloc(sz, fl) kmalloc_nofail((sz), (fl)) -# define kmem_zalloc(sz, fl) kzalloc_nofail((sz), (fl)) -# define kmem_alloc_node(sz, fl, nd) kmalloc_node_nofail((sz), (fl), (nd)) -# define kmem_free(ptr, sz) ((void)(sz), kfree(ptr)) +#define kmem_alloc(sz, fl) kmalloc_nofail((sz), (fl)) +#define kmem_zalloc(sz, fl) kzalloc_nofail((sz), (fl)) +#define kmem_alloc_node(sz, fl, nd) kmalloc_node_nofail((sz), (fl), (nd)) +#define kmem_free(ptr, sz) ((void)(sz), kfree(ptr)) #endif /* DEBUG_KMEM */ int spl_kmem_init(void); void spl_kmem_fini(void); -#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ - ((ptr) < (void *)VMALLOC_END)) +#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ + ((ptr) < (void *)VMALLOC_END)) #endif /* _SPL_KMEM_H */ diff --git a/include/sys/kmem_cache.h b/include/sys/kmem_cache.h index 654a2ea..a5bc032 100644 --- a/include/sys/kmem_cache.h +++ b/include/sys/kmem_cache.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . -\*****************************************************************************/ + */ #ifndef _SPL_KMEM_CACHE_H #define _SPL_KMEM_CACHE_H @@ -33,7 +33,7 @@ * allocated from the physical or virtal memory address space. The virtual * slabs allow for good behavior when allocation large objects of identical * size. This slab implementation also supports both constructors and - * destructions which the Linux slab does not. + * destructors which the Linux slab does not. 
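The kmem_virt() macro above only tests whether a pointer lies in the vmalloc address range. A hedged sketch of the dual-path free that test enables, assuming the hypothetical my_buf_free() is handed a buffer that genuinely came from either kmem_alloc() or vmem_alloc():

#include <sys/kmem.h>
#include <sys/vmem.h>

/*
 * Hypothetical helper: release a buffer whose backing allocator is not
 * recorded by the caller, using the address range to pick the free path.
 */
static void
my_buf_free(void *buf, size_t size)
{
        if (kmem_virt(buf))
                vmem_free(buf, size);   /* vmalloc-backed allocation */
        else
                kmem_free(buf, size);   /* kmalloc-backed allocation */
}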
*/ enum { KMC_BIT_NOTOUCH = 0, /* Don't update ages */ @@ -46,8 +46,8 @@ enum { KMC_BIT_SLAB = 7, /* Use Linux slab cache */ KMC_BIT_OFFSLAB = 8, /* Objects not on slab */ KMC_BIT_NOEMERGENCY = 9, /* Disable emergency objects */ - KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */ - KMC_BIT_GROWING = 15, /* Growing in progress */ + KMC_BIT_DEADLOCKED = 14, /* Deadlock detected */ + KMC_BIT_GROWING = 15, /* Growing in progress */ KMC_BIT_REAPING = 16, /* Reaping in progress */ KMC_BIT_DESTROY = 17, /* Destroy in progress */ KMC_BIT_TOTAL = 18, /* Proc handler helper bit */ @@ -64,29 +64,29 @@ typedef enum kmem_cbrc { KMEM_CBRC_DONT_KNOW = 4, /* Object unknown */ } kmem_cbrc_t; -#define KMC_NOTOUCH (1 << KMC_BIT_NOTOUCH) -#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG) -#define KMC_NOMAGAZINE (1 << KMC_BIT_NOMAGAZINE) -#define KMC_NOHASH (1 << KMC_BIT_NOHASH) -#define KMC_QCACHE (1 << KMC_BIT_QCACHE) -#define KMC_KMEM (1 << KMC_BIT_KMEM) -#define KMC_VMEM (1 << KMC_BIT_VMEM) -#define KMC_SLAB (1 << KMC_BIT_SLAB) -#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB) -#define KMC_NOEMERGENCY (1 << KMC_BIT_NOEMERGENCY) -#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED) -#define KMC_GROWING (1 << KMC_BIT_GROWING) -#define KMC_REAPING (1 << KMC_BIT_REAPING) -#define KMC_DESTROY (1 << KMC_BIT_DESTROY) -#define KMC_TOTAL (1 << KMC_BIT_TOTAL) -#define KMC_ALLOC (1 << KMC_BIT_ALLOC) -#define KMC_MAX (1 << KMC_BIT_MAX) - -#define KMC_REAP_CHUNK INT_MAX -#define KMC_DEFAULT_SEEKS 1 - -#define KMC_EXPIRE_AGE 0x1 /* Due to age */ -#define KMC_EXPIRE_MEM 0x2 /* Due to low memory */ +#define KMC_NOTOUCH (1 << KMC_BIT_NOTOUCH) +#define KMC_NODEBUG (1 << KMC_BIT_NODEBUG) +#define KMC_NOMAGAZINE (1 << KMC_BIT_NOMAGAZINE) +#define KMC_NOHASH (1 << KMC_BIT_NOHASH) +#define KMC_QCACHE (1 << KMC_BIT_QCACHE) +#define KMC_KMEM (1 << KMC_BIT_KMEM) +#define KMC_VMEM (1 << KMC_BIT_VMEM) +#define KMC_SLAB (1 << KMC_BIT_SLAB) +#define KMC_OFFSLAB (1 << KMC_BIT_OFFSLAB) +#define KMC_NOEMERGENCY (1 << KMC_BIT_NOEMERGENCY) +#define KMC_DEADLOCKED (1 << KMC_BIT_DEADLOCKED) +#define KMC_GROWING (1 << KMC_BIT_GROWING) +#define KMC_REAPING (1 << KMC_BIT_REAPING) +#define KMC_DESTROY (1 << KMC_BIT_DESTROY) +#define KMC_TOTAL (1 << KMC_BIT_TOTAL) +#define KMC_ALLOC (1 << KMC_BIT_ALLOC) +#define KMC_MAX (1 << KMC_BIT_MAX) + +#define KMC_REAP_CHUNK INT_MAX +#define KMC_DEFAULT_SEEKS 1 + +#define KMC_EXPIRE_AGE 0x1 /* Due to age */ +#define KMC_EXPIRE_MEM 0x2 /* Due to low memory */ #define KMC_RECLAIM_ONCE 0x1 /* Force a single shrinker pass */ @@ -94,19 +94,19 @@ extern unsigned int spl_kmem_cache_expire; extern struct list_head spl_kmem_cache_list; extern struct rw_semaphore spl_kmem_cache_sem; -#define SKM_MAGIC 0x2e2e2e2e -#define SKO_MAGIC 0x20202020 -#define SKS_MAGIC 0x22222222 -#define SKC_MAGIC 0x2c2c2c2c +#define SKM_MAGIC 0x2e2e2e2e +#define SKO_MAGIC 0x20202020 +#define SKS_MAGIC 0x22222222 +#define SKC_MAGIC 0x2c2c2c2c -#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */ -#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */ -#define SPL_KMEM_CACHE_OBJ_PER_SLAB 16 /* Target objects per slab */ -#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */ -#define SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */ +#define SPL_KMEM_CACHE_DELAY 15 /* Minimum slab release age */ +#define SPL_KMEM_CACHE_REAP 0 /* Default reap everything */ +#define SPL_KMEM_CACHE_OBJ_PER_SLAB 16 /* Target objects per slab */ +#define SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN 1 /* Minimum objects per slab */ +#define 
SPL_KMEM_CACHE_ALIGN 8 /* Default object alignment */ -#define POINTER_IS_VALID(p) 0 /* Unimplemented */ -#define POINTER_INVALIDATE(pp) /* Unimplemented */ +#define POINTER_IS_VALID(p) 0 /* Unimplemented */ +#define POINTER_INVALIDATE(pp) /* Unimplemented */ typedef int (*spl_kmem_ctor_t)(void *, void *, int); typedef void (*spl_kmem_dtor_t)(void *, void *); @@ -124,14 +124,14 @@ typedef struct spl_kmem_magazine { } spl_kmem_magazine_t; typedef struct spl_kmem_obj { - uint32_t sko_magic; /* Sanity magic */ + uint32_t sko_magic; /* Sanity magic */ void *sko_addr; /* Buffer address */ struct spl_kmem_slab *sko_slab; /* Owned by slab */ struct list_head sko_list; /* Free object list linkage */ } spl_kmem_obj_t; typedef struct spl_kmem_slab { - uint32_t sks_magic; /* Sanity magic */ + uint32_t sks_magic; /* Sanity magic */ uint32_t sks_objs; /* Objects per slab */ struct spl_kmem_cache *sks_cache; /* Owned by cache */ struct list_head sks_list; /* Slab list linkage */ @@ -174,14 +174,14 @@ typedef struct spl_kmem_cache { atomic_t skc_ref; /* Ref count callers */ taskqid_t skc_taskqid; /* Slab reclaim task */ struct list_head skc_list; /* List of caches linkage */ - struct list_head skc_complete_list;/* Completely alloc'ed */ - struct list_head skc_partial_list; /* Partially alloc'ed */ + struct list_head skc_complete_list; /* Completely alloc'ed */ + struct list_head skc_partial_list; /* Partially alloc'ed */ struct rb_root skc_emergency_tree; /* Min sized objects */ spinlock_t skc_lock; /* Cache lock */ wait_queue_head_t skc_waitq; /* Allocation waiters */ uint64_t skc_slab_fail; /* Slab alloc failures */ - uint64_t skc_slab_create;/* Slab creates */ - uint64_t skc_slab_destroy;/* Slab destroys */ + uint64_t skc_slab_create; /* Slab creates */ + uint64_t skc_slab_destroy; /* Slab destroys */ uint64_t skc_slab_total; /* Slab total current */ uint64_t skc_slab_alloc; /* Slab alloc current */ uint64_t skc_slab_max; /* Slab max historic */ @@ -192,30 +192,31 @@ typedef struct spl_kmem_cache { uint64_t skc_obj_emergency; /* Obj emergency current */ uint64_t skc_obj_emergency_max; /* Obj emergency max */ } spl_kmem_cache_t; -#define kmem_cache_t spl_kmem_cache_t +#define kmem_cache_t spl_kmem_cache_t extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size, - size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, - spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags); + size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, + spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags); extern void spl_kmem_cache_set_move(spl_kmem_cache_t *, - kmem_cbrc_t (*)(void *, void *, size_t, void *)); + kmem_cbrc_t (*)(void *, void *, size_t, void *)); extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc); extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags); extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj); extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count); extern void spl_kmem_reap(void); -#define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \ - spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) -#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move) -#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc) -#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags) -#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj) -#define kmem_cache_reap_now(skc) \ - spl_kmem_cache_reap_now(skc, skc->skc_reap) -#define kmem_reap() spl_kmem_reap() 
-#define kmem_virt(ptr) (((ptr) >= (void *)VMALLOC_START) && \ - ((ptr) < (void *)VMALLOC_END)) +#define kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \ + spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) +#define kmem_cache_set_move(skc, move) spl_kmem_cache_set_move(skc, move) +#define kmem_cache_destroy(skc) spl_kmem_cache_destroy(skc) +#define kmem_cache_alloc(skc, flags) spl_kmem_cache_alloc(skc, flags) +#define kmem_cache_free(skc, obj) spl_kmem_cache_free(skc, obj) +#define kmem_cache_reap_now(skc) \ + spl_kmem_cache_reap_now(skc, skc->skc_reap) +#define kmem_reap() spl_kmem_reap() +#define kmem_virt(ptr) \ + (((ptr) >= (void *)VMALLOC_START) && \ + ((ptr) < (void *)VMALLOC_END)) /* * Allow custom slab allocation flags to be set for KMC_SLAB based caches. diff --git a/include/sys/vmem.h b/include/sys/vmem.h index e86e89b..f59ac5e 100644 --- a/include/sys/vmem.h +++ b/include/sys/vmem.h @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,7 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . -\*****************************************************************************/ + */ #ifndef _SPL_VMEM_H #define _SPL_VMEM_H @@ -40,11 +40,11 @@ extern size_t vmem_size(vmem_t *vmp, int typemask); /* * Memory allocation interfaces */ -#define VMEM_ALLOC 0x01 -#define VMEM_FREE 0x02 +#define VMEM_ALLOC 0x01 +#define VMEM_FREE 0x02 #ifndef VMALLOC_TOTAL -#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) +#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) #endif static inline void * @@ -78,7 +78,7 @@ vmalloc_nofail(size_t size, gfp_t flags) } } - return ptr; + return (ptr); } static inline void * @@ -90,7 +90,7 @@ vzalloc_nofail(size_t size, gfp_t flags) if (ptr) memset(ptr, 0, (size)); - return ptr; + return (ptr); } #ifdef DEBUG_KMEM @@ -98,29 +98,29 @@ vzalloc_nofail(size_t size, gfp_t flags) /* * Memory accounting functions to be used only when DEBUG_KMEM is set. 
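The spl_kmem_cache_* functions and kmem_cache_* wrapper macros above form the consumer-facing slab API. A hedged sketch of a full cache lifecycle using the Solaris-style constructor/destructor support this implementation provides; my_obj_t, my_obj_cache, and the init/fini helpers are hypothetical:

#include <linux/errno.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/mutex.h>

typedef struct my_obj {                 /* hypothetical cached object */
        kmutex_t mo_lock;
        uint64_t mo_refs;
} my_obj_t;

static int
my_obj_ctor(void *buf, void *priv, int kmflags)
{
        my_obj_t *mo = buf;

        /* Expensive one-time setup performed when the slab is created. */
        mutex_init(&mo->mo_lock, NULL, MUTEX_DEFAULT, NULL);
        mo->mo_refs = 0;

        return (0);
}

static void
my_obj_dtor(void *buf, void *priv)
{
        my_obj_t *mo = buf;

        mutex_destroy(&mo->mo_lock);
}

static kmem_cache_t *my_obj_cache;

static int
my_obj_init(void)
{
        my_obj_cache = kmem_cache_create("my_obj_cache", sizeof (my_obj_t),
            0, my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0);

        return (my_obj_cache ? 0 : -ENOMEM);
}

static void
my_obj_fini(void)
{
        kmem_cache_destroy(my_obj_cache);
}

Individual objects are then obtained with kmem_cache_alloc(my_obj_cache, KM_SLEEP) and returned with kmem_cache_free(my_obj_cache, obj); a later sketch shows the failure-tolerant KM_NOSLEEP variant.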
*/ -# ifdef HAVE_ATOMIC64_T +#ifdef HAVE_ATOMIC64_T -# define vmem_alloc_used_add(size) atomic64_add(size, &vmem_alloc_used) -# define vmem_alloc_used_sub(size) atomic64_sub(size, &vmem_alloc_used) -# define vmem_alloc_used_read() atomic64_read(&vmem_alloc_used) -# define vmem_alloc_used_set(size) atomic64_set(&vmem_alloc_used, size) +#define vmem_alloc_used_add(size) atomic64_add(size, &vmem_alloc_used) +#define vmem_alloc_used_sub(size) atomic64_sub(size, &vmem_alloc_used) +#define vmem_alloc_used_read() atomic64_read(&vmem_alloc_used) +#define vmem_alloc_used_set(size) atomic64_set(&vmem_alloc_used, size) extern atomic64_t vmem_alloc_used; extern unsigned long long vmem_alloc_max; -# else /* HAVE_ATOMIC64_T */ +#else /* HAVE_ATOMIC64_T */ -# define vmem_alloc_used_add(size) atomic_add(size, &vmem_alloc_used) -# define vmem_alloc_used_sub(size) atomic_sub(size, &vmem_alloc_used) -# define vmem_alloc_used_read() atomic_read(&vmem_alloc_used) -# define vmem_alloc_used_set(size) atomic_set(&vmem_alloc_used, size) +#define vmem_alloc_used_add(size) atomic_add(size, &vmem_alloc_used) +#define vmem_alloc_used_sub(size) atomic_sub(size, &vmem_alloc_used) +#define vmem_alloc_used_read() atomic_read(&vmem_alloc_used) +#define vmem_alloc_used_set(size) atomic_set(&vmem_alloc_used, size) extern atomic_t vmem_alloc_used; extern unsigned long long vmem_alloc_max; -# endif /* HAVE_ATOMIC64_T */ +#endif /* HAVE_ATOMIC64_T */ -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING /* * DEBUG_KMEM && DEBUG_KMEM_TRACKING * @@ -132,18 +132,18 @@ extern unsigned long long vmem_alloc_max; * be enabled for debugging. This feature may be enabled by passing * --enable-debug-kmem-tracking to configure. */ -# define vmem_alloc(sz, fl) vmem_alloc_track((sz), (fl), \ - __FUNCTION__, __LINE__) -# define vmem_zalloc(sz, fl) vmem_alloc_track((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__) -# define vmem_free(ptr, sz) vmem_free_track((ptr), (sz)) +#define vmem_alloc(sz, fl) vmem_alloc_track((sz), (fl), \ + __FUNCTION__, __LINE__) +#define vmem_zalloc(sz, fl) vmem_alloc_track((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__) +#define vmem_free(ptr, sz) vmem_free_track((ptr), (sz)) extern void *kmem_alloc_track(size_t, int, const char *, int, int, int); extern void kmem_free_track(const void *, size_t); extern void *vmem_alloc_track(size_t, int, const char *, int); extern void vmem_free_track(const void *, size_t); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ /* * DEBUG_KMEM && !DEBUG_KMEM_TRACKING * @@ -153,16 +153,16 @@ extern void vmem_free_track(const void *, size_t); * will be reported on the console. To disable this basic accounting * pass the --disable-debug-kmem option to configure. 
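As with the kmem interfaces, vmem_alloc()/vmem_zalloc()/vmem_free() resolve to a tracked, debug, or plain vmalloc_nofail() variant but are called identically, and are the appropriate choice for large buffers that need only be virtually contiguous. A small hedged sketch; MY_TABLE_SIZE and the helpers are hypothetical:

#include <sys/kmem.h>
#include <sys/vmem.h>

#define MY_TABLE_SIZE   (1024 * 1024)   /* hypothetical 1 MiB lookup table */

static void *
my_table_create(void)
{
        /* Zeroed and virtually contiguous; KM_SLEEP retries until success. */
        return (vmem_zalloc(MY_TABLE_SIZE, KM_SLEEP));
}

static void
my_table_destroy(void *table)
{
        vmem_free(table, MY_TABLE_SIZE);
}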
*/ -# define vmem_alloc(sz, fl) vmem_alloc_debug((sz), (fl), \ - __FUNCTION__, __LINE__) -# define vmem_zalloc(sz, fl) vmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ - __FUNCTION__, __LINE__) -# define vmem_free(ptr, sz) vmem_free_debug((ptr), (sz)) +#define vmem_alloc(sz, fl) vmem_alloc_debug((sz), (fl), \ + __FUNCTION__, __LINE__) +#define vmem_zalloc(sz, fl) vmem_alloc_debug((sz), (fl)|__GFP_ZERO,\ + __FUNCTION__, __LINE__) +#define vmem_free(ptr, sz) vmem_free_debug((ptr), (sz)) extern void *vmem_alloc_debug(size_t, int, const char *, int); extern void vmem_free_debug(const void *, size_t); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #else /* DEBUG_KMEM */ /* * !DEBUG_KMEM && !DEBUG_KMEM_TRACKING @@ -171,9 +171,9 @@ extern void vmem_free_debug(const void *, size_t); * minimal memory accounting. To enable basic accounting pass the * --enable-debug-kmem option to configure. */ -# define vmem_alloc(sz, fl) vmalloc_nofail((sz), (fl)) -# define vmem_zalloc(sz, fl) vzalloc_nofail((sz), (fl)) -# define vmem_free(ptr, sz) ((void)(sz), vfree(ptr)) +#define vmem_alloc(sz, fl) vmalloc_nofail((sz), (fl)) +#define vmem_zalloc(sz, fl) vzalloc_nofail((sz), (fl)) +#define vmem_free(ptr, sz) ((void)(sz), vfree(ptr)) #endif /* DEBUG_KMEM */ diff --git a/module/spl/spl-kmem-cache.c b/module/spl/spl-kmem-cache.c index d24c4c2..3aa65a9 100644 --- a/module/spl/spl-kmem-cache.c +++ b/module/spl/spl-kmem-cache.c @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,9 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . - ***************************************************************************** - * Solaris Porting Layer (SPL) Kmem Implementation. -\*****************************************************************************/ + */ #include #include @@ -76,7 +74,7 @@ MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab"); unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN; module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min, - "Minimal number of objects per slab"); + "Minimal number of objects per slab"); unsigned int spl_kmem_cache_max_size = 32; module_param(spl_kmem_cache_max_size, uint, 0644); @@ -95,12 +93,12 @@ unsigned int spl_kmem_cache_slab_limit = 0; #endif module_param(spl_kmem_cache_slab_limit, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_slab_limit, - "Objects less than N bytes use the Linux slab"); + "Objects less than N bytes use the Linux slab"); unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4); module_param(spl_kmem_cache_kmem_limit, uint, 0644); MODULE_PARM_DESC(spl_kmem_cache_kmem_limit, - "Objects less than N bytes use the kmalloc"); + "Objects less than N bytes use the kmalloc"); /* * Slab allocation interfaces @@ -114,7 +112,7 @@ MODULE_PARM_DESC(spl_kmem_cache_kmem_limit, * breaker for the SPL which contains particularly expensive * initializers for mutex's, condition variables, etc. We also * require a minimal level of cleanup for these data types unlike - * many Linux data type which do need to be explicitly destroyed. + * many Linux data types which do need to be explicitly destroyed. * * 2) Virtual address space backed slab. 
Callers of the Solaris slab * expect it to work well for both small are very large allocations. @@ -135,7 +133,7 @@ MODULE_PARM_DESC(spl_kmem_cache_kmem_limit, * * XXX: Improve the partial slab list by carefully maintaining a * strict ordering of fullest to emptiest slabs based on - * the slab reference count. This guarantees the when freeing + * the slab reference count. This guarantees that when freeing * slabs back to the system we need only linearly traverse the * last N slabs in the list to discover all the freeable slabs. * @@ -149,7 +147,7 @@ MODULE_PARM_DESC(spl_kmem_cache_kmem_limit, struct list_head spl_kmem_cache_list; /* List of caches */ struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ -taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */ +taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */ static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj); @@ -173,7 +171,7 @@ kv_alloc(spl_kmem_cache_t *skc, int size, int flags) /* Resulting allocated memory will be page aligned */ ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); - return ptr; + return (ptr); } static void @@ -204,8 +202,8 @@ kv_free(spl_kmem_cache_t *skc, void *ptr, int size) static inline uint32_t spl_sks_size(spl_kmem_cache_t *skc) { - return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t), - skc->skc_obj_align, uint32_t); + return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t), + skc->skc_obj_align, uint32_t)); } /* @@ -216,8 +214,8 @@ spl_obj_size(spl_kmem_cache_t *skc) { uint32_t align = skc->skc_obj_align; - return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + - P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t); + return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + + P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t)); } /* @@ -226,8 +224,8 @@ spl_obj_size(spl_kmem_cache_t *skc) static inline spl_kmem_obj_t * spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) { - return obj + P2ROUNDUP_TYPED(skc->skc_obj_size, - skc->skc_obj_align, uint32_t); + return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size, + skc->skc_obj_align, uint32_t)); } /* @@ -237,7 +235,7 @@ spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) static inline uint32_t spl_offslab_size(spl_kmem_cache_t *skc) { - return 1UL << (fls64(spl_obj_size(skc)) + 1); + return (1UL << (fls64(spl_obj_size(skc)) + 1)); } /* @@ -320,8 +318,8 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags) out: if (rc) { if (skc->skc_flags & KMC_OFFSLAB) - list_for_each_entry_safe(sko, n, &sks->sks_free_list, - sko_list) + list_for_each_entry_safe(sko, + n, &sks->sks_free_list, sko_list) kv_free(skc, sko->sko_addr, offslab_size); kv_free(skc, base, skc->skc_slab_size); @@ -338,7 +336,7 @@ out: */ static void spl_slab_free(spl_kmem_slab_t *sks, - struct list_head *sks_list, struct list_head *sko_list) + struct list_head *sks_list, struct list_head *sko_list) { spl_kmem_cache_t *skc; @@ -363,7 +361,7 @@ spl_slab_free(spl_kmem_slab_t *sks, } /* - * Traverses all the partial slabs attached to a cache and free those + * Traverse all the partial slabs attached to a cache and free those * which which are currently empty, and have not been touched for * skc_delay seconds to avoid thrashing. The count argument is * passed to optionally cap the number of slabs reclaimed, a count @@ -387,7 +385,8 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag) * however when flag is set the delay will not be used. 
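spl_sks_size(), spl_obj_size(), and spl_sko_from_obj() above all lean on power-of-two round-up so that each buffer is trailed by its spl_kmem_obj_t header at an aligned offset. A standalone userspace sketch of that arithmetic; the local P2ROUNDUP stands in for the SPL macro, and the 32-byte header size is only illustrative, not the real sizeof (spl_kmem_obj_t):

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of align (align must be a power of two). */
#define P2ROUNDUP(x, align)     (-(-(uintptr_t)(x) & -(uintptr_t)(align)))

int
main(void)
{
        uint32_t obj_size = 88;         /* example object size */
        uint32_t align = 8;             /* SPL_KMEM_CACHE_ALIGN default */
        uint32_t sko_size = 32;         /* illustrative header size */

        /* Offset of the per-object header that follows the buffer. */
        uint32_t sko_offset = P2ROUNDUP(obj_size, align);

        /* Total space consumed per object, as spl_obj_size() computes it. */
        uint32_t total = sko_offset + P2ROUNDUP(sko_size, align);

        printf("header at +%u, %u bytes per object\n", sko_offset, total);
        return (0);
}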
*/ spin_lock(&skc->skc_lock); - list_for_each_entry_safe_reverse(sks,m,&skc->skc_partial_list,sks_list){ + list_for_each_entry_safe_reverse(sks, m, + &skc->skc_partial_list, sks_list) { /* * All empty slabs are at the end of skc->skc_partial_list, * therefore once a non-empty slab is found we can stop @@ -397,7 +396,8 @@ spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag) if ((sks->sks_ref > 0) || (count && i >= count)) break; - if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) { + if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) || + flag) { spl_slab_free(sks, &sks_list, &sko_list); i++; } @@ -443,10 +443,10 @@ spl_emergency_search(struct rb_root *root, void *obj) else if (address > (unsigned long)ske->ske_obj) node = node->rb_right; else - return ske; + return (ske); } - return NULL; + return (NULL); } static int @@ -465,13 +465,13 @@ spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske) else if (address > (unsigned long)ske_tmp->ske_obj) new = &((*new)->rb_right); else - return 0; + return (0); } rb_link_node(&ske->ske_node, parent, new); rb_insert_color(&ske->ske_node, root); - return 1; + return (1); } /* @@ -490,7 +490,7 @@ spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj) if (!empty) return (-EEXIST); - ske = kmalloc(sizeof(*ske), flags); + ske = kmalloc(sizeof (*ske), flags); if (ske == NULL) return (-ENOMEM); @@ -565,7 +565,7 @@ __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) skm->skm_avail -= count; memmove(skm->skm_objs, &(skm->skm_objs[count]), - sizeof(void *) * skm->skm_avail); + sizeof (void *) * skm->skm_avail); } static void @@ -666,7 +666,7 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) if (skc->skc_flags & KMC_OFFSLAB) { *objs = spl_kmem_cache_obj_per_slab; - *size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE); + *size = P2ROUNDUP(sizeof (spl_kmem_slab_t), PAGE_SIZE); return (0); } else { sks_size = spl_sks_size(skc); @@ -731,8 +731,8 @@ static spl_kmem_magazine_t * spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) { spl_kmem_magazine_t *skm; - int size = sizeof(spl_kmem_magazine_t) + - sizeof(void *) * skc->skc_mag_size; + int size = sizeof (spl_kmem_magazine_t) + + sizeof (void *) * skc->skc_mag_size; skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu)); if (skm) { @@ -754,8 +754,8 @@ spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) static void spl_magazine_free(spl_kmem_magazine_t *skm) { - int size = sizeof(spl_kmem_magazine_t) + - sizeof(void *) * skm->skm_size; + int size = sizeof (spl_kmem_magazine_t) + + sizeof (void *) * skm->skm_size; ASSERT(skm->skm_magic == SKM_MAGIC); ASSERT(skm->skm_avail == 0); @@ -802,11 +802,11 @@ spl_magazine_destroy(spl_kmem_cache_t *skc) if (skc->skc_flags & KMC_NOMAGAZINE) return; - for_each_online_cpu(i) { + for_each_online_cpu(i) { skm = skc->skc_mag[i]; spl_cache_flush(skc, skm, skm->skm_avail); spl_magazine_free(skm); - } + } } /* @@ -832,12 +832,10 @@ spl_magazine_destroy(spl_kmem_cache_t *skc) */ spl_kmem_cache_t * spl_kmem_cache_create(char *name, size_t size, size_t align, - spl_kmem_ctor_t ctor, - spl_kmem_dtor_t dtor, - spl_kmem_reclaim_t reclaim, - void *priv, void *vmp, int flags) + spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim, + void *priv, void *vmp, int flags) { - spl_kmem_cache_t *skc; + spl_kmem_cache_t *skc; int rc; /* @@ -851,13 +849,13 @@ spl_kmem_cache_create(char *name, size_t size, size_t align, might_sleep(); /* - * Allocate memory for a new cache an 
initialize it. Unfortunately, + * Allocate memory for a new cache and initialize it. Unfortunately, * this usually ends up being a large allocation of ~32k because * we need to allocate enough memory for the worst case number of * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we * explicitly pass KM_NODEBUG to suppress the kmem warning */ - skc = kmem_zalloc(sizeof(*skc), KM_SLEEP| KM_NODEBUG); + skc = kmem_zalloc(sizeof (*skc), KM_SLEEP| KM_NODEBUG); if (skc == NULL) return (NULL); @@ -865,7 +863,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align, skc->skc_name_size = strlen(name) + 1; skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP); if (skc->skc_name == NULL) { - kmem_free(skc, sizeof(*skc)); + kmem_free(skc, sizeof (*skc)); return (NULL); } strncpy(skc->skc_name, name, skc->skc_name_size); @@ -923,7 +921,7 @@ spl_kmem_cache_create(char *name, size_t size, size_t align, * Objects smaller than spl_kmem_cache_slab_limit can * use the Linux slab for better space-efficiency. By * default this functionality is disabled until its - * performance characters are fully understood. + * performance characteristics are fully understood. */ if (spl_kmem_cache_slab_limit && size <= (size_t)spl_kmem_cache_slab_limit) @@ -980,20 +978,20 @@ spl_kmem_cache_create(char *name, size_t size, size_t align, return (skc); out: kmem_free(skc->skc_name, skc->skc_name_size); - kmem_free(skc, sizeof(*skc)); + kmem_free(skc, sizeof (*skc)); return (NULL); } EXPORT_SYMBOL(spl_kmem_cache_create); /* - * Register a move callback to for cache defragmentation. + * Register a move callback for cache defragmentation. * XXX: Unimplemented but harmless to stub out for now. */ void spl_kmem_cache_set_move(spl_kmem_cache_t *skc, kmem_cbrc_t (move)(void *, void *, size_t, void *)) { - ASSERT(move != NULL); + ASSERT(move != NULL); } EXPORT_SYMBOL(spl_kmem_cache_set_move); @@ -1022,9 +1020,11 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc) taskq_cancel_id(spl_kmem_cache_taskq, id); - /* Wait until all current callers complete, this is mainly + /* + * Wait until all current callers complete, this is mainly * to catch the case where a low memory situation triggers a - * cache reaping action which races with this destroy. */ + * cache reaping action which races with this destroy. + */ wait_event(wq, atomic_read(&skc->skc_ref) == 0); if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) { @@ -1037,8 +1037,10 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc) spin_lock(&skc->skc_lock); - /* Validate there are no objects in use and free all the - * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */ + /* + * Validate there are no objects in use and free all the + * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. 
+ */ ASSERT3U(skc->skc_slab_alloc, ==, 0); ASSERT3U(skc->skc_obj_alloc, ==, 0); ASSERT3U(skc->skc_slab_total, ==, 0); @@ -1049,7 +1051,7 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc) kmem_free(skc->skc_name, skc->skc_name_size); spin_unlock(&skc->skc_lock); - kmem_free(skc, sizeof(*skc)); + kmem_free(skc, sizeof (*skc)); } EXPORT_SYMBOL(spl_kmem_cache_destroy); @@ -1089,7 +1091,7 @@ spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) skc->skc_slab_max = skc->skc_slab_alloc; } - return sko->sko_addr; + return (sko->sko_addr); } /* @@ -1127,7 +1129,7 @@ spl_cache_grow_work(void *data) static int spl_cache_grow_wait(spl_kmem_cache_t *skc) { - return !test_bit(KMC_BIT_GROWING, &skc->skc_flags); + return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags)); } /* @@ -1164,7 +1166,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) { spl_kmem_alloc_t *ska; - ska = kmalloc(sizeof(*ska), flags); + ska = kmalloc(sizeof (*ska), flags); if (ska == NULL) { clear_bit(KMC_BIT_GROWING, &skc->skc_flags); wake_up_all(&skc->skc_waitq); @@ -1192,7 +1194,7 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) rc = spl_emergency_alloc(skc, flags, obj); } else { remaining = wait_event_timeout(skc->skc_waitq, - spl_cache_grow_wait(skc), HZ); + spl_cache_grow_wait(skc), HZ); if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) { spin_lock(&skc->skc_lock); @@ -1249,9 +1251,11 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) if (skm != skc->skc_mag[smp_processor_id()]) goto out; - /* Potentially rescheduled to the same CPU but + /* + * Potentially rescheduled to the same CPU but * allocations may have occurred from this CPU while - * we were sleeping so recalculate max refill. */ + * we were sleeping so recalculate max refill. + */ refill = MIN(refill, skm->skm_size - skm->skm_avail); spin_lock(&skc->skc_lock); @@ -1260,17 +1264,21 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) /* Grab the next available slab */ sks = list_entry((&skc->skc_partial_list)->next, - spl_kmem_slab_t, sks_list); + spl_kmem_slab_t, sks_list); ASSERT(sks->sks_magic == SKS_MAGIC); ASSERT(sks->sks_ref < sks->sks_objs); ASSERT(!list_empty(&sks->sks_free_list)); - /* Consume as many objects as needed to refill the requested - * cache. We must also be careful not to overfill it. */ - while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) { + /* + * Consume as many objects as needed to refill the requested + * cache. We must also be careful not to overfill it. + */ + while (sks->sks_ref < sks->sks_objs && refill-- > 0 && + ++count) { ASSERT(skm->skm_avail < skm->skm_size); ASSERT(count < skm->skm_size); - skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks); + skm->skm_objs[skm->skm_avail++] = + spl_cache_obj(skc, sks); } /* Move slab to skc_complete_list when full */ @@ -1308,16 +1316,20 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) sks->sks_ref--; skc->skc_obj_alloc--; - /* Move slab to skc_partial_list when no longer full. Slabs + /* + * Move slab to skc_partial_list when no longer full. Slabs * are added to the head to keep the partial list is quasi-full - * sorted order. Fuller at the head, emptier at the tail. */ + * sorted order. Fuller at the head, emptier at the tail. 
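spl_kmem_cache_alloc() above honors the KM_* semantics from kmem.h: KM_SLEEP callers never see NULL (the emergency-object path covers a stuck grow), while KM_NOSLEEP callers must tolerate failure. A hedged fragment reusing the hypothetical my_obj_cache from the earlier cache sketch:

#include <sys/kmem.h>
#include <sys/kmem_cache.h>

extern kmem_cache_t *my_obj_cache;      /* hypothetical, see earlier sketch */

static void *
my_obj_try_alloc(void)
{
        /*
         * KM_NOSLEEP maps to GFP_ATOMIC, so the allocation may fail and
         * the caller must check for NULL before using the object.
         */
        return (kmem_cache_alloc(my_obj_cache, KM_NOSLEEP));
}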
+ */ if (sks->sks_ref == (sks->sks_objs - 1)) { list_del(&sks->sks_list); list_add(&sks->sks_list, &skc->skc_partial_list); } - /* Move empty slabs to the end of the partial list so - * they can be easily found and freed during reclamation. */ + /* + * Move empty slabs to the end of the partial list so + * they can be easily found and freed during reclamation. + */ if (sks->sks_ref == 0) { list_del(&sks->sks_list); list_add_tail(&sks->sks_list, &skc->skc_partial_list); @@ -1359,10 +1371,12 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) local_irq_disable(); restart: - /* Safe to update per-cpu structure without lock, but + /* + * Safe to update per-cpu structure without lock, but * in the restart case we must be careful to reacquire * the local magazine since this may have changed - * when we need to grow the cache. */ + * when we need to grow the cache. + */ skm = skc->skc_mag[smp_processor_id()]; ASSERT(skm->skm_magic == SKM_MAGIC); @@ -1438,10 +1452,12 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) local_irq_save(flags); - /* Safe to update per-cpu structure without lock, but + /* + * Safe to update per-cpu structure without lock, but * no remote memory allocation tracking is being performed * it is entirely possible to allocate an object from one - * CPU cache and return it to another. */ + * CPU cache and return it to another. + */ skm = skc->skc_mag[smp_processor_id()]; ASSERT(skm->skm_magic == SKM_MAGIC); @@ -1495,12 +1511,12 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink, #ifdef HAVE_SPLIT_SHRINKER_CALLBACK uint64_t oldalloc = skc->skc_obj_alloc; spl_kmem_cache_reap_now(skc, - MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1)); + MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1)); if (oldalloc > skc->skc_obj_alloc) alloc += oldalloc - skc->skc_obj_alloc; #else spl_kmem_cache_reap_now(skc, - MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1)); + MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1)); alloc += skc->skc_obj_alloc; #endif /* HAVE_SPLIT_SHRINKER_CALLBACK */ } else { @@ -1581,7 +1597,7 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count) spin_lock(&skc->skc_lock); do_reclaim = (skc->skc_slab_total > 0) && - ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) && + ((skc->skc_slab_total-skc->skc_slab_alloc) == 0) && (skc->skc_obj_alloc < objects); objects = skc->skc_obj_alloc; diff --git a/module/spl/spl-kmem.c b/module/spl/spl-kmem.c index 075bf25..96ad2b0 100644 --- a/module/spl/spl-kmem.c +++ b/module/spl/spl-kmem.c @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,9 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . - ***************************************************************************** - * Solaris Porting Layer (SPL) Kmem Implementation. -\*****************************************************************************/ + */ #include #include @@ -31,7 +29,7 @@ int kmem_debugging(void) { - return 0; + return (0); } EXPORT_SYMBOL(kmem_debugging); @@ -47,7 +45,7 @@ kmem_vasprintf(const char *fmt, va_list ap) va_end(aq); } while (ptr == NULL); - return ptr; + return (ptr); } EXPORT_SYMBOL(kmem_vasprintf); @@ -63,7 +61,7 @@ kmem_asprintf(const char *fmt, ...) 
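kmem_vasprintf() and kmem_asprintf() above loop until the allocation succeeds, so callers treat the result as non-NULL and release it with strfree(), the same routine that pairs with strdup(). A small hedged sketch; my_label_for() and my_label_release() are hypothetical callers:

#include <sys/kmem.h>

/* Hypothetical: build a heap-allocated label for a numbered unit. */
static char *
my_label_for(int unit)
{
        return (kmem_asprintf("spl-unit-%d", unit));
}

static void
my_label_release(char *label)
{
        strfree(label);         /* pairs with kmem_asprintf() and strdup() */
}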
va_end(ap); } while (ptr == NULL); - return ptr; + return (ptr); } EXPORT_SYMBOL(kmem_asprintf); @@ -78,13 +76,13 @@ __strdup(const char *str, int flags) if (ptr) memcpy(ptr, str, n + 1); - return ptr; + return (ptr); } char * strdup(const char *str) { - return __strdup(str, KM_SLEEP); + return (__strdup(str, KM_SLEEP)); } EXPORT_SYMBOL(strdup); @@ -104,18 +102,19 @@ EXPORT_SYMBOL(strfree); #ifdef DEBUG_KMEM /* Shim layer memory accounting */ -# ifdef HAVE_ATOMIC64_T +#ifdef HAVE_ATOMIC64_T atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); unsigned long long kmem_alloc_max = 0; -# else /* HAVE_ATOMIC64_T */ +#else /* HAVE_ATOMIC64_T */ atomic_t kmem_alloc_used = ATOMIC_INIT(0); unsigned long long kmem_alloc_max = 0; -# endif /* HAVE_ATOMIC64_T */ +#endif /* HAVE_ATOMIC64_T */ EXPORT_SYMBOL(kmem_alloc_used); EXPORT_SYMBOL(kmem_alloc_max); -/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked +/* + * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked * but also the location of every alloc and free. When the SPL module is * unloaded a list of all leaked addresses and where they were allocated * will be dumped to the console. Enabling this feature has a significant @@ -126,18 +125,18 @@ EXPORT_SYMBOL(kmem_alloc_max); * debugging enabled for anything other than debugging we need to minimize * the contention by moving to a lock per xmem_table entry model. */ -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING -# define KMEM_HASH_BITS 10 -# define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) +#define KMEM_HASH_BITS 10 +#define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) typedef struct kmem_debug { - struct hlist_node kd_hlist; /* Hash node linkage */ - struct list_head kd_list; /* List of all allocations */ - void *kd_addr; /* Allocation pointer */ - size_t kd_size; /* Allocation size */ - const char *kd_func; /* Allocation function */ - int kd_line; /* Allocation line */ + struct hlist_node kd_hlist; /* Hash node linkage */ + struct list_head kd_list; /* List of all allocations */ + void *kd_addr; /* Allocation pointer */ + size_t kd_size; /* Allocation size */ + const char *kd_func; /* Allocation function */ + int kd_line; /* Allocation line */ } kmem_debug_t; spinlock_t kmem_lock; @@ -149,7 +148,8 @@ EXPORT_SYMBOL(kmem_table); EXPORT_SYMBOL(kmem_list); static kmem_debug_t * -kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr) +kmem_del_init(spinlock_t *lock, struct hlist_head *table, + int bits, const void *addr) { struct hlist_head *head; struct hlist_node *node; @@ -165,7 +165,7 @@ kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void * hlist_del_init(&p->kd_hlist); list_del_init(&p->kd_list); spin_unlock_irqrestore(lock, flags); - return p; + return (p); } } @@ -183,12 +183,12 @@ kmem_alloc_track(size_t size, int flags, const char *func, int line, unsigned long irq_flags; /* Function may be called with KM_NOSLEEP so failure is possible */ - dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), + dptr = (kmem_debug_t *) kmalloc_nofail(sizeof (kmem_debug_t), flags & ~__GFP_ZERO); if (unlikely(dptr == NULL)) { printk(KERN_WARNING "debug kmem_alloc(%ld, 0x%x) at %s:%d " - "failed (%lld/%llu)\n", sizeof(kmem_debug_t), flags, + "failed (%lld/%llu)\n", sizeof (kmem_debug_t), flags, func, line, kmem_alloc_used_read(), kmem_alloc_max); } else { /* @@ -280,7 +280,7 @@ kmem_free_track(const void *ptr, size_t size) kmem_alloc_used_sub(size); kfree(dptr->kd_func); - memset((void *)dptr, 0x5a, 
sizeof(kmem_debug_t)); + memset((void *)dptr, 0x5a, sizeof (kmem_debug_t)); kfree(dptr); memset((void *)ptr, 0x5a, size); @@ -288,7 +288,7 @@ kmem_free_track(const void *ptr, size_t size) } EXPORT_SYMBOL(kmem_free_track); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ void * kmem_alloc_debug(size_t size, int flags, const char *func, int line, @@ -342,7 +342,7 @@ kmem_free_debug(const void *ptr, size_t size) } EXPORT_SYMBOL(kmem_free_debug); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #endif /* DEBUG_KMEM */ #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) @@ -355,15 +355,19 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) ASSERT(str != NULL && len >= 17); memset(str, 0, len); - /* Check for a fully printable string, and while we are at - * it place the printable characters in the passed buffer. */ + /* + * Check for a fully printable string, and while we are at + * it place the printable characters in the passed buffer. + */ for (i = 0; i < size; i++) { str[i] = ((char *)(kd->kd_addr))[i]; if (isprint(str[i])) { continue; } else { - /* Minimum number of printable characters found - * to make it worthwhile to print this as ascii. */ + /* + * Minimum number of printable characters found + * to make it worthwhile to print this as ascii. + */ if (i > min) break; @@ -374,17 +378,17 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) if (!flag) { sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", - *((uint8_t *)kd->kd_addr), - *((uint8_t *)kd->kd_addr + 2), - *((uint8_t *)kd->kd_addr + 4), - *((uint8_t *)kd->kd_addr + 6), - *((uint8_t *)kd->kd_addr + 8), - *((uint8_t *)kd->kd_addr + 10), - *((uint8_t *)kd->kd_addr + 12), - *((uint8_t *)kd->kd_addr + 14)); + *((uint8_t *)kd->kd_addr), + *((uint8_t *)kd->kd_addr + 2), + *((uint8_t *)kd->kd_addr + 4), + *((uint8_t *)kd->kd_addr + 6), + *((uint8_t *)kd->kd_addr + 8), + *((uint8_t *)kd->kd_addr + 10), + *((uint8_t *)kd->kd_addr + 12), + *((uint8_t *)kd->kd_addr + 14)); } - return str; + return (str); } static int @@ -411,18 +415,18 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) spin_lock_irqsave(lock, flags); if (!list_empty(list)) printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", - "size", "data", "func", "line"); + "size", "data", "func", "line"); list_for_each_entry(kd, list, kd_list) printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, - (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), - kd->kd_func, kd->kd_line); + (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), + kd->kd_func, kd->kd_line); spin_unlock_irqrestore(lock, flags); } #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ -#define spl_kmem_init_tracking(list, lock, size) -#define spl_kmem_fini_tracking(list, lock) +#define spl_kmem_init_tracking(list, lock, size) +#define spl_kmem_fini_tracking(list, lock) #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ int @@ -442,10 +446,12 @@ void spl_kmem_fini(void) { #ifdef DEBUG_KMEM - /* Display all unreclaimed memory addresses, including the + /* + * Display all unreclaimed memory addresses, including the * allocation size and the first few bytes of what's located * at that address to aid in debugging. Performance is not - * a serious concern here since it is module unload time. */ + * a serious concern here since it is module unload time. 
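The kmem_alloc_used_*() and vmem_alloc_used_*() counters updated by the tracked and debug paths above can also be read directly, which is what the leak report printed at module unload (just below) relies on. A hedged sketch of a DEBUG_KMEM-only diagnostic using the same format specifiers as that report; my_mem_report() is hypothetical:

#ifdef DEBUG_KMEM
#include <linux/kernel.h>
#include <sys/kmem.h>
#include <sys/vmem.h>

static void
my_mem_report(void)
{
        printk(KERN_INFO "kmem %ld/%llu bytes, vmem %ld/%llu bytes\n",
            kmem_alloc_used_read(), kmem_alloc_max,
            vmem_alloc_used_read(), vmem_alloc_max);
}
#endif /* DEBUG_KMEM */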
+ */ if (kmem_alloc_used_read() != 0) printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n", kmem_alloc_used_read(), kmem_alloc_max); diff --git a/module/spl/spl-vmem.c b/module/spl/spl-vmem.c index 4c140eb..51aef94 100644 --- a/module/spl/spl-vmem.c +++ b/module/spl/spl-vmem.c @@ -1,4 +1,4 @@ -/*****************************************************************************\ +/* * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. * Copyright (C) 2007 The Regents of the University of California. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). @@ -20,9 +20,7 @@ * * You should have received a copy of the GNU General Public License along * with the SPL. If not, see . - ***************************************************************************** - * Solaris Porting Layer (SPL) Kmem Implementation. -\*****************************************************************************/ + */ #include #include @@ -57,18 +55,19 @@ EXPORT_SYMBOL(vmem_size); #ifdef DEBUG_KMEM /* Shim layer memory accounting */ -# ifdef HAVE_ATOMIC64_T +#ifdef HAVE_ATOMIC64_T atomic64_t vmem_alloc_used = ATOMIC64_INIT(0); unsigned long long vmem_alloc_max = 0; -# else /* HAVE_ATOMIC64_T */ +#else /* HAVE_ATOMIC64_T */ atomic_t vmem_alloc_used = ATOMIC_INIT(0); unsigned long long vmem_alloc_max = 0; -# endif /* HAVE_ATOMIC64_T */ +#endif /* HAVE_ATOMIC64_T */ EXPORT_SYMBOL(vmem_alloc_used); EXPORT_SYMBOL(vmem_alloc_max); -/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked +/* + * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked * but also the location of every alloc and free. When the SPL module is * unloaded a list of all leaked addresses and where they were allocated * will be dumped to the console. Enabling this feature has a significant @@ -79,18 +78,18 @@ EXPORT_SYMBOL(vmem_alloc_max); * debugging enabled for anything other than debugging we need to minimize * the contention by moving to a lock per xmem_table entry model. 
*/ -# ifdef DEBUG_KMEM_TRACKING +#ifdef DEBUG_KMEM_TRACKING -# define VMEM_HASH_BITS 10 -# define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS) +#define VMEM_HASH_BITS 10 +#define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS) typedef struct kmem_debug { - struct hlist_node kd_hlist; /* Hash node linkage */ - struct list_head kd_list; /* List of all allocations */ - void *kd_addr; /* Allocation pointer */ - size_t kd_size; /* Allocation size */ - const char *kd_func; /* Allocation function */ - int kd_line; /* Allocation line */ + struct hlist_node kd_hlist; /* Hash node linkage */ + struct list_head kd_list; /* List of all allocations */ + void *kd_addr; /* Allocation pointer */ + size_t kd_size; /* Allocation size */ + const char *kd_func; /* Allocation function */ + int kd_line; /* Allocation line */ } kmem_debug_t; spinlock_t vmem_lock; @@ -111,12 +110,12 @@ vmem_alloc_track(size_t size, int flags, const char *func, int line) ASSERT(flags & KM_SLEEP); /* Function may be called with KM_NOSLEEP so failure is possible */ - dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t), + dptr = (kmem_debug_t *) kmalloc_nofail(sizeof (kmem_debug_t), flags & ~__GFP_ZERO); if (unlikely(dptr == NULL)) { printk(KERN_WARNING "debug vmem_alloc(%ld, 0x%x) " "at %s:%d failed (%lld/%llu)\n", - sizeof(kmem_debug_t), flags, func, line, + sizeof (kmem_debug_t), flags, func, line, vmem_alloc_used_read(), vmem_alloc_max); } else { /* @@ -194,7 +193,7 @@ vmem_free_track(const void *ptr, size_t size) vmem_alloc_used_sub(size); kfree(dptr->kd_func); - memset((void *)dptr, 0x5a, sizeof(kmem_debug_t)); + memset((void *)dptr, 0x5a, sizeof (kmem_debug_t)); kfree(dptr); memset((void *)ptr, 0x5a, size); @@ -202,7 +201,7 @@ vmem_free_track(const void *ptr, size_t size) } EXPORT_SYMBOL(vmem_free_track); -# else /* DEBUG_KMEM_TRACKING */ +#else /* DEBUG_KMEM_TRACKING */ void * vmem_alloc_debug(size_t size, int flags, const char *func, int line) @@ -242,7 +241,7 @@ vmem_free_debug(const void *ptr, size_t size) } EXPORT_SYMBOL(vmem_free_debug); -# endif /* DEBUG_KMEM_TRACKING */ +#endif /* DEBUG_KMEM_TRACKING */ #endif /* DEBUG_KMEM */ #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) @@ -255,15 +254,19 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) ASSERT(str != NULL && len >= 17); memset(str, 0, len); - /* Check for a fully printable string, and while we are at - * it place the printable characters in the passed buffer. */ + /* + * Check for a fully printable string, and while we are at + * it place the printable characters in the passed buffer. + */ for (i = 0; i < size; i++) { str[i] = ((char *)(kd->kd_addr))[i]; if (isprint(str[i])) { continue; } else { - /* Minimum number of printable characters found - * to make it worthwhile to print this as ascii. */ + /* + * Minimum number of printable characters found + * to make it worthwhile to print this as ascii. 
+ */ if (i > min) break; @@ -274,17 +277,17 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) if (!flag) { sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", - *((uint8_t *)kd->kd_addr), - *((uint8_t *)kd->kd_addr + 2), - *((uint8_t *)kd->kd_addr + 4), - *((uint8_t *)kd->kd_addr + 6), - *((uint8_t *)kd->kd_addr + 8), - *((uint8_t *)kd->kd_addr + 10), - *((uint8_t *)kd->kd_addr + 12), - *((uint8_t *)kd->kd_addr + 14)); + *((uint8_t *)kd->kd_addr), + *((uint8_t *)kd->kd_addr + 2), + *((uint8_t *)kd->kd_addr + 4), + *((uint8_t *)kd->kd_addr + 6), + *((uint8_t *)kd->kd_addr + 8), + *((uint8_t *)kd->kd_addr + 10), + *((uint8_t *)kd->kd_addr + 12), + *((uint8_t *)kd->kd_addr + 14)); } - return str; + return (str); } static int @@ -311,18 +314,18 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) spin_lock_irqsave(lock, flags); if (!list_empty(list)) printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", - "size", "data", "func", "line"); + "size", "data", "func", "line"); list_for_each_entry(kd, list, kd_list) printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, - (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), - kd->kd_func, kd->kd_line); + (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), + kd->kd_func, kd->kd_line); spin_unlock_irqrestore(lock, flags); } #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ -#define spl_kmem_init_tracking(list, lock, size) -#define spl_kmem_fini_tracking(list, lock) +#define spl_kmem_init_tracking(list, lock, size) +#define spl_kmem_fini_tracking(list, lock) #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ int @@ -342,10 +345,12 @@ void spl_vmem_fini(void) { #ifdef DEBUG_KMEM - /* Display all unreclaimed memory addresses, including the + /* + * Display all unreclaimed memory addresses, including the * allocation size and the first few bytes of what's located * at that address to aid in debugging. Performance is not - * a serious concern here since it is module unload time. */ + * a serious concern here since it is module unload time. + */ if (vmem_alloc_used_read() != 0) printk(KERN_WARNING "vmem leaked %ld/%llu bytes\n", vmem_alloc_used_read(), vmem_alloc_max); -- 2.39.2