/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_KMEM_CACHE_H
#define	_SPL_KMEM_CACHE_H

#include <sys/taskq.h>

/*
 * Slab allocation interfaces.  The SPL slab differs from the standard
 * Linux SLAB or SLUB primarily in that each cache may be backed by slabs
 * allocated from the physical or virtual memory address space.  The virtual
 * slabs allow for good behavior when allocating large objects of identical
 * size.  This slab implementation also supports both constructors and
 * destructors, which the Linux slab does not.
 */
enum {
	KMC_BIT_NOTOUCH		= 0,	/* Don't update ages */
	KMC_BIT_NODEBUG		= 1,	/* Default behavior */
	KMC_BIT_NOMAGAZINE	= 2,	/* XXX: Unsupported */
	KMC_BIT_NOHASH		= 3,	/* XXX: Unsupported */
	KMC_BIT_QCACHE		= 4,	/* XXX: Unsupported */
	KMC_BIT_KMEM		= 5,	/* Use kmem cache */
	KMC_BIT_VMEM		= 6,	/* Use vmem cache */
	KMC_BIT_SLAB		= 7,	/* Use Linux slab cache */
	KMC_BIT_OFFSLAB		= 8,	/* Objects not on slab */
	KMC_BIT_NOEMERGENCY	= 9,	/* Disable emergency objects */
	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
	KMC_BIT_GROWING		= 15,	/* Growing in progress */
	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
	KMC_BIT_DESTROY		= 17,	/* Destroy in progress */
	KMC_BIT_TOTAL		= 18,	/* Proc handler helper bit */
	KMC_BIT_ALLOC		= 19,	/* Proc handler helper bit */
	KMC_BIT_MAX		= 20,	/* Proc handler helper bit */
};

/* kmem move callback return values */
typedef enum kmem_cbrc {
	KMEM_CBRC_YES		= 0,	/* Object moved */
	KMEM_CBRC_NO		= 1,	/* Object not moved */
	KMEM_CBRC_LATER		= 2,	/* Object not moved, try again later */
	KMEM_CBRC_DONT_NEED	= 3,	/* Neither object is needed */
	KMEM_CBRC_DONT_KNOW	= 4,	/* Object unknown */
} kmem_cbrc_t;

#define	KMC_NOTOUCH		(1 << KMC_BIT_NOTOUCH)
#define	KMC_NODEBUG		(1 << KMC_BIT_NODEBUG)
#define	KMC_NOMAGAZINE		(1 << KMC_BIT_NOMAGAZINE)
#define	KMC_NOHASH		(1 << KMC_BIT_NOHASH)
#define	KMC_QCACHE		(1 << KMC_BIT_QCACHE)
#define	KMC_KMEM		(1 << KMC_BIT_KMEM)
#define	KMC_VMEM		(1 << KMC_BIT_VMEM)
#define	KMC_SLAB		(1 << KMC_BIT_SLAB)
#define	KMC_OFFSLAB		(1 << KMC_BIT_OFFSLAB)
#define	KMC_NOEMERGENCY		(1 << KMC_BIT_NOEMERGENCY)
#define	KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
#define	KMC_GROWING		(1 << KMC_BIT_GROWING)
#define	KMC_REAPING		(1 << KMC_BIT_REAPING)
#define	KMC_DESTROY		(1 << KMC_BIT_DESTROY)
#define	KMC_TOTAL		(1 << KMC_BIT_TOTAL)
#define	KMC_ALLOC		(1 << KMC_BIT_ALLOC)
#define	KMC_MAX			(1 << KMC_BIT_MAX)

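/*
 * Illustrative sketch: the KMC_BIT_* values above are bit positions for
 * the atomic test_bit()/set_bit() family operating on skc->skc_flags,
 * while the KMC_* masks are the same positions expressed as flag words.
 * For example, a caller might check for an in-progress destroy roughly
 * as follows (an assumed idiom, not documented by this header):
 *
 *	if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
 *		return;
 */
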
#define	KMC_REAP_CHUNK		INT_MAX
#define	KMC_DEFAULT_SEEKS	1

#define	KMC_EXPIRE_AGE		0x1	/* Due to age */
#define	KMC_EXPIRE_MEM		0x2	/* Due to low memory */

#define	KMC_RECLAIM_ONCE	0x1	/* Force a single shrinker pass */

extern unsigned int spl_kmem_cache_expire;
extern struct list_head spl_kmem_cache_list;
extern struct rw_semaphore spl_kmem_cache_sem;

#define	SKM_MAGIC		0x2e2e2e2e
#define	SKO_MAGIC		0x20202020
#define	SKS_MAGIC		0x22222222
#define	SKC_MAGIC		0x2c2c2c2c

#define	SPL_KMEM_CACHE_DELAY		15	/* Minimum slab release age */
#define	SPL_KMEM_CACHE_REAP		0	/* Default reap everything */
#define	SPL_KMEM_CACHE_OBJ_PER_SLAB	8	/* Target objects per slab */
#define	SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN	1	/* Minimum objects per slab */
#define	SPL_KMEM_CACHE_ALIGN		8	/* Default object alignment */
#ifdef _LP64
#define	SPL_KMEM_CACHE_MAX_SIZE		32	/* Max slab size in MB */
#else
#define	SPL_KMEM_CACHE_MAX_SIZE		4	/* Max slab size in MB */
#endif

#define	SPL_MAX_ORDER			(MAX_ORDER - 3)
#define	SPL_MAX_ORDER_NR_PAGES		(1 << (SPL_MAX_ORDER - 1))

#ifdef CONFIG_SLUB
#define	SPL_MAX_KMEM_CACHE_ORDER	PAGE_ALLOC_COSTLY_ORDER
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(1 << (SPL_MAX_KMEM_CACHE_ORDER - 1))
#else
#define	SPL_MAX_KMEM_ORDER_NR_PAGES	(KMALLOC_MAX_SIZE >> PAGE_SHIFT)
#endif

#define	POINTER_IS_VALID(p)	0	/* Unimplemented */
#define	POINTER_INVALIDATE(pp)		/* Unimplemented */

typedef int (*spl_kmem_ctor_t)(void *, void *, int);
typedef void (*spl_kmem_dtor_t)(void *, void *);
typedef void (*spl_kmem_reclaim_t)(void *);

typedef struct spl_kmem_magazine {
	uint32_t		skm_magic;	/* Sanity magic */
	uint32_t		skm_avail;	/* Available objects */
	uint32_t		skm_size;	/* Magazine size */
	uint32_t		skm_refill;	/* Batch refill size */
	struct spl_kmem_cache	*skm_cache;	/* Owned by cache */
	unsigned long		skm_age;	/* Last cache access */
	unsigned int		skm_cpu;	/* Owned by cpu */
	void			*skm_objs[0];	/* Object pointers */
} spl_kmem_magazine_t;

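/*
 * Note: skm_objs[] is a zero-length trailing array, so a magazine sized
 * to hold skm_size object pointers must be allocated with the array space
 * included.  A minimal sketch of the required size (the allocator call is
 * an assumption, not taken from this header):
 *
 *	size_t bytes = sizeof (spl_kmem_magazine_t) +
 *	    skm_size * sizeof (void *);
 *	spl_kmem_magazine_t *skm = kmalloc(bytes, GFP_KERNEL);
 */
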
typedef struct spl_kmem_obj {
	uint32_t		sko_magic;	/* Sanity magic */
	void			*sko_addr;	/* Buffer address */
	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
	struct list_head	sko_list;	/* Free object list linkage */
} spl_kmem_obj_t;

typedef struct spl_kmem_slab {
	uint32_t		sks_magic;	/* Sanity magic */
	uint32_t		sks_objs;	/* Objects per slab */
	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
	struct list_head	sks_list;	/* Slab list linkage */
	struct list_head	sks_free_list;	/* Free object list */
	unsigned long		sks_age;	/* Last modify jiffie */
	uint32_t		sks_ref;	/* Ref count used objects */
} spl_kmem_slab_t;

typedef struct spl_kmem_alloc {
	struct spl_kmem_cache	*ska_cache;	/* Owned by cache */
	int			ska_flags;	/* Allocation flags */
	taskq_ent_t		ska_tqe;	/* Task queue entry */
} spl_kmem_alloc_t;

typedef struct spl_kmem_emergency {
	struct rb_node		ske_node;	/* Emergency tree linkage */
	unsigned long		ske_obj;	/* Buffer address */
} spl_kmem_emergency_t;

typedef struct spl_kmem_cache {
	uint32_t		skc_magic;	/* Sanity magic */
	uint32_t		skc_name_size;	/* Name length */
	char			*skc_name;	/* Name string */
	spl_kmem_magazine_t	*skc_mag[NR_CPUS]; /* Per-CPU warm cache */
	uint32_t		skc_mag_size;	/* Magazine size */
	uint32_t		skc_mag_refill;	/* Magazine refill count */
	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
	spl_kmem_reclaim_t	skc_reclaim;	/* Reclaim callback */
	void			*skc_private;	/* Private data */
	void			*skc_vmp;	/* Unused */
	struct kmem_cache	*skc_linux_cache; /* Linux slab cache if used */
	unsigned long		skc_flags;	/* Flags */
	uint32_t		skc_obj_size;	/* Object size */
	uint32_t		skc_obj_align;	/* Object alignment */
	uint32_t		skc_slab_objs;	/* Objects per slab */
	uint32_t		skc_slab_size;	/* Slab size */
	uint32_t		skc_delay;	/* Slab reclaim interval */
	uint32_t		skc_reap;	/* Slab reclaim count */
	atomic_t		skc_ref;	/* Ref count callers */
	taskqid_t		skc_taskqid;	/* Slab reclaim task */
	struct list_head	skc_list;	/* List of caches linkage */
	struct list_head	skc_complete_list; /* Completely alloc'ed */
	struct list_head	skc_partial_list; /* Partially alloc'ed */
	struct rb_root		skc_emergency_tree; /* Min sized objects */
	spinlock_t		skc_lock;	/* Cache lock */
	wait_queue_head_t	skc_waitq;	/* Allocation waiters */
	uint64_t		skc_slab_fail;	/* Slab alloc failures */
	uint64_t		skc_slab_create; /* Slab creates */
	uint64_t		skc_slab_destroy; /* Slab destroys */
	uint64_t		skc_slab_total;	/* Slab total current */
	uint64_t		skc_slab_alloc;	/* Slab alloc current */
	uint64_t		skc_slab_max;	/* Slab max historic */
	uint64_t		skc_obj_total;	/* Obj total current */
	uint64_t		skc_obj_alloc;	/* Obj alloc current */
	uint64_t		skc_obj_max;	/* Obj max historic */
	uint64_t		skc_obj_deadlock; /* Obj emergency deadlocks */
	uint64_t		skc_obj_emergency; /* Obj emergency current */
	uint64_t		skc_obj_emergency_max; /* Obj emergency max */
} spl_kmem_cache_t;

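/*
 * Illustrative sketch: skc_lock guards the slab lists and the statistics
 * above, so a consistent snapshot of the counters can be taken roughly as
 * follows (an assumption about locking granularity, not a documented
 * guarantee of this header):
 *
 *	spin_lock(&skc->skc_lock);
 *	uint64_t total = skc->skc_obj_total;
 *	uint64_t alloc = skc->skc_obj_alloc;
 *	spin_unlock(&skc->skc_lock);
 */
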
#define	kmem_cache_t		spl_kmem_cache_t

extern spl_kmem_cache_t *spl_kmem_cache_create(char *name, size_t size,
    size_t align, spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim, void *priv, void *vmp, int flags);
extern void spl_kmem_cache_set_move(spl_kmem_cache_t *,
    kmem_cbrc_t (*)(void *, void *, size_t, void *));
extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_set_allocflags(spl_kmem_cache_t *skc, gfp_t flags);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
extern void spl_kmem_reap(void);

#define	kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl) \
    spl_kmem_cache_create(name, size, align, ctor, dtor, rclm, priv, vmp, fl)
#define	kmem_cache_set_move(skc, move)	spl_kmem_cache_set_move(skc, move)
#define	kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
#define	kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
#define	kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
#define	kmem_cache_reap_now(skc)	\
    spl_kmem_cache_reap_now(skc, skc->skc_reap)
#define	kmem_reap()			spl_kmem_reap()

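/*
 * Example usage via the Solaris-style wrappers above.  This is a minimal
 * sketch; my_obj_t, the constructor/destructor names, and the KM_SLEEP
 * flag from <sys/kmem.h> are assumptions for illustration:
 *
 *	static int
 *	my_obj_ctor(void *buf, void *priv, int flags)
 *	{
 *		memset(buf, 0, sizeof (my_obj_t));
 *		return (0);
 *	}
 *
 *	static void
 *	my_obj_dtor(void *buf, void *priv)
 *	{
 *	}
 *
 *	kmem_cache_t *cache = kmem_cache_create("my_obj_cache",
 *	    sizeof (my_obj_t), 0, my_obj_ctor, my_obj_dtor,
 *	    NULL, NULL, NULL, 0);
 *	my_obj_t *obj = kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	kmem_cache_free(cache, obj);
 *	kmem_cache_destroy(cache);
 */
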
/*
 * The following functions are only available for internal use.
 */
extern int spl_kmem_cache_init(void);
extern void spl_kmem_cache_fini(void);

#endif	/* _SPL_KMEM_CACHE_H */