/*  $NetBSD: pool.h,v 1.54 2006/08/20 09:35:25 yamt Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _SYS_POOL_H_
#define _SYS_POOL_H_

#ifdef _KERNEL
#define __POOL_EXPOSE
#endif

#if defined(_KERNEL_OPT)
#include "opt_pool.h"
#endif

#ifdef __POOL_EXPOSE
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/tree.h>
#if defined(_KERNEL)
#include <sys/callback.h>
#endif /* defined(_KERNEL) */
#endif

#define PCG_NOBJECTS		16

#define POOL_PADDR_INVALID	((paddr_t) -1)

#ifdef __POOL_EXPOSE
/* The pool cache group. */
struct pool_cache_group {
	LIST_ENTRY(pool_cache_group)
			pcg_list;	/* link in the pool cache's group list */
	u_int		pcg_avail;	/* # available objects */
			/* pointers to the objects */
	struct {
		void	*pcgo_va;	/* cache object virtual address */
		paddr_t	pcgo_pa;	/* cache object physical address */
	} pcg_objects[PCG_NOBJECTS];
};

LIST_HEAD(pool_cache_grouplist, pool_cache_group);
struct pool_cache {
	LIST_ENTRY(pool_cache)
			pc_poollist;	/* entry on the parent pool's cache list */
	struct pool_cache_grouplist
			pc_emptygroups;	/* list of empty cache groups */
	struct pool_cache_grouplist
			pc_fullgroups;	/* list of full cache groups */
	struct pool_cache_grouplist
			pc_partgroups;	/* list of partial cache groups */
	struct pool	*pc_pool;	/* parent pool */
	struct simplelock pc_slock;	/* mutex */

	int		(*pc_ctor)(void *, void *, int);
	void		(*pc_dtor)(void *, void *);
	void		*pc_arg;

	/* Statistics. */
	unsigned long	pc_hits;	/* cache hits */
	unsigned long	pc_misses;	/* cache misses */

	unsigned long	pc_ngroups;	/* # cache groups */

	unsigned long	pc_nitems;	/* # objects currently in cache */
};

struct pool_allocator {
	void		*(*pa_alloc)(struct pool *, int);
	void		(*pa_free)(struct pool *, void *);
	unsigned int	pa_pagesz;

	/* The following fields are for internal use only. */
	struct simplelock pa_slock;
	TAILQ_HEAD(, pool) pa_list;	/* list of pools using this allocator */
	int		pa_flags;
#define PA_INITIALIZED	0x01
	int		pa_pagemask;
	int		pa_pageshift;
	struct vm_map	*pa_backingmap;
#if defined(_KERNEL)
	struct vm_map	**pa_backingmapptr;
	SLIST_ENTRY(pool_allocator) pa_q;
#endif /* defined(_KERNEL) */
};
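
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * a client-supplied back-end allocator.  The caller fills in only
 * pa_alloc, pa_free and pa_pagesz; pool_init() sets up the internal
 * fields.  The my_* names are hypothetical.
 */
#if 0
static void *
my_page_alloc(struct pool *pp, int flags)
{
	/* Called with the pool descriptor unlocked; may block. */
	return my_backend_alloc(pp->pr_alloc->pa_pagesz, flags);
}

static void
my_page_free(struct pool *pp, void *va)
{
	my_backend_free(va, pp->pr_alloc->pa_pagesz);
}

static struct pool_allocator my_allocator = {
	my_page_alloc,		/* pa_alloc */
	my_page_free,		/* pa_free */
	0,			/* pa_pagesz: 0 selects the default page size */
};
#endif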

LIST_HEAD(pool_pagelist, pool_item_header);

struct pool {
	LIST_ENTRY(pool)
			pr_poollist;
	struct pool_pagelist
			pr_emptypages;	/* Empty pages */
	struct pool_pagelist
			pr_fullpages;	/* Full pages */
	struct pool_pagelist
			pr_partpages;	/* Partially-allocated pages */
	struct pool_item_header	*pr_curpage;
	struct pool	*pr_phpool;	/* Pool item header pool */
	LIST_HEAD(,pool_cache)
			pr_cachelist;	/* Caches for this pool */
	unsigned int	pr_size;	/* Size of item */
	unsigned int	pr_align;	/* Requested alignment, must be 2^n */
	unsigned int	pr_itemoffset;	/* Align this offset in item */
	unsigned int	pr_minitems;	/* minimum # of items to keep */
	unsigned int	pr_minpages;	/* same in page units */
	unsigned int	pr_maxpages;	/* maximum # of pages to keep */
	unsigned int	pr_npages;	/* # of pages allocated */
	unsigned int	pr_itemsperpage;/* # items that fit in a page */
	unsigned int	pr_slack;	/* unused space in a page */
	unsigned int	pr_nitems;	/* number of available items in pool */
	unsigned int	pr_nout;	/* # items currently allocated */
	unsigned int	pr_hardlimit;	/* hard limit to number of allocated
					   items */
	struct pool_allocator *pr_alloc;/* back-end allocator */
	TAILQ_ENTRY(pool) pr_alloc_list;/* link on allocator's pool list */

	/* Drain hook. */
	void		(*pr_drain_hook)(void *, int);
	void		*pr_drain_hook_arg;

	const char	*pr_wchan;	/* tsleep(9) identifier */
	unsigned int	pr_flags;	/* r/w flags */
	unsigned int	pr_roflags;	/* r/o flags */
#define PR_NOWAIT	0x00		/* for symmetry */
#define PR_WAITOK	0x02
#define PR_WANTED	0x04
#define PR_PHINPAGE	0x40
#define PR_LOGGING	0x80
#define PR_LIMITFAIL	0x100	/* even if waiting, fail if we hit limit */
#define PR_RECURSIVE	0x200	/* pool contains pools, for vmstat(8) */
#define PR_NOTOUCH	0x400	/* don't use free items to keep internal state */
#define PR_NOALIGN	0x800	/* don't assume backend alignment */

	/*
	 * `pr_slock' protects the pool's data structures when removing
	 * items from or returning items to the pool, or when reading
	 * or updating read/write fields in the pool descriptor.
	 *
	 * We assume back-end page allocators provide their own locking
	 * scheme.  They will be called with the pool descriptor _unlocked_,
	 * since the page allocators may block.
	 */
	struct simplelock	pr_slock;

	SPLAY_HEAD(phtree, pool_item_header) pr_phtree;

	int		pr_maxcolor;	/* Cache colouring */
	int		pr_curcolor;
	int		pr_phoffset;	/* Offset in page of page header */

	/*
	 * Warning message to be issued, and a per-time-delta rate cap,
	 * if the hard limit is reached.
	 */
	const char	*pr_hardlimit_warning;
	struct timeval	pr_hardlimit_ratecap;
	struct timeval	pr_hardlimit_warning_last;

	/*
	 * Instrumentation
	 */
	unsigned long	pr_nget;	/* # of successful requests */
	unsigned long	pr_nfail;	/* # of unsuccessful requests */
	unsigned long	pr_nput;	/* # of releases */
	unsigned long	pr_npagealloc;	/* # of pages allocated */
	unsigned long	pr_npagefree;	/* # of pages released */
	unsigned int	pr_hiwat;	/* max # of pages in pool */
	unsigned long	pr_nidle;	/* # of idle pages */

	/*
	 * Diagnostic aids.
	 */
	struct pool_log	*pr_log;
	int		pr_curlogentry;
	int		pr_logsize;

	const char	*pr_entered_file; /* reentrancy check */
	long		pr_entered_line;

#if defined(_KERNEL)
	struct callback_entry pr_reclaimerentry;
#endif
};
#endif /* __POOL_EXPOSE */

#ifdef _KERNEL
/*
 * pool_allocator_kmem is the default that all pools get unless
 * otherwise specified.  pool_allocator_nointr is provided for
 * pools that know they will never be accessed in interrupt
 * context.
 */
extern struct pool_allocator pool_allocator_kmem;
extern struct pool_allocator pool_allocator_nointr;
#ifdef POOL_SUBPAGE
/* The above are subpage allocators in this case. */
extern struct pool_allocator pool_allocator_kmem_fullpage;
extern struct pool_allocator pool_allocator_nointr_fullpage;
#endif
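
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * selecting an allocator at pool_init() time.  Passing NULL selects
 * pool_allocator_kmem, the default.  struct my_obj and my_pool are
 * hypothetical and are reused by the later sketches below.
 */
#if 0
struct my_obj {
	int	mo_id;
	char	mo_name[32];
};

static struct pool my_pool;

static void
my_pool_setup(void)
{
	/* args: pool, size, align, align_offset, flags, wchan, allocator */
	pool_init(&my_pool, sizeof(struct my_obj), 0, 0, 0,
	    "myobjpl", &pool_allocator_nointr);
}
#endif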

struct link_pool_init {	/* same as args to pool_init() */
	struct pool *pp;
	size_t size;
	u_int align;
	u_int align_offset;
	int flags;
	const char *wchan;
	struct pool_allocator *palloc;
};
#define POOL_INIT(pp, size, align, align_offset, flags, wchan, palloc)	\
struct pool pp;								\
static const struct link_pool_init _link_ ## pp[1] = {			\
	{ &pp, size, align, align_offset, flags, wchan, palloc }	\
};									\
__link_set_add_rodata(pools, _link_ ## pp)
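
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * POOL_INIT() declares the pool and records its arguments in the
 * "pools" link set, so that pool_subsystem_init() can perform the
 * deferred pool_init() calls at boot.  my_static_pool is hypothetical.
 */
#if 0
POOL_INIT(my_static_pool, sizeof(struct my_obj), 0, 0, 0,
    "mystaticpl", NULL);		/* NULL: default allocator */
#endif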

void		pool_subsystem_init(void);

void		pool_init(struct pool *, size_t, u_int, u_int,
		    int, const char *, struct pool_allocator *);
void		pool_destroy(struct pool *);

void		pool_set_drain_hook(struct pool *,
		    void (*)(void *, int), void *);
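
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * a drain hook lets the client give memory back when the pool runs
 * against its hard limit; it is called with the pool unlocked, with
 * pr_drain_hook_arg and the flags of the failing allocation.  The
 * my_* names are hypothetical.
 */
#if 0
static void
my_drain(void *arg, int flags)
{
	/* Return privately cached objects so pool_get() can proceed. */
	my_release_cached_objects();
}

static void
my_drain_setup(void)
{
	pool_set_drain_hook(&my_pool, my_drain, NULL);
}
#endif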

void		*pool_get(struct pool *, int);
void		pool_put(struct pool *, void *);
int		pool_reclaim(struct pool *);
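
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * the basic allocation lifecycle, using the hypothetical my_pool and
 * struct my_obj from the earlier sketch.  With PR_NOWAIT, pool_get()
 * returns NULL when no item is available; with PR_WAITOK it may sleep.
 */
#if 0
static int
my_obj_create(struct my_obj **mop)
{
	struct my_obj *mo;

	mo = pool_get(&my_pool, PR_NOWAIT);
	if (mo == NULL)
		return ENOMEM;		/* no free item, would not wait */
	mo->mo_id = 0;
	*mop = mo;
	return 0;
}

static void
my_obj_destroy(struct my_obj *mo)
{
	pool_put(&my_pool, mo);		/* return the item for reuse */
}
#endif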

#ifdef POOL_DIAGNOSTIC
/*
 * These versions do reentrancy checking.
 */
void		*_pool_get(struct pool *, int, const char *, long);
void		_pool_put(struct pool *, void *, const char *, long);
int		_pool_reclaim(struct pool *, const char *, long);
#define pool_get(h, f)	_pool_get((h), (f), __FILE__, __LINE__)
#define pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
#define pool_reclaim(h)	_pool_reclaim((h), __FILE__, __LINE__)
#endif /* POOL_DIAGNOSTIC */

int		pool_prime(struct pool *, int);
void		pool_setlowat(struct pool *, int);
void		pool_sethiwat(struct pool *, int);
void		pool_sethardlimit(struct pool *, int, const char *, int);
void		pool_drain(void *);
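
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * tuning the hypothetical my_pool.  pool_prime() preallocates pages
 * for the given number of items, the water marks bound how many items'
 * worth of pages are kept, and the hard limit caps outstanding items,
 * logging the message at most once per ratecap seconds.
 */
#if 0
static void
my_pool_tune(void)
{
	pool_prime(&my_pool, 64);	/* preallocate room for 64 items */
	pool_setlowat(&my_pool, 32);	/* low water mark: 32 items */
	pool_sethiwat(&my_pool, 256);	/* high water mark: 256 items */
	pool_sethardlimit(&my_pool, 1024,
	    "my_pool: hard limit reached", 60);
}
#endif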

/*
 * Debugging and diagnostic aids.
 */
void		pool_print(struct pool *, const char *);
void		pool_printit(struct pool *, const char *,
		    void (*)(const char *, ...));
void		pool_printall(const char *, void (*)(const char *, ...));
int		pool_chk(struct pool *, const char *);

/*
 * Pool cache routines.
 */
void		pool_cache_init(struct pool_cache *, struct pool *,
		    int (*)(void *, void *, int),
		    void (*)(void *, void *),
		    void *);
void		pool_cache_destroy(struct pool_cache *);
void		*pool_cache_get_paddr(struct pool_cache *, int, paddr_t *);
#define pool_cache_get(pc, f) pool_cache_get_paddr((pc), (f), NULL)
void		pool_cache_put_paddr(struct pool_cache *, void *, paddr_t);
#define pool_cache_put(pc, o) pool_cache_put_paddr((pc), (o), \
				  POOL_PADDR_INVALID)
void		pool_cache_destruct_object(struct pool_cache *, void *);
void		pool_cache_invalidate(struct pool_cache *);
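
/*
 * Illustrative sketch (not part of the original header; compiled out):
 * layering a constructed-object cache over the hypothetical my_pool.
 * The constructor runs when the cache must fall back to pool_get();
 * the destructor runs before an object's memory goes back to the pool.
 * Both receive the pc_arg pointer.  The my_* names are hypothetical.
 */
#if 0
static struct pool_cache my_cache;

static int
my_ctor(void *arg, void *obj, int flags)
{
	struct my_obj *mo = obj;

	mo->mo_id = 0;			/* expensive one-time setup here */
	return 0;			/* nonzero fails the allocation */
}

static void
my_dtor(void *arg, void *obj)
{
	/* Undo my_ctor() before the memory is freed. */
}

static void
my_cache_setup(void)
{
	pool_cache_init(&my_cache, &my_pool, my_ctor, my_dtor, NULL);
}
#endif
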
#endif /* _KERNEL */

#endif /* _SYS_POOL_H_ */