]> git.proxmox.com Git - mirror_edk2.git/blame - StdLib/Include/sys/pool.h
Vlv2TbltDevicePkg/PlatformFlashAccessLib: Fix IA32 build issues
[mirror_edk2.git] / StdLib / Include / sys / pool.h
CommitLineData
2aa62f2b 1/* $NetBSD: pool.h,v 1.54 2006/08/20 09:35:25 yamt Exp $ */\r
2\r
3/*-\r
4 * Copyright (c) 1997, 1998, 1999, 2000 The NetBSD Foundation, Inc.\r
5 * All rights reserved.\r
6 *\r
7 * This code is derived from software contributed to The NetBSD Foundation\r
8 * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace\r
9 * Simulation Facility, NASA Ames Research Center.\r
10 *\r
11 * Redistribution and use in source and binary forms, with or without\r
12 * modification, are permitted provided that the following conditions\r
13 * are met:\r
14 * 1. Redistributions of source code must retain the above copyright\r
15 * notice, this list of conditions and the following disclaimer.\r
16 * 2. Redistributions in binary form must reproduce the above copyright\r
17 * notice, this list of conditions and the following disclaimer in the\r
18 * documentation and/or other materials provided with the distribution.\r
19 * 3. All advertising materials mentioning features or use of this software\r
20 * must display the following acknowledgement:\r
21 * This product includes software developed by the NetBSD\r
22 * Foundation, Inc. and its contributors.\r
23 * 4. Neither the name of The NetBSD Foundation nor the names of its\r
24 * contributors may be used to endorse or promote products derived\r
25 * from this software without specific prior written permission.\r
26 *\r
27 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS\r
28 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\r
29 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\r
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS\r
31 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\r
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\r
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r
37 * POSSIBILITY OF SUCH DAMAGE.\r
38 */\r
39\r
40#ifndef _SYS_POOL_H_\r
41#define _SYS_POOL_H_\r
42\r
43#ifdef _KERNEL\r
44#define __POOL_EXPOSE\r
45#endif\r
46\r
47#if defined(_KERNEL_OPT)\r
48#include "opt_pool.h"\r
49#endif\r
50\r
51#ifdef __POOL_EXPOSE\r
52#include <sys/lock.h>\r
53#include <sys/queue.h>\r
54#include <sys/time.h>\r
55#include <sys/tree.h>\r
56#if defined(_KERNEL)\r
57#include <sys/callback.h>\r
58#endif /* defined(_KERNEL) */\r
59#endif\r
60\r
/* Number of object slots in one pool_cache_group (sizes pcg_objects[] below). */\r
61#define PCG_NOBJECTS 16\r
62\r
/* Sentinel "no physical address" value; used as the default paddr by the\r
 * pool_cache_put() convenience macro near the end of this header. */\r
63#define POOL_PADDR_INVALID ((paddr_t) -1)\r
64\r
65#ifdef __POOL_EXPOSE\r
66/* The pool cache group. */\r
/*\r
 * A pool_cache_group is a fixed-size batch of up to PCG_NOBJECTS cached\r
 * objects, each recorded as a virtual address plus an optional physical\r
 * address (POOL_PADDR_INVALID when not tracked).  Groups are linked onto\r
 * a pool_cache's empty/full/partial group lists (see struct pool_cache\r
 * below); pcg_avail counts how many of the slots currently hold objects.\r
 */\r
67struct pool_cache_group {\r
68 LIST_ENTRY(pool_cache_group)\r
69 pcg_list; /* link in the pool cache's group list */\r
70 u_int pcg_avail; /* # available objects */\r
71 /* pointers to the objects */\r
72 struct {\r
73 void *pcgo_va; /* cache object virtual address */\r
74 paddr_t pcgo_pa;/* cache object physical address */\r
75 } pcg_objects[PCG_NOBJECTS];\r
76};\r
77\r
78LIST_HEAD(pool_cache_grouplist,pool_cache_group);\r
/*\r
 * A pool_cache fronts a parent pool (pc_pool) with groups of cached\r
 * objects, kept on three lists according to how many slots of each\r
 * group are in use.  pc_ctor/pc_dtor are the object constructor and\r
 * destructor supplied to pool_cache_init(), with pc_arg as the client\r
 * cookie (NOTE(review): argument order passed to ctor/dtor is not\r
 * visible in this header -- confirm against pool_cache(9)).\r
 * pc_slock protects the cache's lists and statistics.\r
 */\r
79struct pool_cache {\r
80 LIST_ENTRY(pool_cache)\r
81 pc_poollist; /* entry on pool's group list */\r
82 struct pool_cache_grouplist\r
83 pc_emptygroups; /* list of empty cache groups */\r
84 struct pool_cache_grouplist\r
85 pc_fullgroups; /* list of full cache groups */\r
86 struct pool_cache_grouplist\r
87 pc_partgroups; /* list of partial cache groups */\r
88 struct pool *pc_pool; /* parent pool */\r
89 struct simplelock pc_slock; /* mutex */\r
90\r
91 int (*pc_ctor)(void *, void *, int);\r
92 void (*pc_dtor)(void *, void *);\r
93 void *pc_arg;\r
94\r
95 /* Statistics. */\r
96 unsigned long pc_hits; /* cache hits */\r
97 unsigned long pc_misses; /* cache misses */\r
98\r
99 unsigned long pc_ngroups; /* # cache groups */\r
100\r
101 unsigned long pc_nitems; /* # objects currently in cache */\r
102};\r
103\r
/*\r
 * Back-end page allocator for pools: pa_alloc/pa_free obtain and release\r
 * pages of pa_pagesz bytes on behalf of the pools linked on pa_list.\r
 * Allocators may be shared by several pools.  Everything after pa_pagesz\r
 * is internal bookkeeping (pa_pagemask/pa_pageshift are presumably\r
 * derived from pa_pagesz -- not computed in this header).\r
 */\r
104struct pool_allocator {\r
105 void *(*pa_alloc)(struct pool *, int);\r
106 void (*pa_free)(struct pool *, void *);\r
107 unsigned int pa_pagesz;\r
108\r
109 /* The following fields are for internal use only. */\r
110 struct simplelock pa_slock;\r
111 TAILQ_HEAD(, pool) pa_list; /* list of pools using this allocator */\r
112 int pa_flags;\r
113#define PA_INITIALIZED 0x01\r
114 int pa_pagemask;\r
115 int pa_pageshift;\r
116 struct vm_map *pa_backingmap;\r
117#if defined(_KERNEL)\r
118 struct vm_map **pa_backingmapptr;\r
119 SLIST_ENTRY(pool_allocator) pa_q;\r
120#endif /* defined(_KERNEL) */\r
121};\r
122\r
123LIST_HEAD(pool_pagelist,pool_item_header);\r
124\r
/*\r
 * A pool: an allocator for fixed-size items (pr_size bytes, aligned per\r
 * pr_align/pr_itemoffset).  Items are carved out of pages obtained from\r
 * the back-end allocator pr_alloc; pages are kept on the empty/full/\r
 * partial page lists and tracked via pool_item_header structures,\r
 * indexed by the pr_phtree splay tree.  pool_caches layered on top of\r
 * this pool hang off pr_cachelist.\r
 */\r
125struct pool {\r
126 LIST_ENTRY(pool)\r
127 pr_poollist;\r
128 struct pool_pagelist\r
129 pr_emptypages; /* Empty pages */\r
130 struct pool_pagelist\r
131 pr_fullpages; /* Full pages */\r
132 struct pool_pagelist\r
133 pr_partpages; /* Partially-allocated pages */\r
134 struct pool_item_header *pr_curpage;\r
135 struct pool *pr_phpool; /* Pool item header pool */\r
136 LIST_HEAD(,pool_cache)\r
137 pr_cachelist; /* Caches for this pool */\r
138 unsigned int pr_size; /* Size of item */\r
139 unsigned int pr_align; /* Requested alignment, must be 2^n */\r
140 unsigned int pr_itemoffset; /* Align this offset in item */\r
141 unsigned int pr_minitems; /* minimum # of items to keep */\r
142 unsigned int pr_minpages; /* same in page units */\r
143 unsigned int pr_maxpages; /* maximum # of pages to keep */\r
144 unsigned int pr_npages; /* # of pages allocated */\r
145 unsigned int pr_itemsperpage;/* # items that fit in a page */\r
146 unsigned int pr_slack; /* unused space in a page */\r
147 unsigned int pr_nitems; /* number of available items in pool */\r
148 unsigned int pr_nout; /* # items currently allocated */\r
149 unsigned int pr_hardlimit; /* hard limit to number of allocated\r
150 items */\r
151 struct pool_allocator *pr_alloc;/* back-end allocator */\r
152 TAILQ_ENTRY(pool) pr_alloc_list;/* link on allocator's pool list */\r
153\r
154 /* Drain hook. */\r
155 void (*pr_drain_hook)(void *, int);\r
156 void *pr_drain_hook_arg;\r
157\r
158 const char *pr_wchan; /* tsleep(9) identifier */\r
159 unsigned int pr_flags; /* r/w flags */\r
160 unsigned int pr_roflags; /* r/o flags */\r
/*\r
 * Flag bits.  PR_NOWAIT/PR_WAITOK/PR_LIMITFAIL are caller-supplied\r
 * allocation-time flags (pool_get()'s int argument); the remainder\r
 * describe pool state or creation-time behaviour.  NOTE(review): the\r
 * exact caller-vs-internal split is inferred from names and the\r
 * PR_LIMITFAIL comment -- confirm against pool(9).\r
 */\r
161#define PR_NOWAIT 0x00 /* for symmetry */\r
162#define PR_WAITOK 0x02\r
163#define PR_WANTED 0x04\r
164#define PR_PHINPAGE 0x40\r
165#define PR_LOGGING 0x80\r
166#define PR_LIMITFAIL 0x100 /* even if waiting, fail if we hit limit */\r
167#define PR_RECURSIVE 0x200 /* pool contains pools, for vmstat(8) */\r
168#define PR_NOTOUCH 0x400 /* don't use free items to keep internal state*/\r
169#define PR_NOALIGN 0x800 /* don't assume backend alignment */\r
170\r
171 /*\r
172 * `pr_slock' protects the pool's data structures when removing\r
173 * items from or returning items to the pool, or when reading\r
174 * or updating read/write fields in the pool descriptor.\r
175 *\r
176 * We assume back-end page allocators provide their own locking\r
177 * scheme. They will be called with the pool descriptor _unlocked_,\r
178 * since the page allocators may block.\r
179 */\r
180 struct simplelock pr_slock;\r
181\r
/* Page headers indexed by page address, for item-to-page lookup. */\r
182 SPLAY_HEAD(phtree, pool_item_header) pr_phtree;\r
183\r
184 int pr_maxcolor; /* Cache colouring */\r
185 int pr_curcolor;\r
186 int pr_phoffset; /* Offset in page of page header */\r
187\r
188 /*\r
189 * Warning message to be issued, and a per-time-delta rate cap,\r
190 * if the hard limit is reached.\r
191 */\r
192 const char *pr_hardlimit_warning;\r
193 struct timeval pr_hardlimit_ratecap;\r
194 struct timeval pr_hardlimit_warning_last;\r
195\r
196 /*\r
197 * Instrumentation\r
198 */\r
199 unsigned long pr_nget; /* # of successful requests */\r
200 unsigned long pr_nfail; /* # of unsuccessful requests */\r
201 unsigned long pr_nput; /* # of releases */\r
202 unsigned long pr_npagealloc; /* # of pages allocated */\r
203 unsigned long pr_npagefree; /* # of pages released */\r
204 unsigned int pr_hiwat; /* max # of pages in pool */\r
205 unsigned long pr_nidle; /* # of idle pages */\r
206\r
207 /*\r
208 * Diagnostic aides.\r
209 */\r
210 struct pool_log *pr_log;\r
211 int pr_curlogentry;\r
212 int pr_logsize;\r
213\r
214 const char *pr_entered_file; /* reentrancy check */\r
215 long pr_entered_line;\r
216\r
217#if defined(_KERNEL)\r
218 struct callback_entry pr_reclaimerentry;\r
219#endif\r
220};\r
221#endif /* __POOL_EXPOSE */\r
222\r
223#ifdef _KERNEL\r
224/*\r
225 * pool_allocator_kmem is the default that all pools get unless\r
226 * otherwise specified. pool_allocator_nointr is provided for\r
227 * pools that know they will never be accessed in interrupt\r
228 * context.\r
229 */\r
/* Standard back-end allocators exported by the pool implementation. */\r
230extern struct pool_allocator pool_allocator_kmem;\r
231extern struct pool_allocator pool_allocator_nointr;\r
232#ifdef POOL_SUBPAGE\r
233/* The above are subpage allocators in this case. */\r
/* Full-page variants, available only when POOL_SUBPAGE is configured. */\r
234extern struct pool_allocator pool_allocator_kmem_fullpage;\r
235extern struct pool_allocator pool_allocator_nointr_fullpage;\r
236#endif\r
237\r
/*\r
 * Static-initialization record used by POOL_INIT() below: one entry\r
 * per statically declared pool, capturing the arguments that would\r
 * otherwise be passed to pool_init().  Entries are collected into the\r
 * "pools" link set and presumably replayed by pool_subsystem_init()\r
 * at boot (replay code is not in this header -- confirm in pool.c).\r
 */\r
238struct link_pool_init { /* same as args to pool_init() */\r
239 struct pool *pp;\r
240 size_t size;\r
241 u_int align;\r
242 u_int align_offset;\r
243 int flags;\r
244 const char *wchan;\r
245 struct pool_allocator *palloc;\r
246};\r
/*\r
 * POOL_INIT(pp, size, align, align_offset, flags, wchan, palloc):\r
 * define "struct pool pp" at file scope and add a link_pool_init\r
 * record for it to the "pools" link set, so the pool can be set up\r
 * at boot without an explicit pool_init() call.  The comment is\r
 * placed here (not inside the macro) to keep the continuation\r
 * lines intact.\r
 */\r
247#define POOL_INIT(pp, size, align, align_offset, flags, wchan, palloc) \\r
248struct pool pp; \\r
249static const struct link_pool_init _link_ ## pp[1] = { \\r
250 { &pp, size, align, align_offset, flags, wchan, palloc } \\r
251}; \\r
252__link_set_add_rodata(pools, _link_ ## pp)\r
253\r
/* One-time boot initialization of the pool subsystem. */\r
254void pool_subsystem_init(void);\r
255\r
/*\r
 * Create/destroy a pool.  pool_init()'s arguments mirror struct\r
 * link_pool_init above: pool, item size, alignment, align offset,\r
 * flags, wchan name, back-end allocator (NULL presumably selects\r
 * pool_allocator_kmem -- confirm against pool(9)).\r
 */\r
256void pool_init(struct pool *, size_t, u_int, u_int,\r
257 int, const char *, struct pool_allocator *);\r
258void pool_destroy(struct pool *);\r
\r
/* Install a hook invoked when memory should be given back; the hook\r
 * and its cookie land in pr_drain_hook/pr_drain_hook_arg. */\r
260void pool_set_drain_hook(struct pool *,\r
261 void (*)(void *, int), void *);\r
262\r
/* Allocate/free one item; the int argument takes PR_* flags such as\r
 * PR_WAITOK/PR_NOWAIT/PR_LIMITFAIL (defined with struct pool above).\r
 * pool_reclaim() presumably releases idle pages to the back-end\r
 * allocator -- confirm against pool(9). */\r
263void *pool_get(struct pool *, int);\r
264void pool_put(struct pool *, void *);\r
265int pool_reclaim(struct pool *);\r
266\r
267#ifdef POOL_DIAGNOSTIC\r
268/*\r
269 * These versions do reentrancy checking.\r
270 */\r
271void *_pool_get(struct pool *, int, const char *, long);\r
272void _pool_put(struct pool *, void *, const char *, long);\r
273int _pool_reclaim(struct pool *, const char *, long);\r
274#define pool_get(h, f) _pool_get((h), (f), __FILE__, __LINE__)\r
275#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)\r
276#define pool_reclaim(h) _pool_reclaim((h), __FILE__, __LINE__)\r
277#endif /* POOL_DIAGNOSTIC */\r
278\r
/* Tuning knobs: pre-allocation (prime), low/high watermarks, and the\r
 * hard item limit with its rate-capped warning message (cf. the\r
 * pr_minitems/pr_maxpages/pr_hardlimit* fields in struct pool). */\r
279int pool_prime(struct pool *, int);\r
280void pool_setlowat(struct pool *, int);\r
281void pool_sethiwat(struct pool *, int);\r
282void pool_sethardlimit(struct pool *, int, const char *, int);\r
283void pool_drain(void *);\r
284\r
285/*\r
286 * Debugging and diagnostic aides.\r
287 */\r
288void pool_print(struct pool *, const char *);\r
289void pool_printit(struct pool *, const char *,\r
290 void (*)(const char *, ...));\r
291void pool_printall(const char *, void (*)(const char *, ...));\r
292int pool_chk(struct pool *, const char *);\r
293\r
294/*\r
295 * Pool cache routines.\r
296 */\r
/* Attach a cache to a pool; the three function/cookie arguments\r
 * populate pc_ctor/pc_dtor/pc_arg in struct pool_cache. */\r
297void pool_cache_init(struct pool_cache *, struct pool *,\r
298 int (*)(void *, void *, int),\r
299 void (*)(void *, void *),\r
300 void *);\r
301void pool_cache_destroy(struct pool_cache *);\r
/* Get/put through the cache.  The *_paddr variants also carry the\r
 * object's physical address; the plain macros pass NULL /\r
 * POOL_PADDR_INVALID respectively. */\r
302void *pool_cache_get_paddr(struct pool_cache *, int, paddr_t *);\r
303#define pool_cache_get(pc, f) pool_cache_get_paddr((pc), (f), NULL)\r
304void pool_cache_put_paddr(struct pool_cache *, void *, paddr_t);\r
305#define pool_cache_put(pc, o) pool_cache_put_paddr((pc), (o), \\r
306 POOL_PADDR_INVALID)\r
307void pool_cache_destruct_object(struct pool_cache *, void *);\r
308void pool_cache_invalidate(struct pool_cache *);\r
309#endif /* _KERNEL */\r
310\r
311#endif /* _SYS_POOL_H_ */\r