]>
Commit | Line | Data |
---|---|---|
4710c53d | 1 | #include "Python.h"\r |
2 | \r | |
3 | #ifdef WITH_PYMALLOC\r | |
4 | \r | |
5 | #ifdef WITH_VALGRIND\r | |
6 | #include <valgrind/valgrind.h>\r | |
7 | \r | |
8 | /* If we're using GCC, use __builtin_expect() to reduce overhead of\r | |
9 | the valgrind checks */\r | |
10 | #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)\r | |
11 | # define UNLIKELY(value) __builtin_expect((value), 0)\r | |
12 | #else\r | |
13 | # define UNLIKELY(value) (value)\r | |
14 | #endif\r | |
15 | \r | |
16 | /* -1 indicates that we haven't checked that we're running on valgrind yet. */\r | |
17 | static int running_on_valgrind = -1;\r | |
18 | #endif\r | |
19 | \r | |
20 | /* An object allocator for Python.\r | |
21 | \r | |
22 | Here is an introduction to the layers of the Python memory architecture,\r | |
23 | showing where the object allocator is actually used (layer +2), It is\r | |
24 | called for every object allocation and deallocation (PyObject_New/Del),\r | |
25 | unless the object-specific allocators implement a proprietary allocation\r | |
26 | scheme (ex.: ints use a simple free list). This is also the place where\r | |
27 | the cyclic garbage collector operates selectively on container objects.\r | |
28 | \r | |
29 | \r | |
30 | Object-specific allocators\r | |
31 | _____ ______ ______ ________\r | |
32 | [ int ] [ dict ] [ list ] ... [ string ] Python core |\r | |
33 | +3 | <----- Object-specific memory -----> | <-- Non-object memory --> |\r | |
34 | _______________________________ | |\r | |
35 | [ Python's object allocator ] | |\r | |
36 | +2 | ####### Object memory ####### | <------ Internal buffers ------> |\r | |
37 | ______________________________________________________________ |\r | |
38 | [ Python's raw memory allocator (PyMem_ API) ] |\r | |
39 | +1 | <----- Python memory (under PyMem manager's control) ------> | |\r | |
40 | __________________________________________________________________\r | |
41 | [ Underlying general-purpose allocator (ex: C library malloc) ]\r | |
42 | 0 | <------ Virtual memory allocated for the python process -------> |\r | |
43 | \r | |
44 | =========================================================================\r | |
45 | _______________________________________________________________________\r | |
46 | [ OS-specific Virtual Memory Manager (VMM) ]\r | |
47 | -1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |\r | |
48 | __________________________________ __________________________________\r | |
49 | [ ] [ ]\r | |
50 | -2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |\r | |
51 | \r | |
52 | */\r | |
53 | /*==========================================================================*/\r | |
54 | \r | |
55 | /* A fast, special-purpose memory allocator for small blocks, to be used\r | |
56 | on top of a general-purpose malloc -- heavily based on previous art. */\r | |
57 | \r | |
58 | /* Vladimir Marangozov -- August 2000 */\r | |
59 | \r | |
60 | /*\r | |
61 | * "Memory management is where the rubber meets the road -- if we do the wrong\r | |
62 | * thing at any level, the results will not be good. And if we don't make the\r | |
63 | * levels work well together, we are in serious trouble." (1)\r | |
64 | *\r | |
65 | * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,\r | |
66 | * "Dynamic Storage Allocation: A Survey and Critical Review",\r | |
67 | * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.\r | |
68 | */\r | |
69 | \r | |
70 | /* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */\r | |
71 | \r | |
72 | /*==========================================================================*/\r | |
73 | \r | |
74 | /*\r | |
75 | * Allocation strategy abstract:\r | |
76 | *\r | |
77 | * For small requests, the allocator sub-allocates <Big> blocks of memory.\r | |
78 | * Requests greater than 256 bytes are routed to the system's allocator.\r | |
79 | *\r | |
80 | * Small requests are grouped in size classes spaced 8 bytes apart, due\r | |
81 | * to the required valid alignment of the returned address. Requests of\r | |
82 | * a particular size are serviced from memory pools of 4K (one VMM page).\r | |
83 | * Pools are fragmented on demand and contain free lists of blocks of one\r | |
84 | * particular size class. In other words, there is a fixed-size allocator\r | |
85 | * for each size class. Free pools are shared by the different allocators\r | |
86 | * thus minimizing the space reserved for a particular size class.\r | |
87 | *\r | |
88 | * This allocation strategy is a variant of what is known as "simple\r | |
89 | * segregated storage based on array of free lists". The main drawback of\r | |
90 | * simple segregated storage is that we might end up with lot of reserved\r | |
91 | * memory for the different free lists, which degenerate in time. To avoid\r | |
92 | * this, we partition each free list in pools and we share dynamically the\r | |
93 | * reserved space between all free lists. This technique is quite efficient\r | |
94 | * for memory intensive programs which allocate mainly small-sized blocks.\r | |
95 | *\r | |
96 | * For small requests we have the following table:\r | |
97 | *\r | |
98 | * Request in bytes Size of allocated block Size class idx\r | |
99 | * ----------------------------------------------------------------\r | |
100 | * 1-8 8 0\r | |
101 | * 9-16 16 1\r | |
102 | * 17-24 24 2\r | |
103 | * 25-32 32 3\r | |
104 | * 33-40 40 4\r | |
105 | * 41-48 48 5\r | |
106 | * 49-56 56 6\r | |
107 | * 57-64 64 7\r | |
108 | * 65-72 72 8\r | |
109 | * ... ... ...\r | |
110 | * 241-248 248 30\r | |
111 | * 249-256 256 31\r | |
112 | *\r | |
113 | * 0, 257 and up: routed to the underlying allocator.\r | |
114 | */\r | |
115 | \r | |
116 | /*==========================================================================*/\r | |
117 | \r | |
118 | /*\r | |
119 | * -- Main tunable settings section --\r | |
120 | */\r | |
121 | \r | |
122 | /*\r | |
123 | * Alignment of addresses returned to the user. 8-bytes alignment works\r | |
124 | * on most current architectures (with 32-bit or 64-bit address busses).\r | |
125 | * The alignment value is also used for grouping small requests in size\r | |
126 | * classes spaced ALIGNMENT bytes apart.\r | |
127 | *\r | |
128 | * You shouldn't change this unless you know what you are doing.\r | |
129 | */\r | |
130 | #define ALIGNMENT 8 /* must be 2^N */\r | |
131 | #define ALIGNMENT_SHIFT 3\r | |
132 | #define ALIGNMENT_MASK (ALIGNMENT - 1)\r | |
133 | \r | |
134 | /* Return the number of bytes in size class I, as a uint. */\r | |
135 | #define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)\r | |
136 | \r | |
137 | /*\r | |
138 | * Max size threshold below which malloc requests are considered to be\r | |
139 | * small enough in order to use preallocated memory pools. You can tune\r | |
140 | * this value according to your application behaviour and memory needs.\r | |
141 | *\r | |
142 | * The following invariants must hold:\r | |
143 | * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 256\r | |
144 | * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT\r | |
145 | *\r | |
146 | * Although not required, for better performance and space efficiency,\r | |
147 | * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.\r | |
148 | */\r | |
149 | #define SMALL_REQUEST_THRESHOLD 256\r | |
150 | #define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)\r | |
151 | \r | |
152 | /*\r | |
153 | * The system's VMM page size can be obtained on most unices with a\r | |
154 | * getpagesize() call or deduced from various header files. To make\r | |
155 | * things simpler, we assume that it is 4K, which is OK for most systems.\r | |
156 | * It is probably better if this is the native page size, but it doesn't\r | |
157 | * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page\r | |
158 | * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation\r | |
159 | * violation fault. 4K is apparently OK for all the platforms that python\r | |
160 | * currently targets.\r | |
161 | */\r | |
162 | #define SYSTEM_PAGE_SIZE (4 * 1024)\r | |
163 | #define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)\r | |
164 | \r | |
165 | /*\r | |
166 | * Maximum amount of memory managed by the allocator for small requests.\r | |
167 | */\r | |
168 | #ifdef WITH_MEMORY_LIMITS\r | |
169 | #ifndef SMALL_MEMORY_LIMIT\r | |
170 | #define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */\r | |
171 | #endif\r | |
172 | #endif\r | |
173 | \r | |
174 | /*\r | |
175 | * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned\r | |
176 | * on a page boundary. This is a reserved virtual address space for the\r | |
177 | * current process (obtained through a malloc call). In no way this means\r | |
178 | * that the memory arenas will be used entirely. A malloc(<Big>) is usually\r | |
179 | * an address range reservation for <Big> bytes, unless all pages within this\r | |
180 | * space are referenced subsequently. So malloc'ing big blocks and not using\r | |
181 | * them does not mean "wasting memory". It's an addressable range wastage...\r | |
182 | *\r | |
183 | * Therefore, allocating arenas with malloc is not optimal, because there is\r | |
184 | * some address space wastage, but this is the most portable way to request\r | |
185 | * memory from the system across various platforms.\r | |
186 | */\r | |
187 | #define ARENA_SIZE (256 << 10) /* 256KB */\r | |
188 | \r | |
189 | #ifdef WITH_MEMORY_LIMITS\r | |
190 | #define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)\r | |
191 | #endif\r | |
192 | \r | |
193 | /*\r | |
194 | * Size of the pools used for small blocks. Should be a power of 2,\r | |
195 | * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.\r | |
196 | */\r | |
197 | #define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */\r | |
198 | #define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK\r | |
199 | \r | |
200 | /*\r | |
201 | * -- End of tunable settings section --\r | |
202 | */\r | |
203 | \r | |
204 | /*==========================================================================*/\r | |
205 | \r | |
206 | /*\r | |
207 | * Locking\r | |
208 | *\r | |
209 | * To reduce lock contention, it would probably be better to refine the\r | |
210 | * crude function locking with per size class locking. I'm not positive\r | |
211 | * however, whether it's worth switching to such locking policy because\r | |
212 | * of the performance penalty it might introduce.\r | |
213 | *\r | |
214 | * The following macros describe the simplest (should also be the fastest)\r | |
215 | * lock object on a particular platform and the init/fini/lock/unlock\r | |
216 | * operations on it. The locks defined here are not expected to be recursive\r | |
217 | * because it is assumed that they will always be called in the order:\r | |
218 | * INIT, [LOCK, UNLOCK]*, FINI.\r | |
219 | */\r | |
220 | \r | |
221 | /*\r | |
222 | * Python's threads are serialized, so object malloc locking is disabled.\r | |
223 | */\r | |
224 | #define SIMPLELOCK_DECL(lock) /* simple lock declaration */\r | |
225 | #define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */\r | |
226 | #define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */\r | |
227 | #define SIMPLELOCK_LOCK(lock) /* acquire released lock */\r | |
228 | #define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */\r | |
229 | \r | |
230 | /*\r | |
231 | * Basic types\r | |
232 | * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.\r | |
233 | */\r | |
234 | #undef uchar\r | |
235 | #define uchar unsigned char /* assuming == 8 bits */\r | |
236 | \r | |
237 | #undef uint\r | |
238 | #define uint unsigned int /* assuming >= 16 bits */\r | |
239 | \r | |
240 | #undef ulong\r | |
241 | #define ulong unsigned long /* assuming >= 32 bits */\r | |
242 | \r | |
243 | #undef uptr\r | |
244 | #define uptr Py_uintptr_t\r | |
245 | \r | |
246 | /* When you say memory, my mind reasons in terms of (pointers to) blocks */\r | |
247 | typedef uchar block;\r | |
248 | \r | |
249 | /* Pool for small blocks. */\r | |
250 | struct pool_header {\r | |
251 | union { block *_padding;\r | |
252 | uint count; } ref; /* number of allocated blocks */\r | |
253 | block *freeblock; /* pool's free list head */\r | |
254 | struct pool_header *nextpool; /* next pool of this size class */\r | |
255 | struct pool_header *prevpool; /* previous pool "" */\r | |
256 | uint arenaindex; /* index into arenas of base adr */\r | |
257 | uint szidx; /* block size class index */\r | |
258 | uint nextoffset; /* bytes to virgin block */\r | |
259 | uint maxnextoffset; /* largest valid nextoffset */\r | |
260 | };\r | |
261 | \r | |
262 | typedef struct pool_header *poolp;\r | |
263 | \r | |
264 | /* Record keeping for arenas. */\r | |
265 | struct arena_object {\r | |
266 | /* The address of the arena, as returned by malloc. Note that 0\r | |
267 | * will never be returned by a successful malloc, and is used\r | |
268 | * here to mark an arena_object that doesn't correspond to an\r | |
269 | * allocated arena.\r | |
270 | */\r | |
271 | uptr address;\r | |
272 | \r | |
273 | /* Pool-aligned pointer to the next pool to be carved off. */\r | |
274 | block* pool_address;\r | |
275 | \r | |
276 | /* The number of available pools in the arena: free pools + never-\r | |
277 | * allocated pools.\r | |
278 | */\r | |
279 | uint nfreepools;\r | |
280 | \r | |
281 | /* The total number of pools in the arena, whether or not available. */\r | |
282 | uint ntotalpools;\r | |
283 | \r | |
284 | /* Singly-linked list of available pools. */\r | |
285 | struct pool_header* freepools;\r | |
286 | \r | |
287 | /* Whenever this arena_object is not associated with an allocated\r | |
288 | * arena, the nextarena member is used to link all unassociated\r | |
289 | * arena_objects in the singly-linked `unused_arena_objects` list.\r | |
290 | * The prevarena member is unused in this case.\r | |
291 | *\r | |
292 | * When this arena_object is associated with an allocated arena\r | |
293 | * with at least one available pool, both members are used in the\r | |
294 | * doubly-linked `usable_arenas` list, which is maintained in\r | |
295 | * increasing order of `nfreepools` values.\r | |
296 | *\r | |
297 | * Else this arena_object is associated with an allocated arena\r | |
298 | * all of whose pools are in use. `nextarena` and `prevarena`\r | |
299 | * are both meaningless in this case.\r | |
300 | */\r | |
301 | struct arena_object* nextarena;\r | |
302 | struct arena_object* prevarena;\r | |
303 | };\r | |
304 | \r | |
305 | #undef ROUNDUP\r | |
306 | #define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)\r | |
307 | #define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))\r | |
308 | \r | |
309 | #define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */\r | |
310 | \r | |
311 | /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */\r | |
312 | #define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))\r | |
313 | \r | |
314 | /* Return total number of blocks in pool of size index I, as a uint. */\r | |
315 | #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))\r | |
316 | \r | |
317 | /*==========================================================================*/\r | |
318 | \r | |
319 | /*\r | |
320 | * This malloc lock\r | |
321 | */\r | |
322 | SIMPLELOCK_DECL(_malloc_lock)\r | |
323 | #define LOCK() SIMPLELOCK_LOCK(_malloc_lock)\r | |
324 | #define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)\r | |
325 | #define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)\r | |
326 | #define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)\r | |
327 | \r | |
328 | /*\r | |
329 | * Pool table -- headed, circular, doubly-linked lists of partially used pools.\r | |
330 | \r | |
331 | This is involved. For an index i, usedpools[i+i] is the header for a list of\r | |
332 | all partially used pools holding small blocks with "size class idx" i. So\r | |
333 | usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size\r | |
334 | 16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.\r | |
335 | \r | |
336 | Pools are carved off an arena's highwater mark (an arena_object's pool_address\r | |
337 | member) as needed. Once carved off, a pool is in one of three states forever\r | |
338 | after:\r | |
339 | \r | |
340 | used == partially used, neither empty nor full\r | |
341 | At least one block in the pool is currently allocated, and at least one\r | |
342 | block in the pool is not currently allocated (note this implies a pool\r | |
343 | has room for at least two blocks).\r | |
344 | This is a pool's initial state, as a pool is created only when malloc\r | |
345 | needs space.\r | |
346 | The pool holds blocks of a fixed size, and is in the circular list headed\r | |
347 | at usedpools[i] (see above). It's linked to the other used pools of the\r | |
348 | same size class via the pool_header's nextpool and prevpool members.\r | |
349 | If all but one block is currently allocated, a malloc can cause a\r | |
350 | transition to the full state. If all but one block is not currently\r | |
351 | allocated, a free can cause a transition to the empty state.\r | |
352 | \r | |
353 | full == all the pool's blocks are currently allocated\r | |
354 | On transition to full, a pool is unlinked from its usedpools[] list.\r | |
355 | It's not linked to from anything then anymore, and its nextpool and\r | |
356 | prevpool members are meaningless until it transitions back to used.\r | |
357 | A free of a block in a full pool puts the pool back in the used state.\r | |
358 | Then it's linked in at the front of the appropriate usedpools[] list, so\r | |
359 | that the next allocation for its size class will reuse the freed block.\r | |
360 | \r | |
361 | empty == all the pool's blocks are currently available for allocation\r | |
362 | On transition to empty, a pool is unlinked from its usedpools[] list,\r | |
363 | and linked to the front of its arena_object's singly-linked freepools list,\r | |
364 | via its nextpool member. The prevpool member has no meaning in this case.\r | |
365 | Empty pools have no inherent size class: the next time a malloc finds\r | |
366 | an empty list in usedpools[], it takes the first pool off of freepools.\r | |
367 | If the size class needed happens to be the same as the size class the pool\r | |
368 | last had, some pool initialization can be skipped.\r | |
369 | \r | |
370 | \r | |
371 | Block Management\r | |
372 | \r | |
373 | Blocks within pools are again carved out as needed. pool->freeblock points to\r | |
374 | the start of a singly-linked list of free blocks within the pool. When a\r | |
375 | block is freed, it's inserted at the front of its pool's freeblock list. Note\r | |
376 | that the available blocks in a pool are *not* linked all together when a pool\r | |
377 | is initialized. Instead only "the first two" (lowest addresses) blocks are\r | |
378 | set up, returning the first such block, and setting pool->freeblock to a\r | |
379 | one-block list holding the second such block. This is consistent with that\r | |
380 | pymalloc strives at all levels (arena, pool, and block) never to touch a piece\r | |
381 | of memory until it's actually needed.\r | |
382 | \r | |
383 | So long as a pool is in the used state, we're certain there *is* a block\r | |
384 | available for allocating, and pool->freeblock is not NULL. If pool->freeblock\r | |
385 | points to the end of the free list before we've carved the entire pool into\r | |
386 | blocks, that means we simply haven't yet gotten to one of the higher-address\r | |
387 | blocks. The offset from the pool_header to the start of "the next" virgin\r | |
388 | block is stored in the pool_header nextoffset member, and the largest value\r | |
389 | of nextoffset that makes sense is stored in the maxnextoffset member when a\r | |
390 | pool is initialized. All the blocks in a pool have been passed out at least\r | |
391 | once when and only when nextoffset > maxnextoffset.\r | |
392 | \r | |
393 | \r | |
394 | Major obscurity: While the usedpools vector is declared to have poolp\r | |
395 | entries, it doesn't really. It really contains two pointers per (conceptual)\r | |
396 | poolp entry, the nextpool and prevpool members of a pool_header. The\r | |
397 | excruciating initialization code below fools C so that\r | |
398 | \r | |
399 | usedpool[i+i]\r | |
400 | \r | |
401 | "acts like" a genuine poolp, but only so long as you only reference its\r | |
402 | nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is\r | |
403 | compensating for that a pool_header's nextpool and prevpool members\r | |
404 | immediately follow a pool_header's first two members:\r | |
405 | \r | |
406 | union { block *_padding;\r | |
407 | uint count; } ref;\r | |
408 | block *freeblock;\r | |
409 | \r | |
410 | each of which consume sizeof(block *) bytes. So what usedpools[i+i] really\r | |
411 | contains is a fudged-up pointer p such that *if* C believes it's a poolp\r | |
412 | pointer, then p->nextpool and p->prevpool are both p (meaning that the headed\r | |
413 | circular list is empty).\r | |
414 | \r | |
415 | It's unclear why the usedpools setup is so convoluted. It could be to\r | |
416 | minimize the amount of cache required to hold this heavily-referenced table\r | |
417 | (which only *needs* the two interpool pointer members of a pool_header). OTOH,\r | |
418 | referencing code has to remember to "double the index" and doing so isn't\r | |
419 | free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying\r | |
420 | on that C doesn't insert any padding anywhere in a pool_header at or before\r | |
421 | the prevpool member.\r | |
422 | **************************************************************************** */\r | |
423 | \r | |
424 | #define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))\r | |
425 | #define PT(x) PTA(x), PTA(x)\r | |
426 | \r | |
427 | static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {\r | |
428 | PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)\r | |
429 | #if NB_SMALL_SIZE_CLASSES > 8\r | |
430 | , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)\r | |
431 | #if NB_SMALL_SIZE_CLASSES > 16\r | |
432 | , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)\r | |
433 | #if NB_SMALL_SIZE_CLASSES > 24\r | |
434 | , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)\r | |
435 | #if NB_SMALL_SIZE_CLASSES > 32\r | |
436 | , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)\r | |
437 | #if NB_SMALL_SIZE_CLASSES > 40\r | |
438 | , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)\r | |
439 | #if NB_SMALL_SIZE_CLASSES > 48\r | |
440 | , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)\r | |
441 | #if NB_SMALL_SIZE_CLASSES > 56\r | |
442 | , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)\r | |
443 | #endif /* NB_SMALL_SIZE_CLASSES > 56 */\r | |
444 | #endif /* NB_SMALL_SIZE_CLASSES > 48 */\r | |
445 | #endif /* NB_SMALL_SIZE_CLASSES > 40 */\r | |
446 | #endif /* NB_SMALL_SIZE_CLASSES > 32 */\r | |
447 | #endif /* NB_SMALL_SIZE_CLASSES > 24 */\r | |
448 | #endif /* NB_SMALL_SIZE_CLASSES > 16 */\r | |
449 | #endif /* NB_SMALL_SIZE_CLASSES > 8 */\r | |
450 | };\r | |
451 | \r | |
452 | /*==========================================================================\r | |
453 | Arena management.\r | |
454 | \r | |
455 | `arenas` is a vector of arena_objects. It contains maxarenas entries, some of\r | |
456 | which may not be currently used (== they're arena_objects that aren't\r | |
457 | currently associated with an allocated arena). Note that arenas proper are\r | |
458 | separately malloc'ed.\r | |
459 | \r | |
460 | Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,\r | |
461 | we do try to free() arenas, and use some mild heuristic strategies to increase\r | |
462 | the likelihood that arenas eventually can be freed.\r | |
463 | \r | |
464 | unused_arena_objects\r | |
465 | \r | |
466 | This is a singly-linked list of the arena_objects that are currently not\r | |
467 | being used (no arena is associated with them). Objects are taken off the\r | |
468 | head of the list in new_arena(), and are pushed on the head of the list in\r | |
469 | PyObject_Free() when the arena is empty. Key invariant: an arena_object\r | |
470 | is on this list if and only if its .address member is 0.\r | |
471 | \r | |
472 | usable_arenas\r | |
473 | \r | |
474 | This is a doubly-linked list of the arena_objects associated with arenas\r | |
475 | that have pools available. These pools are either waiting to be reused,\r | |
476 | or have not been used before. The list is sorted to have the most-\r | |
477 | allocated arenas first (ascending order based on the nfreepools member).\r | |
478 | This means that the next allocation will come from a heavily used arena,\r | |
479 | which gives the nearly empty arenas a chance to be returned to the system.\r | |
480 | In my unscientific tests this dramatically improved the number of arenas\r | |
481 | that could be freed.\r | |
482 | \r | |
483 | Note that an arena_object associated with an arena all of whose pools are\r | |
484 | currently in use isn't on either list.\r | |
485 | */\r | |
486 | \r | |
487 | /* Array of objects used to track chunks of memory (arenas). */\r | |
488 | static struct arena_object* arenas = NULL;\r | |
489 | /* Number of slots currently allocated in the `arenas` vector. */\r | |
490 | static uint maxarenas = 0;\r | |
491 | \r | |
492 | /* The head of the singly-linked, NULL-terminated list of available\r | |
493 | * arena_objects.\r | |
494 | */\r | |
495 | static struct arena_object* unused_arena_objects = NULL;\r | |
496 | \r | |
497 | /* The head of the doubly-linked, NULL-terminated at each end, list of\r | |
498 | * arena_objects associated with arenas that have pools available.\r | |
499 | */\r | |
500 | static struct arena_object* usable_arenas = NULL;\r | |
501 | \r | |
502 | /* How many arena_objects do we initially allocate?\r | |
503 | * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the\r | |
504 | * `arenas` vector.\r | |
505 | */\r | |
506 | #define INITIAL_ARENA_OBJECTS 16\r | |
507 | \r | |
508 | /* Number of arenas allocated that haven't been free()'d. */\r | |
509 | static size_t narenas_currently_allocated = 0;\r | |
510 | \r | |
511 | #ifdef PYMALLOC_DEBUG\r | |
512 | /* Total number of times malloc() called to allocate an arena. */\r | |
513 | static size_t ntimes_arena_allocated = 0;\r | |
514 | /* High water mark (max value ever seen) for narenas_currently_allocated. */\r | |
515 | static size_t narenas_highwater = 0;\r | |
516 | #endif\r | |
517 | \r | |
518 | /* Allocate a new arena. If we run out of memory, return NULL. Else\r | |
519 | * allocate a new arena, and return the address of an arena_object\r | |
520 | * describing the new arena. It's expected that the caller will set\r | |
521 | * `usable_arenas` to the return value.\r | |
522 | */\r | |
523 | static struct arena_object*\r | |
524 | new_arena(void)\r | |
525 | {\r | |
526 | struct arena_object* arenaobj;\r | |
527 | uint excess; /* number of bytes above pool alignment */\r | |
528 | \r | |
529 | #ifdef PYMALLOC_DEBUG\r | |
530 | if (Py_GETENV("PYTHONMALLOCSTATS"))\r | |
531 | _PyObject_DebugMallocStats();\r | |
532 | #endif\r | |
533 | if (unused_arena_objects == NULL) {\r | |
534 | uint i;\r | |
535 | uint numarenas;\r | |
536 | size_t nbytes;\r | |
537 | \r | |
538 | /* Double the number of arena objects on each allocation.\r | |
539 | * Note that it's possible for `numarenas` to overflow.\r | |
540 | */\r | |
541 | numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;\r | |
542 | if (numarenas <= maxarenas)\r | |
543 | return NULL; /* overflow */\r | |
544 | #if SIZEOF_SIZE_T <= SIZEOF_INT\r | |
545 | if (numarenas > PY_SIZE_MAX / sizeof(*arenas))\r | |
546 | return NULL; /* overflow */\r | |
547 | #endif\r | |
548 | nbytes = numarenas * sizeof(*arenas);\r | |
549 | arenaobj = (struct arena_object *)realloc(arenas, nbytes);\r | |
550 | if (arenaobj == NULL)\r | |
551 | return NULL;\r | |
552 | arenas = arenaobj;\r | |
553 | \r | |
554 | /* We might need to fix pointers that were copied. However,\r | |
555 | * new_arena only gets called when all the pages in the\r | |
556 | * previous arenas are full. Thus, there are *no* pointers\r | |
557 | * into the old array. Thus, we don't have to worry about\r | |
558 | * invalid pointers. Just to be sure, some asserts:\r | |
559 | */\r | |
560 | assert(usable_arenas == NULL);\r | |
561 | assert(unused_arena_objects == NULL);\r | |
562 | \r | |
563 | /* Put the new arenas on the unused_arena_objects list. */\r | |
564 | for (i = maxarenas; i < numarenas; ++i) {\r | |
565 | arenas[i].address = 0; /* mark as unassociated */\r | |
566 | arenas[i].nextarena = i < numarenas - 1 ?\r | |
567 | &arenas[i+1] : NULL;\r | |
568 | }\r | |
569 | \r | |
570 | /* Update globals. */\r | |
571 | unused_arena_objects = &arenas[maxarenas];\r | |
572 | maxarenas = numarenas;\r | |
573 | }\r | |
574 | \r | |
575 | /* Take the next available arena object off the head of the list. */\r | |
576 | assert(unused_arena_objects != NULL);\r | |
577 | arenaobj = unused_arena_objects;\r | |
578 | unused_arena_objects = arenaobj->nextarena;\r | |
579 | assert(arenaobj->address == 0);\r | |
580 | arenaobj->address = (uptr)malloc(ARENA_SIZE);\r | |
581 | if (arenaobj->address == 0) {\r | |
582 | /* The allocation failed: return NULL after putting the\r | |
583 | * arenaobj back.\r | |
584 | */\r | |
585 | arenaobj->nextarena = unused_arena_objects;\r | |
586 | unused_arena_objects = arenaobj;\r | |
587 | return NULL;\r | |
588 | }\r | |
589 | \r | |
590 | ++narenas_currently_allocated;\r | |
591 | #ifdef PYMALLOC_DEBUG\r | |
592 | ++ntimes_arena_allocated;\r | |
593 | if (narenas_currently_allocated > narenas_highwater)\r | |
594 | narenas_highwater = narenas_currently_allocated;\r | |
595 | #endif\r | |
596 | arenaobj->freepools = NULL;\r | |
597 | /* pool_address <- first pool-aligned address in the arena\r | |
598 | nfreepools <- number of whole pools that fit after alignment */\r | |
599 | arenaobj->pool_address = (block*)arenaobj->address;\r | |
600 | arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;\r | |
601 | assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);\r | |
602 | excess = (uint)(arenaobj->address & POOL_SIZE_MASK);\r | |
603 | if (excess != 0) {\r | |
604 | --arenaobj->nfreepools;\r | |
605 | arenaobj->pool_address += POOL_SIZE - excess;\r | |
606 | }\r | |
607 | arenaobj->ntotalpools = arenaobj->nfreepools;\r | |
608 | \r | |
609 | return arenaobj;\r | |
610 | }\r | |
611 | \r | |
612 | /*\r | |
613 | Py_ADDRESS_IN_RANGE(P, POOL)\r | |
614 | \r | |
615 | Return true if and only if P is an address that was allocated by pymalloc.\r | |
616 | POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)\r | |
617 | (the caller is asked to compute this because the macro expands POOL more than\r | |
618 | once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a\r | |
619 | variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is\r | |
620 | called on every alloc/realloc/free, micro-efficiency is important here).\r | |
621 | \r | |
622 | Tricky: Let B be the arena base address associated with the pool, B =\r | |
623 | arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if\r | |
624 | \r | |
625 | B <= P < B + ARENA_SIZE\r | |
626 | \r | |
627 | Subtracting B throughout, this is true iff\r | |
628 | \r | |
629 | 0 <= P-B < ARENA_SIZE\r | |
630 | \r | |
631 | By using unsigned arithmetic, the "0 <=" half of the test can be skipped.\r | |
632 | \r | |
633 | Obscure: A PyMem "free memory" function can call the pymalloc free or realloc\r | |
634 | before the first arena has been allocated. `arenas` is still NULL in that\r | |
635 | case. We're relying on that maxarenas is also 0 in that case, so that\r | |
636 | (POOL)->arenaindex < maxarenas must be false, saving us from trying to index\r | |
637 | into a NULL arenas.\r | |
638 | \r | |
639 | Details: given P and POOL, the arena_object corresponding to P is AO =\r | |
640 | arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild\r | |
641 | stores, etc), POOL is the correct address of P's pool, AO.address is the\r | |
642 | correct base address of the pool's arena, and P must be within ARENA_SIZE of\r | |
643 | AO.address. In addition, AO.address is not 0 (no arena can start at address 0\r | |
644 | (NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc\r | |
645 | controls P.\r | |
646 | \r | |
647 | Now suppose obmalloc does not control P (e.g., P was obtained via a direct\r | |
648 | call to the system malloc() or realloc()). (POOL)->arenaindex may be anything\r | |
649 | in this case -- it may even be uninitialized trash. If the trash arenaindex\r | |
650 | is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't\r | |
651 | control P.\r | |
652 | \r | |
653 | Else arenaindex is < maxarena, and AO is read up. If AO corresponds to an\r | |
654 | allocated arena, obmalloc controls all the memory in slice AO.address :\r | |
655 | AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,\r | |
656 | so P doesn't lie in that slice, so the macro correctly reports that P is not\r | |
657 | controlled by obmalloc.\r | |
658 | \r | |
659 | Finally, if P is not controlled by obmalloc and AO corresponds to an unused\r | |
660 | arena_object (one not currently associated with an allocated arena),\r | |
661 | AO.address is 0, and the second test in the macro reduces to:\r | |
662 | \r | |
663 | P < ARENA_SIZE\r | |
664 | \r | |
665 | If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes\r | |
666 | that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part\r | |
667 | of the test still passes, and the third clause (AO.address != 0) is necessary\r | |
668 | to get the correct result: AO.address is 0 in this case, so the macro\r | |
669 | correctly reports that P is not controlled by obmalloc (despite that P lies in\r | |
670 | slice AO.address : AO.address + ARENA_SIZE).\r | |
671 | \r | |
672 | Note: The third (AO.address != 0) clause was added in Python 2.5. Before\r | |
673 | 2.5, arenas were never free()'ed, and an arenaindex < maxarena always\r | |
674 | corresponded to a currently-allocated arena, so the "P is not controlled by\r | |
675 | obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case\r | |
676 | was impossible.\r | |
677 | \r | |
678 | Note that the logic is excruciating, and reading up possibly uninitialized\r | |
679 | memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)\r | |
680 | creates problems for some memory debuggers. The overwhelming advantage is\r | |
681 | that this test determines whether an arbitrary address is controlled by\r | |
682 | obmalloc in a small constant time, independent of the number of arenas\r | |
683 | obmalloc controls. Since this test is needed at every entry point, it's\r | |
684 | extremely desirable that it be this fast.\r | |
685 | \r | |
686 | Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated\r | |
687 | by Python, it is important that (POOL)->arenaindex is read only once, as\r | |
688 | another thread may be concurrently modifying the value without holding the\r | |
689 | GIL. To accomplish this, the arenaindex_temp variable is used to store\r | |
690 | (POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's\r | |
691 | execution. The caller of the macro is responsible for declaring this\r | |
692 | variable.\r | |
693 | */\r | |
694 | #define Py_ADDRESS_IN_RANGE(P, POOL) \\r | |
695 | ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \\r | |
696 | (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \\r | |
697 | arenas[arenaindex_temp].address != 0)\r | |
698 | \r | |
699 | \r | |
700 | /* This is only useful when running memory debuggers such as\r | |
701 | * Purify or Valgrind. Uncomment to use.\r | |
702 | *\r | |
703 | #define Py_USING_MEMORY_DEBUGGER\r | |
704 | */\r | |
705 | \r | |
706 | #ifdef Py_USING_MEMORY_DEBUGGER\r | |
707 | \r | |
708 | /* Py_ADDRESS_IN_RANGE may access uninitialized memory by design\r | |
709 | * This leads to thousands of spurious warnings when using\r | |
710 | * Purify or Valgrind. By making a function, we can easily\r | |
711 | * suppress the uninitialized memory reads in this one function.\r | |
712 | * So we won't ignore real errors elsewhere.\r | |
713 | *\r | |
714 | * Disable the macro and use a function.\r | |
715 | */\r | |
716 | \r | |
717 | #undef Py_ADDRESS_IN_RANGE\r | |
718 | \r | |
719 | #if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \\r | |
720 | (__GNUC__ >= 4))\r | |
721 | #define Py_NO_INLINE __attribute__((__noinline__))\r | |
722 | #else\r | |
723 | #define Py_NO_INLINE\r | |
724 | #endif\r | |
725 | \r | |
726 | /* Don't make static, to try to ensure this isn't inlined. */\r | |
727 | int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;\r | |
728 | #undef Py_NO_INLINE\r | |
729 | #endif\r | |
730 | \r | |
731 | /*==========================================================================*/\r | |
732 | \r | |
733 | /* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct\r | |
734 | * from all other currently live pointers. This may not be possible.\r | |
735 | */\r | |
736 | \r | |
737 | /*\r | |
738 | * The basic blocks are ordered by decreasing execution frequency,\r | |
739 | * which minimizes the number of jumps in the most common cases,\r | |
740 | * improves branching prediction and instruction scheduling (small\r | |
741 | * block allocations typically result in a couple of instructions).\r | |
742 | * Unless the optimizer reorders everything, being too smart...\r | |
743 | */\r | |
744 | \r | |
745 | #undef PyObject_Malloc\r | |
746 | void *\r | |
747 | PyObject_Malloc(size_t nbytes)\r | |
748 | {\r | |
749 | block *bp;\r | |
750 | poolp pool;\r | |
751 | poolp next;\r | |
752 | uint size;\r | |
753 | \r | |
754 | #ifdef WITH_VALGRIND\r | |
755 | if (UNLIKELY(running_on_valgrind == -1))\r | |
756 | running_on_valgrind = RUNNING_ON_VALGRIND;\r | |
757 | if (UNLIKELY(running_on_valgrind))\r | |
758 | goto redirect;\r | |
759 | #endif\r | |
760 | \r | |
761 | /*\r | |
762 | * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.\r | |
763 | * Most python internals blindly use a signed Py_ssize_t to track\r | |
764 | * things without checking for overflows or negatives.\r | |
765 | * As size_t is unsigned, checking for nbytes < 0 is not required.\r | |
766 | */\r | |
767 | if (nbytes > PY_SSIZE_T_MAX)\r | |
768 | return NULL;\r | |
769 | \r | |
770 | /*\r | |
771 | * This implicitly redirects malloc(0).\r | |
772 | */\r | |
773 | if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {\r | |
774 | LOCK();\r | |
775 | /*\r | |
776 | * Most frequent paths first\r | |
777 | */\r | |
778 | size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;\r | |
779 | pool = usedpools[size + size];\r | |
780 | if (pool != pool->nextpool) {\r | |
781 | /*\r | |
782 | * There is a used pool for this size class.\r | |
783 | * Pick up the head block of its free list.\r | |
784 | */\r | |
785 | ++pool->ref.count;\r | |
786 | bp = pool->freeblock;\r | |
787 | assert(bp != NULL);\r | |
788 | if ((pool->freeblock = *(block **)bp) != NULL) {\r | |
789 | UNLOCK();\r | |
790 | return (void *)bp;\r | |
791 | }\r | |
792 | /*\r | |
793 | * Reached the end of the free list, try to extend it.\r | |
794 | */\r | |
795 | if (pool->nextoffset <= pool->maxnextoffset) {\r | |
796 | /* There is room for another block. */\r | |
797 | pool->freeblock = (block*)pool +\r | |
798 | pool->nextoffset;\r | |
799 | pool->nextoffset += INDEX2SIZE(size);\r | |
800 | *(block **)(pool->freeblock) = NULL;\r | |
801 | UNLOCK();\r | |
802 | return (void *)bp;\r | |
803 | }\r | |
804 | /* Pool is full, unlink from used pools. */\r | |
805 | next = pool->nextpool;\r | |
806 | pool = pool->prevpool;\r | |
807 | next->prevpool = pool;\r | |
808 | pool->nextpool = next;\r | |
809 | UNLOCK();\r | |
810 | return (void *)bp;\r | |
811 | }\r | |
812 | \r | |
813 | /* There isn't a pool of the right size class immediately\r | |
814 | * available: use a free pool.\r | |
815 | */\r | |
816 | if (usable_arenas == NULL) {\r | |
817 | /* No arena has a free pool: allocate a new arena. */\r | |
818 | #ifdef WITH_MEMORY_LIMITS\r | |
819 | if (narenas_currently_allocated >= MAX_ARENAS) {\r | |
820 | UNLOCK();\r | |
821 | goto redirect;\r | |
822 | }\r | |
823 | #endif\r | |
824 | usable_arenas = new_arena();\r | |
825 | if (usable_arenas == NULL) {\r | |
826 | UNLOCK();\r | |
827 | goto redirect;\r | |
828 | }\r | |
829 | usable_arenas->nextarena =\r | |
830 | usable_arenas->prevarena = NULL;\r | |
831 | }\r | |
832 | assert(usable_arenas->address != 0);\r | |
833 | \r | |
834 | /* Try to get a cached free pool. */\r | |
835 | pool = usable_arenas->freepools;\r | |
836 | if (pool != NULL) {\r | |
837 | /* Unlink from cached pools. */\r | |
838 | usable_arenas->freepools = pool->nextpool;\r | |
839 | \r | |
840 | /* This arena already had the smallest nfreepools\r | |
841 | * value, so decreasing nfreepools doesn't change\r | |
842 | * that, and we don't need to rearrange the\r | |
843 | * usable_arenas list. However, if the arena has\r | |
844 | * become wholly allocated, we need to remove its\r | |
845 | * arena_object from usable_arenas.\r | |
846 | */\r | |
847 | --usable_arenas->nfreepools;\r | |
848 | if (usable_arenas->nfreepools == 0) {\r | |
849 | /* Wholly allocated: remove. */\r | |
850 | assert(usable_arenas->freepools == NULL);\r | |
851 | assert(usable_arenas->nextarena == NULL ||\r | |
852 | usable_arenas->nextarena->prevarena ==\r | |
853 | usable_arenas);\r | |
854 | \r | |
855 | usable_arenas = usable_arenas->nextarena;\r | |
856 | if (usable_arenas != NULL) {\r | |
857 | usable_arenas->prevarena = NULL;\r | |
858 | assert(usable_arenas->address != 0);\r | |
859 | }\r | |
860 | }\r | |
861 | else {\r | |
862 | /* nfreepools > 0: it must be that freepools\r | |
863 | * isn't NULL, or that we haven't yet carved\r | |
864 | * off all the arena's pools for the first\r | |
865 | * time.\r | |
866 | */\r | |
867 | assert(usable_arenas->freepools != NULL ||\r | |
868 | usable_arenas->pool_address <=\r | |
869 | (block*)usable_arenas->address +\r | |
870 | ARENA_SIZE - POOL_SIZE);\r | |
871 | }\r | |
872 | init_pool:\r | |
873 | /* Frontlink to used pools. */\r | |
874 | next = usedpools[size + size]; /* == prev */\r | |
875 | pool->nextpool = next;\r | |
876 | pool->prevpool = next;\r | |
877 | next->nextpool = pool;\r | |
878 | next->prevpool = pool;\r | |
879 | pool->ref.count = 1;\r | |
880 | if (pool->szidx == size) {\r | |
881 | /* Luckily, this pool last contained blocks\r | |
882 | * of the same size class, so its header\r | |
883 | * and free list are already initialized.\r | |
884 | */\r | |
885 | bp = pool->freeblock;\r | |
886 | pool->freeblock = *(block **)bp;\r | |
887 | UNLOCK();\r | |
888 | return (void *)bp;\r | |
889 | }\r | |
890 | /*\r | |
891 | * Initialize the pool header, set up the free list to\r | |
892 | * contain just the second block, and return the first\r | |
893 | * block.\r | |
894 | */\r | |
895 | pool->szidx = size;\r | |
896 | size = INDEX2SIZE(size);\r | |
897 | bp = (block *)pool + POOL_OVERHEAD;\r | |
898 | pool->nextoffset = POOL_OVERHEAD + (size << 1);\r | |
899 | pool->maxnextoffset = POOL_SIZE - size;\r | |
900 | pool->freeblock = bp + size;\r | |
901 | *(block **)(pool->freeblock) = NULL;\r | |
902 | UNLOCK();\r | |
903 | return (void *)bp;\r | |
904 | }\r | |
905 | \r | |
906 | /* Carve off a new pool. */\r | |
907 | assert(usable_arenas->nfreepools > 0);\r | |
908 | assert(usable_arenas->freepools == NULL);\r | |
909 | pool = (poolp)usable_arenas->pool_address;\r | |
910 | assert((block*)pool <= (block*)usable_arenas->address +\r | |
911 | ARENA_SIZE - POOL_SIZE);\r | |
912 | pool->arenaindex = usable_arenas - arenas;\r | |
913 | assert(&arenas[pool->arenaindex] == usable_arenas);\r | |
914 | pool->szidx = DUMMY_SIZE_IDX;\r | |
915 | usable_arenas->pool_address += POOL_SIZE;\r | |
916 | --usable_arenas->nfreepools;\r | |
917 | \r | |
918 | if (usable_arenas->nfreepools == 0) {\r | |
919 | assert(usable_arenas->nextarena == NULL ||\r | |
920 | usable_arenas->nextarena->prevarena ==\r | |
921 | usable_arenas);\r | |
922 | /* Unlink the arena: it is completely allocated. */\r | |
923 | usable_arenas = usable_arenas->nextarena;\r | |
924 | if (usable_arenas != NULL) {\r | |
925 | usable_arenas->prevarena = NULL;\r | |
926 | assert(usable_arenas->address != 0);\r | |
927 | }\r | |
928 | }\r | |
929 | \r | |
930 | goto init_pool;\r | |
931 | }\r | |
932 | \r | |
933 | /* The small block allocator ends here. */\r | |
934 | \r | |
935 | redirect:\r | |
936 | /* Redirect the original request to the underlying (libc) allocator.\r | |
937 | * We jump here on bigger requests, on error in the code above (as a\r | |
938 | * last chance to serve the request) or when the max memory limit\r | |
939 | * has been reached.\r | |
940 | */\r | |
941 | if (nbytes == 0)\r | |
942 | nbytes = 1;\r | |
943 | return (void *)malloc(nbytes);\r | |
944 | }\r | |
945 | \r | |
946 | /* free */\r | |
947 | \r | |
948 | #undef PyObject_Free\r | |
949 | void\r | |
950 | PyObject_Free(void *p)\r | |
951 | {\r | |
952 | poolp pool;\r | |
953 | block *lastfree;\r | |
954 | poolp next, prev;\r | |
955 | uint size;\r | |
956 | #ifndef Py_USING_MEMORY_DEBUGGER\r | |
957 | uint arenaindex_temp;\r | |
958 | #endif\r | |
959 | \r | |
960 | if (p == NULL) /* free(NULL) has no effect */\r | |
961 | return;\r | |
962 | \r | |
963 | #ifdef WITH_VALGRIND\r | |
964 | if (UNLIKELY(running_on_valgrind > 0))\r | |
965 | goto redirect;\r | |
966 | #endif\r | |
967 | \r | |
968 | pool = POOL_ADDR(p);\r | |
969 | if (Py_ADDRESS_IN_RANGE(p, pool)) {\r | |
970 | /* We allocated this address. */\r | |
971 | LOCK();\r | |
972 | /* Link p to the start of the pool's freeblock list. Since\r | |
973 | * the pool had at least the p block outstanding, the pool\r | |
974 | * wasn't empty (so it's already in a usedpools[] list, or\r | |
975 | * was full and is in no list -- it's not in the freeblocks\r | |
976 | * list in any case).\r | |
977 | */\r | |
978 | assert(pool->ref.count > 0); /* else it was empty */\r | |
979 | *(block **)p = lastfree = pool->freeblock;\r | |
980 | pool->freeblock = (block *)p;\r | |
981 | if (lastfree) {\r | |
982 | struct arena_object* ao;\r | |
983 | uint nf; /* ao->nfreepools */\r | |
984 | \r | |
985 | /* freeblock wasn't NULL, so the pool wasn't full,\r | |
986 | * and the pool is in a usedpools[] list.\r | |
987 | */\r | |
988 | if (--pool->ref.count != 0) {\r | |
989 | /* pool isn't empty: leave it in usedpools */\r | |
990 | UNLOCK();\r | |
991 | return;\r | |
992 | }\r | |
993 | /* Pool is now empty: unlink from usedpools, and\r | |
994 | * link to the front of freepools. This ensures that\r | |
995 | * previously freed pools will be allocated later\r | |
996 | * (being not referenced, they are perhaps paged out).\r | |
997 | */\r | |
998 | next = pool->nextpool;\r | |
999 | prev = pool->prevpool;\r | |
1000 | next->prevpool = prev;\r | |
1001 | prev->nextpool = next;\r | |
1002 | \r | |
1003 | /* Link the pool to freepools. This is a singly-linked\r | |
1004 | * list, and pool->prevpool isn't used there.\r | |
1005 | */\r | |
1006 | ao = &arenas[pool->arenaindex];\r | |
1007 | pool->nextpool = ao->freepools;\r | |
1008 | ao->freepools = pool;\r | |
1009 | nf = ++ao->nfreepools;\r | |
1010 | \r | |
1011 | /* All the rest is arena management. We just freed\r | |
1012 | * a pool, and there are 4 cases for arena mgmt:\r | |
1013 | * 1. If all the pools are free, return the arena to\r | |
1014 | * the system free().\r | |
1015 | * 2. If this is the only free pool in the arena,\r | |
1016 | * add the arena back to the `usable_arenas` list.\r | |
1017 | * 3. If the "next" arena has a smaller count of free\r | |
1018 | * pools, we have to "slide this arena right" to\r | |
1019 | * restore that usable_arenas is sorted in order of\r | |
1020 | * nfreepools.\r | |
1021 | * 4. Else there's nothing more to do.\r | |
1022 | */\r | |
1023 | if (nf == ao->ntotalpools) {\r | |
1024 | /* Case 1. First unlink ao from usable_arenas.\r | |
1025 | */\r | |
1026 | assert(ao->prevarena == NULL ||\r | |
1027 | ao->prevarena->address != 0);\r | |
1028 | assert(ao ->nextarena == NULL ||\r | |
1029 | ao->nextarena->address != 0);\r | |
1030 | \r | |
1031 | /* Fix the pointer in the prevarena, or the\r | |
1032 | * usable_arenas pointer.\r | |
1033 | */\r | |
1034 | if (ao->prevarena == NULL) {\r | |
1035 | usable_arenas = ao->nextarena;\r | |
1036 | assert(usable_arenas == NULL ||\r | |
1037 | usable_arenas->address != 0);\r | |
1038 | }\r | |
1039 | else {\r | |
1040 | assert(ao->prevarena->nextarena == ao);\r | |
1041 | ao->prevarena->nextarena =\r | |
1042 | ao->nextarena;\r | |
1043 | }\r | |
1044 | /* Fix the pointer in the nextarena. */\r | |
1045 | if (ao->nextarena != NULL) {\r | |
1046 | assert(ao->nextarena->prevarena == ao);\r | |
1047 | ao->nextarena->prevarena =\r | |
1048 | ao->prevarena;\r | |
1049 | }\r | |
1050 | /* Record that this arena_object slot is\r | |
1051 | * available to be reused.\r | |
1052 | */\r | |
1053 | ao->nextarena = unused_arena_objects;\r | |
1054 | unused_arena_objects = ao;\r | |
1055 | \r | |
1056 | /* Free the entire arena. */\r | |
1057 | free((void *)ao->address);\r | |
1058 | ao->address = 0; /* mark unassociated */\r | |
1059 | --narenas_currently_allocated;\r | |
1060 | \r | |
1061 | UNLOCK();\r | |
1062 | return;\r | |
1063 | }\r | |
1064 | if (nf == 1) {\r | |
1065 | /* Case 2. Put ao at the head of\r | |
1066 | * usable_arenas. Note that because\r | |
1067 | * ao->nfreepools was 0 before, ao isn't\r | |
1068 | * currently on the usable_arenas list.\r | |
1069 | */\r | |
1070 | ao->nextarena = usable_arenas;\r | |
1071 | ao->prevarena = NULL;\r | |
1072 | if (usable_arenas)\r | |
1073 | usable_arenas->prevarena = ao;\r | |
1074 | usable_arenas = ao;\r | |
1075 | assert(usable_arenas->address != 0);\r | |
1076 | \r | |
1077 | UNLOCK();\r | |
1078 | return;\r | |
1079 | }\r | |
1080 | /* If this arena is now out of order, we need to keep\r | |
1081 | * the list sorted. The list is kept sorted so that\r | |
1082 | * the "most full" arenas are used first, which allows\r | |
1083 | * the nearly empty arenas to be completely freed. In\r | |
1084 | * a few un-scientific tests, it seems like this\r | |
1085 | * approach allowed a lot more memory to be freed.\r | |
1086 | */\r | |
1087 | if (ao->nextarena == NULL ||\r | |
1088 | nf <= ao->nextarena->nfreepools) {\r | |
1089 | /* Case 4. Nothing to do. */\r | |
1090 | UNLOCK();\r | |
1091 | return;\r | |
1092 | }\r | |
1093 | /* Case 3: We have to move the arena towards the end\r | |
1094 | * of the list, because it has more free pools than\r | |
1095 | * the arena to its right.\r | |
1096 | * First unlink ao from usable_arenas.\r | |
1097 | */\r | |
1098 | if (ao->prevarena != NULL) {\r | |
1099 | /* ao isn't at the head of the list */\r | |
1100 | assert(ao->prevarena->nextarena == ao);\r | |
1101 | ao->prevarena->nextarena = ao->nextarena;\r | |
1102 | }\r | |
1103 | else {\r | |
1104 | /* ao is at the head of the list */\r | |
1105 | assert(usable_arenas == ao);\r | |
1106 | usable_arenas = ao->nextarena;\r | |
1107 | }\r | |
1108 | ao->nextarena->prevarena = ao->prevarena;\r | |
1109 | \r | |
1110 | /* Locate the new insertion point by iterating over\r | |
1111 | * the list, using our nextarena pointer.\r | |
1112 | */\r | |
1113 | while (ao->nextarena != NULL &&\r | |
1114 | nf > ao->nextarena->nfreepools) {\r | |
1115 | ao->prevarena = ao->nextarena;\r | |
1116 | ao->nextarena = ao->nextarena->nextarena;\r | |
1117 | }\r | |
1118 | \r | |
1119 | /* Insert ao at this point. */\r | |
1120 | assert(ao->nextarena == NULL ||\r | |
1121 | ao->prevarena == ao->nextarena->prevarena);\r | |
1122 | assert(ao->prevarena->nextarena == ao->nextarena);\r | |
1123 | \r | |
1124 | ao->prevarena->nextarena = ao;\r | |
1125 | if (ao->nextarena != NULL)\r | |
1126 | ao->nextarena->prevarena = ao;\r | |
1127 | \r | |
1128 | /* Verify that the swaps worked. */\r | |
1129 | assert(ao->nextarena == NULL ||\r | |
1130 | nf <= ao->nextarena->nfreepools);\r | |
1131 | assert(ao->prevarena == NULL ||\r | |
1132 | nf > ao->prevarena->nfreepools);\r | |
1133 | assert(ao->nextarena == NULL ||\r | |
1134 | ao->nextarena->prevarena == ao);\r | |
1135 | assert((usable_arenas == ao &&\r | |
1136 | ao->prevarena == NULL) ||\r | |
1137 | ao->prevarena->nextarena == ao);\r | |
1138 | \r | |
1139 | UNLOCK();\r | |
1140 | return;\r | |
1141 | }\r | |
1142 | /* Pool was full, so doesn't currently live in any list:\r | |
1143 | * link it to the front of the appropriate usedpools[] list.\r | |
1144 | * This mimics LRU pool usage for new allocations and\r | |
1145 | * targets optimal filling when several pools contain\r | |
1146 | * blocks of the same size class.\r | |
1147 | */\r | |
1148 | --pool->ref.count;\r | |
1149 | assert(pool->ref.count > 0); /* else the pool is empty */\r | |
1150 | size = pool->szidx;\r | |
1151 | next = usedpools[size + size];\r | |
1152 | prev = next->prevpool;\r | |
1153 | /* insert pool before next: prev <-> pool <-> next */\r | |
1154 | pool->nextpool = next;\r | |
1155 | pool->prevpool = prev;\r | |
1156 | next->prevpool = pool;\r | |
1157 | prev->nextpool = pool;\r | |
1158 | UNLOCK();\r | |
1159 | return;\r | |
1160 | }\r | |
1161 | \r | |
1162 | #ifdef WITH_VALGRIND\r | |
1163 | redirect:\r | |
1164 | #endif\r | |
1165 | /* We didn't allocate this address. */\r | |
1166 | free(p);\r | |
1167 | }\r | |
1168 | \r | |
1169 | /* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,\r | |
1170 | * then as the Python docs promise, we do not treat this like free(p), and\r | |
1171 | * return a non-NULL result.\r | |
1172 | */\r | |
1173 | \r | |
1174 | #undef PyObject_Realloc\r | |
1175 | void *\r | |
1176 | PyObject_Realloc(void *p, size_t nbytes)\r | |
1177 | {\r | |
1178 | void *bp;\r | |
1179 | poolp pool;\r | |
1180 | size_t size;\r | |
1181 | #ifndef Py_USING_MEMORY_DEBUGGER\r | |
1182 | uint arenaindex_temp;\r | |
1183 | #endif\r | |
1184 | \r | |
1185 | if (p == NULL)\r | |
1186 | return PyObject_Malloc(nbytes);\r | |
1187 | \r | |
1188 | /*\r | |
1189 | * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.\r | |
1190 | * Most python internals blindly use a signed Py_ssize_t to track\r | |
1191 | * things without checking for overflows or negatives.\r | |
1192 | * As size_t is unsigned, checking for nbytes < 0 is not required.\r | |
1193 | */\r | |
1194 | if (nbytes > PY_SSIZE_T_MAX)\r | |
1195 | return NULL;\r | |
1196 | \r | |
1197 | #ifdef WITH_VALGRIND\r | |
1198 | /* Treat running_on_valgrind == -1 the same as 0 */\r | |
1199 | if (UNLIKELY(running_on_valgrind > 0))\r | |
1200 | goto redirect;\r | |
1201 | #endif\r | |
1202 | \r | |
1203 | pool = POOL_ADDR(p);\r | |
1204 | if (Py_ADDRESS_IN_RANGE(p, pool)) {\r | |
1205 | /* We're in charge of this block */\r | |
1206 | size = INDEX2SIZE(pool->szidx);\r | |
1207 | if (nbytes <= size) {\r | |
1208 | /* The block is staying the same or shrinking. If\r | |
1209 | * it's shrinking, there's a tradeoff: it costs\r | |
1210 | * cycles to copy the block to a smaller size class,\r | |
1211 | * but it wastes memory not to copy it. The\r | |
1212 | * compromise here is to copy on shrink only if at\r | |
1213 | * least 25% of size can be shaved off.\r | |
1214 | */\r | |
1215 | if (4 * nbytes > 3 * size) {\r | |
1216 | /* It's the same,\r | |
1217 | * or shrinking and new/old > 3/4.\r | |
1218 | */\r | |
1219 | return p;\r | |
1220 | }\r | |
1221 | size = nbytes;\r | |
1222 | }\r | |
1223 | bp = PyObject_Malloc(nbytes);\r | |
1224 | if (bp != NULL) {\r | |
1225 | memcpy(bp, p, size);\r | |
1226 | PyObject_Free(p);\r | |
1227 | }\r | |
1228 | return bp;\r | |
1229 | }\r | |
1230 | #ifdef WITH_VALGRIND\r | |
1231 | redirect:\r | |
1232 | #endif\r | |
1233 | /* We're not managing this block. If nbytes <=\r | |
1234 | * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this\r | |
1235 | * block. However, if we do, we need to copy the valid data from\r | |
1236 | * the C-managed block to one of our blocks, and there's no portable\r | |
1237 | * way to know how much of the memory space starting at p is valid.\r | |
1238 | * As bug 1185883 pointed out the hard way, it's possible that the\r | |
1239 | * C-managed block is "at the end" of allocated VM space, so that\r | |
1240 | * a memory fault can occur if we try to copy nbytes bytes starting\r | |
1241 | * at p. Instead we punt: let C continue to manage this block.\r | |
1242 | */\r | |
1243 | if (nbytes)\r | |
1244 | return realloc(p, nbytes);\r | |
1245 | /* C doesn't define the result of realloc(p, 0) (it may or may not\r | |
1246 | * return NULL then), but Python's docs promise that nbytes==0 never\r | |
1247 | * returns NULL. We don't pass 0 to realloc(), to avoid that endcase\r | |
1248 | * to begin with. Even then, we can't be sure that realloc() won't\r | |
1249 | * return NULL.\r | |
1250 | */\r | |
1251 | bp = realloc(p, 1);\r | |
1252 | return bp ? bp : p;\r | |
1253 | }\r | |
1254 | \r | |
1255 | #else /* ! WITH_PYMALLOC */\r | |
1256 | \r | |
1257 | /*==========================================================================*/\r | |
1258 | /* pymalloc not enabled: Redirect the entry points to malloc. These will\r | |
1259 | * only be used by extensions that are compiled with pymalloc enabled. */\r | |
1260 | \r | |
1261 | void *\r | |
1262 | PyObject_Malloc(size_t n)\r | |
1263 | {\r | |
1264 | return PyMem_MALLOC(n);\r | |
1265 | }\r | |
1266 | \r | |
1267 | void *\r | |
1268 | PyObject_Realloc(void *p, size_t n)\r | |
1269 | {\r | |
1270 | return PyMem_REALLOC(p, n);\r | |
1271 | }\r | |
1272 | \r | |
1273 | void\r | |
1274 | PyObject_Free(void *p)\r | |
1275 | {\r | |
1276 | PyMem_FREE(p);\r | |
1277 | }\r | |
1278 | #endif /* WITH_PYMALLOC */\r | |
1279 | \r | |
1280 | #ifdef PYMALLOC_DEBUG\r | |
1281 | /*==========================================================================*/\r | |
1282 | /* A cross-platform debugging allocator. This doesn't manage memory directly;\r | |
1283 | * it wraps a real allocator, adding extra debugging info to the memory blocks.\r | |
1284 | */\r | |
1285 | \r | |
1286 | /* Special bytes broadcast into debug memory blocks at appropriate times.\r | |
1287 | * Strings of these are unlikely to be valid addresses, floats, ints or\r | |
1288 | * 7-bit ASCII.\r | |
1289 | */\r | |
1290 | #undef CLEANBYTE\r | |
1291 | #undef DEADBYTE\r | |
1292 | #undef FORBIDDENBYTE\r | |
1293 | #define CLEANBYTE 0xCB /* clean (newly allocated) memory */\r | |
1294 | #define DEADBYTE 0xDB /* dead (newly freed) memory */\r | |
1295 | #define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */\r | |
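/* A practical hint (illustrative, not exhaustive): in a debug build, a word
 * that reads as 0xCBCBCBCB... usually means freshly allocated memory was used
 * before being initialized, while 0xDBDBDBDB... usually means already-freed
 * memory was read.
 */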
1296 | \r | |
1297 | /* We tag each block with an API ID in order to detect API violations */\r | |
1298 | #define _PYMALLOC_MEM_ID 'm' /* the PyMem_Malloc() API */\r | |
1299 | #define _PYMALLOC_OBJ_ID 'o' /* The PyObject_Malloc() API */\r | |
1300 | \r | |
1301 | static size_t serialno = 0; /* incremented on each debug {m,re}alloc */\r | |
1302 | \r | |
1303 | /* serialno is always incremented by calling this routine. The point is\r | |
1304 | * to supply a single place to set a breakpoint.\r | |
1305 | */\r | |
1306 | static void\r | |
1307 | bumpserialno(void)\r | |
1308 | {\r | |
1309 | ++serialno;\r | |
1310 | }\r | |
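/* For example (assuming a debugger such as gdb attached to a PYMALLOC_DEBUG
 * build), a conditional breakpoint here stops close to the allocation whose
 * serial number a previous run reported:
 *
 *     (gdb) break bumpserialno if serialno == 4823
 *
 * where 4823 stands in for the serial number printed by
 * _PyObject_DebugDumpAddress.
 */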
1311 | \r | |
1312 | #define SST SIZEOF_SIZE_T\r | |
1313 | \r | |
1314 | /* Read sizeof(size_t) bytes at p as a big-endian size_t. */\r | |
1315 | static size_t\r | |
1316 | read_size_t(const void *p)\r | |
1317 | {\r | |
1318 | const uchar *q = (const uchar *)p;\r | |
1319 | size_t result = *q++;\r | |
1320 | int i;\r | |
1321 | \r | |
1322 | for (i = SST; --i > 0; ++q)\r | |
1323 | result = (result << 8) | *q;\r | |
1324 | return result;\r | |
1325 | }\r | |
1326 | \r | |
1327 | /* Write n as a big-endian size_t, MSB at address p, LSB at\r | |
1328 | * p + sizeof(size_t) - 1.\r | |
1329 | */\r | |
1330 | static void\r | |
1331 | write_size_t(void *p, size_t n)\r | |
1332 | {\r | |
1333 | uchar *q = (uchar *)p + SST - 1;\r | |
1334 | int i;\r | |
1335 | \r | |
1336 | for (i = SST; --i >= 0; --q) {\r | |
1337 | *q = (uchar)(n & 0xff);\r | |
1338 | n >>= 8;\r | |
1339 | }\r | |
1340 | }\r | |
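/* Illustrative sketch (kept out of the build): the two helpers above are
 * inverses of each other, and the big-endian layout keeps the most
 * significant byte first, which is easy to read in a hex dump.
 */
#if 0
static void
size_t_roundtrip_sketch(void)
{
    uchar buf[SST];                   /* SST bytes, e.g. 8 on a 64-bit build */
    write_size_t(buf, (size_t)300);   /* 300 == 0x12C */
    /* buf now holds 00 00 ... 00 01 2c -- MSB at buf[0], LSB at buf[SST-1] */
    assert(read_size_t(buf) == 300);
}
#endif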
1341 | \r | |
1342 | #ifdef Py_DEBUG\r | |
1343 | /* Is target in the list? The list is traversed via the nextpool pointers.\r | |
1344 | * The list may be NULL-terminated, or circular. Return 1 if target is in\r | |
1345 | * list, else 0.\r | |
1346 | */\r | |
1347 | static int\r | |
1348 | pool_is_in_list(const poolp target, poolp list)\r | |
1349 | {\r | |
1350 | poolp origlist = list;\r | |
1351 | assert(target != NULL);\r | |
1352 | if (list == NULL)\r | |
1353 | return 0;\r | |
1354 | do {\r | |
1355 | if (target == list)\r | |
1356 | return 1;\r | |
1357 | list = list->nextpool;\r | |
1358 | } while (list != NULL && list != origlist);\r | |
1359 | return 0;\r | |
1360 | }\r | |
1361 | \r | |
1362 | #else\r | |
1363 | #define pool_is_in_list(X, Y) 1\r | |
1364 | \r | |
1365 | #endif /* Py_DEBUG */\r | |
1366 | \r | |
1367 | /* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and\r | |
1368 | fills them with useful stuff, here calling the underlying malloc's result p:\r | |
1369 | \r | |
1370 | p[0: S]\r | |
1371 | Number of bytes originally asked for. This is a size_t, big-endian (easier\r | |
1372 | to read in a memory dump).\r | |
1373 | p[S: 2*S]\r | |
1374 | The API id byte (_PYMALLOC_MEM_ID or _PYMALLOC_OBJ_ID) followed by S-1 copies of FORBIDDENBYTE. Used to catch under-writes and under-reads.\r | |
1375 | p[2*S: 2*S+n]\r | |
1376 | The requested memory, filled with copies of CLEANBYTE.\r | |
1377 | Used to catch reference to uninitialized memory.\r | |
1378 | &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc\r | |
1379 | handled the request itself.\r | |
1380 | p[2*S+n: 2*S+n+S]\r | |
1381 | Copies of FORBIDDENBYTE. Used to catch over-writes and over-reads.\r | |
1382 | p[2*S+n+S: 2*S+n+2*S]\r | |
1383 | A serial number, incremented by 1 on each call to _PyObject_DebugMalloc\r | |
1384 | and _PyObject_DebugRealloc.\r | |
1385 | This is a big-endian size_t.\r | |
1386 | If "bad memory" is detected later, the serial number gives an\r | |
1387 | excellent way to set a breakpoint on the next run, to capture the\r | |
1388 | instant at which this block was passed out.\r | |
1389 | */\r | |
1390 | \r | |
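/* A concrete instance of the layout above (illustrative; assumes S == 8 and
 * a request of n == 10 bytes, so 10 + 4*8 == 42 bytes are obtained from the
 * underlying allocator at some address p):
 *
 *     p[0:8]    00 00 00 00 00 00 00 0a     original request size (10)
 *     p[8]      'm' or 'o'                  API id
 *     p[9:16]   fb fb fb fb fb fb fb        leading pad (FORBIDDENBYTE)
 *     p[16:26]  cb * 10                     the caller's bytes (CLEANBYTE)
 *     p[26:34]  fb fb fb fb fb fb fb fb     trailing pad
 *     p[34:42]  big-endian serial number
 *
 * and &p[16] (i.e. &p[2*S]) is what the caller receives.
 */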
1391 | /* debug replacements for the PyMem_* memory API */\r | |
1392 | void *\r | |
1393 | _PyMem_DebugMalloc(size_t nbytes)\r | |
1394 | {\r | |
1395 | return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);\r | |
1396 | }\r | |
1397 | void *\r | |
1398 | _PyMem_DebugRealloc(void *p, size_t nbytes)\r | |
1399 | {\r | |
1400 | return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);\r | |
1401 | }\r | |
1402 | void\r | |
1403 | _PyMem_DebugFree(void *p)\r | |
1404 | {\r | |
1405 | _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);\r | |
1406 | }\r | |
1407 | \r | |
1408 | /* debug replacements for the PyObject_* memory API */\r | |
1409 | void *\r | |
1410 | _PyObject_DebugMalloc(size_t nbytes)\r | |
1411 | {\r | |
1412 | return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);\r | |
1413 | }\r | |
1414 | void *\r | |
1415 | _PyObject_DebugRealloc(void *p, size_t nbytes)\r | |
1416 | {\r | |
1417 | return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);\r | |
1418 | }\r | |
1419 | void\r | |
1420 | _PyObject_DebugFree(void *p)\r | |
1421 | {\r | |
1422 | _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);\r | |
1423 | }\r | |
1424 | void\r | |
1425 | _PyObject_DebugCheckAddress(const void *p)\r | |
1426 | {\r | |
1427 | _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);\r | |
1428 | }\r | |
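/* In a PYMALLOC_DEBUG build these wrappers are what the public allocation
 * macros are expected to resolve to (roughly: PyMem_MALLOC ->
 * _PyMem_DebugMalloc, PyObject_MALLOC -> _PyObject_DebugMalloc; see pymem.h
 * and objimpl.h), which is how every allocation and release picks up the
 * bookkeeping described above.
 */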
1429 | \r | |
1430 | \r | |
1431 | /* generic debug memory api, with an "id" to identify the API in use */\r | |
1432 | void *\r | |
1433 | _PyObject_DebugMallocApi(char id, size_t nbytes)\r | |
1434 | {\r | |
1435 | uchar *p; /* base address of malloc'ed block */\r | |
1436 | uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */\r | |
1437 | size_t total; /* nbytes + 4*SST */\r | |
1438 | \r | |
1439 | bumpserialno();\r | |
1440 | total = nbytes + 4*SST;\r | |
1441 | if (total < nbytes)\r | |
1442 | /* overflow: can't represent total as a size_t */\r | |
1443 | return NULL;\r | |
1444 | \r | |
1445 | p = (uchar *)PyObject_Malloc(total);\r | |
1446 | if (p == NULL)\r | |
1447 | return NULL;\r | |
1448 | \r | |
1449 | /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */\r | |
1450 | write_size_t(p, nbytes);\r | |
1451 | p[SST] = (uchar)id;\r | |
1452 | memset(p + SST + 1 , FORBIDDENBYTE, SST-1);\r | |
1453 | \r | |
1454 | if (nbytes > 0)\r | |
1455 | memset(p + 2*SST, CLEANBYTE, nbytes);\r | |
1456 | \r | |
1457 | /* at tail, write pad (SST bytes) and serialno (SST bytes) */\r | |
1458 | tail = p + 2*SST + nbytes;\r | |
1459 | memset(tail, FORBIDDENBYTE, SST);\r | |
1460 | write_size_t(tail + SST, serialno);\r | |
1461 | \r | |
1462 | return p + 2*SST;\r | |
1463 | }\r | |
1464 | \r | |
1465 | /* The debug free first checks the 2*SST bytes on each end for sanity (in\r | |
1466 | particular, that the FORBIDDENBYTEs with the api ID are still intact).\r | |
1467 | Then fills the original bytes with DEADBYTE.\r | |
1468 | Then calls the underlying free.\r | |
1469 | */\r | |
1470 | void\r | |
1471 | _PyObject_DebugFreeApi(char api, void *p)\r | |
1472 | {\r | |
1473 | uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */\r | |
1474 | size_t nbytes;\r | |
1475 | \r | |
1476 | if (p == NULL)\r | |
1477 | return;\r | |
1478 | _PyObject_DebugCheckAddressApi(api, p);\r | |
1479 | nbytes = read_size_t(q);\r | |
1480 | nbytes += 4*SST;\r | |
1481 | if (nbytes > 0)\r | |
1482 | memset(q, DEADBYTE, nbytes);\r | |
1483 | PyObject_Free(q);\r | |
1484 | }\r | |
1485 | \r | |
1486 | void *\r | |
1487 | _PyObject_DebugReallocApi(char api, void *p, size_t nbytes)\r | |
1488 | {\r | |
1489 | uchar *q = (uchar *)p;\r | |
1490 | uchar *tail;\r | |
1491 | size_t total; /* nbytes + 4*SST */\r | |
1492 | size_t original_nbytes;\r | |
1493 | int i;\r | |
1494 | \r | |
1495 | if (p == NULL)\r | |
1496 | return _PyObject_DebugMallocApi(api, nbytes);\r | |
1497 | \r | |
1498 | _PyObject_DebugCheckAddressApi(api, p);\r | |
1499 | bumpserialno();\r | |
1500 | original_nbytes = read_size_t(q - 2*SST);\r | |
1501 | total = nbytes + 4*SST;\r | |
1502 | if (total < nbytes)\r | |
1503 | /* overflow: can't represent total as a size_t */\r | |
1504 | return NULL;\r | |
1505 | \r | |
1506 | if (nbytes < original_nbytes) {\r | |
1507 | /* shrinking: mark old extra memory dead */\r | |
1508 | memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);\r | |
1509 | }\r | |
1510 | \r | |
1511 | /* Resize and add decorations. We may get a new pointer here, in which\r | |
1512 | * case we didn't get the chance to mark the old memory with DEADBYTE,\r | |
1513 | * but we live with that.\r | |
1514 | */\r | |
1515 | q = (uchar *)PyObject_Realloc(q - 2*SST, total);\r | |
1516 | if (q == NULL)\r | |
1517 | return NULL;\r | |
1518 | \r | |
1519 | write_size_t(q, nbytes);\r | |
1520 | assert(q[SST] == (uchar)api);\r | |
1521 | for (i = 1; i < SST; ++i)\r | |
1522 | assert(q[SST + i] == FORBIDDENBYTE);\r | |
1523 | q += 2*SST;\r | |
1524 | tail = q + nbytes;\r | |
1525 | memset(tail, FORBIDDENBYTE, SST);\r | |
1526 | write_size_t(tail + SST, serialno);\r | |
1527 | \r | |
1528 | if (nbytes > original_nbytes) {\r | |
1529 | /* growing: mark new extra memory clean */\r | |
1530 | memset(q + original_nbytes, CLEANBYTE,\r | |
1531 | nbytes - original_nbytes);\r | |
1532 | }\r | |
1533 | \r | |
1534 | return q;\r | |
1535 | }\r | |
1536 | \r | |
1537 | /* Check the forbidden bytes on both ends of the memory allocated for p.\r | |
1538 | * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,\r | |
1539 | * and call Py_FatalError to kill the program.\r | |
1540 | * The API id is also checked.\r | |
1541 | */\r | |
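/* For example (illustrative): a block obtained via PyObject_Malloc() but
 * released through the PyMem_* debug entry points reaches this check with
 * api == 'm' while the stored id byte is 'o', producing the fatal
 * "bad ID: Allocated using API 'o', verified using API 'm'" report.
 */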
1542 | void\r | |
1543 | _PyObject_DebugCheckAddressApi(char api, const void *p)\r | |
1544 | {\r | |
1545 | const uchar *q = (const uchar *)p;\r | |
1546 | char msgbuf[64];\r | |
1547 | char *msg;\r | |
1548 | size_t nbytes;\r | |
1549 | const uchar *tail;\r | |
1550 | int i;\r | |
1551 | char id;\r | |
1552 | \r | |
1553 | if (p == NULL) {\r | |
1554 | msg = "didn't expect a NULL pointer";\r | |
1555 | goto error;\r | |
1556 | }\r | |
1557 | \r | |
1558 | /* Check the API id */\r | |
1559 | id = (char)q[-SST];\r | |
1560 | if (id != api) {\r | |
1561 | msg = msgbuf;\r | |
1562 | snprintf(msg, sizeof(msgbuf), "bad ID: Allocated using API '%c', verified using API '%c'", id, api);\r | |
1563 | msgbuf[sizeof(msgbuf)-1] = 0;\r | |
1564 | goto error;\r | |
1565 | }\r | |
1566 | \r | |
1567 | /* Check the stuff at the start of p first: if there's underwrite\r | |
1568 | * corruption, the number-of-bytes field may be nuts, and checking\r | |
1569 | * the tail could lead to a segfault then.\r | |
1570 | */\r | |
1571 | for (i = SST-1; i >= 1; --i) {\r | |
1572 | if (*(q-i) != FORBIDDENBYTE) {\r | |
1573 | msg = "bad leading pad byte";\r | |
1574 | goto error;\r | |
1575 | }\r | |
1576 | }\r | |
1577 | \r | |
1578 | nbytes = read_size_t(q - 2*SST);\r | |
1579 | tail = q + nbytes;\r | |
1580 | for (i = 0; i < SST; ++i) {\r | |
1581 | if (tail[i] != FORBIDDENBYTE) {\r | |
1582 | msg = "bad trailing pad byte";\r | |
1583 | goto error;\r | |
1584 | }\r | |
1585 | }\r | |
1586 | \r | |
1587 | return;\r | |
1588 | \r | |
1589 | error:\r | |
1590 | _PyObject_DebugDumpAddress(p);\r | |
1591 | Py_FatalError(msg);\r | |
1592 | }\r | |
1593 | \r | |
1594 | /* Display info to stderr about the memory block at p. */\r | |
1595 | void\r | |
1596 | _PyObject_DebugDumpAddress(const void *p)\r | |
1597 | {\r | |
1598 | const uchar *q = (const uchar *)p;\r | |
1599 | const uchar *tail;\r | |
1600 | size_t nbytes, serial;\r | |
1601 | int i;\r | |
1602 | int ok;\r | |
1603 | char id;\r | |
1604 | \r | |
1605 | fprintf(stderr, "Debug memory block at address p=%p:", p);\r | |
1606 | if (p == NULL) {\r | |
1607 | fprintf(stderr, "\n");\r | |
1608 | return;\r | |
1609 | }\r | |
1610 | id = (char)q[-SST];\r | |
1611 | fprintf(stderr, " API '%c'\n", id);\r | |
1612 | \r | |
1613 | nbytes = read_size_t(q - 2*SST);\r | |
1614 | fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "\r | |
1615 | "requested\n", nbytes);\r | |
1616 | \r | |
1617 | /* In case this is nuts, check the leading pad bytes first. */\r | |
1618 | fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);\r | |
1619 | ok = 1;\r | |
1620 | for (i = 1; i <= SST-1; ++i) {\r | |
1621 | if (*(q-i) != FORBIDDENBYTE) {\r | |
1622 | ok = 0;\r | |
1623 | break;\r | |
1624 | }\r | |
1625 | }\r | |
1626 | if (ok)\r | |
1627 | fputs("FORBIDDENBYTE, as expected.\n", stderr);\r | |
1628 | else {\r | |
1629 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",\r | |
1630 | FORBIDDENBYTE);\r | |
1631 | for (i = SST-1; i >= 1; --i) {\r | |
1632 | const uchar byte = *(q-i);\r | |
1633 | fprintf(stderr, " at p-%d: 0x%02x", i, byte);\r | |
1634 | if (byte != FORBIDDENBYTE)\r | |
1635 | fputs(" *** OUCH", stderr);\r | |
1636 | fputc('\n', stderr);\r | |
1637 | }\r | |
1638 | \r | |
1639 | fputs(" Because memory is corrupted at the start, the "\r | |
1640 | "count of bytes requested\n"\r | |
1641 | " may be bogus, and checking the trailing pad "\r | |
1642 | "bytes may segfault.\n", stderr);\r | |
1643 | }\r | |
1644 | \r | |
1645 | tail = q + nbytes;\r | |
1646 | fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);\r | |
1647 | ok = 1;\r | |
1648 | for (i = 0; i < SST; ++i) {\r | |
1649 | if (tail[i] != FORBIDDENBYTE) {\r | |
1650 | ok = 0;\r | |
1651 | break;\r | |
1652 | }\r | |
1653 | }\r | |
1654 | if (ok)\r | |
1655 | fputs("FORBIDDENBYTE, as expected.\n", stderr);\r | |
1656 | else {\r | |
1657 | fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",\r | |
1658 | FORBIDDENBYTE);\r | |
1659 | for (i = 0; i < SST; ++i) {\r | |
1660 | const uchar byte = tail[i];\r | |
1661 | fprintf(stderr, " at tail+%d: 0x%02x",\r | |
1662 | i, byte);\r | |
1663 | if (byte != FORBIDDENBYTE)\r | |
1664 | fputs(" *** OUCH", stderr);\r | |
1665 | fputc('\n', stderr);\r | |
1666 | }\r | |
1667 | }\r | |
1668 | \r | |
1669 | serial = read_size_t(tail + SST);\r | |
1670 | fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T\r | |
1671 | "u to debug malloc/realloc.\n", serial);\r | |
1672 | \r | |
1673 | if (nbytes > 0) {\r | |
1674 | i = 0;\r | |
1675 | fputs(" Data at p:", stderr);\r | |
1676 | /* print up to 8 bytes at the start */\r | |
1677 | while (q < tail && i < 8) {\r | |
1678 | fprintf(stderr, " %02x", *q);\r | |
1679 | ++i;\r | |
1680 | ++q;\r | |
1681 | }\r | |
1682 | /* and up to 8 at the end */\r | |
1683 | if (q < tail) {\r | |
1684 | if (tail - q > 8) {\r | |
1685 | fputs(" ...", stderr);\r | |
1686 | q = tail - 8;\r | |
1687 | }\r | |
1688 | while (q < tail) {\r | |
1689 | fprintf(stderr, " %02x", *q);\r | |
1690 | ++q;\r | |
1691 | }\r | |
1692 | }\r | |
1693 | fputc('\n', stderr);\r | |
1694 | }\r | |
1695 | }\r | |
1696 | \r | |
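/* Print msg padded to a fixed column, then '=' and value formatted with
 * thousands separators (e.g., illustratively, 1234567 prints as 1,234,567),
 * and return value unchanged so callers can accumulate a running total.
 */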
1697 | static size_t\r | |
1698 | printone(const char* msg, size_t value)\r | |
1699 | {\r | |
1700 | int i, k;\r | |
1701 | char buf[100];\r | |
1702 | size_t origvalue = value;\r | |
1703 | \r | |
1704 | fputs(msg, stderr);\r | |
1705 | for (i = (int)strlen(msg); i < 35; ++i)\r | |
1706 | fputc(' ', stderr);\r | |
1707 | fputc('=', stderr);\r | |
1708 | \r | |
1709 | /* Write the value with commas. */\r | |
1710 | i = 22;\r | |
1711 | buf[i--] = '\0';\r | |
1712 | buf[i--] = '\n';\r | |
1713 | k = 3;\r | |
1714 | do {\r | |
1715 | size_t nextvalue = value / 10;\r | |
1716 | uint digit = (uint)(value - nextvalue * 10);\r | |
1717 | value = nextvalue;\r | |
1718 | buf[i--] = (char)(digit + '0');\r | |
1719 | --k;\r | |
1720 | if (k == 0 && value && i >= 0) {\r | |
1721 | k = 3;\r | |
1722 | buf[i--] = ',';\r | |
1723 | }\r | |
1724 | } while (value && i >= 0);\r | |
1725 | \r | |
1726 | while (i >= 0)\r | |
1727 | buf[i--] = ' ';\r | |
1728 | fputs(buf, stderr);\r | |
1729 | \r | |
1730 | return origvalue;\r | |
1731 | }\r | |
1732 | \r | |
1733 | /* Print summary info to stderr about the state of pymalloc's structures.\r | |
1734 | * In Py_DEBUG mode, also perform some expensive internal consistency\r | |
1735 | * checks.\r | |
1736 | */\r | |
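/* In this vintage of the interpreter this report is typically produced at
 * shutdown, when a PYMALLOC_DEBUG build is run with the PYTHONMALLOCSTATS
 * environment variable set, e.g. (illustrative):
 *
 *     PYTHONMALLOCSTATS=1 ./python -c "pass"
 */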
1737 | void\r | |
1738 | _PyObject_DebugMallocStats(void)\r | |
1739 | {\r | |
1740 | uint i;\r | |
1741 | const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;\r | |
1742 | /* # of pools, allocated blocks, and free blocks per class index */\r | |
1743 | size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r | |
1744 | size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r | |
1745 | size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r | |
1746 | /* total # of allocated bytes in used and full pools */\r | |
1747 | size_t allocated_bytes = 0;\r | |
1748 | /* total # of available bytes in used pools */\r | |
1749 | size_t available_bytes = 0;\r | |
1750 | /* # of free pools + pools not yet carved out of current arena */\r | |
1751 | uint numfreepools = 0;\r | |
1752 | /* # of bytes for arena alignment padding */\r | |
1753 | size_t arena_alignment = 0;\r | |
1754 | /* # of bytes in used and full pools used for pool_headers */\r | |
1755 | size_t pool_header_bytes = 0;\r | |
1756 | /* # of bytes in used and full pools wasted due to quantization,\r | |
1757 | * i.e. the necessarily leftover space at the ends of used and\r | |
1758 | * full pools.\r | |
1759 | */\r | |
1760 | size_t quantization = 0;\r | |
1761 | /* # of arenas actually allocated. */\r | |
1762 | size_t narenas = 0;\r | |
1763 | /* running total -- should equal narenas * ARENA_SIZE */\r | |
1764 | size_t total;\r | |
1765 | char buf[128];\r | |
1766 | \r | |
1767 | fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",\r | |
1768 | SMALL_REQUEST_THRESHOLD, numclasses);\r | |
1769 | \r | |
1770 | for (i = 0; i < numclasses; ++i)\r | |
1771 | numpools[i] = numblocks[i] = numfreeblocks[i] = 0;\r | |
1772 | \r | |
1773 | /* Because full pools aren't linked to from anything, it's easiest\r | |
1774 | * to march over all the arenas. If we're lucky, most of the memory\r | |
1775 | * will be living in full pools -- would be a shame to miss them.\r | |
1776 | */\r | |
1777 | for (i = 0; i < maxarenas; ++i) {\r | |
1778 | uint j;\r | |
1779 | uptr base = arenas[i].address;\r | |
1780 | \r | |
1781 | /* Skip arenas which are not allocated. */\r | |
1782 | if (arenas[i].address == (uptr)NULL)\r | |
1783 | continue;\r | |
1784 | narenas += 1;\r | |
1785 | \r | |
1786 | numfreepools += arenas[i].nfreepools;\r | |
1787 | \r | |
1788 | /* round up to pool alignment */\r | |
1789 | if (base & (uptr)POOL_SIZE_MASK) {\r | |
1790 | arena_alignment += POOL_SIZE;\r | |
1791 | base &= ~(uptr)POOL_SIZE_MASK;\r | |
1792 | base += POOL_SIZE;\r | |
1793 | }\r | |
1794 | \r | |
1795 | /* visit every pool in the arena */\r | |
1796 | assert(base <= (uptr) arenas[i].pool_address);\r | |
1797 | for (j = 0;\r | |
1798 | base < (uptr) arenas[i].pool_address;\r | |
1799 | ++j, base += POOL_SIZE) {\r | |
1800 | poolp p = (poolp)base;\r | |
1801 | const uint sz = p->szidx;\r | |
1802 | uint freeblocks;\r | |
1803 | \r | |
1804 | if (p->ref.count == 0) {\r | |
1805 | /* currently unused */\r | |
1806 | assert(pool_is_in_list(p, arenas[i].freepools));\r | |
1807 | continue;\r | |
1808 | }\r | |
1809 | ++numpools[sz];\r | |
1810 | numblocks[sz] += p->ref.count;\r | |
1811 | freeblocks = NUMBLOCKS(sz) - p->ref.count;\r | |
1812 | numfreeblocks[sz] += freeblocks;\r | |
1813 | #ifdef Py_DEBUG\r | |
1814 | if (freeblocks > 0)\r | |
1815 | assert(pool_is_in_list(p, usedpools[sz + sz]));\r | |
1816 | #endif\r | |
1817 | }\r | |
1818 | }\r | |
1819 | assert(narenas == narenas_currently_allocated);\r | |
1820 | \r | |
1821 | fputc('\n', stderr);\r | |
1822 | fputs("class size num pools blocks in use avail blocks\n"\r | |
1823 | "----- ---- --------- ------------- ------------\n",\r | |
1824 | stderr);\r | |
1825 | \r | |
1826 | for (i = 0; i < numclasses; ++i) {\r | |
1827 | size_t p = numpools[i];\r | |
1828 | size_t b = numblocks[i];\r | |
1829 | size_t f = numfreeblocks[i];\r | |
1830 | uint size = INDEX2SIZE(i);\r | |
1831 | if (p == 0) {\r | |
1832 | assert(b == 0 && f == 0);\r | |
1833 | continue;\r | |
1834 | }\r | |
1835 | fprintf(stderr, "%5u %6u "\r | |
1836 | "%11" PY_FORMAT_SIZE_T "u "\r | |
1837 | "%15" PY_FORMAT_SIZE_T "u "\r | |
1838 | "%13" PY_FORMAT_SIZE_T "u\n",\r | |
1839 | i, size, p, b, f);\r | |
1840 | allocated_bytes += b * size;\r | |
1841 | available_bytes += f * size;\r | |
1842 | pool_header_bytes += p * POOL_OVERHEAD;\r | |
1843 | quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);\r | |
1844 | }\r | |
1845 | fputc('\n', stderr);\r | |
1846 | (void)printone("# times object malloc called", serialno);\r | |
1847 | \r | |
1848 | (void)printone("# arenas allocated total", ntimes_arena_allocated);\r | |
1849 | (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);\r | |
1850 | (void)printone("# arenas highwater mark", narenas_highwater);\r | |
1851 | (void)printone("# arenas allocated current", narenas);\r | |
1852 | \r | |
1853 | PyOS_snprintf(buf, sizeof(buf),\r | |
1854 | "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",\r | |
1855 | narenas, ARENA_SIZE);\r | |
1856 | (void)printone(buf, narenas * ARENA_SIZE);\r | |
1857 | \r | |
1858 | fputc('\n', stderr);\r | |
1859 | \r | |
1860 | total = printone("# bytes in allocated blocks", allocated_bytes);\r | |
1861 | total += printone("# bytes in available blocks", available_bytes);\r | |
1862 | \r | |
1863 | PyOS_snprintf(buf, sizeof(buf),\r | |
1864 | "%u unused pools * %d bytes", numfreepools, POOL_SIZE);\r | |
1865 | total += printone(buf, (size_t)numfreepools * POOL_SIZE);\r | |
1866 | \r | |
1867 | total += printone("# bytes lost to pool headers", pool_header_bytes);\r | |
1868 | total += printone("# bytes lost to quantization", quantization);\r | |
1869 | total += printone("# bytes lost to arena alignment", arena_alignment);\r | |
1870 | (void)printone("Total", total);\r | |
1871 | }\r | |
1872 | \r | |
1873 | #endif /* PYMALLOC_DEBUG */\r | |
1874 | \r | |
1875 | #ifdef Py_USING_MEMORY_DEBUGGER\r | |
1876 | /* Make this function last so gcc won't inline it, since the definition\r | |
1877 | * comes after the references to it.\r | |
1878 | */\r | |
1879 | int\r | |
1880 | Py_ADDRESS_IN_RANGE(void *P, poolp pool)\r | |
1881 | {\r | |
1882 | uint arenaindex_temp = pool->arenaindex;\r | |
1883 | \r | |
1884 | return arenaindex_temp < maxarenas &&\r | |
1885 | (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&\r | |
1886 | arenas[arenaindex_temp].address != 0;\r | |
1887 | }\r | |
1888 | #endif\r |