--- /dev/null
+#include "Python.h"\r
+\r
+#if defined(__has_feature) /* Clang */\r
+ #if __has_feature(address_sanitizer) /* is ASAN enabled? */\r
+ #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \\r
+ __attribute__((no_address_safety_analysis)) \\r
+ __attribute__ ((noinline))\r
+ #else\r
+ #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS\r
+ #endif\r
+#else\r
+ #if defined(__SANITIZE_ADDRESS__) /* GCC 4.8.x, is ASAN enabled? */\r
+ #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \\r
+ __attribute__((no_address_safety_analysis)) \\r
+ __attribute__ ((noinline))\r
+ #else\r
+ #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS\r
+ #endif\r
+#endif\r
+\r
+#ifdef WITH_PYMALLOC\r
+\r
+#ifdef HAVE_MMAP\r
+ #include <sys/mman.h>\r
+ #ifdef MAP_ANONYMOUS\r
+ #define ARENAS_USE_MMAP\r
+ #endif\r
+#endif\r
+\r
+#ifdef WITH_VALGRIND\r
+#include <valgrind/valgrind.h>\r
+\r
+/* If we're using GCC, use __builtin_expect() to reduce overhead of\r
+ the valgrind checks */\r
+#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__)\r
+# define UNLIKELY(value) __builtin_expect((value), 0)\r
+#else\r
+# define UNLIKELY(value) (value)\r
+#endif\r
+\r
+/* -1 indicates that we haven't checked that we're running on valgrind yet. */\r
+static int running_on_valgrind = -1;\r
+#endif\r
+\r
+/* An object allocator for Python.\r
+\r
+ Here is an introduction to the layers of the Python memory architecture,\r
+ showing where the object allocator is actually used (layer +2). It is\r
+ called for every object allocation and deallocation (PyObject_New/Del),\r
+ unless the object-specific allocators implement a proprietary allocation\r
+ scheme (ex.: ints use a simple free list). This is also the place where\r
+ the cyclic garbage collector operates selectively on container objects.\r
+\r
+\r
+ Object-specific allocators\r
+ _____ ______ ______ ________\r
+ [ int ] [ dict ] [ list ] ... [ string ] Python core |\r
++3 | <----- Object-specific memory -----> | <-- Non-object memory --> |\r
+ _______________________________ | |\r
+ [ Python's object allocator ] | |\r
++2 | ####### Object memory ####### | <------ Internal buffers ------> |\r
+ ______________________________________________________________ |\r
+ [ Python's raw memory allocator (PyMem_ API) ] |\r
++1 | <----- Python memory (under PyMem manager's control) ------> | |\r
+ __________________________________________________________________\r
+ [ Underlying general-purpose allocator (ex: C library malloc) ]\r
+ 0 | <------ Virtual memory allocated for the python process -------> |\r
+\r
+ =========================================================================\r
+ _______________________________________________________________________\r
+ [ OS-specific Virtual Memory Manager (VMM) ]\r
+-1 | <--- Kernel dynamic storage allocation & management (page-based) ---> |\r
+ __________________________________ __________________________________\r
+ [ ] [ ]\r
+-2 | <-- Physical memory: ROM/RAM --> | | <-- Secondary storage (swap) --> |\r
+\r
+*/\r
+/*==========================================================================*/\r
+\r
+/* A fast, special-purpose memory allocator for small blocks, to be used\r
+ on top of a general-purpose malloc -- heavily based on previous art. */\r
+\r
+/* Vladimir Marangozov -- August 2000 */\r
+\r
+/*\r
+ * "Memory management is where the rubber meets the road -- if we do the wrong\r
+ * thing at any level, the results will not be good. And if we don't make the\r
+ * levels work well together, we are in serious trouble." (1)\r
+ *\r
+ * (1) Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles,\r
+ * "Dynamic Storage Allocation: A Survey and Critical Review",\r
+ * in Proc. 1995 Int'l. Workshop on Memory Management, September 1995.\r
+ */\r
+\r
+/* #undef WITH_MEMORY_LIMITS */ /* disable mem limit checks */\r
+\r
+/*==========================================================================*/\r
+\r
+/*\r
+ * Allocation strategy abstract:\r
+ *\r
+ * For small requests, the allocator sub-allocates <Big> blocks of memory.\r
+ * Requests greater than SMALL_REQUEST_THRESHOLD bytes are routed to the\r
+ * system's allocator. \r
+ *\r
+ * Small requests are grouped in size classes spaced 8 bytes apart, due\r
+ * to the required valid alignment of the returned address. Requests of\r
+ * a particular size are serviced from memory pools of 4K (one VMM page).\r
+ * Pools are fragmented on demand and contain free lists of blocks of one\r
+ * particular size class. In other words, there is a fixed-size allocator\r
+ * for each size class. Free pools are shared by the different allocators\r
+ * thus minimizing the space reserved for a particular size class.\r
+ *\r
+ * This allocation strategy is a variant of what is known as "simple\r
+ * segregated storage based on array of free lists". The main drawback of\r
+ * simple segregated storage is that we might end up with a lot of reserved\r
+ * memory for the different free lists, which can degenerate over time. To avoid\r
+ * this, we partition each free list in pools and we share dynamically the\r
+ * reserved space between all free lists. This technique is quite efficient\r
+ * for memory intensive programs which allocate mainly small-sized blocks.\r
+ *\r
+ * For small requests we have the following table:\r
+ *\r
+ * Request in bytes Size of allocated block Size class idx\r
+ * ----------------------------------------------------------------\r
+ * 1-8 8 0\r
+ * 9-16 16 1\r
+ * 17-24 24 2\r
+ * 25-32 32 3\r
+ * 33-40 40 4\r
+ * 41-48 48 5\r
+ * 49-56 56 6\r
+ * 57-64 64 7\r
+ * 65-72 72 8\r
+ * ... ... ...\r
+ * 497-504 504 62\r
+ * 505-512 512 63 \r
+ *\r
+ * 0, SMALL_REQUEST_THRESHOLD + 1 and up: routed to the underlying\r
+ * allocator.\r
+ */\r
+\r
+/*==========================================================================*/\r
+\r
+/*\r
+ * -- Main tunable settings section --\r
+ */\r
+\r
+/*\r
+ * Alignment of addresses returned to the user. 8-bytes alignment works\r
+ * on most current architectures (with 32-bit or 64-bit address busses).\r
+ * The alignment value is also used for grouping small requests in size\r
+ * classes spaced ALIGNMENT bytes apart.\r
+ *\r
+ * You shouldn't change this unless you know what you are doing.\r
+ */\r
+#define ALIGNMENT 8 /* must be 2^N */\r
+#define ALIGNMENT_SHIFT 3\r
+#define ALIGNMENT_MASK (ALIGNMENT - 1)\r
+\r
+/* Return the number of bytes in size class I, as a uint. */\r
+#define INDEX2SIZE(I) (((uint)(I) + 1) << ALIGNMENT_SHIFT)\r
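+\r
+/* Worked example (illustrative, not compiled): a request of nbytes maps to\r
+ * size class idx = (nbytes - 1) >> ALIGNMENT_SHIFT, and INDEX2SIZE(idx)\r
+ * maps back to the block size. For a 20-byte request, idx = 19 >> 3 = 2\r
+ * and INDEX2SIZE(2) = (2 + 1) << 3 = 24, matching the "17-24 -> 24,\r
+ * idx 2" row of the table above. PyObject_Malloc() below computes exactly\r
+ * this idx.\r
+ */\r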
+\r
+/*\r
+ * Max size threshold below which malloc requests are considered to be\r
+ * small enough in order to use preallocated memory pools. You can tune\r
+ * this value according to your application behaviour and memory needs.\r
+ *\r
+ * The following invariants must hold:\r
+ * 1) ALIGNMENT <= SMALL_REQUEST_THRESHOLD <= 512\r
+ * 2) SMALL_REQUEST_THRESHOLD is evenly divisible by ALIGNMENT\r
+ *\r
+ * Note: a size threshold of 512 guarantees that newly created dictionaries\r
+ * will be allocated from preallocated memory pools on 64-bit platforms.\r
+ *\r
+ * Although not required, for better performance and space efficiency,\r
+ * it is recommended that SMALL_REQUEST_THRESHOLD is set to a power of 2.\r
+ */\r
+#define SMALL_REQUEST_THRESHOLD 512 \r
+#define NB_SMALL_SIZE_CLASSES (SMALL_REQUEST_THRESHOLD / ALIGNMENT)\r
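+\r
+/* With the settings above, NB_SMALL_SIZE_CLASSES == 512 / 8 == 64, and both\r
+ * invariants hold: ALIGNMENT (8) <= 512 <= 512, and 512 % 8 == 0. Changing\r
+ * either value means rechecking the usedpools[] initializer below, which is\r
+ * written out for at most 64 size classes.\r
+ */\r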
+\r
+/*\r
+ * The system's VMM page size can be obtained on most unices with a\r
+ * getpagesize() call or deduced from various header files. To make\r
+ * things simpler, we assume that it is 4K, which is OK for most systems.\r
+ * It is probably better if this is the native page size, but it doesn't\r
+ * have to be. In theory, if SYSTEM_PAGE_SIZE is larger than the native page\r
+ * size, then `POOL_ADDR(p)->arenaindex' could rarely cause a segmentation\r
+ * violation fault. 4K is apparently OK for all the platforms that Python\r
+ * currently targets.\r
+ */\r
+#define SYSTEM_PAGE_SIZE (4 * 1024)\r
+#define SYSTEM_PAGE_SIZE_MASK (SYSTEM_PAGE_SIZE - 1)\r
+\r
+/*\r
+ * Maximum amount of memory managed by the allocator for small requests.\r
+ */\r
+#ifdef WITH_MEMORY_LIMITS\r
+#ifndef SMALL_MEMORY_LIMIT\r
+#define SMALL_MEMORY_LIMIT (64 * 1024 * 1024) /* 64 MB -- more? */\r
+#endif\r
+#endif\r
+\r
+/*\r
+ * The allocator sub-allocates <Big> blocks of memory (called arenas) aligned\r
+ * on a page boundary. This is a reserved virtual address space for the\r
+ * current process (obtained through a malloc() or mmap() call). This does\r
+ * not mean that an arena's memory will be used entirely: a malloc(<Big>) is\r
+ * usually just an address range reservation for <Big> bytes, and physical\r
+ * pages are committed only as they are subsequently referenced. So\r
+ * malloc'ing big blocks and not using them wastes addressable range, not\r
+ * physical memory.\r
+ *\r
+ * Arenas are allocated with mmap() on systems supporting anonymous memory\r
+ * mappings to reduce heap fragmentation.\r
+ */\r
+#define ARENA_SIZE (256 << 10) /* 256KB */\r
+\r
+#ifdef WITH_MEMORY_LIMITS\r
+#define MAX_ARENAS (SMALL_MEMORY_LIMIT / ARENA_SIZE)\r
+#endif\r
+\r
+/*\r
+ * Size of the pools used for small blocks. Should be a power of 2,\r
+ * between 1K and SYSTEM_PAGE_SIZE, that is: 1k, 2k, 4k.\r
+ */\r
+#define POOL_SIZE SYSTEM_PAGE_SIZE /* must be 2^N */\r
+#define POOL_SIZE_MASK SYSTEM_PAGE_SIZE_MASK\r
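+\r
+/* With the defaults above, an arena yields ARENA_SIZE / POOL_SIZE ==\r
+ * (256 << 10) / (4 << 10) == 64 pools, one of which may be sacrificed to\r
+ * pool alignment; see new_arena() below.\r
+ */\r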
+\r
+/*\r
+ * -- End of tunable settings section --\r
+ */\r
+\r
+/*==========================================================================*/\r
+\r
+/*\r
+ * Locking\r
+ *\r
+ * To reduce lock contention, it would probably be better to refine the\r
+ * crude function locking with per size class locking. I'm not positive,\r
+ * however, that it's worth switching to such a locking policy, given\r
+ * the performance penalty it might introduce.\r
+ *\r
+ * The following macros describe the simplest (should also be the fastest)\r
+ * lock object on a particular platform and the init/fini/lock/unlock\r
+ * operations on it. The locks defined here are not expected to be recursive\r
+ * because it is assumed that they will always be called in the order:\r
+ * INIT, [LOCK, UNLOCK]*, FINI.\r
+ */\r
+\r
+/*\r
+ * Python's threads are serialized, so object malloc locking is disabled.\r
+ */\r
+#define SIMPLELOCK_DECL(lock) /* simple lock declaration */\r
+#define SIMPLELOCK_INIT(lock) /* allocate (if needed) and initialize */\r
+#define SIMPLELOCK_FINI(lock) /* free/destroy an existing lock */\r
+#define SIMPLELOCK_LOCK(lock) /* acquire released lock */\r
+#define SIMPLELOCK_UNLOCK(lock) /* release acquired lock */\r
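+\r
+/* For illustration only -- an assumption, not something this file does: if\r
+ * real locking were ever needed, the SIMPLELOCK_* macros could plausibly be\r
+ * mapped onto the PyThread API from "pythread.h" along these lines:\r
+ *\r
+ * #define SIMPLELOCK_DECL(lock) static PyThread_type_lock lock;\r
+ * #define SIMPLELOCK_INIT(lock) ((lock) = PyThread_allocate_lock())\r
+ * #define SIMPLELOCK_FINI(lock) PyThread_free_lock(lock)\r
+ * #define SIMPLELOCK_LOCK(lock) PyThread_acquire_lock((lock), 1)\r
+ * #define SIMPLELOCK_UNLOCK(lock) PyThread_release_lock(lock)\r
+ */\r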
+\r
+/*\r
+ * Basic types\r
+ * I don't care if these are defined in <sys/types.h> or elsewhere. Axiom.\r
+ */\r
+#undef uchar\r
+#define uchar unsigned char /* assuming == 8 bits */\r
+\r
+#undef uint\r
+#define uint unsigned int /* assuming >= 16 bits */\r
+\r
+#undef ulong\r
+#define ulong unsigned long /* assuming >= 32 bits */\r
+\r
+#undef uptr\r
+#define uptr Py_uintptr_t\r
+\r
+/* When you say memory, my mind reasons in terms of (pointers to) blocks */\r
+typedef uchar block;\r
+\r
+/* Pool for small blocks. */\r
+struct pool_header {\r
+ union { block *_padding;\r
+ uint count; } ref; /* number of allocated blocks */\r
+ block *freeblock; /* pool's free list head */\r
+ struct pool_header *nextpool; /* next pool of this size class */\r
+ struct pool_header *prevpool; /* previous pool "" */\r
+ uint arenaindex; /* index into arenas of base adr */\r
+ uint szidx; /* block size class index */\r
+ uint nextoffset; /* bytes to virgin block */\r
+ uint maxnextoffset; /* largest valid nextoffset */\r
+};\r
+\r
+typedef struct pool_header *poolp;\r
+\r
+/* Record keeping for arenas. */\r
+struct arena_object {\r
+ /* The address of the arena, as returned by malloc. Note that 0\r
+ * will never be returned by a successful malloc, and is used\r
+ * here to mark an arena_object that doesn't correspond to an\r
+ * allocated arena.\r
+ */\r
+ uptr address;\r
+\r
+ /* Pool-aligned pointer to the next pool to be carved off. */\r
+ block* pool_address;\r
+\r
+ /* The number of available pools in the arena: free pools + never-\r
+ * allocated pools.\r
+ */\r
+ uint nfreepools;\r
+\r
+ /* The total number of pools in the arena, whether or not available. */\r
+ uint ntotalpools;\r
+\r
+ /* Singly-linked list of available pools. */\r
+ struct pool_header* freepools;\r
+\r
+ /* Whenever this arena_object is not associated with an allocated\r
+ * arena, the nextarena member is used to link all unassociated\r
+ * arena_objects in the singly-linked `unused_arena_objects` list.\r
+ * The prevarena member is unused in this case.\r
+ *\r
+ * When this arena_object is associated with an allocated arena\r
+ * with at least one available pool, both members are used in the\r
+ * doubly-linked `usable_arenas` list, which is maintained in\r
+ * increasing order of `nfreepools` values.\r
+ *\r
+ * Else this arena_object is associated with an allocated arena\r
+ * all of whose pools are in use. `nextarena` and `prevarena`\r
+ * are both meaningless in this case.\r
+ */\r
+ struct arena_object* nextarena;\r
+ struct arena_object* prevarena;\r
+};\r
+\r
+#undef ROUNDUP\r
+#define ROUNDUP(x) (((x) + ALIGNMENT_MASK) & ~ALIGNMENT_MASK)\r
+#define POOL_OVERHEAD ROUNDUP(sizeof(struct pool_header))\r
+\r
+#define DUMMY_SIZE_IDX 0xffff /* size class of newly cached pools */\r
+\r
+/* Round pointer P down to the closest pool-aligned address <= P, as a poolp */\r
+#define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))\r
+\r
+/* Return total number of blocks in pool of size index I, as a uint. */\r
+#define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))\r
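+\r
+/* Worked example, assuming a typical LP64 build: sizeof(struct pool_header)\r
+ * is 4 pointers + 4 uints == 48 bytes, so POOL_OVERHEAD == ROUNDUP(48) == 48\r
+ * and NUMBLOCKS(0) == (4096 - 48) / 8 == 506 eight-byte blocks per pool.\r
+ */\r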
+\r
+/*==========================================================================*/\r
+\r
+/*\r
+ * This malloc lock\r
+ */\r
+SIMPLELOCK_DECL(_malloc_lock)\r
+#define LOCK() SIMPLELOCK_LOCK(_malloc_lock)\r
+#define UNLOCK() SIMPLELOCK_UNLOCK(_malloc_lock)\r
+#define LOCK_INIT() SIMPLELOCK_INIT(_malloc_lock)\r
+#define LOCK_FINI() SIMPLELOCK_FINI(_malloc_lock)\r
+\r
+/*\r
+ * Pool table -- headed, circular, doubly-linked lists of partially used pools.\r
+\r
+This is involved. For an index i, usedpools[i+i] is the header for a list of\r
+all partially used pools holding small blocks with "size class idx" i. So\r
+usedpools[0] corresponds to blocks of size 8, usedpools[2] to blocks of size\r
+16, and so on: index 2*i <-> blocks of size (i+1)<<ALIGNMENT_SHIFT.\r
+\r
+Pools are carved off an arena's highwater mark (an arena_object's pool_address\r
+member) as needed. Once carved off, a pool is in one of three states forever\r
+after:\r
+\r
+used == partially used, neither empty nor full\r
+ At least one block in the pool is currently allocated, and at least one\r
+ block in the pool is not currently allocated (note this implies a pool\r
+ has room for at least two blocks).\r
+ This is a pool's initial state, as a pool is created only when malloc\r
+ needs space.\r
+ The pool holds blocks of a fixed size, and is in the circular list headed\r
+ at usedpools[i] (see above). It's linked to the other used pools of the\r
+ same size class via the pool_header's nextpool and prevpool members.\r
+ If all but one block is currently allocated, a malloc can cause a\r
+ transition to the full state. If all but one block is not currently\r
+ allocated, a free can cause a transition to the empty state.\r
+\r
+full == all the pool's blocks are currently allocated\r
+ On transition to full, a pool is unlinked from its usedpools[] list.\r
+ It's not linked to from anything then anymore, and its nextpool and\r
+ prevpool members are meaningless until it transitions back to used.\r
+ A free of a block in a full pool puts the pool back in the used state.\r
+ Then it's linked in at the front of the appropriate usedpools[] list, so\r
+ that the next allocation for its size class will reuse the freed block.\r
+\r
+empty == all the pool's blocks are currently available for allocation\r
+ On transition to empty, a pool is unlinked from its usedpools[] list,\r
+ and linked to the front of its arena_object's singly-linked freepools list,\r
+ via its nextpool member. The prevpool member has no meaning in this case.\r
+ Empty pools have no inherent size class: the next time a malloc finds\r
+ an empty list in usedpools[], it takes the first pool off of freepools.\r
+ If the size class needed happens to be the same as the size class the pool\r
+ last had, some pool initialization can be skipped.\r
+\r
+\r
+Block Management\r
+\r
+Blocks within pools are again carved out as needed. pool->freeblock points to\r
+the start of a singly-linked list of free blocks within the pool. When a\r
+block is freed, it's inserted at the front of its pool's freeblock list. Note\r
+that the available blocks in a pool are *not* linked all together when a pool\r
+is initialized. Instead only "the first two" (lowest addresses) blocks are\r
+set up, returning the first such block, and setting pool->freeblock to a\r
+one-block list holding the second such block. This is consistent with the\r
+fact that pymalloc strives, at all levels (arena, pool, and block), never to\r
+touch a piece of memory until it's actually needed.\r
+\r
+So long as a pool is in the used state, we're certain there *is* a block\r
+available for allocating, and pool->freeblock is not NULL. If pool->freeblock\r
+points to the end of the free list before we've carved the entire pool into\r
+blocks, that means we simply haven't yet gotten to one of the higher-address\r
+blocks. The offset from the pool_header to the start of "the next" virgin\r
+block is stored in the pool_header nextoffset member, and the largest value\r
+of nextoffset that makes sense is stored in the maxnextoffset member when a\r
+pool is initialized. All the blocks in a pool have been passed out at least\r
+once when and only when nextoffset > maxnextoffset.\r
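+\r
+As a concrete example (illustrative numbers, assuming the 48-byte\r
+POOL_OVERHEAD of a typical 64-bit build): when a pool is initialized for\r
+16-byte blocks, the first block is handed out at offset 48, the free list\r
+holds the block at offset 64, nextoffset starts at 48 + 2*16 = 80, and\r
+maxnextoffset is 4096 - 16 = 4080. Each time the free list runs dry, one\r
+more virgin block is carved off at nextoffset, until nextoffset exceeds\r
+maxnextoffset.\r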
+\r
+\r
+Major obscurity: While the usedpools vector is declared to have poolp\r
+entries, it doesn't really. It really contains two pointers per (conceptual)\r
+poolp entry, the nextpool and prevpool members of a pool_header. The\r
+excruciating initialization code below fools C so that\r
+\r
+ usedpool[i+i]\r
+\r
+"acts like" a genuine poolp, but only so long as you only reference its\r
+nextpool and prevpool members. The "- 2*sizeof(block *)" gibberish is\r
+compensating for the fact that a pool_header's nextpool and prevpool members\r
+immediately follow a pool_header's first two members:\r
+\r
+ union { block *_padding;\r
+ uint count; } ref;\r
+ block *freeblock;\r
+\r
+each of which consume sizeof(block *) bytes. So what usedpools[i+i] really\r
+contains is a fudged-up pointer p such that *if* C believes it's a poolp\r
+pointer, then p->nextpool and p->prevpool are both p (meaning that the headed\r
+circular list is empty).\r
+\r
+It's unclear why the usedpools setup is so convoluted. It could be to\r
+minimize the amount of cache required to hold this heavily-referenced table\r
+(which only *needs* the two interpool pointer members of a pool_header). OTOH,\r
+referencing code has to remember to "double the index" and doing so isn't\r
+free, usedpools[0] isn't a strictly legal pointer, and we're crucially relying\r
+on the fact that C doesn't insert any padding anywhere in a pool_header at\r
+or before the prevpool member.\r
+**************************************************************************** */\r
+\r
+#define PTA(x) ((poolp )((uchar *)&(usedpools[2*(x)]) - 2*sizeof(block *)))\r
+#define PT(x) PTA(x), PTA(x)\r
+\r
+static poolp usedpools[2 * ((NB_SMALL_SIZE_CLASSES + 7) / 8) * 8] = {\r
+ PT(0), PT(1), PT(2), PT(3), PT(4), PT(5), PT(6), PT(7)\r
+#if NB_SMALL_SIZE_CLASSES > 8\r
+ , PT(8), PT(9), PT(10), PT(11), PT(12), PT(13), PT(14), PT(15)\r
+#if NB_SMALL_SIZE_CLASSES > 16\r
+ , PT(16), PT(17), PT(18), PT(19), PT(20), PT(21), PT(22), PT(23)\r
+#if NB_SMALL_SIZE_CLASSES > 24\r
+ , PT(24), PT(25), PT(26), PT(27), PT(28), PT(29), PT(30), PT(31)\r
+#if NB_SMALL_SIZE_CLASSES > 32\r
+ , PT(32), PT(33), PT(34), PT(35), PT(36), PT(37), PT(38), PT(39)\r
+#if NB_SMALL_SIZE_CLASSES > 40\r
+ , PT(40), PT(41), PT(42), PT(43), PT(44), PT(45), PT(46), PT(47)\r
+#if NB_SMALL_SIZE_CLASSES > 48\r
+ , PT(48), PT(49), PT(50), PT(51), PT(52), PT(53), PT(54), PT(55)\r
+#if NB_SMALL_SIZE_CLASSES > 56\r
+ , PT(56), PT(57), PT(58), PT(59), PT(60), PT(61), PT(62), PT(63)\r
+#if NB_SMALL_SIZE_CLASSES > 64\r
+#error "NB_SMALL_SIZE_CLASSES should be less than 64"\r
+#endif /* NB_SMALL_SIZE_CLASSES > 64 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 56 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 48 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 40 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 32 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 24 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 16 */\r
+#endif /* NB_SMALL_SIZE_CLASSES > 8 */\r
+};\r
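+\r
+/* Sanity sketch (illustrative): after the static initialization above, each\r
+ * conceptual header is an empty circular list. That is, for any size class\r
+ * i,\r
+ *\r
+ * poolp hdr = usedpools[i + i];\r
+ * assert(hdr->nextpool == hdr && hdr->prevpool == hdr);\r
+ *\r
+ * which is exactly the "pool != pool->nextpool" emptiness test that\r
+ * PyObject_Malloc() relies on below.\r
+ */\r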
+\r
+/*==========================================================================\r
+Arena management.\r
+\r
+`arenas` is a vector of arena_objects. It contains maxarenas entries, some of\r
+which may not be currently used (== they're arena_objects that aren't\r
+currently associated with an allocated arena). Note that arenas proper are\r
+separately malloc'ed.\r
+\r
+Prior to Python 2.5, arenas were never free()'ed. Starting with Python 2.5,\r
+we do try to free() arenas, and use some mild heuristic strategies to increase\r
+the likelihood that arenas eventually can be freed.\r
+\r
+unused_arena_objects\r
+\r
+ This is a singly-linked list of the arena_objects that are currently not\r
+ being used (no arena is associated with them). Objects are taken off the\r
+ head of the list in new_arena(), and are pushed on the head of the list in\r
+ PyObject_Free() when the arena is empty. Key invariant: an arena_object\r
+ is on this list if and only if its .address member is 0.\r
+\r
+usable_arenas\r
+\r
+ This is a doubly-linked list of the arena_objects associated with arenas\r
+ that have pools available. These pools are either waiting to be reused,\r
+ or have not been used before. The list is sorted to have the most-\r
+ allocated arenas first (ascending order based on the nfreepools member).\r
+ This means that the next allocation will come from a heavily used arena,\r
+ which gives the nearly empty arenas a chance to be returned to the system.\r
+ In my unscientific tests this dramatically improved the number of arenas\r
+ that could be freed.\r
+\r
+Note that an arena_object associated with an arena all of whose pools are\r
+currently in use isn't on either list.\r
+*/\r
+\r
+/* Array of objects used to track chunks of memory (arenas). */\r
+static struct arena_object* arenas = NULL;\r
+/* Number of slots currently allocated in the `arenas` vector. */\r
+static uint maxarenas = 0;\r
+\r
+/* The head of the singly-linked, NULL-terminated list of available\r
+ * arena_objects.\r
+ */\r
+static struct arena_object* unused_arena_objects = NULL;\r
+\r
+/* The head of the doubly-linked, NULL-terminated at each end, list of\r
+ * arena_objects associated with arenas that have pools available.\r
+ */\r
+static struct arena_object* usable_arenas = NULL;\r
+\r
+/* How many arena_objects do we initially allocate?\r
+ * 16 = can allocate 16 arenas = 16 * ARENA_SIZE = 4MB before growing the\r
+ * `arenas` vector.\r
+ */\r
+#define INITIAL_ARENA_OBJECTS 16\r
+\r
+/* Number of arenas allocated that haven't been free()'d. */\r
+static size_t narenas_currently_allocated = 0;\r
+\r
+#ifdef PYMALLOC_DEBUG\r
+/* Total number of times malloc() was called to allocate an arena. */\r
+static size_t ntimes_arena_allocated = 0;\r
+/* High water mark (max value ever seen) for narenas_currently_allocated. */\r
+static size_t narenas_highwater = 0;\r
+#endif\r
+\r
+/* Allocate a new arena. If we run out of memory, return NULL. Else\r
+ * allocate a new arena, and return the address of an arena_object\r
+ * describing the new arena. It's expected that the caller will set\r
+ * `usable_arenas` to the return value.\r
+ */\r
+static struct arena_object*\r
+new_arena(void)\r
+{\r
+ struct arena_object* arenaobj;\r
+ uint excess; /* number of bytes above pool alignment */\r
+ void *address;\r
+ int err;\r
+\r
+#ifdef PYMALLOC_DEBUG\r
+ if (Py_GETENV("PYTHONMALLOCSTATS"))\r
+ _PyObject_DebugMallocStats();\r
+#endif\r
+ if (unused_arena_objects == NULL) {\r
+ uint i;\r
+ uint numarenas;\r
+ size_t nbytes;\r
+\r
+ /* Double the number of arena objects on each allocation.\r
+ * Note that it's possible for `numarenas` to overflow.\r
+ */\r
+ numarenas = maxarenas ? maxarenas << 1 : INITIAL_ARENA_OBJECTS;\r
+ if (numarenas <= maxarenas)\r
+ return NULL; /* overflow */\r
+#if SIZEOF_SIZE_T <= SIZEOF_INT\r
+ if (numarenas > PY_SIZE_MAX / sizeof(*arenas))\r
+ return NULL; /* overflow */\r
+#endif\r
+ nbytes = numarenas * sizeof(*arenas);\r
+ arenaobj = (struct arena_object *)realloc(arenas, nbytes);\r
+ if (arenaobj == NULL)\r
+ return NULL;\r
+ arenas = arenaobj;\r
+\r
+ /* We might need to fix pointers that were copied. However,\r
+ * new_arena only gets called when all the pages in the\r
+ * previous arenas are full. Thus, there are *no* pointers\r
+ * into the old array. Thus, we don't have to worry about\r
+ * invalid pointers. Just to be sure, some asserts:\r
+ */\r
+ assert(usable_arenas == NULL);\r
+ assert(unused_arena_objects == NULL);\r
+\r
+ /* Put the new arenas on the unused_arena_objects list. */\r
+ for (i = maxarenas; i < numarenas; ++i) {\r
+ arenas[i].address = 0; /* mark as unassociated */\r
+ arenas[i].nextarena = i < numarenas - 1 ?\r
+ &arenas[i+1] : NULL;\r
+ }\r
+\r
+ /* Update globals. */\r
+ unused_arena_objects = &arenas[maxarenas];\r
+ maxarenas = numarenas;\r
+ }\r
+\r
+ /* Take the next available arena object off the head of the list. */\r
+ assert(unused_arena_objects != NULL);\r
+ arenaobj = unused_arena_objects;\r
+ unused_arena_objects = arenaobj->nextarena;\r
+ assert(arenaobj->address == 0);\r
+#ifdef ARENAS_USE_MMAP\r
+ address = mmap(NULL, ARENA_SIZE, PROT_READ|PROT_WRITE,\r
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);\r
+ err = (address == MAP_FAILED);\r
+#else\r
+ address = malloc(ARENA_SIZE);\r
+ err = (address == 0);\r
+#endif \r
+ if (err) {\r
+ /* The allocation failed: return NULL after putting the\r
+ * arenaobj back.\r
+ */\r
+ arenaobj->nextarena = unused_arena_objects;\r
+ unused_arena_objects = arenaobj;\r
+ return NULL;\r
+ }\r
+ arenaobj->address = (uptr)address;\r
+\r
+ ++narenas_currently_allocated;\r
+#ifdef PYMALLOC_DEBUG\r
+ ++ntimes_arena_allocated;\r
+ if (narenas_currently_allocated > narenas_highwater)\r
+ narenas_highwater = narenas_currently_allocated;\r
+#endif\r
+ arenaobj->freepools = NULL;\r
+ /* pool_address <- first pool-aligned address in the arena\r
+ nfreepools <- number of whole pools that fit after alignment */\r
+ arenaobj->pool_address = (block*)arenaobj->address;\r
+ arenaobj->nfreepools = ARENA_SIZE / POOL_SIZE;\r
+ assert(POOL_SIZE * arenaobj->nfreepools == ARENA_SIZE);\r
+ excess = (uint)(arenaobj->address & POOL_SIZE_MASK);\r
+ if (excess != 0) {\r
+ --arenaobj->nfreepools;\r
+ arenaobj->pool_address += POOL_SIZE - excess;\r
+ }\r
+ arenaobj->ntotalpools = arenaobj->nfreepools;\r
+\r
+ return arenaobj;\r
+}\r
+\r
+/*\r
+Py_ADDRESS_IN_RANGE(P, POOL)\r
+\r
+Return true if and only if P is an address that was allocated by pymalloc.\r
+POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)\r
+(the caller is asked to compute this because the macro expands POOL more than\r
+once, and for efficiency it's best for the caller to assign POOL_ADDR(P) to a\r
+variable and pass the latter to the macro; because Py_ADDRESS_IN_RANGE is\r
+called on every alloc/realloc/free, micro-efficiency is important here).\r
+\r
+Tricky: Let B be the arena base address associated with the pool, B =\r
+arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if\r
+\r
+ B <= P < B + ARENA_SIZE\r
+\r
+Subtracting B throughout, this is true iff\r
+\r
+ 0 <= P-B < ARENA_SIZE\r
+\r
+By using unsigned arithmetic, the "0 <=" half of the test can be skipped.\r
+\r
+Obscure: A PyMem "free memory" function can call the pymalloc free or realloc\r
+before the first arena has been allocated. `arenas` is still NULL in that\r
+case. We're relying on the fact that maxarenas is also 0 in that case, so\r
+(POOL)->arenaindex < maxarenas must be false, saving us from trying to index\r
+into a NULL arenas.\r
+\r
+Details: given P and POOL, the arena_object corresponding to P is AO =\r
+arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild\r
+stores, etc), POOL is the correct address of P's pool, AO.address is the\r
+correct base address of the pool's arena, and P must be within ARENA_SIZE of\r
+AO.address. In addition, AO.address is not 0 (no arena can start at address 0\r
+(NULL)). Therefore Py_ADDRESS_IN_RANGE correctly reports that obmalloc\r
+controls P.\r
+\r
+Now suppose obmalloc does not control P (e.g., P was obtained via a direct\r
+call to the system malloc() or realloc()). (POOL)->arenaindex may be anything\r
+in this case -- it may even be uninitialized trash. If the trash arenaindex\r
+is >= maxarenas, the macro correctly concludes at once that obmalloc doesn't\r
+control P.\r
+\r
+Else arenaindex is < maxarenas, and AO is read up. If AO corresponds to an\r
+allocated arena, obmalloc controls all the memory in slice AO.address :\r
+AO.address+ARENA_SIZE. By case assumption, P is not controlled by obmalloc,\r
+so P doesn't lie in that slice, so the macro correctly reports that P is not\r
+controlled by obmalloc.\r
+\r
+Finally, if P is not controlled by obmalloc and AO corresponds to an unused\r
+arena_object (one not currently associated with an allocated arena),\r
+AO.address is 0, and the second test in the macro reduces to:\r
+\r
+ P < ARENA_SIZE\r
+\r
+If P >= ARENA_SIZE (extremely likely), the macro again correctly concludes\r
+that P is not controlled by obmalloc. However, if P < ARENA_SIZE, this part\r
+of the test still passes, and the third clause (AO.address != 0) is necessary\r
+to get the correct result: AO.address is 0 in this case, so the macro\r
+correctly reports that P is not controlled by obmalloc (despite that P lies in\r
+slice AO.address : AO.address + ARENA_SIZE).\r
+\r
+Note: The third (AO.address != 0) clause was added in Python 2.5. Before\r
+2.5, arenas were never free()'ed, and an arenaindex < maxarenas always\r
+corresponded to a currently-allocated arena, so the "P is not controlled by\r
+obmalloc, AO corresponds to an unused arena_object, and P < ARENA_SIZE" case\r
+was impossible.\r
+\r
+Note that the logic is excruciating, and reading up possibly uninitialized\r
+memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)\r
+creates problems for some memory debuggers. The overwhelming advantage is\r
+that this test determines whether an arbitrary address is controlled by\r
+obmalloc in a small constant time, independent of the number of arenas\r
+obmalloc controls. Since this test is needed at every entry point, it's\r
+extremely desirable that it be this fast.\r
+\r
+Since Py_ADDRESS_IN_RANGE may be reading from memory which was not allocated\r
+by Python, it is important that (POOL)->arenaindex is read only once, as\r
+another thread may be concurrently modifying the value without holding the\r
+GIL. To accomplish this, the arenaindex_temp variable is used to store\r
+(POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's\r
+execution. The caller of the macro is responsible for declaring this\r
+variable.\r
+*/\r
+#define Py_ADDRESS_IN_RANGE(P, POOL) \\r
+ ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \\r
+ (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \\r
+ arenas[arenaindex_temp].address != 0)\r
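+\r
+/* Typical call pattern, as used by PyObject_Free() and PyObject_Realloc()\r
+ * below -- the caller supplies both POOL_ADDR(P) and the arenaindex_temp\r
+ * variable the macro stores into:\r
+ *\r
+ * uint arenaindex_temp;\r
+ * poolp pool = POOL_ADDR(p);\r
+ * if (Py_ADDRESS_IN_RANGE(p, pool)) {\r
+ * ... pymalloc controls p ...\r
+ * }\r
+ *\r
+ * Note how the single unsigned comparison (uptr)(P) - B < ARENA_SIZE covers\r
+ * both bounds: if P < B, the subtraction wraps to a huge value and the test\r
+ * fails, as described above.\r
+ */\r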
+\r
+\r
+/* This is only useful when running memory debuggers such as\r
+ * Purify or Valgrind. Uncomment to use.\r
+ *\r
+#define Py_USING_MEMORY_DEBUGGER\r
+ */\r
+\r
+#ifdef Py_USING_MEMORY_DEBUGGER\r
+\r
+/* Py_ADDRESS_IN_RANGE may access uninitialized memory by design.\r
+ * This leads to thousands of spurious warnings when using\r
+ * Purify or Valgrind. By making a function, we can easily\r
+ * suppress the uninitialized memory reads in this one function.\r
+ * So we won't ignore real errors elsewhere.\r
+ *\r
+ * Disable the macro and use a function.\r
+ */\r
+\r
+#undef Py_ADDRESS_IN_RANGE\r
+\r
+#if defined(__GNUC__) && ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) || \\r
+ (__GNUC__ >= 4))\r
+#define Py_NO_INLINE __attribute__((__noinline__))\r
+#else\r
+#define Py_NO_INLINE\r
+#endif\r
+\r
+/* Don't make static, to try to ensure this isn't inlined. */\r
+int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;\r
+#undef Py_NO_INLINE\r
+#endif\r
+\r
+/*==========================================================================*/\r
+\r
+/* malloc. Note that nbytes==0 tries to return a non-NULL pointer, distinct\r
+ * from all other currently live pointers. This may not be possible.\r
+ */\r
+\r
+/*\r
+ * The basic blocks are ordered by decreasing execution frequency,\r
+ * which minimizes the number of jumps in the most common cases,\r
+ * improves branching prediction and instruction scheduling (small\r
+ * block allocations typically result in a couple of instructions).\r
+ * Unless the optimizer reorders everything, being too smart...\r
+ */\r
+\r
+#undef PyObject_Malloc\r
+void *\r
+PyObject_Malloc(size_t nbytes)\r
+{\r
+ block *bp;\r
+ poolp pool;\r
+ poolp next;\r
+ uint size;\r
+\r
+#ifdef WITH_VALGRIND\r
+ if (UNLIKELY(running_on_valgrind == -1))\r
+ running_on_valgrind = RUNNING_ON_VALGRIND;\r
+ if (UNLIKELY(running_on_valgrind))\r
+ goto redirect;\r
+#endif\r
+\r
+ /*\r
+ * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.\r
+ * Most python internals blindly use a signed Py_ssize_t to track\r
+ * things without checking for overflows or negatives.\r
+ * As size_t is unsigned, checking for nbytes < 0 is not required.\r
+ */\r
+ if (nbytes > PY_SSIZE_T_MAX)\r
+ return NULL;\r
+\r
+ /*\r
+ * This implicitly redirects malloc(0).\r
+ */\r
+ if ((nbytes - 1) < SMALL_REQUEST_THRESHOLD) {\r
+ LOCK();\r
+ /*\r
+ * Most frequent paths first\r
+ */\r
+ size = (uint)(nbytes - 1) >> ALIGNMENT_SHIFT;\r
+ pool = usedpools[size + size];\r
+ if (pool != pool->nextpool) {\r
+ /*\r
+ * There is a used pool for this size class.\r
+ * Pick up the head block of its free list.\r
+ */\r
+ ++pool->ref.count;\r
+ bp = pool->freeblock;\r
+ assert(bp != NULL);\r
+ if ((pool->freeblock = *(block **)bp) != NULL) {\r
+ UNLOCK();\r
+ return (void *)bp;\r
+ }\r
+ /*\r
+ * Reached the end of the free list, try to extend it.\r
+ */\r
+ if (pool->nextoffset <= pool->maxnextoffset) {\r
+ /* There is room for another block. */\r
+ pool->freeblock = (block*)pool +\r
+ pool->nextoffset;\r
+ pool->nextoffset += INDEX2SIZE(size);\r
+ *(block **)(pool->freeblock) = NULL;\r
+ UNLOCK();\r
+ return (void *)bp;\r
+ }\r
+ /* Pool is full, unlink from used pools. */\r
+ next = pool->nextpool;\r
+ pool = pool->prevpool;\r
+ next->prevpool = pool;\r
+ pool->nextpool = next;\r
+ UNLOCK();\r
+ return (void *)bp;\r
+ }\r
+\r
+ /* There isn't a pool of the right size class immediately\r
+ * available: use a free pool.\r
+ */\r
+ if (usable_arenas == NULL) {\r
+ /* No arena has a free pool: allocate a new arena. */\r
+#ifdef WITH_MEMORY_LIMITS\r
+ if (narenas_currently_allocated >= MAX_ARENAS) {\r
+ UNLOCK();\r
+ goto redirect;\r
+ }\r
+#endif\r
+ usable_arenas = new_arena();\r
+ if (usable_arenas == NULL) {\r
+ UNLOCK();\r
+ goto redirect;\r
+ }\r
+ usable_arenas->nextarena =\r
+ usable_arenas->prevarena = NULL;\r
+ }\r
+ assert(usable_arenas->address != 0);\r
+\r
+ /* Try to get a cached free pool. */\r
+ pool = usable_arenas->freepools;\r
+ if (pool != NULL) {\r
+ /* Unlink from cached pools. */\r
+ usable_arenas->freepools = pool->nextpool;\r
+\r
+ /* This arena already had the smallest nfreepools\r
+ * value, so decreasing nfreepools doesn't change\r
+ * that, and we don't need to rearrange the\r
+ * usable_arenas list. However, if the arena has\r
+ * become wholly allocated, we need to remove its\r
+ * arena_object from usable_arenas.\r
+ */\r
+ --usable_arenas->nfreepools;\r
+ if (usable_arenas->nfreepools == 0) {\r
+ /* Wholly allocated: remove. */\r
+ assert(usable_arenas->freepools == NULL);\r
+ assert(usable_arenas->nextarena == NULL ||\r
+ usable_arenas->nextarena->prevarena ==\r
+ usable_arenas);\r
+\r
+ usable_arenas = usable_arenas->nextarena;\r
+ if (usable_arenas != NULL) {\r
+ usable_arenas->prevarena = NULL;\r
+ assert(usable_arenas->address != 0);\r
+ }\r
+ }\r
+ else {\r
+ /* nfreepools > 0: it must be that freepools\r
+ * isn't NULL, or that we haven't yet carved\r
+ * off all the arena's pools for the first\r
+ * time.\r
+ */\r
+ assert(usable_arenas->freepools != NULL ||\r
+ usable_arenas->pool_address <=\r
+ (block*)usable_arenas->address +\r
+ ARENA_SIZE - POOL_SIZE);\r
+ }\r
+ init_pool:\r
+ /* Frontlink to used pools. */\r
+ next = usedpools[size + size]; /* == prev */\r
+ pool->nextpool = next;\r
+ pool->prevpool = next;\r
+ next->nextpool = pool;\r
+ next->prevpool = pool;\r
+ pool->ref.count = 1;\r
+ if (pool->szidx == size) {\r
+ /* Luckily, this pool last contained blocks\r
+ * of the same size class, so its header\r
+ * and free list are already initialized.\r
+ */\r
+ bp = pool->freeblock;\r
+ pool->freeblock = *(block **)bp;\r
+ UNLOCK();\r
+ return (void *)bp;\r
+ }\r
+ /*\r
+ * Initialize the pool header, set up the free list to\r
+ * contain just the second block, and return the first\r
+ * block.\r
+ */\r
+ pool->szidx = size;\r
+ size = INDEX2SIZE(size);\r
+ bp = (block *)pool + POOL_OVERHEAD;\r
+ pool->nextoffset = POOL_OVERHEAD + (size << 1);\r
+ pool->maxnextoffset = POOL_SIZE - size;\r
+ pool->freeblock = bp + size;\r
+ *(block **)(pool->freeblock) = NULL;\r
+ UNLOCK();\r
+ return (void *)bp;\r
+ }\r
+\r
+ /* Carve off a new pool. */\r
+ assert(usable_arenas->nfreepools > 0);\r
+ assert(usable_arenas->freepools == NULL);\r
+ pool = (poolp)usable_arenas->pool_address;\r
+ assert((block*)pool <= (block*)usable_arenas->address +\r
+ ARENA_SIZE - POOL_SIZE);\r
+ pool->arenaindex = usable_arenas - arenas;\r
+ assert(&arenas[pool->arenaindex] == usable_arenas);\r
+ pool->szidx = DUMMY_SIZE_IDX;\r
+ usable_arenas->pool_address += POOL_SIZE;\r
+ --usable_arenas->nfreepools;\r
+\r
+ if (usable_arenas->nfreepools == 0) {\r
+ assert(usable_arenas->nextarena == NULL ||\r
+ usable_arenas->nextarena->prevarena ==\r
+ usable_arenas);\r
+ /* Unlink the arena: it is completely allocated. */\r
+ usable_arenas = usable_arenas->nextarena;\r
+ if (usable_arenas != NULL) {\r
+ usable_arenas->prevarena = NULL;\r
+ assert(usable_arenas->address != 0);\r
+ }\r
+ }\r
+\r
+ goto init_pool;\r
+ }\r
+\r
+ /* The small block allocator ends here. */\r
+\r
+redirect:\r
+ /* Redirect the original request to the underlying (libc) allocator.\r
+ * We jump here on bigger requests, on error in the code above (as a\r
+ * last chance to serve the request) or when the max memory limit\r
+ * has been reached.\r
+ */\r
+ if (nbytes == 0)\r
+ nbytes = 1;\r
+ return (void *)malloc(nbytes);\r
+}\r
+\r
+/* free */\r
+\r
+#undef PyObject_Free\r
+ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS\r
+void\r
+PyObject_Free(void *p)\r
+{\r
+ poolp pool;\r
+ block *lastfree;\r
+ poolp next, prev;\r
+ uint size;\r
+#ifndef Py_USING_MEMORY_DEBUGGER\r
+ uint arenaindex_temp;\r
+#endif\r
+\r
+ if (p == NULL) /* free(NULL) has no effect */\r
+ return;\r
+\r
+#ifdef WITH_VALGRIND\r
+ if (UNLIKELY(running_on_valgrind > 0))\r
+ goto redirect;\r
+#endif\r
+\r
+ pool = POOL_ADDR(p);\r
+ if (Py_ADDRESS_IN_RANGE(p, pool)) {\r
+ /* We allocated this address. */\r
+ LOCK();\r
+ /* Link p to the start of the pool's freeblock list. Since\r
+ * the pool had at least the p block outstanding, the pool\r
+ * wasn't empty (so it's already in a usedpools[] list, or\r
+ * was full and is in no list -- it's not in the freepools\r
+ * list in any case).\r
+ */\r
+ assert(pool->ref.count > 0); /* else it was empty */\r
+ *(block **)p = lastfree = pool->freeblock;\r
+ pool->freeblock = (block *)p;\r
+ if (lastfree) {\r
+ struct arena_object* ao;\r
+ uint nf; /* ao->nfreepools */\r
+\r
+ /* freeblock wasn't NULL, so the pool wasn't full,\r
+ * and the pool is in a usedpools[] list.\r
+ */\r
+ if (--pool->ref.count != 0) {\r
+ /* pool isn't empty: leave it in usedpools */\r
+ UNLOCK();\r
+ return;\r
+ }\r
+ /* Pool is now empty: unlink from usedpools, and\r
+ * link to the front of freepools. This ensures that\r
+ * pools freed longer ago will be allocated later\r
+ * (being unreferenced, they may have been paged out).\r
+ */\r
+ next = pool->nextpool;\r
+ prev = pool->prevpool;\r
+ next->prevpool = prev;\r
+ prev->nextpool = next;\r
+\r
+ /* Link the pool to freepools. This is a singly-linked\r
+ * list, and pool->prevpool isn't used there.\r
+ */\r
+ ao = &arenas[pool->arenaindex];\r
+ pool->nextpool = ao->freepools;\r
+ ao->freepools = pool;\r
+ nf = ++ao->nfreepools;\r
+\r
+ /* All the rest is arena management. We just freed\r
+ * a pool, and there are 4 cases for arena mgmt:\r
+ * 1. If all the pools are free, return the arena to\r
+ * the system free().\r
+ * 2. If this is the only free pool in the arena,\r
+ * add the arena back to the `usable_arenas` list.\r
+ * 3. If the "next" arena has a smaller count of free\r
+ * pools, we have to "slide this arena right" to\r
+ * restore the invariant that usable_arenas is\r
+ * sorted in order of nfreepools.\r
+ * 4. Else there's nothing more to do.\r
+ */\r
+ if (nf == ao->ntotalpools) {\r
+ /* Case 1. First unlink ao from usable_arenas.\r
+ */\r
+ assert(ao->prevarena == NULL ||\r
+ ao->prevarena->address != 0);\r
+ assert(ao->nextarena == NULL ||\r
+ ao->nextarena->address != 0);\r
+\r
+ /* Fix the pointer in the prevarena, or the\r
+ * usable_arenas pointer.\r
+ */\r
+ if (ao->prevarena == NULL) {\r
+ usable_arenas = ao->nextarena;\r
+ assert(usable_arenas == NULL ||\r
+ usable_arenas->address != 0);\r
+ }\r
+ else {\r
+ assert(ao->prevarena->nextarena == ao);\r
+ ao->prevarena->nextarena =\r
+ ao->nextarena;\r
+ }\r
+ /* Fix the pointer in the nextarena. */\r
+ if (ao->nextarena != NULL) {\r
+ assert(ao->nextarena->prevarena == ao);\r
+ ao->nextarena->prevarena =\r
+ ao->prevarena;\r
+ }\r
+ /* Record that this arena_object slot is\r
+ * available to be reused.\r
+ */\r
+ ao->nextarena = unused_arena_objects;\r
+ unused_arena_objects = ao;\r
+\r
+ /* Free the entire arena. */\r
+#ifdef ARENAS_USE_MMAP\r
+ munmap((void *)ao->address, ARENA_SIZE);\r
+#else\r
+ free((void *)ao->address);\r
+#endif\r
+ ao->address = 0; /* mark unassociated */\r
+ --narenas_currently_allocated;\r
+\r
+ UNLOCK();\r
+ return;\r
+ }\r
+ if (nf == 1) {\r
+ /* Case 2. Put ao at the head of\r
+ * usable_arenas. Note that because\r
+ * ao->nfreepools was 0 before, ao isn't\r
+ * currently on the usable_arenas list.\r
+ */\r
+ ao->nextarena = usable_arenas;\r
+ ao->prevarena = NULL;\r
+ if (usable_arenas)\r
+ usable_arenas->prevarena = ao;\r
+ usable_arenas = ao;\r
+ assert(usable_arenas->address != 0);\r
+\r
+ UNLOCK();\r
+ return;\r
+ }\r
+ /* If this arena is now out of order, we need to keep\r
+ * the list sorted. The list is kept sorted so that\r
+ * the "most full" arenas are used first, which allows\r
+ * the nearly empty arenas to be completely freed. In\r
+ * a few unscientific tests, it seems like this\r
+ * approach allowed a lot more memory to be freed.\r
+ */\r
+ if (ao->nextarena == NULL ||\r
+ nf <= ao->nextarena->nfreepools) {\r
+ /* Case 4. Nothing to do. */\r
+ UNLOCK();\r
+ return;\r
+ }\r
+ /* Case 3: We have to move the arena towards the end\r
+ * of the list, because it has more free pools than\r
+ * the arena to its right.\r
+ * First unlink ao from usable_arenas.\r
+ */\r
+ if (ao->prevarena != NULL) {\r
+ /* ao isn't at the head of the list */\r
+ assert(ao->prevarena->nextarena == ao);\r
+ ao->prevarena->nextarena = ao->nextarena;\r
+ }\r
+ else {\r
+ /* ao is at the head of the list */\r
+ assert(usable_arenas == ao);\r
+ usable_arenas = ao->nextarena;\r
+ }\r
+ ao->nextarena->prevarena = ao->prevarena;\r
+\r
+ /* Locate the new insertion point by iterating over\r
+ * the list, using our nextarena pointer.\r
+ */\r
+ while (ao->nextarena != NULL &&\r
+ nf > ao->nextarena->nfreepools) {\r
+ ao->prevarena = ao->nextarena;\r
+ ao->nextarena = ao->nextarena->nextarena;\r
+ }\r
+\r
+ /* Insert ao at this point. */\r
+ assert(ao->nextarena == NULL ||\r
+ ao->prevarena == ao->nextarena->prevarena);\r
+ assert(ao->prevarena->nextarena == ao->nextarena);\r
+\r
+ ao->prevarena->nextarena = ao;\r
+ if (ao->nextarena != NULL)\r
+ ao->nextarena->prevarena = ao;\r
+\r
+ /* Verify that the swaps worked. */\r
+ assert(ao->nextarena == NULL ||\r
+ nf <= ao->nextarena->nfreepools);\r
+ assert(ao->prevarena == NULL ||\r
+ nf > ao->prevarena->nfreepools);\r
+ assert(ao->nextarena == NULL ||\r
+ ao->nextarena->prevarena == ao);\r
+ assert((usable_arenas == ao &&\r
+ ao->prevarena == NULL) ||\r
+ ao->prevarena->nextarena == ao);\r
+\r
+ UNLOCK();\r
+ return;\r
+ }\r
+ /* Pool was full, so doesn't currently live in any list:\r
+ * link it to the front of the appropriate usedpools[] list.\r
+ * This mimics LRU pool usage for new allocations and\r
+ * targets optimal filling when several pools contain\r
+ * blocks of the same size class.\r
+ */\r
+ --pool->ref.count;\r
+ assert(pool->ref.count > 0); /* else the pool is empty */\r
+ size = pool->szidx;\r
+ next = usedpools[size + size];\r
+ prev = next->prevpool;\r
+ /* insert pool before next: prev <-> pool <-> next */\r
+ pool->nextpool = next;\r
+ pool->prevpool = prev;\r
+ next->prevpool = pool;\r
+ prev->nextpool = pool;\r
+ UNLOCK();\r
+ return;\r
+ }\r
+\r
+#ifdef WITH_VALGRIND\r
+redirect:\r
+#endif\r
+ /* We didn't allocate this address. */\r
+ free(p);\r
+}\r
+\r
+/* realloc. If p is NULL, this acts like malloc(nbytes). Else if nbytes==0,\r
+ * then as the Python docs promise, we do not treat this like free(p), and\r
+ * return a non-NULL result.\r
+ */\r
+\r
+#undef PyObject_Realloc\r
+ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS\r
+void *\r
+PyObject_Realloc(void *p, size_t nbytes)\r
+{\r
+ void *bp;\r
+ poolp pool;\r
+ size_t size;\r
+#ifndef Py_USING_MEMORY_DEBUGGER\r
+ uint arenaindex_temp;\r
+#endif\r
+\r
+ if (p == NULL)\r
+ return PyObject_Malloc(nbytes);\r
+\r
+ /*\r
+ * Limit ourselves to PY_SSIZE_T_MAX bytes to prevent security holes.\r
+ * Most python internals blindly use a signed Py_ssize_t to track\r
+ * things without checking for overflows or negatives.\r
+ * As size_t is unsigned, checking for nbytes < 0 is not required.\r
+ */\r
+ if (nbytes > PY_SSIZE_T_MAX)\r
+ return NULL;\r
+\r
+#ifdef WITH_VALGRIND\r
+ /* Treat running_on_valgrind == -1 the same as 0 */\r
+ if (UNLIKELY(running_on_valgrind > 0))\r
+ goto redirect;\r
+#endif\r
+\r
+ pool = POOL_ADDR(p);\r
+ if (Py_ADDRESS_IN_RANGE(p, pool)) {\r
+ /* We're in charge of this block */\r
+ size = INDEX2SIZE(pool->szidx);\r
+ if (nbytes <= size) {\r
+ /* The block is staying the same or shrinking. If\r
+ * it's shrinking, there's a tradeoff: it costs\r
+ * cycles to copy the block to a smaller size class,\r
+ * but it wastes memory not to copy it. The\r
+ * compromise here is to copy on shrink only if at\r
+ * least 25% of size can be shaved off.\r
+ */\r
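+ /* E.g., with size == 64: requests of 49..64 bytes satisfy\r
+ * 4*nbytes > 192 and are left in place; 48 bytes or fewer\r
+ * (at least 25% shaved off) fall through to the copy below.\r
+ */\r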
+ if (4 * nbytes > 3 * size) {\r
+ /* It's the same,\r
+ * or shrinking and new/old > 3/4.\r
+ */\r
+ return p;\r
+ }\r
+ size = nbytes;\r
+ }\r
+ bp = PyObject_Malloc(nbytes);\r
+ if (bp != NULL) {\r
+ memcpy(bp, p, size);\r
+ PyObject_Free(p);\r
+ }\r
+ return bp;\r
+ }\r
+#ifdef WITH_VALGRIND\r
+ redirect:\r
+#endif\r
+ /* We're not managing this block. If nbytes <=\r
+ * SMALL_REQUEST_THRESHOLD, it's tempting to try to take over this\r
+ * block. However, if we do, we need to copy the valid data from\r
+ * the C-managed block to one of our blocks, and there's no portable\r
+ * way to know how much of the memory space starting at p is valid.\r
+ * As bug 1185883 pointed out the hard way, it's possible that the\r
+ * C-managed block is "at the end" of allocated VM space, so that\r
+ * a memory fault can occur if we try to copy nbytes bytes starting\r
+ * at p. Instead we punt: let C continue to manage this block.\r
+ */\r
+ if (nbytes)\r
+ return realloc(p, nbytes);\r
+ /* C doesn't define the result of realloc(p, 0) (it may or may not\r
+ * return NULL then), but Python's docs promise that nbytes==0 never\r
+ * returns NULL. We don't pass 0 to realloc(), to avoid that endcase\r
+ * to begin with. Even then, we can't be sure that realloc() won't\r
+ * return NULL.\r
+ */\r
+ bp = realloc(p, 1);\r
+ return bp ? bp : p;\r
+}\r
+\r
+#else /* ! WITH_PYMALLOC */\r
+\r
+/*==========================================================================*/\r
+/* pymalloc not enabled: Redirect the entry points to malloc. These will\r
+ * only be used by extensions that are compiled with pymalloc enabled. */\r
+\r
+void *\r
+PyObject_Malloc(size_t n)\r
+{\r
+ return PyMem_MALLOC(n);\r
+}\r
+\r
+void *\r
+PyObject_Realloc(void *p, size_t n)\r
+{\r
+ return PyMem_REALLOC(p, n);\r
+}\r
+\r
+void\r
+PyObject_Free(void *p)\r
+{\r
+ PyMem_FREE(p);\r
+}\r
+#endif /* WITH_PYMALLOC */\r
+\r
+#ifdef PYMALLOC_DEBUG\r
+/*==========================================================================*/\r
+/* A cross-platform debugging allocator. This doesn't manage memory directly;\r
+ * it wraps a real allocator, adding extra debugging info to the memory blocks.\r
+ */\r
+\r
+/* Special bytes broadcast into debug memory blocks at appropriate times.\r
+ * Strings of these are unlikely to be valid addresses, floats, ints or\r
+ * 7-bit ASCII.\r
+ */\r
+#undef CLEANBYTE\r
+#undef DEADBYTE\r
+#undef FORBIDDENBYTE\r
+#define CLEANBYTE 0xCB /* clean (newly allocated) memory */\r
+#define DEADBYTE 0xDB /* dead (newly freed) memory */\r
+#define FORBIDDENBYTE 0xFB /* untouchable bytes at each end of a block */\r
+\r
+/* We tag each block with an API ID in order to detect API violations */\r
+#define _PYMALLOC_MEM_ID 'm' /* the PyMem_Malloc() API */\r
+#define _PYMALLOC_OBJ_ID 'o' /* The PyObject_Malloc() API */\r
+\r
+static size_t serialno = 0; /* incremented on each debug {m,re}alloc */\r
+\r
+/* serialno is always incremented via calling this routine. The point is\r
+ * to supply a single place to set a breakpoint.\r
+ */\r
+static void\r
+bumpserialno(void)\r
+{\r
+ ++serialno;\r
+}\r
+\r
+#define SST SIZEOF_SIZE_T\r
+\r
+/* Read sizeof(size_t) bytes at p as a big-endian size_t. */\r
+static size_t\r
+read_size_t(const void *p)\r
+{\r
+ const uchar *q = (const uchar *)p;\r
+ size_t result = *q++;\r
+ int i;\r
+\r
+ for (i = SST; --i > 0; ++q)\r
+ result = (result << 8) | *q;\r
+ return result;\r
+}\r
+\r
+/* Write n as a big-endian size_t, MSB at address p, LSB at\r
+ * p + sizeof(size_t) - 1.\r
+ */\r
+static void\r
+write_size_t(void *p, size_t n)\r
+{\r
+ uchar *q = (uchar *)p + SST - 1;\r
+ int i;\r
+\r
+ for (i = SST; --i >= 0; --q) {\r
+ *q = (uchar)(n & 0xff);\r
+ n >>= 8;\r
+ }\r
+}\r
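+\r
+/* Round-trip sanity sketch (illustrative):\r
+ *\r
+ * uchar buf[SST];\r
+ * write_size_t(buf, n);\r
+ * assert(read_size_t(buf) == n);\r
+ *\r
+ * holds for any size_t n; the big-endian layout simply makes the MSB come\r
+ * first so a hex dump reads naturally.\r
+ */\r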
+\r
+#ifdef Py_DEBUG\r
+/* Is target in the list? The list is traversed via the nextpool pointers.\r
+ * The list may be NULL-terminated, or circular. Return 1 if target is in\r
+ * list, else 0.\r
+ */\r
+static int\r
+pool_is_in_list(const poolp target, poolp list)\r
+{\r
+ poolp origlist = list;\r
+ assert(target != NULL);\r
+ if (list == NULL)\r
+ return 0;\r
+ do {\r
+ if (target == list)\r
+ return 1;\r
+ list = list->nextpool;\r
+ } while (list != NULL && list != origlist);\r
+ return 0;\r
+}\r
+\r
+#else\r
+#define pool_is_in_list(X, Y) 1\r
+\r
+#endif /* Py_DEBUG */\r
+\r
+/* Let S = sizeof(size_t). The debug malloc asks for 4*S extra bytes and\r
+ fills them with useful stuff, here calling the underlying malloc's result p:\r
+\r
+p[0: S]\r
+ Number of bytes originally asked for. This is a size_t, big-endian (easier\r
+ to read in a memory dump).\r
+p[S: 2*S]\r
+ The API ID in p[S] (see _PYMALLOC_MEM_ID/_PYMALLOC_OBJ_ID), followed by\r
+ copies of FORBIDDENBYTE. Used to catch underwrites and reads.\r
+p[2*S: 2*S+n]\r
+ The requested memory, filled with copies of CLEANBYTE.\r
+ Used to catch reference to uninitialized memory.\r
+ &p[2*S] is returned. Note that this is 8-byte aligned if pymalloc\r
+ handled the request itself.\r
+p[2*S+n: 2*S+n+S]\r
+ Copies of FORBIDDENBYTE. Used to catch overwrites and reads.\r
+p[2*S+n+S: 2*S+n+2*S]\r
+ A serial number, incremented by 1 on each call to _PyObject_DebugMalloc\r
+ and _PyObject_DebugRealloc.\r
+ This is a big-endian size_t.\r
+ If "bad memory" is detected later, the serial number gives an\r
+ excellent way to set a breakpoint on the next run, to capture the\r
+ instant at which this block was passed out.\r
+*/\r
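+\r
+/* Worked layout example, assuming S == 8 and a 4-byte request served by\r
+ * _PyObject_DebugMallocApi('o', 4): the underlying PyObject_Malloc() is\r
+ * asked for 4 + 4*8 == 36 bytes; p[0:8] holds the big-endian count 4,\r
+ * p[8] holds 'o', p[9:16] hold 0xFB, p[16:20] hold 0xCB (and &p[16] is\r
+ * returned to the caller), p[20:28] hold 0xFB, and p[28:36] hold the\r
+ * big-endian serial number.\r
+ */\r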
+\r
+/* debug replacements for the PyMem_* memory API */\r
+void *\r
+_PyMem_DebugMalloc(size_t nbytes)\r
+{\r
+ return _PyObject_DebugMallocApi(_PYMALLOC_MEM_ID, nbytes);\r
+}\r
+void *\r
+_PyMem_DebugRealloc(void *p, size_t nbytes)\r
+{\r
+ return _PyObject_DebugReallocApi(_PYMALLOC_MEM_ID, p, nbytes);\r
+}\r
+void\r
+_PyMem_DebugFree(void *p)\r
+{\r
+ _PyObject_DebugFreeApi(_PYMALLOC_MEM_ID, p);\r
+}\r
+\r
+/* debug replacements for the PyObject_* memory API */\r
+void *\r
+_PyObject_DebugMalloc(size_t nbytes)\r
+{\r
+ return _PyObject_DebugMallocApi(_PYMALLOC_OBJ_ID, nbytes);\r
+}\r
+void *\r
+_PyObject_DebugRealloc(void *p, size_t nbytes)\r
+{\r
+ return _PyObject_DebugReallocApi(_PYMALLOC_OBJ_ID, p, nbytes);\r
+}\r
+void\r
+_PyObject_DebugFree(void *p)\r
+{\r
+ _PyObject_DebugFreeApi(_PYMALLOC_OBJ_ID, p);\r
+}\r
+void\r
+_PyObject_DebugCheckAddress(const void *p)\r
+{\r
+ _PyObject_DebugCheckAddressApi(_PYMALLOC_OBJ_ID, p);\r
+}\r
+\r
+\r
+/* generic debug memory api, with an "id" to identify the API in use */\r
+void *\r
+_PyObject_DebugMallocApi(char id, size_t nbytes)\r
+{\r
+ uchar *p; /* base address of malloc'ed block */\r
+ uchar *tail; /* p + 2*SST + nbytes == pointer to tail pad bytes */\r
+ size_t total; /* nbytes + 4*SST */\r
+\r
+ bumpserialno();\r
+ total = nbytes + 4*SST;\r
+ if (total < nbytes)\r
+ /* overflow: can't represent total as a size_t */\r
+ return NULL;\r
+\r
+ p = (uchar *)PyObject_Malloc(total);\r
+ if (p == NULL)\r
+ return NULL;\r
+\r
+ /* at p, write size (SST bytes), id (1 byte), pad (SST-1 bytes) */\r
+ write_size_t(p, nbytes);\r
+ p[SST] = (uchar)id;\r
+ memset(p + SST + 1, FORBIDDENBYTE, SST-1);\r
+\r
+ if (nbytes > 0)\r
+ memset(p + 2*SST, CLEANBYTE, nbytes);\r
+\r
+ /* at tail, write pad (SST bytes) and serialno (SST bytes) */\r
+ tail = p + 2*SST + nbytes;\r
+ memset(tail, FORBIDDENBYTE, SST);\r
+ write_size_t(tail + SST, serialno);\r
+\r
+ return p + 2*SST;\r
+}\r
+\r
+/* The debug free first checks the 2*SST bytes on each end for sanity (in\r
+ particular, that the FORBIDDENBYTEs with the api ID are still intact).\r
+ Then fills the original bytes with DEADBYTE.\r
+ Then calls the underlying free.\r
+*/\r
+void\r
+_PyObject_DebugFreeApi(char api, void *p)\r
+{\r
+ uchar *q = (uchar *)p - 2*SST; /* address returned from malloc */\r
+ size_t nbytes;\r
+\r
+ if (p == NULL)\r
+ return;\r
+ _PyObject_DebugCheckAddressApi(api, p);\r
+ nbytes = read_size_t(q);\r
+ nbytes += 4*SST;\r
+ if (nbytes > 0)\r
+ memset(q, DEADBYTE, nbytes);\r
+ PyObject_Free(q);\r
+}\r
+\r
+void *\r
+_PyObject_DebugReallocApi(char api, void *p, size_t nbytes)\r
+{\r
+ uchar *q = (uchar *)p;\r
+ uchar *tail;\r
+ size_t total; /* nbytes + 4*SST */\r
+ size_t original_nbytes;\r
+ int i;\r
+\r
+ if (p == NULL)\r
+ return _PyObject_DebugMallocApi(api, nbytes);\r
+\r
+ _PyObject_DebugCheckAddressApi(api, p);\r
+ bumpserialno();\r
+ original_nbytes = read_size_t(q - 2*SST);\r
+ total = nbytes + 4*SST;\r
+ if (total < nbytes)\r
+ /* overflow: can't represent total as a size_t */\r
+ return NULL;\r
+\r
+ if (nbytes < original_nbytes) {\r
+ /* shrinking: mark old extra memory dead */\r
+ memset(q + nbytes, DEADBYTE, original_nbytes - nbytes + 2*SST);\r
+ }\r
+\r
+ /* Resize and add decorations. We may get a new pointer here, in which\r
+ * case we didn't get the chance to mark the old memory with DEADBYTE,\r
+ * but we live with that.\r
+ */\r
+ q = (uchar *)PyObject_Realloc(q - 2*SST, total);\r
+ if (q == NULL)\r
+ return NULL;\r
+\r
+ write_size_t(q, nbytes);\r
+ assert(q[SST] == (uchar)api);\r
+ for (i = 1; i < SST; ++i)\r
+ assert(q[SST + i] == FORBIDDENBYTE);\r
+ q += 2*SST;\r
+ tail = q + nbytes;\r
+ memset(tail, FORBIDDENBYTE, SST);\r
+ write_size_t(tail + SST, serialno);\r
+\r
+ if (nbytes > original_nbytes) {\r
+ /* growing: mark new extra memory clean */\r
+ memset(q + original_nbytes, CLEANBYTE,\r
+ nbytes - original_nbytes);\r
+ }\r
+\r
+ return q;\r
+}\r
+\r
+/* Check the forbidden bytes on both ends of the memory allocated for p.\r
+ * If anything is wrong, print info to stderr via _PyObject_DebugDumpAddress,\r
+ * and call Py_FatalError to kill the program.\r
+ * The API id is also checked.\r
+ */\r
+void\r
+_PyObject_DebugCheckAddressApi(char api, const void *p)\r
+{\r
+ const uchar *q = (const uchar *)p;\r
+ char msgbuf[64];\r
+ char *msg;\r
+ size_t nbytes;\r
+ const uchar *tail;\r
+ int i;\r
+ char id;\r
+\r
+ if (p == NULL) {\r
+ msg = "didn't expect a NULL pointer";\r
+ goto error;\r
+ }\r
+\r
+ /* Check the API id */\r
+ id = (char)q[-SST];\r
+ if (id != api) {\r
+ msg = msgbuf;\r
+ snprintf(msg, sizeof(msgbuf),\r
+ "bad ID: Allocated using API '%c', "\r
+ "verified using API '%c'",\r
+ id, api);\r
+ msgbuf[sizeof(msgbuf)-1] = 0;\r
+ goto error;\r
+ }\r
+\r
+ /* Check the stuff at the start of p first: if there's underwrite\r
+ * corruption, the number-of-bytes field may be nuts, and checking\r
+ * the tail could lead to a segfault then.\r
+ */\r
+ for (i = SST-1; i >= 1; --i) {\r
+ if (*(q-i) != FORBIDDENBYTE) {\r
+ msg = "bad leading pad byte";\r
+ goto error;\r
+ }\r
+ }\r
+\r
+ nbytes = read_size_t(q - 2*SST);\r
+ tail = q + nbytes;\r
+ for (i = 0; i < SST; ++i) {\r
+ if (tail[i] != FORBIDDENBYTE) {\r
+ msg = "bad trailing pad byte";\r
+ goto error;\r
+ }\r
+ }\r
+\r
+ return;\r
+\r
+error:\r
+ _PyObject_DebugDumpAddress(p);\r
+ Py_FatalError(msg);\r
+}\r
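+\r
+/* Only the api id byte and the FORBIDDENBYTE pads are validated above;\r
+ the serial number stored after the tail pad is informational only and\r
+ is never checked. */\r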
+\r
+/* Display info to stderr about the memory block at p. */\r
+void\r
+_PyObject_DebugDumpAddress(const void *p)\r
+{\r
+ const uchar *q = (const uchar *)p;\r
+ const uchar *tail;\r
+ size_t nbytes, serial;\r
+ int i;\r
+ int ok;\r
+ char id;\r
+\r
+ fprintf(stderr, "Debug memory block at address p=%p:", p);\r
+ if (p == NULL) {\r
+ fprintf(stderr, "\n");\r
+ return;\r
+ }\r
+ id = (char)q[-SST];\r
+ fprintf(stderr, " API '%c'\n", id);\r
+\r
+ nbytes = read_size_t(q - 2*SST);\r
+ fprintf(stderr, " %" PY_FORMAT_SIZE_T "u bytes originally "\r
+ "requested\n", nbytes);\r
+\r
+ /* In case this is nuts, check the leading pad bytes first. */\r
+ fprintf(stderr, " The %d pad bytes at p-%d are ", SST-1, SST-1);\r
+ ok = 1;\r
+ for (i = 1; i <= SST-1; ++i) {\r
+ if (*(q-i) != FORBIDDENBYTE) {\r
+ ok = 0;\r
+ break;\r
+ }\r
+ }\r
+ if (ok)\r
+ fputs("FORBIDDENBYTE, as expected.\n", stderr);\r
+ else {\r
+ fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",\r
+ FORBIDDENBYTE);\r
+ for (i = SST-1; i >= 1; --i) {\r
+ const uchar byte = *(q-i);\r
+ fprintf(stderr, " at p-%d: 0x%02x", i, byte);\r
+ if (byte != FORBIDDENBYTE)\r
+ fputs(" *** OUCH", stderr);\r
+ fputc('\n', stderr);\r
+ }\r
+\r
+ fputs(" Because memory is corrupted at the start, the "\r
+ "count of bytes requested\n"\r
+ " may be bogus, and checking the trailing pad "\r
+ "bytes may segfault.\n", stderr);\r
+ }\r
+\r
+ tail = q + nbytes;\r
+ fprintf(stderr, " The %d pad bytes at tail=%p are ", SST, tail);\r
+ ok = 1;\r
+ for (i = 0; i < SST; ++i) {\r
+ if (tail[i] != FORBIDDENBYTE) {\r
+ ok = 0;\r
+ break;\r
+ }\r
+ }\r
+ if (ok)\r
+ fputs("FORBIDDENBYTE, as expected.\n", stderr);\r
+ else {\r
+ fprintf(stderr, "not all FORBIDDENBYTE (0x%02x):\n",\r
+ FORBIDDENBYTE);\r
+ for (i = 0; i < SST; ++i) {\r
+ const uchar byte = tail[i];\r
+ fprintf(stderr, " at tail+%d: 0x%02x",\r
+ i, byte);\r
+ if (byte != FORBIDDENBYTE)\r
+ fputs(" *** OUCH", stderr);\r
+ fputc('\n', stderr);\r
+ }\r
+ }\r
+\r
+ serial = read_size_t(tail + SST);\r
+ fprintf(stderr, " The block was made by call #%" PY_FORMAT_SIZE_T\r
+ "u to debug malloc/realloc.\n", serial);\r
+\r
+ if (nbytes > 0) {\r
+ i = 0;\r
+ fputs(" Data at p:", stderr);\r
+ /* print up to 8 bytes at the start */\r
+ while (q < tail && i < 8) {\r
+ fprintf(stderr, " %02x", *q);\r
+ ++i;\r
+ ++q;\r
+ }\r
+ /* and up to 8 at the end */\r
+ if (q < tail) {\r
+ if (tail - q > 8) {\r
+ fputs(" ...", stderr);\r
+ q = tail - 8;\r
+ }\r
+ while (q < tail) {\r
+ fprintf(stderr, " %02x", *q);\r
+ ++q;\r
+ }\r
+ }\r
+ fputc('\n', stderr);\r
+ }\r
+}\r
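+\r
+/* For illustration, a healthy 10-byte block on a 64-bit build (SST == 8,\r
+ CLEANBYTE == 0xCB), assuming the object api id 'o', would dump roughly:\r
+\r
+ Debug memory block at address p=0x...: API 'o'\r
+ 10 bytes originally requested\r
+ The 7 pad bytes at p-7 are FORBIDDENBYTE, as expected.\r
+ The 8 pad bytes at tail=0x... are FORBIDDENBYTE, as expected.\r
+ The block was made by call #42 to debug malloc/realloc.\r
+ Data at p: cb cb cb cb cb cb cb cb cb cb\r
+*/\r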
+\r
+static size_t\r
+printone(const char* msg, size_t value)\r
+{\r
+ int i, k;\r
+ char buf[100];\r
+ size_t origvalue = value;\r
+\r
+ fputs(msg, stderr);\r
+ for (i = (int)strlen(msg); i < 35; ++i)\r
+ fputc(' ', stderr);\r
+ fputc('=', stderr);\r
+\r
+ /* Write the value with commas. */\r
+ i = 22;\r
+ buf[i--] = '\0';\r
+ buf[i--] = '\n';\r
+ k = 3;\r
+ do {\r
+ size_t nextvalue = value / 10;\r
+ unsigned int digit = (unsigned int)(value - nextvalue * 10);\r
+ value = nextvalue;\r
+ buf[i--] = (char)(digit + '0');\r
+ --k;\r
+ if (k == 0 && value && i >= 0) {\r
+ k = 3;\r
+ buf[i--] = ',';\r
+ }\r
+ } while (value && i >= 0);\r
+\r
+ while (i >= 0)\r
+ buf[i--] = ' ';\r
+ fputs(buf, stderr);\r
+\r
+ return origvalue;\r
+}\r
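+\r
+/* printone() pads the label to 35 columns, writes '=', then right-\r
+ justifies the comma-grouped value in a 21-character field, e.g.\r
+ (spacing approximate):\r
+\r
+ printone("# arenas allocated total", 1234567);\r
+ -> # arenas allocated total = 1,234,567\r
+*/\r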
+\r
+/* Print summary info to stderr about the state of pymalloc's structures.\r
+ * In Py_DEBUG mode, also perform some expensive internal consistency\r
+ * checks.\r
+ */\r
+void\r
+_PyObject_DebugMallocStats(void)\r
+{\r
+ uint i;\r
+ const uint numclasses = SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT;\r
+ /* # of pools, allocated blocks, and free blocks per class index */\r
+ size_t numpools[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r
+ size_t numblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r
+ size_t numfreeblocks[SMALL_REQUEST_THRESHOLD >> ALIGNMENT_SHIFT];\r
+ /* total # of allocated bytes in used and full pools */\r
+ size_t allocated_bytes = 0;\r
+ /* total # of available bytes in used pools */\r
+ size_t available_bytes = 0;\r
+ /* # of free pools + pools not yet carved out of current arena */\r
+ uint numfreepools = 0;\r
+ /* # of bytes for arena alignment padding */\r
+ size_t arena_alignment = 0;\r
+ /* # of bytes in used and full pools used for pool_headers */\r
+ size_t pool_header_bytes = 0;\r
+ /* # of bytes in used and full pools wasted due to quantization,\r
+ * i.e. the necessarily leftover space at the ends of used and\r
+ * full pools.\r
+ */\r
+ size_t quantization = 0;\r
+ /* # of arenas actually allocated. */\r
+ size_t narenas = 0;\r
+ /* running total -- should equal narenas * ARENA_SIZE */\r
+ size_t total;\r
+ char buf[128];\r
+\r
+ fprintf(stderr, "Small block threshold = %d, in %u size classes.\n",\r
+ SMALL_REQUEST_THRESHOLD, numclasses);\r
+\r
+ for (i = 0; i < numclasses; ++i)\r
+ numpools[i] = numblocks[i] = numfreeblocks[i] = 0;\r
+\r
+ /* Because full pools aren't linked to from anything, it's easiest\r
+ * to march over all the arenas. If we're lucky, most of the memory\r
+ * will be living in full pools -- would be a shame to miss them.\r
+ */\r
+ for (i = 0; i < maxarenas; ++i) {\r
+ uint j;\r
+ uptr base = arenas[i].address;\r
+\r
+ /* Skip arenas which are not allocated. */\r
+ if (arenas[i].address == (uptr)NULL)\r
+ continue;\r
+ narenas += 1;\r
+\r
+ numfreepools += arenas[i].nfreepools;\r
+\r
+ /* round up to pool alignment */\r
+ if (base & (uptr)POOL_SIZE_MASK) {\r
+ arena_alignment += POOL_SIZE;\r
+ base &= ~(uptr)POOL_SIZE_MASK;\r
+ base += POOL_SIZE;\r
+ }\r
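+\r
+ /* Charging a full POOL_SIZE per misaligned arena is exact rather than\r
+ an over-estimate: a front fragment of POOL_SIZE - r bytes implies an\r
+ unusable tail fragment of r bytes (ARENA_SIZE being a multiple of\r
+ POOL_SIZE), and the two always sum to exactly POOL_SIZE. */\r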
+\r
+ /* visit every pool in the arena */\r
+ assert(base <= (uptr) arenas[i].pool_address);\r
+ for (j = 0;\r
+ base < (uptr) arenas[i].pool_address;\r
+ ++j, base += POOL_SIZE) {\r
+ poolp p = (poolp)base;\r
+ const uint sz = p->szidx;\r
+ uint freeblocks;\r
+\r
+ if (p->ref.count == 0) {\r
+ /* currently unused */\r
+ assert(pool_is_in_list(p, arenas[i].freepools));\r
+ continue;\r
+ }\r
+ ++numpools[sz];\r
+ numblocks[sz] += p->ref.count;\r
+ freeblocks = NUMBLOCKS(sz) - p->ref.count;\r
+ numfreeblocks[sz] += freeblocks;\r
+#ifdef Py_DEBUG\r
+ if (freeblocks > 0)\r
+ assert(pool_is_in_list(p, usedpools[sz + sz]));\r
+#endif\r
+ }\r
+ }\r
+ assert(narenas == narenas_currently_allocated);\r
+\r
+ fputc('\n', stderr);\r
+ fputs("class size num pools blocks in use avail blocks\n"\r
+ "----- ---- --------- ------------- ------------\n",\r
+ stderr);\r
+\r
+ for (i = 0; i < numclasses; ++i) {\r
+ size_t p = numpools[i];\r
+ size_t b = numblocks[i];\r
+ size_t f = numfreeblocks[i];\r
+ uint size = INDEX2SIZE(i);\r
+ if (p == 0) {\r
+ assert(b == 0 && f == 0);\r
+ continue;\r
+ }\r
+ fprintf(stderr, "%5u %6u "\r
+ "%11" PY_FORMAT_SIZE_T "u "\r
+ "%15" PY_FORMAT_SIZE_T "u "\r
+ "%13" PY_FORMAT_SIZE_T "u\n",\r
+ i, size, p, b, f);\r
+ allocated_bytes += b * size;\r
+ available_bytes += f * size;\r
+ pool_header_bytes += p * POOL_OVERHEAD;\r
+ quantization += p * ((POOL_SIZE - POOL_OVERHEAD) % size);\r
+ }\r
+ fputc('\n', stderr);\r
+ (void)printone("# times object malloc called", serialno);\r
+\r
+ (void)printone("# arenas allocated total", ntimes_arena_allocated);\r
+ (void)printone("# arenas reclaimed", ntimes_arena_allocated - narenas);\r
+ (void)printone("# arenas highwater mark", narenas_highwater);\r
+ (void)printone("# arenas allocated current", narenas);\r
+\r
+ PyOS_snprintf(buf, sizeof(buf),\r
+ "%" PY_FORMAT_SIZE_T "u arenas * %d bytes/arena",\r
+ narenas, ARENA_SIZE);\r
+ (void)printone(buf, narenas * ARENA_SIZE);\r
+\r
+ fputc('\n', stderr);\r
+\r
+ total = printone("# bytes in allocated blocks", allocated_bytes);\r
+ total += printone("# bytes in available blocks", available_bytes);\r
+\r
+ PyOS_snprintf(buf, sizeof(buf),\r
+ "%u unused pools * %d bytes", numfreepools, POOL_SIZE);\r
+ total += printone(buf, (size_t)numfreepools * POOL_SIZE);\r
+\r
+ total += printone("# bytes lost to pool headers", pool_header_bytes);\r
+ total += printone("# bytes lost to quantization", quantization);\r
+ total += printone("# bytes lost to arena alignment", arena_alignment);\r
+ (void)printone("Total", total);\r
+}\r
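+\r
+/* In a PYMALLOC_DEBUG build, CPython typically invokes this at interpreter\r
+ shutdown when the PYTHONMALLOCSTATS environment variable is set; it can\r
+ also be called by hand from a debugger at any point. */\r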
+\r
+#endif /* PYMALLOC_DEBUG */\r
+\r
+#ifdef Py_USING_MEMORY_DEBUGGER\r
+/* Keep this function last so gcc won't inline it: its definition comes\r
+ * after every reference to it.\r
+ */\r
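+/* Note: pool->arenaindex is copied into a local so that it is read only\r
+ once. P may not be controlled by pymalloc at all, in which case the\r
+ apparent pool header is arbitrary memory that another thread could be\r
+ rewriting; reading the index twice could yield a second, unchecked\r
+ value. */\r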
+int\r
+Py_ADDRESS_IN_RANGE(void *P, poolp pool)\r
+{\r
+ uint arenaindex_temp = pool->arenaindex;\r
+\r
+ return arenaindex_temp < maxarenas &&\r
+ (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&\r
+ arenas[arenaindex_temp].address != 0;\r
+}\r
+#endif\r