+/* Default ISzAlloc vtable: thin wrappers over the project's standard heap
+   allocator MyAlloc()/MyFree(). The allocator-object pointer `p` is unused
+   (these wrappers carry no per-instance state). */
+static void *SzAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MyAlloc(size); }
+static void SzFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MyFree(address); }
+const ISzAlloc g_Alloc = { SzAlloc, SzFree };
+\r
+/* ISzAlloc vtable backed by MidAlloc()/MidFree() (defined elsewhere;
+   presumably a medium-block allocator, e.g. VirtualAlloc-backed on
+   Windows — confirm in the allocator's header). `p` is unused. */
+static void *SzMidAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return MidAlloc(size); }
+static void SzMidFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); MidFree(address); }
+const ISzAlloc g_MidAlloc = { SzMidAlloc, SzMidFree };
+\r
+/* ISzAlloc vtable backed by BigAlloc()/BigFree() (defined elsewhere;
+   presumably a large-block allocator, e.g. large-page aware — confirm in
+   the allocator's header). `p` is unused. */
+static void *SzBigAlloc(ISzAllocPtr p, size_t size) { UNUSED_VAR(p); return BigAlloc(size); }
+static void SzBigFree(ISzAllocPtr p, void *address) { UNUSED_VAR(p); BigFree(address); }
+const ISzAlloc g_BigAlloc = { SzBigAlloc, SzBigFree };
+\r
+\r
+/*
+ uintptr_t : <stdint.h> C99 (optional)
+ : unsupported in VS6
+*/
+
+/* UIntPtr: integer type wide enough to hold a pointer value, used only by
+   the alignment macros below. UINT_PTR on Windows; ptrdiff_t elsewhere as
+   a portable stand-in for the optional C99 uintptr_t (see note above). */
+#ifdef _WIN32
+ typedef UINT_PTR UIntPtr;
+#else
+ /*
+ typedef uintptr_t UIntPtr;
+ */
+ typedef ptrdiff_t UIntPtr;
+#endif
+
+
+/* Extra slack added to over-allocations in case the base allocator can
+   return addresses that are NOT sizeof(void *) aligned; 0 assumes
+   MyAlloc() gives malloc-style (at least pointer-size) alignment. */
+#define ADJUST_ALLOC_SIZE 0
+/*
+#define ADJUST_ALLOC_SIZE (sizeof(void *) - 1)
+*/
+/*
+ Use (ADJUST_ALLOC_SIZE = (sizeof(void *) - 1)), if
+ MyAlloc() can return address that is NOT multiple of sizeof(void *).
+*/
+
+
+/*
+#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((char *)(p) - ((size_t)(UIntPtr)(p) & ((align) - 1))))
+*/
+/* Round pointer p down to the nearest multiple of `align` (`align` must be
+   a power of two). Works by masking the pointer's integer value.
+   NOTE(review): the pointer->integer->pointer round-trip is
+   implementation-defined; the commented variant above stays in pointer
+   arithmetic instead. */
+#define MY_ALIGN_PTR_DOWN(p, align) ((void *)((((UIntPtr)(p)) & ~((UIntPtr)(align) - 1))))
+
+/* Round (p + ADJUST_ALLOC_SIZE) up to the NEXT multiple of `align`; the
+   result is strictly greater than p, which leaves room to stash the raw
+   allocation pointer just below the aligned address. */
+#define MY_ALIGN_PTR_UP_PLUS(p, align) MY_ALIGN_PTR_DOWN(((char *)(p) + (align) + ADJUST_ALLOC_SIZE), align)
+
+
+/* Prefer posix_memalign() where available (POSIX.1-2001, non-Windows);
+   an undefined _POSIX_C_SOURCE evaluates as 0 here, disabling it. */
+#if (_POSIX_C_SOURCE >= 200112L) && !defined(_WIN32)
+ #define USE_posix_memalign
+#endif
+\r
+/*
+ This posix_memalign() is for test purposes only.
+ We also need special Free() function instead of free(),
+ if this posix_memalign() is used.
+*/
+
+/*
+static int posix_memalign(void **ptr, size_t align, size_t size)
+{
+ size_t newSize = size + align;
+ void *p;
+ void *pAligned;
+ *ptr = NULL;
+ if (newSize < size)
+ return 12; // ENOMEM
+ p = MyAlloc(newSize);
+ if (!p)
+ return 12; // ENOMEM
+ pAligned = MY_ALIGN_PTR_UP_PLUS(p, align);
+ ((void **)pAligned)[-1] = p;
+ *ptr = pAligned;
+ return 0;
+}
+*/
+
+/*
+ ALLOC_ALIGN_SIZE >= sizeof(void *)
+ ALLOC_ALIGN_SIZE >= cache_line_size
+*/
+
+/* Alignment for g_AlignedAlloc: 128 bytes, satisfying both constraints
+   above (pointer size and typical 64/128-byte cache lines). */
+#define ALLOC_ALIGN_SIZE ((size_t)1 << 7)
+\r
+/* SzAlignedAlloc: ISzAlloc-compatible allocation returning a block aligned
+   to ALLOC_ALIGN_SIZE bytes; the allocator-object pointer `pp` is unused.
+   Returns NULL on size_t overflow or allocation failure.
+
+   Manual path (no posix_memalign): over-allocate by ALLOC_ALIGN_SIZE
+   (+ ADJUST_ALLOC_SIZE), align the raw pointer up, and store the raw
+   MyAlloc() result in the void* slot just below the aligned address so
+   SzAlignedFree() can recover it. The [-1] slot stays inside the block
+   because MY_ALIGN_PTR_UP_PLUS yields an address strictly above the raw
+   pointer, and with ADJUST_ALLOC_SIZE == 0 MyAlloc() results are assumed
+   to be at least sizeof(void *) aligned (see ADJUST_ALLOC_SIZE note).
+
+   NOTE(review): the Print*() calls trace every allocation; presumably they
+   are debug macros that compile to no-ops in release builds — confirm
+   where they are defined. */
+static void *SzAlignedAlloc(ISzAllocPtr pp, size_t size)
+{
+ #ifndef USE_posix_memalign
+ 
+ void *p;
+ void *pAligned;
+ size_t newSize;
+ UNUSED_VAR(pp);
+
+ /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned
+ block to prevent cache line sharing with another allocated blocks */
+
+ newSize = size + ALLOC_ALIGN_SIZE * 1 + ADJUST_ALLOC_SIZE;
+ if (newSize < size) /* size_t wrap-around: requested size too large */
+ return NULL;
+
+ p = MyAlloc(newSize);
+ 
+ if (!p)
+ return NULL;
+ pAligned = MY_ALIGN_PTR_UP_PLUS(p, ALLOC_ALIGN_SIZE);
+
+ Print(" size="); PrintHex(size, 8);
+ Print(" a_size="); PrintHex(newSize, 8);
+ Print(" ptr="); PrintAddr(p);
+ Print(" a_ptr="); PrintAddr(pAligned);
+ PrintLn();
+
+ /* stash the raw pointer for SzAlignedFree() */
+ ((void **)pAligned)[-1] = p;
+
+ return pAligned;
+
+ #else
+
+ void *p;
+ UNUSED_VAR(pp);
+ if (posix_memalign(&p, ALLOC_ALIGN_SIZE, size))
+ return NULL;
+
+ Print(" posix_memalign="); PrintAddr(p);
+ PrintLn();
+
+ return p;
+
+ #endif
+}
+\r
+\r
+/* SzAlignedFree: release a block obtained from SzAlignedAlloc(); `pp` is
+   unused. Manual path: recover and free the raw pointer stashed in the
+   void* slot just below the aligned address; NULL is accepted and
+   ignored. posix path: posix_memalign() memory is free()-compatible, and
+   free(NULL) is a no-op. */
+static void SzAlignedFree(ISzAllocPtr pp, void *address)
+{
+ UNUSED_VAR(pp);
+ #ifndef USE_posix_memalign
+ if (address)
+ MyFree(((void **)address)[-1]);
+ #else
+ free(address);
+ #endif
+}
+
+
+/* Cache-line-aligned allocator vtable built from the pair above. */
+const ISzAlloc g_AlignedAlloc = { SzAlignedAlloc, SzAlignedFree };
+\r
+\r
+\r
+/* Align a pointer down to sizeof(void *); used to locate the hidden
+   raw-pointer slot even when the user-visible address carries an offset
+   that is not a multiple of sizeof(void *). */
+#define MY_ALIGN_PTR_DOWN_1(p) MY_ALIGN_PTR_DOWN(p, sizeof(void *))
+
+/* we align ptr to support cases where CAlignOffsetAlloc::offset is not multiply of sizeof(void *) */
+/* Lvalue of the stashed raw-block pointer stored just below user pointer p. */
+#define REAL_BLOCK_PTR_VAR(p) ((void **)MY_ALIGN_PTR_DOWN_1(p))[-1]
+/*
+#define REAL_BLOCK_PTR_VAR(p) ((void **)(p))[-1]
+*/
+\r
+static void *AlignOffsetAlloc_Alloc(ISzAllocPtr pp, size_t size)\r
+{\r
+ CAlignOffsetAlloc *p = CONTAINER_FROM_VTBL(pp, CAlignOffsetAlloc, vt);\r
+ void *adr;\r
+ void *pAligned;\r
+ size_t newSize;\r
+ size_t extra;\r
+ size_t alignSize = (size_t)1 << p->numAlignBits;\r
+\r
+ if (alignSize < sizeof(void *))\r
+ alignSize = sizeof(void *);\r
+ \r
+ if (p->offset >= alignSize)\r
+ return NULL;\r
+\r
+ /* also we can allocate additional dummy ALLOC_ALIGN_SIZE bytes after aligned\r
+ block to prevent cache line sharing with another allocated blocks */\r
+ extra = p->offset & (sizeof(void *) - 1);\r
+ newSize = size + alignSize + extra + ADJUST_ALLOC_SIZE;\r
+ if (newSize < size)\r
+ return NULL;\r