-LzmaCustomDecompressLib is based on the LZMA SDK 16.04.\r
-LZMA SDK 16.04 was placed in the public domain on\r
-2016-10-04. It was released on the\r
+LzmaCustomDecompressLib is based on the LZMA SDK 18.05.\r
+LZMA SDK 18.05 was placed in the public domain on\r
+2018-04-30. It was released on the\r
http://www.7-zip.org/sdk.html website.\r
## @file\r
# LzmaArchCustomDecompressLib produces LZMA custom decompression algorithm with the converter for the different arch code.\r
#\r
-# It is based on the LZMA SDK 16.04\r
-# LZMA SDK 16.04 was placed in the public domain on 2016-10-04.\r
+# It is based on the LZMA SDK 18.05\r
+# LZMA SDK 18.05 was placed in the public domain on 2018-04-30.\r
# It was released on the http://www.7-zip.org/sdk.html website.\r
#\r
-# Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>\r
+# Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>\r
#\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
## @file\r
# LzmaCustomDecompressLib produces LZMA custom decompression algorithm.\r
#\r
-# It is based on the LZMA SDK 16.04.\r
-# LZMA SDK 16.04 was placed in the public domain on 2016-10-04.\r
+# It is based on the LZMA SDK 18.05.\r
+# LZMA SDK 18.05 was placed in the public domain on 2018-04-30.\r
# It was released on the http://www.7-zip.org/sdk.html website.\r
#\r
# Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>\r
#\r
# The following information is for reference only and not required by the build tools.\r
#\r
-# VALID_ARCHITECTURES = IA32 X64 EBC\r
+# VALID_ARCHITECTURES = IA32 X64 IPF EBC\r
#\r
\r
[Sources]\r
**/\r
VOID *\r
SzAlloc (\r
- VOID *P,\r
+ CONST ISzAlloc *P,\r
size_t Size\r
)\r
{\r
**/\r
VOID\r
SzFree (\r
- VOID *P,\r
+ CONST ISzAlloc *P,\r
VOID *Address\r
)\r
{\r
/* 7zTypes.h -- Basic types\r
-2013-11-12 : Igor Pavlov : Public domain */\r
+2017-07-17 : Igor Pavlov : Public domain */\r
\r
#ifndef __7Z_TYPES_H\r
#define __7Z_TYPES_H\r
\r
typedef int SRes;\r
\r
+\r
#ifdef _WIN32\r
+\r
/* typedef DWORD WRes; */\r
typedef unsigned WRes;\r
+#define MY_SRes_HRESULT_FROM_WRes(x) HRESULT_FROM_WIN32(x)\r
+\r
#else\r
+\r
typedef int WRes;\r
+#define MY__FACILITY_WIN32 7\r
+#define MY__FACILITY__WRes MY__FACILITY_WIN32\r
+#define MY_SRes_HRESULT_FROM_WRes(x) ((HRESULT)(x) <= 0 ? ((HRESULT)(x)) : ((HRESULT) (((x) & 0x0000FFFF) | (MY__FACILITY__WRes << 16) | 0x80000000)))\r
+\r
#endif\r
\r
+\r
#ifndef RINOK\r
#define RINOK(x) { int __result__ = (x); if (__result__ != 0) return __result__; }\r
#endif\r
#define MY_NO_INLINE\r
#endif\r
\r
+#define MY_FORCE_INLINE __forceinline\r
+\r
#define MY_CDECL __cdecl\r
#define MY_FAST_CALL __fastcall\r
\r
#else\r
\r
#define MY_NO_INLINE\r
+#define MY_FORCE_INLINE\r
#define MY_CDECL\r
#define MY_FAST_CALL\r
\r
+/* inline keyword : for C++ / C99 */\r
+\r
+/* GCC, clang: */\r
+/*\r
+#if defined (__GNUC__) && (__GNUC__ >= 4)\r
+#define MY_FORCE_INLINE __attribute__((always_inline))\r
+#define MY_NO_INLINE __attribute__((noinline))\r
+#endif\r
+*/\r
+\r
#endif\r
\r
\r
/* The following interfaces use first parameter as pointer to structure */\r
\r
-typedef struct\r
+typedef struct IByteIn IByteIn;\r
+struct IByteIn\r
{\r
- Byte (*Read)(void *p); /* reads one byte, returns 0 in case of EOF or error */\r
-} IByteIn;\r
+ Byte (*Read)(const IByteIn *p); /* reads one byte, returns 0 in case of EOF or error */\r
+};\r
+#define IByteIn_Read(p) (p)->Read(p)\r
\r
-typedef struct\r
+\r
+typedef struct IByteOut IByteOut;\r
+struct IByteOut\r
{\r
- void (*Write)(void *p, Byte b);\r
-} IByteOut;\r
+ void (*Write)(const IByteOut *p, Byte b);\r
+};\r
+#define IByteOut_Write(p, b) (p)->Write(p, b)\r
\r
-typedef struct\r
+\r
+typedef struct ISeqInStream ISeqInStream;\r
+struct ISeqInStream\r
{\r
- SRes (*Read)(void *p, void *buf, size_t *size);\r
+ SRes (*Read)(const ISeqInStream *p, void *buf, size_t *size);\r
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.\r
(output(*size) < input(*size)) is allowed */\r
-} ISeqInStream;\r
+};\r
+#define ISeqInStream_Read(p, buf, size) (p)->Read(p, buf, size)\r
\r
/* it can return SZ_ERROR_INPUT_EOF */\r
-SRes SeqInStream_Read(ISeqInStream *stream, void *buf, size_t size);\r
-SRes SeqInStream_Read2(ISeqInStream *stream, void *buf, size_t size, SRes errorType);\r
-SRes SeqInStream_ReadByte(ISeqInStream *stream, Byte *buf);\r
+SRes SeqInStream_Read(const ISeqInStream *stream, void *buf, size_t size);\r
+SRes SeqInStream_Read2(const ISeqInStream *stream, void *buf, size_t size, SRes errorType);\r
+SRes SeqInStream_ReadByte(const ISeqInStream *stream, Byte *buf);\r
\r
-typedef struct\r
+\r
+typedef struct ISeqOutStream ISeqOutStream;\r
+struct ISeqOutStream\r
{\r
- size_t (*Write)(void *p, const void *buf, size_t size);\r
+ size_t (*Write)(const ISeqOutStream *p, const void *buf, size_t size);\r
/* Returns: result - the number of actually written bytes.\r
(result < size) means error */\r
-} ISeqOutStream;\r
+};\r
+#define ISeqOutStream_Write(p, buf, size) (p)->Write(p, buf, size)\r
\r
typedef enum\r
{\r
SZ_SEEK_END = 2\r
} ESzSeek;\r
\r
-typedef struct\r
+\r
+typedef struct ISeekInStream ISeekInStream;\r
+struct ISeekInStream\r
{\r
- SRes (*Read)(void *p, void *buf, size_t *size); /* same as ISeqInStream::Read */\r
- SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);\r
-} ISeekInStream;\r
+ SRes (*Read)(const ISeekInStream *p, void *buf, size_t *size); /* same as ISeqInStream::Read */\r
+ SRes (*Seek)(const ISeekInStream *p, Int64 *pos, ESzSeek origin);\r
+};\r
+#define ISeekInStream_Read(p, buf, size) (p)->Read(p, buf, size)\r
+#define ISeekInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)\r
\r
-typedef struct\r
+\r
+typedef struct ILookInStream ILookInStream;\r
+struct ILookInStream\r
{\r
- SRes (*Look)(void *p, const void **buf, size_t *size);\r
+ SRes (*Look)(const ILookInStream *p, const void **buf, size_t *size);\r
/* if (input(*size) != 0 && output(*size) == 0) means end_of_stream.\r
(output(*size) > input(*size)) is not allowed\r
(output(*size) < input(*size)) is allowed */\r
- SRes (*Skip)(void *p, size_t offset);\r
+ SRes (*Skip)(const ILookInStream *p, size_t offset);\r
/* offset must be <= output(*size) of Look */\r
\r
- SRes (*Read)(void *p, void *buf, size_t *size);\r
+ SRes (*Read)(const ILookInStream *p, void *buf, size_t *size);\r
/* reads directly (without buffer). It's same as ISeqInStream::Read */\r
- SRes (*Seek)(void *p, Int64 *pos, ESzSeek origin);\r
-} ILookInStream;\r
+ SRes (*Seek)(const ILookInStream *p, Int64 *pos, ESzSeek origin);\r
+};\r
\r
-SRes LookInStream_LookRead(ILookInStream *stream, void *buf, size_t *size);\r
-SRes LookInStream_SeekTo(ILookInStream *stream, UInt64 offset);\r
+#define ILookInStream_Look(p, buf, size) (p)->Look(p, buf, size)\r
+#define ILookInStream_Skip(p, offset) (p)->Skip(p, offset)\r
+#define ILookInStream_Read(p, buf, size) (p)->Read(p, buf, size)\r
+#define ILookInStream_Seek(p, pos, origin) (p)->Seek(p, pos, origin)\r
+\r
+\r
+SRes LookInStream_LookRead(const ILookInStream *stream, void *buf, size_t *size);\r
+SRes LookInStream_SeekTo(const ILookInStream *stream, UInt64 offset);\r
\r
/* reads via ILookInStream::Read */\r
-SRes LookInStream_Read2(ILookInStream *stream, void *buf, size_t size, SRes errorType);\r
-SRes LookInStream_Read(ILookInStream *stream, void *buf, size_t size);\r
+SRes LookInStream_Read2(const ILookInStream *stream, void *buf, size_t size, SRes errorType);\r
+SRes LookInStream_Read(const ILookInStream *stream, void *buf, size_t size);\r
+\r
\r
-#define LookToRead_BUF_SIZE (1 << 14)\r
\r
typedef struct\r
{\r
- ILookInStream s;\r
- ISeekInStream *realStream;\r
+ ILookInStream vt;\r
+ const ISeekInStream *realStream;\r
+ \r
size_t pos;\r
- size_t size;\r
- Byte buf[LookToRead_BUF_SIZE];\r
-} CLookToRead;\r
+ size_t size; /* it's data size */\r
+ \r
+ /* the following variables must be set outside */\r
+ Byte *buf;\r
+ size_t bufSize;\r
+} CLookToRead2;\r
+\r
+void LookToRead2_CreateVTable(CLookToRead2 *p, int lookahead);\r
+\r
+#define LookToRead2_Init(p) { (p)->pos = (p)->size = 0; }\r
\r
-void LookToRead_CreateVTable(CLookToRead *p, int lookahead);\r
-void LookToRead_Init(CLookToRead *p);\r
\r
typedef struct\r
{\r
- ISeqInStream s;\r
- ILookInStream *realStream;\r
+ ISeqInStream vt;\r
+ const ILookInStream *realStream;\r
} CSecToLook;\r
\r
void SecToLook_CreateVTable(CSecToLook *p);\r
\r
+\r
+\r
typedef struct\r
{\r
- ISeqInStream s;\r
- ILookInStream *realStream;\r
+ ISeqInStream vt;\r
+ const ILookInStream *realStream;\r
} CSecToRead;\r
\r
void SecToRead_CreateVTable(CSecToRead *p);\r
\r
-typedef struct\r
+\r
+typedef struct ICompressProgress ICompressProgress;\r
+\r
+struct ICompressProgress\r
{\r
- SRes (*Progress)(void *p, UInt64 inSize, UInt64 outSize);\r
+ SRes (*Progress)(const ICompressProgress *p, UInt64 inSize, UInt64 outSize);\r
/* Returns: result. (result != SZ_OK) means break.\r
Value (UInt64)(Int64)-1 for size means unknown value. */\r
-} ICompressProgress;\r
+};\r
+#define ICompressProgress_Progress(p, inSize, outSize) (p)->Progress(p, inSize, outSize)\r
\r
-typedef struct\r
+\r
+\r
+typedef struct ISzAlloc ISzAlloc;\r
+typedef const ISzAlloc * ISzAllocPtr;\r
+\r
+struct ISzAlloc\r
{\r
- void *(*Alloc)(void *p, size_t size);\r
- void (*Free)(void *p, void *address); /* address can be 0 */\r
-} ISzAlloc;\r
+ void *(*Alloc)(ISzAllocPtr p, size_t size);\r
+ void (*Free)(ISzAllocPtr p, void *address); /* address can be 0 */\r
+};\r
+\r
+#define ISzAlloc_Alloc(p, size) (p)->Alloc(p, size)\r
+#define ISzAlloc_Free(p, a) (p)->Free(p, a)\r
+\r
+/* deprecated */\r
+#define IAlloc_Alloc(p, size) ISzAlloc_Alloc(p, size)\r
+#define IAlloc_Free(p, a) ISzAlloc_Free(p, a)\r
+\r
+\r
+\r
+\r
+\r
+#ifndef MY_offsetof\r
+ #ifdef offsetof\r
+ #define MY_offsetof(type, m) offsetof(type, m)\r
+ /*\r
+ #define MY_offsetof(type, m) FIELD_OFFSET(type, m)\r
+ */\r
+ #else\r
+ #define MY_offsetof(type, m) ((size_t)&(((type *)0)->m))\r
+ #endif\r
+#endif\r
+\r
+\r
+\r
+#ifndef MY_container_of\r
+\r
+/*\r
+#define MY_container_of(ptr, type, m) container_of(ptr, type, m)\r
+#define MY_container_of(ptr, type, m) CONTAINING_RECORD(ptr, type, m)\r
+#define MY_container_of(ptr, type, m) ((type *)((char *)(ptr) - offsetof(type, m)))\r
+#define MY_container_of(ptr, type, m) (&((type *)0)->m == (ptr), ((type *)(((char *)(ptr)) - MY_offsetof(type, m))))\r
+*/\r
+\r
+/*\r
+ GCC shows warning: "perhaps the 'offsetof' macro was used incorrectly"\r
+ GCC 3.4.4 : classes with constructor\r
+ GCC 4.8.1 : classes with non-public variable members"\r
+*/\r
+\r
+#define MY_container_of(ptr, type, m) ((type *)((char *)(1 ? (ptr) : &((type *)0)->m) - MY_offsetof(type, m)))\r
+\r
+\r
+#endif\r
+\r
+#define CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m) ((type *)(ptr))\r
+\r
+/*\r
+#define CONTAINER_FROM_VTBL(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)\r
+*/\r
+#define CONTAINER_FROM_VTBL(ptr, type, m) MY_container_of(ptr, type, m)\r
+\r
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL_SIMPLE(ptr, type, m)\r
+/*\r
+#define CONTAINER_FROM_VTBL_CLS(ptr, type, m) CONTAINER_FROM_VTBL(ptr, type, m)\r
+*/\r
+\r
\r
-#define IAlloc_Alloc(p, size) (p)->Alloc((p), size)\r
-#define IAlloc_Free(p, a) (p)->Free((p), a)\r
\r
#ifdef _WIN32\r
\r
-#define MY_VER_MAJOR 16\r
-#define MY_VER_MINOR 04\r
+#define MY_VER_MAJOR 18\r
+#define MY_VER_MINOR 05\r
#define MY_VER_BUILD 0\r
-#define MY_VERSION_NUMBERS "16.04"\r
-#define MY_VERSION "16.04"\r
-#define MY_DATE "2016-10-04"\r
+#define MY_VERSION_NUMBERS "18.05"\r
+#define MY_VERSION MY_VERSION_NUMBERS\r
+\r
+#ifdef MY_CPU_NAME\r
+ #define MY_VERSION_CPU MY_VERSION " (" MY_CPU_NAME ")"\r
+#else\r
+ #define MY_VERSION_CPU MY_VERSION\r
+#endif\r
+\r
+#define MY_DATE "2018-04-30"\r
#undef MY_COPYRIGHT\r
#undef MY_VERSION_COPYRIGHT_DATE\r
#define MY_AUTHOR_NAME "Igor Pavlov"\r
#define MY_COPYRIGHT_PD "Igor Pavlov : Public domain"\r
-#define MY_COPYRIGHT_CR "Copyright (c) 1999-2016 Igor Pavlov"\r
+#define MY_COPYRIGHT_CR "Copyright (c) 1999-2018 Igor Pavlov"\r
\r
#ifdef USE_COPYRIGHT_CR\r
#define MY_COPYRIGHT MY_COPYRIGHT_CR\r
#define MY_COPYRIGHT MY_COPYRIGHT_PD\r
#endif\r
\r
-#define MY_VERSION_COPYRIGHT_DATE MY_VERSION " : " MY_COPYRIGHT " : " MY_DATE\r
+#define MY_COPYRIGHT_DATE MY_COPYRIGHT " : " MY_DATE\r
+#define MY_VERSION_COPYRIGHT_DATE MY_VERSION_CPU " : " MY_COPYRIGHT " : " MY_DATE\r
/* Bra86.c -- Converter for x86 code (BCJ)\r
-2013-11-12 : Igor Pavlov : Public domain */\r
+2017-04-03 : Igor Pavlov : Public domain */\r
\r
#include "Precomp.h"\r
\r
else\r
{\r
mask >>= (unsigned)d;\r
- if (mask != 0 && (mask > 4 || mask == 3 || Test86MSByte(p[(mask >> 1) + 1])))\r
+ if (mask != 0 && (mask > 4 || mask == 3 || Test86MSByte(p[(size_t)(mask >> 1) + 1])))\r
{\r
mask = (mask >> 1) | 4;\r
pos++;\r
/* Compiler.h\r
-2015-08-02 : Igor Pavlov : Public domain */\r
+2017-04-03 : Igor Pavlov : Public domain */\r
\r
#ifndef __7Z_COMPILER_H\r
#define __7Z_COMPILER_H\r
#pragma warning(disable : 4514) // unreferenced inline function has been removed\r
#pragma warning(disable : 4702) // unreachable code\r
#pragma warning(disable : 4710) // not inlined\r
+ #pragma warning(disable : 4714) // function marked as __forceinline not inlined\r
#pragma warning(disable : 4786) // identifier was truncated to '255' characters in the debug information\r
#endif\r
\r
/* CpuArch.h -- CPU specific code\r
-2016-06-09: Igor Pavlov : Public domain */\r
+2017-09-04 : Igor Pavlov : Public domain */\r
\r
#ifndef __CPU_ARCH_H\r
#define __CPU_ARCH_H\r
MY_CPU_LE_UNALIGN means that CPU is LITTLE ENDIAN and CPU supports unaligned memory accesses.\r
*/\r
\r
-#if defined(_M_X64) \\r
- || defined(_M_AMD64) \\r
- || defined(__x86_64__) \\r
- || defined(__AMD64__) \\r
- || defined(__amd64__)\r
+#if defined(_M_X64) \\r
+ || defined(_M_AMD64) \\r
+ || defined(__x86_64__) \\r
+ || defined(__AMD64__) \\r
+ || defined(__amd64__)\r
#define MY_CPU_AMD64\r
+ #ifdef __ILP32__\r
+ #define MY_CPU_NAME "x32"\r
+ #else\r
+ #define MY_CPU_NAME "x64"\r
+ #endif\r
+ #define MY_CPU_64BIT\r
#endif\r
\r
-#if defined(MY_CPU_AMD64) \\r
- || defined(_M_IA64) \\r
- || defined(__AARCH64EL__) \\r
- || defined(__AARCH64EB__)\r
+\r
+#if defined(_M_IX86) \\r
+ || defined(__i386__)\r
+ #define MY_CPU_X86\r
+ #define MY_CPU_NAME "x86"\r
+ #define MY_CPU_32BIT\r
+#endif\r
+\r
+\r
+#if defined(_M_ARM64) \\r
+ || defined(__AARCH64EL__) \\r
+ || defined(__AARCH64EB__) \\r
+ || defined(__aarch64__)\r
+ #define MY_CPU_ARM64\r
+ #define MY_CPU_NAME "arm64"\r
#define MY_CPU_64BIT\r
#endif\r
\r
-#if defined(_M_IX86) || defined(__i386__)\r
-#define MY_CPU_X86\r
+\r
+#if defined(_M_ARM) \\r
+ || defined(_M_ARM_NT) \\r
+ || defined(_M_ARMT) \\r
+ || defined(__arm__) \\r
+ || defined(__thumb__) \\r
+ || defined(__ARMEL__) \\r
+ || defined(__ARMEB__) \\r
+ || defined(__THUMBEL__) \\r
+ || defined(__THUMBEB__)\r
+ #define MY_CPU_ARM\r
+ #define MY_CPU_NAME "arm"\r
+ #define MY_CPU_32BIT\r
#endif\r
\r
-#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)\r
-#define MY_CPU_X86_OR_AMD64\r
+\r
+#if defined(_M_IA64) \\r
+ || defined(__ia64__)\r
+ #define MY_CPU_IA64\r
+ #define MY_CPU_NAME "ia64"\r
+ #define MY_CPU_64BIT\r
#endif\r
\r
-#if defined(MY_CPU_X86) \\r
- || defined(_M_ARM) \\r
- || defined(__ARMEL__) \\r
- || defined(__THUMBEL__) \\r
- || defined(__ARMEB__) \\r
- || defined(__THUMBEB__)\r
+\r
+#if defined(__mips64) \\r
+ || defined(__mips64__) \\r
+ || (defined(__mips) && (__mips == 64 || __mips == 4 || __mips == 3))\r
+ #define MY_CPU_NAME "mips64"\r
+ #define MY_CPU_64BIT\r
+#elif defined(__mips__)\r
+ #define MY_CPU_NAME "mips"\r
+ /* #define MY_CPU_32BIT */\r
+#endif\r
+\r
+\r
+#if defined(__ppc64__) \\r
+ || defined(__powerpc64__)\r
+ #ifdef __ILP32__\r
+ #define MY_CPU_NAME "ppc64-32"\r
+ #else\r
+ #define MY_CPU_NAME "ppc64"\r
+ #endif\r
+ #define MY_CPU_64BIT\r
+#elif defined(__ppc__) \\r
+ || defined(__powerpc__)\r
+ #define MY_CPU_NAME "ppc"\r
#define MY_CPU_32BIT\r
#endif\r
\r
-#if defined(_WIN32) && defined(_M_ARM)\r
-#define MY_CPU_ARM_LE\r
+\r
+#if defined(__sparc64__)\r
+ #define MY_CPU_NAME "sparc64"\r
+ #define MY_CPU_64BIT\r
+#elif defined(__sparc__)\r
+ #define MY_CPU_NAME "sparc"\r
+ /* #define MY_CPU_32BIT */\r
#endif\r
\r
-#if defined(_WIN32) && defined(_M_IA64)\r
-#define MY_CPU_IA64_LE\r
+\r
+#if defined(MY_CPU_X86) || defined(MY_CPU_AMD64)\r
+#define MY_CPU_X86_OR_AMD64\r
#endif\r
\r
+\r
+#ifdef _WIN32\r
+\r
+ #ifdef MY_CPU_ARM\r
+ #define MY_CPU_ARM_LE\r
+ #endif\r
+\r
+ #ifdef MY_CPU_ARM64\r
+ #define MY_CPU_ARM64_LE\r
+ #endif\r
+\r
+ #ifdef _M_IA64\r
+ #define MY_CPU_IA64_LE\r
+ #endif\r
+\r
+#endif\r
+\r
+\r
#if defined(MY_CPU_X86_OR_AMD64) \\r
|| defined(MY_CPU_ARM_LE) \\r
+ || defined(MY_CPU_ARM64_LE) \\r
|| defined(MY_CPU_IA64_LE) \\r
|| defined(__LITTLE_ENDIAN__) \\r
|| defined(__ARMEL__) \\r
#define MY_CPU_BE\r
#endif\r
\r
+\r
#if defined(MY_CPU_LE) && defined(MY_CPU_BE)\r
-Stop_Compiling_Bad_Endian\r
+ #error Stop_Compiling_Bad_Endian\r
+#endif\r
+\r
+\r
+#if defined(MY_CPU_32BIT) && defined(MY_CPU_64BIT)\r
+ #error Stop_Compiling_Bad_32_64_BIT\r
#endif\r
\r
\r
+#ifndef MY_CPU_NAME\r
+ #ifdef MY_CPU_LE\r
+ #define MY_CPU_NAME "LE"\r
+ #elif defined(MY_CPU_BE)\r
+ #define MY_CPU_NAME "BE"\r
+ #else\r
+ /*\r
+ #define MY_CPU_NAME ""\r
+ */\r
+ #endif\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
#ifdef MY_CPU_LE\r
#if defined(MY_CPU_X86_OR_AMD64) \\r
- /* || defined(__AARCH64EL__) */\r
+ || defined(MY_CPU_ARM64) \\r
+ || defined(__ARM_FEATURE_UNALIGNED)\r
#define MY_CPU_LE_UNALIGN\r
#endif\r
#endif\r
\r
#endif\r
\r
+#ifdef __has_builtin\r
+ #define MY__has_builtin(x) __has_builtin(x)\r
+#else\r
+ #define MY__has_builtin(x) 0\r
+#endif\r
\r
#if defined(MY_CPU_LE_UNALIGN) && /* defined(_WIN64) && */ (_MSC_VER >= 1300)\r
\r
\r
#include <stdlib.h>\r
\r
+#pragma intrinsic(_byteswap_ushort)\r
#pragma intrinsic(_byteswap_ulong)\r
#pragma intrinsic(_byteswap_uint64)\r
+\r
+/* #define GetBe16(p) _byteswap_ushort(*(const UInt16 *)(const Byte *)(p)) */\r
#define GetBe32(p) _byteswap_ulong(*(const UInt32 *)(const Byte *)(p))\r
#define GetBe64(p) _byteswap_uint64(*(const UInt64 *)(const Byte *)(p))\r
\r
#define SetBe32(p, v) (*(UInt32 *)(void *)(p)) = _byteswap_ulong(v)\r
\r
-#elif defined(MY_CPU_LE_UNALIGN) && defined (__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))\r
+#elif defined(MY_CPU_LE_UNALIGN) && ( \\r
+ (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))) \\r
+ || (defined(__clang__) && MY__has_builtin(__builtin_bswap16)) )\r
\r
+/* #define GetBe16(p) __builtin_bswap16(*(const UInt16 *)(const Byte *)(p)) */\r
#define GetBe32(p) __builtin_bswap32(*(const UInt32 *)(const Byte *)(p))\r
#define GetBe64(p) __builtin_bswap64(*(const UInt64 *)(const Byte *)(p))\r
\r
#endif\r
\r
\r
+#ifndef GetBe16\r
+\r
#define GetBe16(p) ( (UInt16) ( \\r
((UInt16)((const Byte *)(p))[0] << 8) | \\r
((const Byte *)(p))[1] ))\r
\r
+#endif\r
+\r
\r
\r
#ifdef MY_CPU_X86_OR_AMD64\r
/* LzFind.c -- Match finder for LZ algorithms\r
-2015-10-15 : Igor Pavlov : Public domain */\r
+2017-06-10 : Igor Pavlov : Public domain */\r
\r
#include "Precomp.h"\r
\r
\r
#define kStartMaxLen 3\r
\r
-static void LzInWindow_Free(CMatchFinder *p, ISzAlloc *alloc)\r
+static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)\r
{\r
if (!p->directInput)\r
{\r
- alloc->Free(alloc, p->bufferBase);\r
+ ISzAlloc_Free(alloc, p->bufferBase);\r
p->bufferBase = NULL;\r
}\r
}\r
\r
/* keepSizeBefore + keepSizeAfter + keepSizeReserv must be < 4G) */\r
\r
-static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAlloc *alloc)\r
+static int LzInWindow_Create(CMatchFinder *p, UInt32 keepSizeReserv, ISzAllocPtr alloc)\r
{\r
UInt32 blockSize = p->keepSizeBefore + p->keepSizeAfter + keepSizeReserv;\r
if (p->directInput)\r
{\r
LzInWindow_Free(p, alloc);\r
p->blockSize = blockSize;\r
- p->bufferBase = (Byte *)alloc->Alloc(alloc, (size_t)blockSize);\r
+ p->bufferBase = (Byte *)ISzAlloc_Alloc(alloc, (size_t)blockSize);\r
}\r
return (p->bufferBase != NULL);\r
}\r
if (size == 0)\r
return;\r
\r
- p->result = p->stream->Read(p->stream, dest, &size);\r
+ p->result = ISeqInStream_Read(p->stream, dest, &size);\r
if (p->result != SZ_OK)\r
return;\r
if (size == 0)\r
p->bufferBase = NULL;\r
p->directInput = 0;\r
p->hash = NULL;\r
+ p->expectedDataSize = (UInt64)(Int64)-1;\r
MatchFinder_SetDefaultSettings(p);\r
\r
for (i = 0; i < 256; i++)\r
UInt32 r = i;\r
unsigned j;\r
for (j = 0; j < 8; j++)\r
- r = (r >> 1) ^ (kCrcPoly & ~((r & 1) - 1));\r
+ r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));\r
p->crc[i] = r;\r
}\r
}\r
\r
-static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAlloc *alloc)\r
+static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)\r
{\r
- alloc->Free(alloc, p->hash);\r
+ ISzAlloc_Free(alloc, p->hash);\r
p->hash = NULL;\r
}\r
\r
-void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc)\r
+void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)\r
{\r
MatchFinder_FreeThisClassMemory(p, alloc);\r
LzInWindow_Free(p, alloc);\r
}\r
\r
-static CLzRef* AllocRefs(size_t num, ISzAlloc *alloc)\r
+static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)\r
{\r
size_t sizeInBytes = (size_t)num * sizeof(CLzRef);\r
if (sizeInBytes / sizeof(CLzRef) != num)\r
return NULL;\r
- return (CLzRef *)alloc->Alloc(alloc, sizeInBytes);\r
+ return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);\r
}\r
\r
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,\r
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,\r
- ISzAlloc *alloc)\r
+ ISzAllocPtr alloc)\r
{\r
UInt32 sizeReserv;\r
\r
hs = (1 << 16) - 1;\r
else\r
{\r
- hs = historySize - 1;\r
+ hs = historySize;\r
+ if (hs > p->expectedDataSize)\r
+ hs = (UInt32)p->expectedDataSize;\r
+ if (hs != 0)\r
+ hs--;\r
hs |= (hs >> 1);\r
hs |= (hs >> 2);\r
hs |= (hs >> 4);\r
p->posLimit = p->pos + limit;\r
}\r
\r
-void MatchFinder_Init_2(CMatchFinder *p, int readData)\r
+\r
+void MatchFinder_Init_LowHash(CMatchFinder *p)\r
+{\r
+ size_t i;\r
+ CLzRef *items = p->hash;\r
+ size_t numItems = p->fixedHashSize;\r
+ for (i = 0; i < numItems; i++)\r
+ items[i] = kEmptyHashValue;\r
+}\r
+\r
+\r
+void MatchFinder_Init_HighHash(CMatchFinder *p)\r
+{\r
+ size_t i;\r
+ CLzRef *items = p->hash + p->fixedHashSize;\r
+ size_t numItems = (size_t)p->hashMask + 1;\r
+ for (i = 0; i < numItems; i++)\r
+ items[i] = kEmptyHashValue;\r
+}\r
+\r
+\r
+void MatchFinder_Init_3(CMatchFinder *p, int readData)\r
{\r
- UInt32 i;\r
- UInt32 *hash = p->hash;\r
- UInt32 num = p->hashSizeSum;\r
- for (i = 0; i < num; i++)\r
- hash[i] = kEmptyHashValue;\r
- \r
p->cyclicBufferPos = 0;\r
p->buffer = p->bufferBase;\r
- p->pos = p->streamPos = p->cyclicBufferSize;\r
+ p->pos =\r
+ p->streamPos = p->cyclicBufferSize;\r
p->result = SZ_OK;\r
p->streamEndWasReached = 0;\r
\r
MatchFinder_SetLimits(p);\r
}\r
\r
+\r
void MatchFinder_Init(CMatchFinder *p)\r
{\r
- MatchFinder_Init_2(p, True);\r
+ MatchFinder_Init_HighHash(p);\r
+ MatchFinder_Init_LowHash(p);\r
+ MatchFinder_Init_3(p, True);\r
}\r
+\r
\r
static UInt32 MatchFinder_GetSubValue(CMatchFinder *p)\r
{\r
\r
d2 = pos - hash[h2];\r
\r
- curMatch = hash[kFix3HashSize + hv];\r
+ curMatch = (hash + kFix3HashSize)[hv];\r
\r
hash[h2] = pos;\r
- hash[kFix3HashSize + hv] = pos;\r
+ (hash + kFix3HashSize)[hv] = pos;\r
\r
maxLen = 2;\r
offset = 0;\r
pos = p->pos;\r
\r
d2 = pos - hash[ h2];\r
- d3 = pos - hash[kFix3HashSize + h3];\r
+ d3 = pos - (hash + kFix3HashSize)[h3];\r
\r
- curMatch = hash[kFix4HashSize + hv];\r
+ curMatch = (hash + kFix4HashSize)[hv];\r
\r
hash[ h2] = pos;\r
- hash[kFix3HashSize + h3] = pos;\r
- hash[kFix4HashSize + hv] = pos;\r
+ (hash + kFix3HashSize)[h3] = pos;\r
+ (hash + kFix4HashSize)[hv] = pos;\r
\r
maxLen = 0;\r
offset = 0;\r
if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur)\r
{\r
maxLen = 3;\r
- distances[offset + 1] = d3 - 1;\r
+ distances[(size_t)offset + 1] = d3 - 1;\r
offset += 2;\r
d2 = d3;\r
}\r
if (offset != 0)\r
{\r
UPDATE_maxLen\r
- distances[offset - 2] = maxLen;\r
+ distances[(size_t)offset - 2] = maxLen;\r
if (maxLen == lenLimit)\r
{\r
SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));\r
pos = p->pos;\r
\r
d2 = pos - hash[ h2];\r
- d3 = pos - hash[kFix3HashSize + h3];\r
- d4 = pos - hash[kFix4HashSize + h4];\r
+ d3 = pos - (hash + kFix3HashSize)[h3];\r
+ d4 = pos - (hash + kFix4HashSize)[h4];\r
\r
- curMatch = hash[kFix5HashSize + hv];\r
+ curMatch = (hash + kFix5HashSize)[hv];\r
\r
hash[ h2] = pos;\r
- hash[kFix3HashSize + h3] = pos;\r
- hash[kFix4HashSize + h4] = pos;\r
- hash[kFix5HashSize + hv] = pos;\r
+ (hash + kFix3HashSize)[h3] = pos;\r
+ (hash + kFix4HashSize)[h4] = pos;\r
+ (hash + kFix5HashSize)[hv] = pos;\r
\r
maxLen = 0;\r
offset = 0;\r
&& *(cur - d4 + 3) == *(cur + 3))\r
{\r
maxLen = 4;\r
- distances[offset + 1] = d4 - 1;\r
+ distances[(size_t)offset + 1] = d4 - 1;\r
offset += 2;\r
d2 = d4;\r
}\r
if (offset != 0)\r
{\r
UPDATE_maxLen\r
- distances[offset - 2] = maxLen;\r
+ distances[(size_t)offset - 2] = maxLen;\r
if (maxLen == lenLimit)\r
{\r
SkipMatchesSpec(lenLimit, curMatch, MF_PARAMS(p));\r
pos = p->pos;\r
\r
d2 = pos - hash[ h2];\r
- d3 = pos - hash[kFix3HashSize + h3];\r
+ d3 = pos - (hash + kFix3HashSize)[h3];\r
\r
- curMatch = hash[kFix4HashSize + hv];\r
+ curMatch = (hash + kFix4HashSize)[hv];\r
\r
hash[ h2] = pos;\r
- hash[kFix3HashSize + h3] = pos;\r
- hash[kFix4HashSize + hv] = pos;\r
+ (hash + kFix3HashSize)[h3] = pos;\r
+ (hash + kFix4HashSize)[hv] = pos;\r
\r
maxLen = 0;\r
offset = 0;\r
if (d2 != d3 && d3 < p->cyclicBufferSize && *(cur - d3) == *cur)\r
{\r
maxLen = 3;\r
- distances[offset + 1] = d3 - 1;\r
+ distances[(size_t)offset + 1] = d3 - 1;\r
offset += 2;\r
d2 = d3;\r
}\r
if (offset != 0)\r
{\r
UPDATE_maxLen\r
- distances[offset - 2] = maxLen;\r
+ distances[(size_t)offset - 2] = maxLen;\r
if (maxLen == lenLimit)\r
{\r
p->son[p->cyclicBufferPos] = curMatch;\r
pos = p->pos;\r
\r
d2 = pos - hash[ h2];\r
- d3 = pos - hash[kFix3HashSize + h3];\r
- d4 = pos - hash[kFix4HashSize + h4];\r
+ d3 = pos - (hash + kFix3HashSize)[h3];\r
+ d4 = pos - (hash + kFix4HashSize)[h4];\r
\r
- curMatch = hash[kFix5HashSize + hv];\r
+ curMatch = (hash + kFix5HashSize)[hv];\r
\r
hash[ h2] = pos;\r
- hash[kFix3HashSize + h3] = pos;\r
- hash[kFix4HashSize + h4] = pos;\r
- hash[kFix5HashSize + hv] = pos;\r
+ (hash + kFix3HashSize)[h3] = pos;\r
+ (hash + kFix4HashSize)[h4] = pos;\r
+ (hash + kFix5HashSize)[hv] = pos;\r
\r
maxLen = 0;\r
offset = 0;\r
&& *(cur - d4 + 3) == *(cur + 3))\r
{\r
maxLen = 4;\r
- distances[offset + 1] = d4 - 1;\r
+ distances[(size_t)offset + 1] = d4 - 1;\r
offset += 2;\r
d2 = d4;\r
}\r
if (offset != 0)\r
{\r
UPDATE_maxLen\r
- distances[offset - 2] = maxLen;\r
+ distances[(size_t)offset - 2] = maxLen;\r
if (maxLen == lenLimit)\r
{\r
p->son[p->cyclicBufferPos] = curMatch;\r
SKIP_HEADER(3)\r
HASH3_CALC;\r
hash = p->hash;\r
- curMatch = hash[kFix3HashSize + hv];\r
+ curMatch = (hash + kFix3HashSize)[hv];\r
hash[h2] =\r
- hash[kFix3HashSize + hv] = p->pos;\r
+ (hash + kFix3HashSize)[hv] = p->pos;\r
SKIP_FOOTER\r
}\r
while (--num != 0);\r
SKIP_HEADER(4)\r
HASH4_CALC;\r
hash = p->hash;\r
- curMatch = hash[kFix4HashSize + hv];\r
+ curMatch = (hash + kFix4HashSize)[hv];\r
hash[ h2] =\r
- hash[kFix3HashSize + h3] =\r
- hash[kFix4HashSize + hv] = p->pos;\r
+ (hash + kFix3HashSize)[h3] =\r
+ (hash + kFix4HashSize)[hv] = p->pos;\r
SKIP_FOOTER\r
}\r
while (--num != 0);\r
SKIP_HEADER(5)\r
HASH5_CALC;\r
hash = p->hash;\r
- curMatch = hash[kFix5HashSize + hv];\r
+ curMatch = (hash + kFix5HashSize)[hv];\r
hash[ h2] =\r
- hash[kFix3HashSize + h3] =\r
- hash[kFix4HashSize + h4] =\r
- hash[kFix5HashSize + hv] = p->pos;\r
+ (hash + kFix3HashSize)[h3] =\r
+ (hash + kFix4HashSize)[h4] =\r
+ (hash + kFix5HashSize)[hv] = p->pos;\r
SKIP_FOOTER\r
}\r
while (--num != 0);\r
SKIP_HEADER(4)\r
HASH4_CALC;\r
hash = p->hash;\r
- curMatch = hash[kFix4HashSize + hv];\r
+ curMatch = (hash + kFix4HashSize)[hv];\r
hash[ h2] =\r
- hash[kFix3HashSize + h3] =\r
- hash[kFix4HashSize + hv] = p->pos;\r
+ (hash + kFix3HashSize)[h3] =\r
+ (hash + kFix4HashSize)[hv] = p->pos;\r
p->son[p->cyclicBufferPos] = curMatch;\r
MOVE_POS\r
}\r
SKIP_HEADER(5)\r
HASH5_CALC;\r
hash = p->hash;\r
- curMatch = p->hash[kFix5HashSize + hv];\r
+ curMatch = (hash + kFix5HashSize)[hv];
hash[ h2] =\r
- hash[kFix3HashSize + h3] =\r
- hash[kFix4HashSize + h4] =\r
- hash[kFix5HashSize + hv] = p->pos;\r
+ (hash + kFix3HashSize)[h3] =\r
+ (hash + kFix4HashSize)[h4] =\r
+ (hash + kFix5HashSize)[hv] = p->pos;\r
p->son[p->cyclicBufferPos] = curMatch;\r
MOVE_POS\r
}\r
/* LzFind.h -- Match finder for LZ algorithms\r
-2015-10-15 : Igor Pavlov : Public domain */\r
+2017-06-10 : Igor Pavlov : Public domain */\r
\r
#ifndef __LZ_FIND_H\r
#define __LZ_FIND_H\r
SRes result;\r
UInt32 crc[256];\r
size_t numRefs;\r
+\r
+ UInt64 expectedDataSize;\r
} CMatchFinder;\r
\r
#define Inline_MatchFinder_GetPointerToCurrentPos(p) ((p)->buffer)\r
*/\r
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,\r
UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,\r
- ISzAlloc *alloc);\r
-void MatchFinder_Free(CMatchFinder *p, ISzAlloc *alloc);\r
+ ISzAllocPtr alloc);\r
+void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc);\r
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems);\r
void MatchFinder_ReduceOffsets(CMatchFinder *p, UInt32 subValue);\r
\r
\r
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder *vTable);\r
\r
-void MatchFinder_Init_2(CMatchFinder *p, int readData);\r
+void MatchFinder_Init_LowHash(CMatchFinder *p);\r
+void MatchFinder_Init_HighHash(CMatchFinder *p);\r
+void MatchFinder_Init_3(CMatchFinder *p, int readData);\r
void MatchFinder_Init(CMatchFinder *p);\r
\r
UInt32 Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances);\r
/* LzmaDec.c -- LZMA Decoder\r
-2016-05-16 : Igor Pavlov : Public domain */\r
+2018-02-28 : Igor Pavlov : Public domain */\r
\r
#include "Precomp.h"\r
\r
+/* #include "CpuArch.h" */\r
#include "LzmaDec.h"\r
\r
#ifndef EFIAPI\r
#define GET_BIT2(p, i, A0, A1) IF_BIT_0(p) \\r
{ UPDATE_0(p); i = (i + i); A0; } else \\r
{ UPDATE_1(p); i = (i + i) + 1; A1; }\r
-#define GET_BIT(p, i) GET_BIT2(p, i, ; , ;)\r
\r
-#define TREE_GET_BIT(probs, i) { GET_BIT((probs + i), i); }\r
+#define TREE_GET_BIT(probs, i) { GET_BIT2(probs + i, i, ;, ;); }\r
+\r
+#define REV_BIT(p, i, A0, A1) IF_BIT_0(p + i) \\r
+ { UPDATE_0(p + i); A0; } else \\r
+ { UPDATE_1(p + i); A1; }\r
+#define REV_BIT_VAR( p, i, m) REV_BIT(p, i, i += m; m += m, m += m; i += m; )\r
+#define REV_BIT_CONST(p, i, m) REV_BIT(p, i, i += m; , i += m * 2; )\r
+#define REV_BIT_LAST( p, i, m) REV_BIT(p, i, i -= m , ; )\r
+\r
#define TREE_DECODE(probs, limit, i) \\r
{ i = 1; do { TREE_GET_BIT(probs, i); } while (i < limit); i -= limit; }\r
\r
i -= 0x40; }\r
#endif\r
\r
-#define NORMAL_LITER_DEC GET_BIT(prob + symbol, symbol)\r
+#define NORMAL_LITER_DEC TREE_GET_BIT(prob, symbol)\r
#define MATCHED_LITER_DEC \\r
- matchByte <<= 1; \\r
- bit = (matchByte & offs); \\r
- probLit = prob + offs + bit + symbol; \\r
- GET_BIT2(probLit, symbol, offs &= ~bit, offs &= bit)\r
+ matchByte += matchByte; \\r
+ bit = offs; \\r
+ offs &= matchByte; \\r
+ probLit = prob + (offs + bit + symbol); \\r
+ GET_BIT2(probLit, symbol, offs ^= bit; , ;)\r
+\r
+\r
\r
#define NORMALIZE_CHECK if (range < kTopValue) { if (buf >= bufLimit) return DUMMY_ERROR; range <<= 8; code = (code << 8) | (*buf++); }\r
\r
{ i = 1; do { GET_BIT_CHECK(probs + i, i) } while (i < limit); i -= limit; }\r
\r
\r
+#define REV_BIT_CHECK(p, i, m) IF_BIT_0_CHECK(p + i) \\r
+ { UPDATE_0_CHECK; i += m; m += m; } else \\r
+ { UPDATE_1_CHECK; m += m; i += m; }\r
+\r
+\r
#define kNumPosBitsMax 4\r
#define kNumPosStatesMax (1 << kNumPosBitsMax)\r
\r
#define kLenNumLowBits 3\r
#define kLenNumLowSymbols (1 << kLenNumLowBits)\r
-#define kLenNumMidBits 3\r
-#define kLenNumMidSymbols (1 << kLenNumMidBits)\r
#define kLenNumHighBits 8\r
#define kLenNumHighSymbols (1 << kLenNumHighBits)\r
\r
-#define LenChoice 0\r
-#define LenChoice2 (LenChoice + 1)\r
-#define LenLow (LenChoice2 + 1)\r
-#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))\r
-#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))\r
+#define LenLow 0\r
+#define LenHigh (LenLow + 2 * (kNumPosStatesMax << kLenNumLowBits))\r
#define kNumLenProbs (LenHigh + kLenNumHighSymbols)\r
\r
+#define LenChoice LenLow\r
+#define LenChoice2 (LenLow + (1 << kLenNumLowBits))\r
\r
#define kNumStates 12\r
+#define kNumStates2 16\r
#define kNumLitStates 7\r
\r
#define kStartPosModelIndex 4\r
#define kAlignTableSize (1 << kNumAlignBits)\r
\r
#define kMatchMinLen 2\r
-#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols + kLenNumMidSymbols + kLenNumHighSymbols)\r
+#define kMatchSpecLenStart (kMatchMinLen + kLenNumLowSymbols * 2 + kLenNumHighSymbols)\r
\r
-#define IsMatch 0\r
-#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))\r
+/* External ASM code needs same CLzmaProb array layout. So don't change it. */\r
+\r
+/* (probs_1664) is faster and better for code size at some platforms */\r
+/*\r
+#ifdef MY_CPU_X86_OR_AMD64\r
+*/\r
+#define kStartOffset 1664\r
+#define GET_PROBS p->probs_1664\r
+/*\r
+#define GET_PROBS p->probs + kStartOffset\r
+#else\r
+#define kStartOffset 0\r
+#define GET_PROBS p->probs\r
+#endif\r
+*/\r
+\r
+#define SpecPos (-kStartOffset)\r
+#define IsRep0Long (SpecPos + kNumFullDistances)\r
+#define RepLenCoder (IsRep0Long + (kNumStates2 << kNumPosBitsMax))\r
+#define LenCoder (RepLenCoder + kNumLenProbs)\r
+#define IsMatch (LenCoder + kNumLenProbs)\r
+#define Align (IsMatch + (kNumStates2 << kNumPosBitsMax))\r
+#define IsRep (Align + kAlignTableSize)\r
#define IsRepG0 (IsRep + kNumStates)\r
#define IsRepG1 (IsRepG0 + kNumStates)\r
#define IsRepG2 (IsRepG1 + kNumStates)\r
-#define IsRep0Long (IsRepG2 + kNumStates)\r
-#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))\r
-#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))\r
-#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)\r
-#define LenCoder (Align + kAlignTableSize)\r
-#define RepLenCoder (LenCoder + kNumLenProbs)\r
-#define Literal (RepLenCoder + kNumLenProbs)\r
-\r
-#define LZMA_BASE_SIZE 1846\r
-#define LZMA_LIT_SIZE 0x300\r
+#define PosSlot (IsRepG2 + kNumStates)\r
+#define Literal (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))\r
+#define NUM_BASE_PROBS (Literal + kStartOffset)\r
\r
-#if Literal != LZMA_BASE_SIZE\r
-StopCompilingDueBUG\r
+#if Align != 0 && kStartOffset != 0\r
+ #error Stop_Compiling_Bad_LZMA_kAlign\r
#endif\r
\r
-#define LzmaProps_GetNumProbs(p) (Literal + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))\r
+#if NUM_BASE_PROBS != 1984\r
+ #error Stop_Compiling_Bad_LZMA_PROBS\r
+#endif\r
+\r
+\r
+#define LZMA_LIT_SIZE 0x300\r
+\r
+#define LzmaProps_GetNumProbs(p) (NUM_BASE_PROBS + ((UInt32)LZMA_LIT_SIZE << ((p)->lc + (p)->lp)))\r
+\r
+\r
+#define CALC_POS_STATE(processedPos, pbMask) (((processedPos) & (pbMask)) << 4)\r
+#define COMBINED_PS_STATE (posState + state)\r
+#define GET_LEN_STATE (posState)\r
\r
#define LZMA_DIC_MIN (1 << 12)\r
\r
-/* First LZMA-symbol is always decoded.\r
-And it decodes new LZMA-symbols while (buf < bufLimit), but "buf" is without last normalization\r
+/*\r
+p->remainLen : shows status of LZMA decoder:\r
+ < kMatchSpecLenStart : normal remain\r
+ = kMatchSpecLenStart : finished\r
+ = kMatchSpecLenStart + 1 : need init range coder\r
+ = kMatchSpecLenStart + 2 : need init range coder and state\r
+*/\r
+\r
+/* ---------- LZMA_DECODE_REAL ---------- */\r
+/*\r
+LzmaDec_DecodeReal_3() can be implemented in external ASM file.\r
+3 - is the code compatibility version of that function for check at link time.\r
+*/\r
+\r
+#define LZMA_DECODE_REAL LzmaDec_DecodeReal_3\r
+\r
+/*\r
+LZMA_DECODE_REAL()\r
+In:\r
+ RangeCoder is normalized\r
+ if (p->dicPos == limit)\r
+ {\r
+ LzmaDec_TryDummy() was called before to exclude LITERAL and MATCH-REP cases.\r
+ So first symbol can be only MATCH-NON-REP. And if that MATCH-NON-REP symbol\r
+    is not END_OF_PAYLOAD_MARKER, then function returns error code.\r
+ }\r
+\r
+Processing:\r
+ first LZMA symbol will be decoded in any case\r
+ All checks for limits are at the end of main loop,\r
+ It will decode new LZMA-symbols while (p->buf < bufLimit && dicPos < limit),\r
+ RangeCoder is still without last normalization when (p->buf < bufLimit) is being checked.\r
+\r
Out:\r
+ RangeCoder is normalized\r
Result:\r
SZ_OK - OK\r
SZ_ERROR_DATA - Error\r
p->remainLen:\r
< kMatchSpecLenStart : normal remain\r
= kMatchSpecLenStart : finished\r
- = kMatchSpecLenStart + 1 : Flush marker (unused now)\r
- = kMatchSpecLenStart + 2 : State Init Marker (unused now)\r
*/\r
\r
-static int MY_FAST_CALL LzmaDec_DecodeReal(CLzmaDec *p, SizeT limit, const Byte *bufLimit)\r
-{\r
- CLzmaProb *probs = p->probs;\r
\r
- unsigned state = p->state;\r
+#ifdef _LZMA_DEC_OPT\r
+\r
+int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit);\r
+\r
+#else\r
+\r
+static\r
+int MY_FAST_CALL LZMA_DECODE_REAL(CLzmaDec *p, SizeT limit, const Byte *bufLimit)\r
+{\r
+ CLzmaProb *probs = GET_PROBS;\r
+ unsigned state = (unsigned)p->state;\r
UInt32 rep0 = p->reps[0], rep1 = p->reps[1], rep2 = p->reps[2], rep3 = p->reps[3];\r
unsigned pbMask = ((unsigned)1 << (p->prop.pb)) - 1;\r
- unsigned lpMask = ((unsigned)1 << (p->prop.lp)) - 1;\r
unsigned lc = p->prop.lc;\r
+ unsigned lpMask = ((unsigned)0x100 << p->prop.lp) - ((unsigned)0x100 >> lc);\r
\r
Byte *dic = p->dic;\r
SizeT dicBufSize = p->dicBufSize;\r
CLzmaProb *prob;\r
UInt32 bound;\r
unsigned ttt;\r
- unsigned posState = processedPos & pbMask;\r
+ unsigned posState = CALC_POS_STATE(processedPos, pbMask);\r
\r
- prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;\r
+ prob = probs + IsMatch + COMBINED_PS_STATE;\r
IF_BIT_0(prob)\r
{\r
unsigned symbol;\r
UPDATE_0(prob);\r
prob = probs + Literal;\r
if (processedPos != 0 || checkDicSize != 0)\r
- prob += ((UInt32)LZMA_LIT_SIZE * (((processedPos & lpMask) << lc) +\r
- (dic[(dicPos == 0 ? dicBufSize : dicPos) - 1] >> (8 - lc))));\r
+ prob += (UInt32)3 * ((((processedPos << 8) + dic[(dicPos == 0 ? dicBufSize : dicPos) - 1]) & lpMask) << lc);\r
processedPos++;\r
\r
if (state < kNumLitStates)\r
else\r
{\r
UPDATE_1(prob);\r
+ /*\r
+ // that case was checked before with kBadRepCode\r
if (checkDicSize == 0 && processedPos == 0)\r
return SZ_ERROR_DATA;\r
+ */\r
prob = probs + IsRepG0 + state;\r
IF_BIT_0(prob)\r
{\r
UPDATE_0(prob);\r
- prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;\r
+ prob = probs + IsRep0Long + COMBINED_PS_STATE;\r
IF_BIT_0(prob)\r
{\r
UPDATE_0(prob);\r
IF_BIT_0(probLen)\r
{\r
UPDATE_0(probLen);\r
- probLen = prob + LenLow + (posState << kLenNumLowBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE;\r
offset = 0;\r
lim = (1 << kLenNumLowBits);\r
}\r
IF_BIT_0(probLen)\r
{\r
UPDATE_0(probLen);\r
- probLen = prob + LenMid + (posState << kLenNumMidBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);\r
offset = kLenNumLowSymbols;\r
- lim = (1 << kLenNumMidBits);\r
+ lim = (1 << kLenNumLowBits);\r
}\r
else\r
{\r
UPDATE_1(probLen);\r
probLen = prob + LenHigh;\r
- offset = kLenNumLowSymbols + kLenNumMidSymbols;\r
+ offset = kLenNumLowSymbols * 2;\r
lim = (1 << kLenNumHighBits);\r
}\r
}\r
IF_BIT_0(probLen)\r
{\r
UPDATE_0(probLen);\r
- probLen = prob + LenLow + (posState << kLenNumLowBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE;\r
len = 1;\r
TREE_GET_BIT(probLen, len);\r
TREE_GET_BIT(probLen, len);\r
IF_BIT_0(probLen)\r
{\r
UPDATE_0(probLen);\r
- probLen = prob + LenMid + (posState << kLenNumMidBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);\r
len = 1;\r
TREE_GET_BIT(probLen, len);\r
TREE_GET_BIT(probLen, len);\r
UPDATE_1(probLen);\r
probLen = prob + LenHigh;\r
TREE_DECODE(probLen, (1 << kLenNumHighBits), len);\r
- len += kLenNumLowSymbols + kLenNumMidSymbols;\r
+ len += kLenNumLowSymbols * 2;\r
}\r
}\r
}\r
if (posSlot < kEndPosModelIndex)\r
{\r
distance <<= numDirectBits;\r
- prob = probs + SpecPos + distance - posSlot - 1;\r
+ prob = probs + SpecPos;\r
{\r
- UInt32 mask = 1;\r
- unsigned i = 1;\r
+ UInt32 m = 1;\r
+ distance++;\r
do\r
{\r
- GET_BIT2(prob + i, i, ; , distance |= mask);\r
- mask <<= 1;\r
+ REV_BIT_VAR(prob, distance, m);\r
}\r
- while (--numDirectBits != 0);\r
+ while (--numDirectBits);\r
+ distance -= m;\r
}\r
}\r
else\r
}\r
*/\r
}\r
- while (--numDirectBits != 0);\r
+ while (--numDirectBits);\r
prob = probs + Align;\r
distance <<= kNumAlignBits;\r
{\r
unsigned i = 1;\r
- GET_BIT2(prob + i, i, ; , distance |= 1);\r
- GET_BIT2(prob + i, i, ; , distance |= 2);\r
- GET_BIT2(prob + i, i, ; , distance |= 4);\r
- GET_BIT2(prob + i, i, ; , distance |= 8);\r
+ REV_BIT_CONST(prob, i, 1);\r
+ REV_BIT_CONST(prob, i, 2);\r
+ REV_BIT_CONST(prob, i, 4);\r
+ REV_BIT_LAST (prob, i, 8);\r
+ distance |= i;\r
}\r
if (distance == (UInt32)0xFFFFFFFF)\r
{\r
- len += kMatchSpecLenStart;\r
+ len = kMatchSpecLenStart;\r
state -= kNumStates;\r
break;\r
}\r
rep2 = rep1;\r
rep1 = rep0;\r
rep0 = distance + 1;\r
- if (checkDicSize == 0)\r
- {\r
- if (distance >= processedPos)\r
- {\r
- p->dicPos = dicPos;\r
- return SZ_ERROR_DATA;\r
- }\r
- }\r
- else if (distance >= checkDicSize)\r
+ state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;\r
+ if (distance >= (checkDicSize == 0 ? processedPos: checkDicSize))\r
{\r
p->dicPos = dicPos;\r
return SZ_ERROR_DATA;\r
}\r
- state = (state < kNumStates + kNumLitStates) ? kNumLitStates : kNumLitStates + 3;\r
}\r
\r
len += kMatchMinLen;\r
\r
return SZ_OK;\r
}\r
+#endif\r
\r
static void MY_FAST_CALL LzmaDec_WriteRem(CLzmaDec *p, SizeT limit)\r
{\r
Byte *dic = p->dic;\r
SizeT dicPos = p->dicPos;\r
SizeT dicBufSize = p->dicBufSize;\r
- unsigned len = p->remainLen;\r
+ unsigned len = (unsigned)p->remainLen;\r
SizeT rep0 = p->reps[0]; /* we use SizeT to avoid the BUG of VC14 for AMD64 */\r
SizeT rem = limit - dicPos;\r
if (rem < len)\r
}\r
}\r
\r
+\r
+#define kRange0 0xFFFFFFFF\r
+#define kBound0 ((kRange0 >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1))\r
+#define kBadRepCode (kBound0 + (((kRange0 - kBound0) >> kNumBitModelTotalBits) << (kNumBitModelTotalBits - 1)))\r
+#if kBadRepCode != (0xC0000000 - 0x400)\r
+ #error Stop_Compiling_Bad_LZMA_Check\r
+#endif\r
+\r
static int MY_FAST_CALL LzmaDec_DecodeReal2(CLzmaDec *p, SizeT limit, const Byte *bufLimit)\r
{\r
do\r
UInt32 rem = p->prop.dicSize - p->processedPos;\r
if (limit - p->dicPos > rem)\r
limit2 = p->dicPos + rem;\r
+\r
+ if (p->processedPos == 0)\r
+ if (p->code >= kBadRepCode)\r
+ return SZ_ERROR_DATA;\r
}\r
- \r
- RINOK(LzmaDec_DecodeReal(p, limit2, bufLimit));\r
+\r
+ RINOK(LZMA_DECODE_REAL(p, limit2, bufLimit));\r
\r
if (p->checkDicSize == 0 && p->processedPos >= p->prop.dicSize)\r
p->checkDicSize = p->prop.dicSize;\r
}\r
while (p->dicPos < limit && p->buf < bufLimit && p->remainLen < kMatchSpecLenStart);\r
\r
- if (p->remainLen > kMatchSpecLenStart)\r
- p->remainLen = kMatchSpecLenStart;\r
-\r
return 0;\r
}\r
\r
UInt32 range = p->range;\r
UInt32 code = p->code;\r
const Byte *bufLimit = buf + inSize;\r
- const CLzmaProb *probs = p->probs;\r
- unsigned state = p->state;\r
+ const CLzmaProb *probs = GET_PROBS;\r
+ unsigned state = (unsigned)p->state;\r
ELzmaDummy res;\r
\r
{\r
const CLzmaProb *prob;\r
UInt32 bound;\r
unsigned ttt;\r
- unsigned posState = (p->processedPos) & ((1 << p->prop.pb) - 1);\r
+ unsigned posState = CALC_POS_STATE(p->processedPos, (1 << p->prop.pb) - 1);\r
\r
- prob = probs + IsMatch + (state << kNumPosBitsMax) + posState;\r
+ prob = probs + IsMatch + COMBINED_PS_STATE;\r
IF_BIT_0_CHECK(prob)\r
{\r
UPDATE_0_CHECK\r
{\r
unsigned bit;\r
const CLzmaProb *probLit;\r
- matchByte <<= 1;\r
- bit = (matchByte & offs);\r
- probLit = prob + offs + bit + symbol;\r
- GET_BIT2_CHECK(probLit, symbol, offs &= ~bit, offs &= bit)\r
+ matchByte += matchByte;\r
+ bit = offs;\r
+ offs &= matchByte;\r
+ probLit = prob + (offs + bit + symbol);\r
+ GET_BIT2_CHECK(probLit, symbol, offs ^= bit; , ; )\r
}\r
while (symbol < 0x100);\r
}\r
IF_BIT_0_CHECK(prob)\r
{\r
UPDATE_0_CHECK;\r
- prob = probs + IsRep0Long + (state << kNumPosBitsMax) + posState;\r
+ prob = probs + IsRep0Long + COMBINED_PS_STATE;\r
IF_BIT_0_CHECK(prob)\r
{\r
UPDATE_0_CHECK;\r
IF_BIT_0_CHECK(probLen)\r
{\r
UPDATE_0_CHECK;\r
- probLen = prob + LenLow + (posState << kLenNumLowBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE;\r
offset = 0;\r
limit = 1 << kLenNumLowBits;\r
}\r
IF_BIT_0_CHECK(probLen)\r
{\r
UPDATE_0_CHECK;\r
- probLen = prob + LenMid + (posState << kLenNumMidBits);\r
+ probLen = prob + LenLow + GET_LEN_STATE + (1 << kLenNumLowBits);\r
offset = kLenNumLowSymbols;\r
- limit = 1 << kLenNumMidBits;\r
+ limit = 1 << kLenNumLowBits;\r
}\r
else\r
{\r
UPDATE_1_CHECK;\r
probLen = prob + LenHigh;\r
- offset = kLenNumLowSymbols + kLenNumMidSymbols;\r
+ offset = kLenNumLowSymbols * 2;\r
limit = 1 << kLenNumHighBits;\r
}\r
}\r
{\r
unsigned posSlot;\r
prob = probs + PosSlot +\r
- ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<\r
+ ((len < kNumLenToPosStates - 1 ? len : kNumLenToPosStates - 1) <<\r
kNumPosSlotBits);\r
TREE_DECODE_CHECK(prob, 1 << kNumPosSlotBits, posSlot);\r
if (posSlot >= kStartPosModelIndex)\r
\r
if (posSlot < kEndPosModelIndex)\r
{\r
- prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits) - posSlot - 1;\r
+ prob = probs + SpecPos + ((2 | (posSlot & 1)) << numDirectBits);\r
}\r
else\r
{\r
code -= range & (((code - range) >> 31) - 1);\r
/* if (code >= range) code -= range; */\r
}\r
- while (--numDirectBits != 0);\r
+ while (--numDirectBits);\r
prob = probs + Align;\r
numDirectBits = kNumAlignBits;\r
}\r
{\r
unsigned i = 1;\r
+ unsigned m = 1;\r
do\r
{\r
- GET_BIT_CHECK(prob + i, i);\r
+ REV_BIT_CHECK(prob, i, m);\r
}\r
- while (--numDirectBits != 0);\r
+ while (--numDirectBits);\r
}\r
}\r
}\r
\r
void LzmaDec_InitDicAndState(CLzmaDec *p, Bool initDic, Bool initState)\r
{\r
- p->needFlush = 1;\r
- p->remainLen = 0;\r
+ p->remainLen = kMatchSpecLenStart + 1;\r
p->tempBufSize = 0;\r
\r
if (initDic)\r
{\r
p->processedPos = 0;\r
p->checkDicSize = 0;\r
- p->needInitState = 1;\r
+ p->remainLen = kMatchSpecLenStart + 2;\r
}\r
if (initState)\r
- p->needInitState = 1;\r
+ p->remainLen = kMatchSpecLenStart + 2;\r
}\r
\r
void LzmaDec_Init(CLzmaDec *p)\r
LzmaDec_InitDicAndState(p, True, True);\r
}\r
\r
-static void LzmaDec_InitStateReal(CLzmaDec *p)\r
-{\r
- SizeT numProbs = LzmaProps_GetNumProbs(&p->prop);\r
- SizeT i;\r
- CLzmaProb *probs = p->probs;\r
- for (i = 0; i < numProbs; i++)\r
- probs[i] = kBitModelTotal >> 1;\r
- p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;\r
- p->state = 0;\r
- p->needInitState = 0;\r
-}\r
\r
SRes LzmaDec_DecodeToDic(CLzmaDec *p, SizeT dicLimit, const Byte *src, SizeT *srcLen,\r
ELzmaFinishMode finishMode, ELzmaStatus *status)\r
{\r
SizeT inSize = *srcLen;\r
(*srcLen) = 0;\r
- LzmaDec_WriteRem(p, dicLimit);\r
\r
*status = LZMA_STATUS_NOT_SPECIFIED;\r
\r
- while (p->remainLen != kMatchSpecLenStart)\r
+ if (p->remainLen > kMatchSpecLenStart)\r
{\r
- int checkEndMarkNow;\r
+ for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)\r
+ p->tempBuf[p->tempBufSize++] = *src++;\r
+ if (p->tempBufSize != 0 && p->tempBuf[0] != 0)\r
+ return SZ_ERROR_DATA;\r
+ if (p->tempBufSize < RC_INIT_SIZE)\r
+ {\r
+ *status = LZMA_STATUS_NEEDS_MORE_INPUT;\r
+ return SZ_OK;\r
+ }\r
+ p->code =\r
+ ((UInt32)p->tempBuf[1] << 24)\r
+ | ((UInt32)p->tempBuf[2] << 16)\r
+ | ((UInt32)p->tempBuf[3] << 8)\r
+ | ((UInt32)p->tempBuf[4]);\r
+ p->range = 0xFFFFFFFF;\r
+ p->tempBufSize = 0;\r
+\r
+ if (p->remainLen > kMatchSpecLenStart + 1)\r
+ {\r
+ SizeT numProbs = LzmaProps_GetNumProbs(&p->prop);\r
+ SizeT i;\r
+ CLzmaProb *probs = p->probs;\r
+ for (i = 0; i < numProbs; i++)\r
+ probs[i] = kBitModelTotal >> 1;\r
+ p->reps[0] = p->reps[1] = p->reps[2] = p->reps[3] = 1;\r
+ p->state = 0;\r
+ }\r
\r
- if (p->needFlush)\r
- {\r
- for (; inSize > 0 && p->tempBufSize < RC_INIT_SIZE; (*srcLen)++, inSize--)\r
- p->tempBuf[p->tempBufSize++] = *src++;\r
- if (p->tempBufSize < RC_INIT_SIZE)\r
- {\r
- *status = LZMA_STATUS_NEEDS_MORE_INPUT;\r
- return SZ_OK;\r
- }\r
- if (p->tempBuf[0] != 0)\r
- return SZ_ERROR_DATA;\r
- p->code =\r
- ((UInt32)p->tempBuf[1] << 24)\r
- | ((UInt32)p->tempBuf[2] << 16)\r
- | ((UInt32)p->tempBuf[3] << 8)\r
- | ((UInt32)p->tempBuf[4]);\r
- p->range = 0xFFFFFFFF;\r
- p->needFlush = 0;\r
- p->tempBufSize = 0;\r
- }\r
+ p->remainLen = 0;\r
+ }\r
+\r
+ LzmaDec_WriteRem(p, dicLimit);\r
+\r
+ while (p->remainLen != kMatchSpecLenStart)\r
+ {\r
+ int checkEndMarkNow = 0;\r
\r
- checkEndMarkNow = 0;\r
if (p->dicPos >= dicLimit)\r
{\r
if (p->remainLen == 0 && p->code == 0)\r
checkEndMarkNow = 1;\r
}\r
\r
- if (p->needInitState)\r
- LzmaDec_InitStateReal(p);\r
- \r
if (p->tempBufSize == 0)\r
{\r
SizeT processed;\r
p->tempBufSize = 0;\r
}\r
}\r
- if (p->code == 0)\r
- *status = LZMA_STATUS_FINISHED_WITH_MARK;\r
- return (p->code == 0) ? SZ_OK : SZ_ERROR_DATA;\r
+ \r
+ if (p->code != 0)\r
+ return SZ_ERROR_DATA;\r
+ *status = LZMA_STATUS_FINISHED_WITH_MARK;\r
+ return SZ_OK;\r
}\r
\r
+\r
SRes LzmaDec_DecodeToBuf(CLzmaDec *p, Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen, ELzmaFinishMode finishMode, ELzmaStatus *status)\r
{\r
SizeT outSize = *destLen;\r
}\r
}\r
\r
-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc)\r
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc)\r
{\r
- alloc->Free(alloc, p->probs);\r
+ ISzAlloc_Free(alloc, p->probs);\r
p->probs = NULL;\r
}\r
\r
-static void LzmaDec_FreeDict(CLzmaDec *p, ISzAlloc *alloc)\r
+static void LzmaDec_FreeDict(CLzmaDec *p, ISzAllocPtr alloc)\r
{\r
- alloc->Free(alloc, p->dic);\r
+ ISzAlloc_Free(alloc, p->dic);\r
p->dic = NULL;\r
}\r
\r
-void LzmaDec_Free(CLzmaDec *p, ISzAlloc *alloc)\r
+void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc)\r
{\r
LzmaDec_FreeProbs(p, alloc);\r
LzmaDec_FreeDict(p, alloc);\r
if (d >= (9 * 5 * 5))\r
return SZ_ERROR_UNSUPPORTED;\r
\r
- p->lc = d % 9;\r
+ p->lc = (Byte)(d % 9);\r
d /= 9;\r
- p->pb = d / 5;\r
- p->lp = d % 5;\r
+ p->pb = (Byte)(d / 5);\r
+ p->lp = (Byte)(d % 5);\r
\r
return SZ_OK;\r
}\r
\r
-static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAlloc *alloc)\r
+static SRes LzmaDec_AllocateProbs2(CLzmaDec *p, const CLzmaProps *propNew, ISzAllocPtr alloc)\r
{\r
UInt32 numProbs = LzmaProps_GetNumProbs(propNew);\r
if (!p->probs || numProbs != p->numProbs)\r
{\r
LzmaDec_FreeProbs(p, alloc);\r
- p->probs = (CLzmaProb *)alloc->Alloc(alloc, numProbs * sizeof(CLzmaProb));\r
- p->numProbs = numProbs;\r
+ p->probs = (CLzmaProb *)ISzAlloc_Alloc(alloc, numProbs * sizeof(CLzmaProb));\r
if (!p->probs)\r
return SZ_ERROR_MEM;\r
+ p->probs_1664 = p->probs + 1664;\r
+ p->numProbs = numProbs;\r
}\r
return SZ_OK;\r
}\r
\r
-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)\r
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)\r
{\r
CLzmaProps propNew;\r
RINOK(LzmaProps_Decode(&propNew, props, propsSize));\r
return SZ_OK;\r
}\r
\r
-SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc)\r
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc)\r
{\r
CLzmaProps propNew;\r
SizeT dicBufSize;\r
if (!p->dic || dicBufSize != p->dicBufSize)\r
{\r
LzmaDec_FreeDict(p, alloc);\r
- p->dic = (Byte *)alloc->Alloc(alloc, dicBufSize);\r
+ p->dic = (Byte *)ISzAlloc_Alloc(alloc, dicBufSize);\r
if (!p->dic)\r
{\r
LzmaDec_FreeProbs(p, alloc);\r
\r
SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,\r
const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,\r
- ELzmaStatus *status, ISzAlloc *alloc)\r
+ ELzmaStatus *status, ISzAllocPtr alloc)\r
{\r
CLzmaDec p;\r
SRes res;\r
/* LzmaDec.h -- LZMA Decoder\r
-2013-01-18 : Igor Pavlov : Public domain */\r
+2018-04-21 : Igor Pavlov : Public domain */\r
\r
#ifndef __LZMA_DEC_H\r
#define __LZMA_DEC_H\r
/* _LZMA_PROB32 can increase the speed on some CPUs,\r
but memory usage for CLzmaDec::probs will be doubled in that case */\r
\r
+typedef\r
#ifdef _LZMA_PROB32\r
-#define CLzmaProb UInt32\r
+ UInt32\r
#else\r
-#define CLzmaProb UInt16\r
+ UInt16\r
#endif\r
+ CLzmaProb;\r
\r
\r
/* ---------- LZMA Properties ---------- */\r
\r
typedef struct _CLzmaProps\r
{\r
- unsigned lc, lp, pb;\r
+ Byte lc;\r
+ Byte lp;\r
+ Byte pb;\r
+ Byte _pad_;\r
UInt32 dicSize;\r
} CLzmaProps;\r
\r
\r
typedef struct\r
{\r
+ /* Don't change this structure. ASM code can use it. */\r
CLzmaProps prop;\r
CLzmaProb *probs;\r
+ CLzmaProb *probs_1664;\r
Byte *dic;\r
- const Byte *buf;\r
- UInt32 range, code;\r
- SizeT dicPos;\r
SizeT dicBufSize;\r
+ SizeT dicPos;\r
+ const Byte *buf;\r
+ UInt32 range;\r
+ UInt32 code;\r
UInt32 processedPos;\r
UInt32 checkDicSize;\r
- unsigned state;\r
UInt32 reps[4];\r
- unsigned remainLen;\r
- int needFlush;\r
- int needInitState;\r
+ UInt32 state;\r
+ UInt32 remainLen;\r
+\r
UInt32 numProbs;\r
unsigned tempBufSize;\r
Byte tempBuf[LZMA_REQUIRED_INPUT_MAX];\r
} CLzmaDec;\r
\r
-#define LzmaDec_Construct(p) { (p)->dic = 0; (p)->probs = 0; }\r
+#define LzmaDec_Construct(p) { (p)->dic = NULL; (p)->probs = NULL; }\r
\r
void LzmaDec_Init(CLzmaDec *p);\r
\r
/* There are two types of LZMA streams:\r
- 0) Stream with end mark. That end mark adds about 6 bytes to compressed size.\r
- 1) Stream without end mark. You must know exact uncompressed size to decompress such stream. */\r
+ - Stream with end mark. That end mark adds about 6 bytes to compressed size.\r
+ - Stream without end mark. You must know exact uncompressed size to decompress such stream. */\r
\r
typedef enum\r
{\r
SZ_ERROR_UNSUPPORTED - Unsupported properties\r
*/\r
\r
-SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAlloc *alloc);\r
-void LzmaDec_FreeProbs(CLzmaDec *p, ISzAlloc *alloc);\r
+SRes LzmaDec_AllocateProbs(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);\r
+void LzmaDec_FreeProbs(CLzmaDec *p, ISzAllocPtr alloc);\r
\r
-SRes LzmaDec_Allocate(CLzmaDec *state, const Byte *prop, unsigned propsSize, ISzAlloc *alloc);\r
-void LzmaDec_Free(CLzmaDec *state, ISzAlloc *alloc);\r
+SRes LzmaDec_Allocate(CLzmaDec *p, const Byte *props, unsigned propsSize, ISzAllocPtr alloc);\r
+void LzmaDec_Free(CLzmaDec *p, ISzAllocPtr alloc);\r
\r
/* ---------- Dictionary Interface ---------- */\r
\r
You must work with CLzmaDec variables directly in this interface.\r
\r
STEPS:\r
- LzmaDec_Constr()\r
+ LzmaDec_Construct()\r
LzmaDec_Allocate()\r
for (each new stream)\r
{\r
\r
SRes LzmaDecode(Byte *dest, SizeT *destLen, const Byte *src, SizeT *srcLen,\r
const Byte *propData, unsigned propSize, ELzmaFinishMode finishMode,\r
- ELzmaStatus *status, ISzAlloc *alloc);\r
+ ELzmaStatus *status, ISzAllocPtr alloc);\r
\r
EXTERN_C_END\r
\r
HISTORY of the LZMA SDK\r
-----------------------\r
\r
+18.05 2018-04-30\r
+-------------------------\r
+- The speed for LZMA/LZMA2 compressing was increased \r
+ by 8% for fastest/fast compression levels and \r
+ by 3% for normal/maximum compression levels.\r
+- Previous versions of 7-Zip could work incorrectly in "Large memory pages" mode in\r
+ Windows 10 because of some BUG with "Large Pages" in Windows 10. \r
+ Now 7-Zip doesn't use "Large Pages" on Windows 10 up to revision 1709 (16299).\r
+- The BUG was fixed in Lzma2Enc.c\r
+  Lzma2Enc_Encode2() function worked incorrectly,\r
+ if (inStream == NULL) and the number of block threads is more than 1.\r
+\r
+\r
+18.03 beta 2018-03-04\r
+-------------------------\r
+- Asm\x86\LzmaDecOpt.asm: new optimized LZMA decoder written in asm \r
+ for x64 with about 30% higher speed than main version of LZMA decoder written in C.\r
+- The speed for single-thread LZMA/LZMA2 decoder written in C was increased by 3%.\r
+- 7-Zip now can use multi-threading for 7z/LZMA2 decoding,\r
+ if there are multiple independent data chunks in LZMA2 stream.\r
+- 7-Zip now can use multi-threading for xz decoding,\r
+ if there are multiple blocks in xz stream.\r
+\r
+\r
+18.01 2018-01-28\r
+-------------------------\r
+- The BUG in 17.01 - 18.00 beta was fixed:\r
+ XzDec.c : random block unpacking and XzUnpacker_IsBlockFinished()\r
+ didn't work correctly for xz archives without checksum (CRC).\r
+\r
+\r
+18.00 beta 2018-01-10\r
+-------------------------\r
+- The BUG in xz encoder was fixed:\r
+ There was memory leak of 16 KB for each file compressed with \r
+ xz compression method, if additional filter was used.\r
+\r
+\r
+17.01 beta 2017-08-28\r
+-------------------------\r
+- Minor speed optimization for LZMA2 (xz and 7z) multi-threading compression.\r
+ 7-Zip now uses additional memory buffers for multi-block LZMA2 compression.\r
+ CPU utilization was slightly improved.\r
+- 7-zip now creates multi-block xz archives by default. Block size can be \r
+ specified with -ms[Size]{m|g} switch.\r
+- xz decoder now can unpack random block from multi-block xz archives.\r
+- 7-Zip command line: @listfile now doesn't work after -- switch.\r
+ Use -i@listfile before -- switch instead.\r
+- The BUGs were fixed:\r
+ 7-Zip 17.00 beta crashed for commands that write anti-item to 7z archive.\r
+\r
+\r
+17.00 beta 2017-04-29\r
+-------------------------\r
+- NewHandler.h / NewHandler.cpp: \r
+ now it redefines operator new() only for old MSVC compilers (_MSC_VER < 1900).\r
+- C/7zTypes.h : the names of variables in interface structures were changed (vt).\r
+- Some bugs were fixed. 7-Zip could crash in some cases.\r
+- Some internal changes in code.\r
+\r
+\r
16.04 2016-10-04\r
-------------------------\r
- The bug was fixed in DllSecur.c.\r
\r
4.57 2007-12-12\r
-------------------------\r
-- Speed optimizations in C++ LZMA Decoder. \r
+- Speed optimizations in C++ LZMA Decoder. \r
- Small changes for more compatibility with some C/C++ compilers.\r
\r
\r
-LZMA SDK 16.04\r
+LZMA SDK 18.05\r
--------------\r
\r
LZMA SDK provides the documentation, samples, header files,\r