/*
 * LZ4 - Fast LZ compression algorithm
 * Copyright (C) 2011-2013, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
 * - LZ4 source repository : http://code.google.com/p/lz4/
 */
#include <sys/zfs_context.h>
static int real_LZ4_compress(const char *source, char *dest, int isize,
    int osize);
static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
    int isize, int maxOutputSize);
static int LZ4_compressCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);
static int LZ4_compress64kCtx(void *ctx, const char *source, char *dest,
    int isize, int osize);

static kmem_cache_t *lz4_cache;
/*ARGSUSED*/
size_t
lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
    size_t d_len, int n)
{
	uint32_t bufsiz;
	char *dest = d_start;

	ASSERT(d_len >= sizeof (bufsiz));

	bufsiz = real_LZ4_compress(s_start, &dest[sizeof (bufsiz)], s_len,
	    d_len - sizeof (bufsiz));

	/* Signal an error if the compression routine returned zero. */
	if (bufsiz == 0)
		return (s_len);

	/*
	 * Encode the compressed buffer size at the start. We'll need this in
	 * decompression to counter the effects of padding which might be
	 * added to the compressed buffer and which, if unhandled, would
	 * confuse the hell out of our decompression function.
	 */
	*(uint32_t *)dest = BE_32(bufsiz);

	return (bufsiz + sizeof (bufsiz));
}
/*ARGSUSED*/
int
lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
    size_t d_len, int n)
{
	const char *src = s_start;
	uint32_t bufsiz = BE_IN32(src);

	/* invalid compressed buffer size encoded at start */
	if (bufsiz + sizeof (bufsiz) > s_len)
		return (1);

	/*
	 * Returns 0 on success (decompression function returned non-negative)
	 * and non-zero on failure (decompression function returned negative).
	 */
	return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
	    d_start, bufsiz, d_len) < 0);
}
/*
 * LZ4 API Description:
 *
 * Simple Functions:
 * real_LZ4_compress() :
 *	isize  : is the input size. Max supported value is ~1.9GB
 *	return : the number of bytes written in buffer dest
 *		 or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
 *	note : destination buffer must be already allocated.
 *		destination buffer must be sized to handle worst cases
 *		situations (input data not compressible) worst case size
 *		evaluation is provided by function LZ4_compressBound().
 *
 * real_LZ4_uncompress() :
 *	osize  : is the output size, therefore the original size
 *	return : the number of bytes read in the source buffer.
 *		If the source stream is malformed, the function will stop
 *		decoding and return a negative result, indicating the byte
 *		position of the faulty instruction. This function never
 *		writes beyond dest + osize, and is therefore protected
 *		against malicious data packets.
 *	note : destination buffer must be already allocated
 *
 * Advanced Functions
 *
 * LZ4_compressBound() :
 *	Provides the maximum size that LZ4 may output in a "worst case"
 *	scenario (input data not compressible) primarily useful for memory
 *	allocation of output buffer.
 *
 *	isize  : is the input size. Max supported value is ~1.9GB
 *	return : maximum output size in a "worst case" scenario
 *	note : this function is limited by "int" range (2^31-1)
 *
 * LZ4_uncompress_unknownOutputSize() :
 *	isize  : is the input size, therefore the compressed size
 *	maxOutputSize : is the size of the destination buffer (which must be
 *		already allocated)
 *	return : the number of bytes decoded in the destination buffer
 *		(necessarily <= maxOutputSize). If the source stream is
 *		malformed, the function will stop decoding and return a
 *		negative result, indicating the byte position of the faulty
 *		instruction. This function never writes beyond dest +
 *		maxOutputSize, and is therefore protected against malicious
 *		data packets.
 *	note   : Destination buffer must be already allocated.
 *		This version is slightly slower than real_LZ4_uncompress()
 *
 * LZ4_compressCtx() :
 *	This function explicitly handles the CTX memory structure.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_cache_alloc). Passing
 *	NULL isn't valid.
 *
 * LZ4_compress64kCtx() :
 *	Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
 *	isize *Must* be <64KB, otherwise the output will be corrupted.
 *
 *	ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
 *	by the caller (either on the stack or using kmem_cache_alloc). Passing
 *	NULL isn't valid.
 */
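/*
 * A minimal usage sketch of the ctx-based entry points (hypothetical caller,
 * not part of this file; in, out, in_len and out_len are placeholder names).
 * The context is caller-allocated and wiped before use, mirroring what
 * real_LZ4_compress() does further down:
 *
 *	struct refTables *ctx = kmem_cache_alloc(lz4_cache, KM_SLEEP);
 *	int clen;
 *
 *	memset(ctx, 0, sizeof (struct refTables));
 *	clen = (in_len < LZ4_64KLIMIT) ?
 *	    LZ4_compress64kCtx(ctx, in, out, in_len, out_len) :
 *	    LZ4_compressCtx(ctx, in, out, in_len, out_len);
 *	kmem_cache_free(lz4_cache, ctx);
 *
 * A clen of 0 means the osize output limit was hit.
 */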
/*
 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
 *	 Lowering this value reduces memory usage. Reduced memory usage
 *	typically improves speed, due to cache effect (ex: L1 32KB for Intel,
 *	L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
 *	(examples : 12 -> 16KB ; 17 -> 512KB)
 */
#define COMPRESSIONLEVEL 12
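/*
 * For example, with COMPRESSIONLEVEL = 12 the hash table declared below has
 * 2^12 = 4096 entries of 4 bytes each (HTYPE is a U32 offset on 64-bit
 * builds and a 4-byte pointer on 32-bit builds), i.e. 2^(12+2) = 16KB,
 * matching the formula above.
 */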
/*
 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
 *	algorithm skip faster data segments considered "incompressible".
 *	This may decrease compression ratio dramatically, but will be
 *	faster on incompressible data. Increasing this value will make
 *	the algorithm search more before declaring a segment "incompressible".
 *	This could improve compression a bit, but will be slower on
 *	incompressible data. The default value (6) is recommended.
 */
#define NOTCOMPRESSIBLE_CONFIRMATION 6
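/*
 * NOTCOMPRESSIBLE_CONFIRMATION feeds SKIPSTRENGTH below. In the match-search
 * loops the forward step is computed as findMatchAttempts++ >> skipStrength,
 * with findMatchAttempts starting at (1 << skipStrength) + 3. With the
 * default of 6 the search therefore advances one byte at a time for roughly
 * the first 60 failed attempts, then two bytes at a time for the next 64,
 * and so on, which is what lets the compressor skip over incompressible
 * regions quickly.
 */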
/*
 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
 * performance for big endian cpu, but the resulting compressed stream
 * will be incompatible with little-endian CPU. You can set this option
 * to 1 in situations where data will stay within closed environment.
 * This option is useless on Little_Endian CPU (such as x86).
 */
/* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
/*
 * CPU Feature Detection
 */

/* 32 or 64 bits ? */
#if defined(_LP64)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif

/*
 * Little Endian or Big Endian?
 * Note: overwrite the below #define if you know your architecture endianness.
 */
#if defined(_BIG_ENDIAN)
#define LZ4_BIG_ENDIAN 1
#else
/*
 * Little Endian assumed. PDP Endian and other very rare endian format
 * are unsupported.
 */
#undef LZ4_BIG_ENDIAN
#endif

/*
 * Unaligned memory access is automatically enabled for "common" CPU,
 * such as x86. For others CPU, the compiler will be more cautious, and
 * insert extra code to ensure aligned access is respected. If you know
 * your target CPU supports unaligned memory access, you may want to
 * force this option manually to improve performance
 */
#if defined(__ARM_FEATURE_UNALIGNED)
#define LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

/*
 * Illumos : we can't use GCC's __builtin_ctz family of builtins in the
 * kernel
 * Linux : we can use GCC's __builtin_ctz family of builtins in the
 * kernel
 */
#undef LZ4_FORCE_SW_BITCOUNT
#if defined(__sparc)
#define LZ4_FORCE_SW_BITCOUNT
#endif
/*
 * Compiler Options
 */
/* Disable restrict */
#define restrict

/*
 * Linux : GCC_VERSION is defined as of 3.9-rc1, so undefine it.
 * torvalds/linux@3f3f8d2f48acfd8ed3b8e6b7377935da57b27b16
 */
#ifdef GCC_VERSION
#undef GCC_VERSION
#endif

#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#define expect(expr, value)    (__builtin_expect((expr), (value)))
#else
#define expect(expr, value)    (expr)
#endif

#ifndef likely
#define likely(expr)	expect((expr) != 0, 1)
#endif

#ifndef unlikely
#define unlikely(expr)	expect((expr) != 0, 0)
#endif

#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
	(((x) & 0xffu) << 8)))
#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack(1)
#endif

typedef struct _U16_S {
	U16 v;
} U16_S;
typedef struct _U32_S {
	U32 v;
} U32_S;
typedef struct _U64_S {
	U64 v;
} U64_S;

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack()
#endif

#define A64(x) (((U64_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
#define A16(x) (((U16_S *)(x))->v)
#define HASH_LOG COMPRESSIONLEVEL
#define HASHTABLESIZE (1 << HASH_LOG)
#define HASH_MASK (HASHTABLESIZE - 1)

#define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
	NOTCOMPRESSIBLE_CONFIRMATION : 2)

#define COPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (COPYLENGTH + MINMATCH)
#define MINLENGTH (MFLIMIT + 1)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS 4
#define ML_MASK ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
/*
 * Architecture-specific macros
 */
#if LZ4_ARCH64
#define STEPSIZE 8
#define UARCH U64
#define AARCH A64
#define LZ4_COPYSTEP(s, d)	A64(d) = A64(s); d += 8; s += 8;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)
#define LZ4_SECURECOPY(s, d, e)	if (d < e) LZ4_WILDCOPY(s, d, e)
#define HTYPE U32
#define INITBASE(base)		const BYTE* const base = ip
#else /* !LZ4_ARCH64 */
#define STEPSIZE 4
#define UARCH U32
#define AARCH A32
#define LZ4_COPYSTEP(s, d)	A32(d) = A32(s); d += 4; s += 4;
#define LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
#define LZ4_SECURECOPY		LZ4_WILDCOPY
#define HTYPE const BYTE *
#define INITBASE(base)		const int base = 0
#endif /* !LZ4_ARCH64 */

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
	{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define LZ4_WRITE_LITTLEENDIAN_16(p, v)  { A16(p) = v; p += 2; }
#endif
/* Local structures */
struct refTables {
	HTYPE hashTable[HASHTABLESIZE];
};

#define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
	HASH_LOG))
#define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
#define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
#define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
	d = e; }
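/*
 * LZ4_HASH_FUNCTION() is a multiplicative hash: the 32-bit value read at the
 * current position is multiplied by 2654435761 (a prime close to 2^32
 * divided by the golden ratio) and the top HASH_LOG bits of the product are
 * kept, mapping every position into one of HASHTABLESIZE buckets. For
 * example, with the default HASH_LOG of 12 the shift is (4 * 8) - 12 = 20,
 * leaving a 12-bit table index.
 */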
/* Private functions */
#if LZ4_ARCH64

static inline int
LZ4_NbCommonBytes(register U64 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clzll(val) >> 3);
#else
	int r;
	if (!(val >> 32)) {
		r = 4;
	} else {
		r = 0;
		val >>= 32;
	}
	if (!(val >> 16)) {
		r += 2;
		val >>= 8;
	} else {
		val >>= 24;
	}
	r += (!val);
	return (r);
#endif
#else
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctzll(val) >> 3);
#else
	static const int DeBruijnBytePos[64] =
	    { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
		3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
		5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
		4, 5, 7, 2, 6, 5, 7, 6, 7, 7
	};
	return DeBruijnBytePos[((U64) ((val & -val) * 0x0218A392CDABBD3F)) >>
	    58];
#endif
#endif
}

#else

static inline int
LZ4_NbCommonBytes(register U32 val)
{
#if defined(LZ4_BIG_ENDIAN)
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_clz(val) >> 3);
#else
	int r;
	if (!(val >> 16)) {
		r = 2;
		val >>= 8;
	} else {
		r = 0;
		val >>= 24;
	}
	r += (!val);
	return (r);
#endif
#else
#if defined(__GNUC__) && (GCC_VERSION >= 304) && \
	!defined(LZ4_FORCE_SW_BITCOUNT)
	return (__builtin_ctz(val) >> 3);
#else
	static const int DeBruijnBytePos[32] = {
		0, 0, 3, 0, 3, 1, 3, 0,
		3, 2, 2, 1, 3, 2, 0, 1,
		3, 3, 1, 2, 2, 2, 2, 0,
		3, 1, 2, 0, 1, 0, 1, 1
	};
	return DeBruijnBytePos[((U32) ((val & -(S32) val) * 0x077CB531U)) >>
	    27];
#endif
#endif
}

#endif
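/*
 * LZ4_NbCommonBytes() reports how many low-order bytes of a word-sized XOR
 * difference are zero, i.e. how far a match continued. On a little-endian
 * machine, for example, diff = 0x0000000000ff0000 has 16 trailing zero bits,
 * so __builtin_ctzll(diff) >> 3 = 2: the first two bytes compared equal and
 * the compressor extends the match by two bytes before stopping.
 */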
/* Compression functions */
static int
LZ4_compressCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
	struct refTables *srt = (struct refTables *)ctx;
	HTYPE *HashTable = (HTYPE *) (srt->hashTable);

	const BYTE *ip = (BYTE *) source;
	INITBASE(base);
	const BYTE *anchor = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define	matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	int len, length;
	const int skipStrength = SKIPSTRENGTH;
	U32 forwardH;

	/* Init */
	if (isize < MINLENGTH)
		goto _last_literals;

	/* First Byte */
	HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
	ip++;
	forwardH = LZ4_HASH_VALUE(ip);

	/* Main Loop */
	for (;;) {
		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;
		const BYTE *ref;
		BYTE *token;

		/* Find a match */
		do {
			U32 h = forwardH;
			int step = findMatchAttempts++ >> skipStrength;
			ip = forwardIp;
			forwardIp = ip + step;

			if (unlikely(forwardIp > mflimit)) {
				goto _last_literals;
			}

			forwardH = LZ4_HASH_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));

		/* Catch up */
		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    unlikely(ip[-1] == ref[-1])) {
			ip--;
			ref--;
		}

		/* Encode Literal length */
		length = ip - anchor;
		token = op++;

		/* Check output limit */
		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend))
			return (0);

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
				*op++ = 255;
			*op++ = (BYTE)len;
		} else
			*token = (length << ML_BITS);

		/* Copy Literals */
		LZ4_BLINDCOPY(anchor, op, length);

		_next_match:
		/* Encode Offset */
		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);

		/* Start Counting */
		ip += MINMATCH;
		ref += MINMATCH;	/* MinMatch verified */
		anchor = ip;
		while (likely(ip < matchlimit - (STEPSIZE - 1))) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);
			if (!diff) {
				ip += STEPSIZE;
				ref += STEPSIZE;
				continue;
			}
			ip += LZ4_NbCommonBytes(diff);
			goto _endCount;
		}
#if LZ4_ARCH64
		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
			ip += 4;
			ref += 4;
		}
#endif
		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
			ip += 2;
			ref += 2;
		}
		if ((ip < matchlimit) && (*ref == *ip))
			ip++;
		_endCount:

		/* Encode MatchLength */
		len = (ip - anchor);
		/* Check output limit */
		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
			return (0);
		if (len >= (int)ML_MASK) {
			*token += ML_MASK;
			len -= ML_MASK;
			for (; len > 509; len -= 510) {
				*op++ = 255;
				*op++ = 255;
			}
			if (len > 254) {
				len -= 255;
				*op++ = 255;
			}
			*op++ = (BYTE)len;
		} else
			*token += len;

		/* Test end of chunk */
		if (ip > mflimit) {
			anchor = ip;
			break;
		}
		/* Fill table */
		HashTable[LZ4_HASH_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH_VALUE(ip)];
		HashTable[LZ4_HASH_VALUE(ip)] = ip - base;
		if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
			token = op++;
			*token = 0;
			goto _next_match;
		}
		/* Prepare next loop */
		anchor = ip++;
		forwardH = LZ4_HASH_VALUE(ip);
	}

	_last_literals:
	/* Encode Last Literals */
	{
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
		    oend)
			return (0);
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254; lastRun -= 255) {
				*op++ = 255;
			}
			*op++ = (BYTE)lastRun;
		} else
			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int)(((char *)op) - dest);
}
/* Note : this function is valid only if isize < LZ4_64KLIMIT */
#define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
#define HASHLOG64K (HASH_LOG + 1)
#define HASH64KTABLESIZE (1U << HASHLOG64K)
#define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
	HASHLOG64K))
#define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
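/*
 * The 64K variant stores 16-bit offsets from the start of the input instead
 * of full HTYPE entries, which is why it is only valid for inputs smaller
 * than LZ4_64KLIMIT. Halving the entry size frees enough room to use one
 * extra hash bit (HASHLOG64K = HASH_LOG + 1) in the same refTables
 * allocation, doubling the number of buckets for small blocks.
 */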
static int
LZ4_compress64kCtx(void *ctx, const char *source, char *dest, int isize,
    int osize)
{
	struct refTables *srt = (struct refTables *)ctx;
	U16 *HashTable = (U16 *) (srt->hashTable);

	const BYTE *ip = (BYTE *) source;
	const BYTE *anchor = ip;
	const BYTE *const base = ip;
	const BYTE *const iend = ip + isize;
	const BYTE *const oend = (BYTE *) dest + osize;
	const BYTE *const mflimit = iend - MFLIMIT;
#define	matchlimit (iend - LASTLITERALS)

	BYTE *op = (BYTE *) dest;

	int len, length;
	const int skipStrength = SKIPSTRENGTH;
	U32 forwardH;

	/* Init */
	if (isize < MINLENGTH)
		goto _last_literals;

	/* First Byte */
	ip++;
	forwardH = LZ4_HASH64K_VALUE(ip);

	/* Main Loop */
	for (;;) {
		int findMatchAttempts = (1U << skipStrength) + 3;
		const BYTE *forwardIp = ip;
		const BYTE *ref;
		BYTE *token;

		/* Find a match */
		do {
			U32 h = forwardH;
			int step = findMatchAttempts++ >> skipStrength;
			ip = forwardIp;
			forwardIp = ip + step;

			if (forwardIp > mflimit) {
				goto _last_literals;
			}

			forwardH = LZ4_HASH64K_VALUE(forwardIp);
			ref = base + HashTable[h];
			HashTable[h] = ip - base;

		} while (A32(ref) != A32(ip));

		/* Catch up */
		while ((ip > anchor) && (ref > (BYTE *) source) &&
		    (ip[-1] == ref[-1])) {
			ip--;
			ref--;
		}

		/* Encode Literal length */
		length = ip - anchor;
		token = op++;

		/* Check output limit */
		if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
		    (length >> 8) > oend))
			return (0);

		if (length >= (int)RUN_MASK) {
			*token = (RUN_MASK << ML_BITS);
			len = length - RUN_MASK;
			for (; len > 254; len -= 255)
				*op++ = 255;
			*op++ = (BYTE)len;
		} else
			*token = (length << ML_BITS);

		/* Copy Literals */
		LZ4_BLINDCOPY(anchor, op, length);

		_next_match:
		/* Encode Offset */
		LZ4_WRITE_LITTLEENDIAN_16(op, ip - ref);

		/* Start Counting */
		ip += MINMATCH;
		ref += MINMATCH;	/* MinMatch verified */
		anchor = ip;
		while (ip < matchlimit - (STEPSIZE - 1)) {
			UARCH diff = AARCH(ref) ^ AARCH(ip);
			if (!diff) {
				ip += STEPSIZE;
				ref += STEPSIZE;
				continue;
			}
			ip += LZ4_NbCommonBytes(diff);
			goto _endCount;
		}
#if LZ4_ARCH64
		if ((ip < (matchlimit - 3)) && (A32(ref) == A32(ip))) {
			ip += 4;
			ref += 4;
		}
#endif
		if ((ip < (matchlimit - 1)) && (A16(ref) == A16(ip))) {
			ip += 2;
			ref += 2;
		}
		if ((ip < matchlimit) && (*ref == *ip))
			ip++;
		_endCount:

		/* Encode MatchLength */
		len = (ip - anchor);
		/* Check output limit */
		if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
			return (0);
		if (len >= (int)ML_MASK) {
			*token += ML_MASK;
			len -= ML_MASK;
			for (; len > 509; len -= 510) {
				*op++ = 255;
				*op++ = 255;
			}
			if (len > 254) {
				len -= 255;
				*op++ = 255;
			}
			*op++ = (BYTE)len;
		} else
			*token += len;

		/* Test end of chunk */
		if (ip > mflimit) {
			anchor = ip;
			break;
		}
		/* Fill table */
		HashTable[LZ4_HASH64K_VALUE(ip - 2)] = ip - 2 - base;

		/* Test next position */
		ref = base + HashTable[LZ4_HASH64K_VALUE(ip)];
		HashTable[LZ4_HASH64K_VALUE(ip)] = ip - base;
		if (A32(ref) == A32(ip)) {
			token = op++;
			*token = 0;
			goto _next_match;
		}
		/* Prepare next loop */
		anchor = ip++;
		forwardH = LZ4_HASH64K_VALUE(ip);
	}

	_last_literals:
	/* Encode Last Literals */
	{
		int lastRun = iend - anchor;
		if (op + lastRun + 1 + ((lastRun + 255 - RUN_MASK) / 255) >
		    oend)
			return (0);
		if (lastRun >= (int)RUN_MASK) {
			*op++ = (RUN_MASK << ML_BITS);
			lastRun -= RUN_MASK;
			for (; lastRun > 254; lastRun -= 255)
				*op++ = 255;
			*op++ = (BYTE)lastRun;
		} else
			*op++ = (lastRun << ML_BITS);
		(void) memcpy(op, anchor, iend - anchor);
		op += iend - anchor;
	}

	/* End */
	return (int)(((char *)op) - dest);
}
static int
real_LZ4_compress(const char *source, char *dest, int isize, int osize)
{
	void *ctx;
	int result;

	ASSERT(lz4_cache != NULL);
	ctx = kmem_cache_alloc(lz4_cache, KM_PUSHPAGE);

	/*
	 * out of kernel memory, gently fall through - this will disable
	 * compression in zio_compress_data
	 */
	if (ctx == NULL)
		return (0);

	memset(ctx, 0, sizeof (struct refTables));

	if (isize < LZ4_64KLIMIT)
		result = LZ4_compress64kCtx(ctx, source, dest, isize, osize);
	else
		result = LZ4_compressCtx(ctx, source, dest, isize, osize);

	kmem_cache_free(lz4_cache, ctx);
	return (result);
}
/* Decompression functions */

/*
 * Note: The decoding functions real_LZ4_uncompress() and
 *	LZ4_uncompress_unknownOutputSize() are safe against "buffer overflow"
 *	attack type. They will never write nor read outside of the provided
 *	output buffers. LZ4_uncompress_unknownOutputSize() also ensures that
 *	it will never read outside of the input buffer. A corrupted input
 *	will produce an error result, a negative int, indicating the position
 *	of the error within input stream.
 */
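/*
 * Each sequence in the compressed stream starts with a one-byte token: the
 * upper ML_BITS (4) bits hold the literal run length and the lower 4 bits
 * hold the match length minus MINMATCH. A field value of RUN_MASK/ML_MASK
 * (15) means additional length bytes follow, each adding up to 255 and
 * ending with a byte smaller than 255. For example, a token of 0x52
 * describes 5 literals followed by a match of 2 + MINMATCH = 6 bytes.
 */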
static int
LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
    int maxOutputSize)
{
	/* Local Variables */
	const BYTE *restrict ip = (const BYTE *) source;
	const BYTE *const iend = ip + isize;
	const BYTE *ref;

	BYTE *op = (BYTE *) dest;
	BYTE *const oend = op + maxOutputSize;
	BYTE *cpy;

	size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
#if LZ4_ARCH64
	size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
#endif

	/* Main Loop */
	while (ip < iend) {
		unsigned token;
		size_t length;

		/* get runlength */
		token = *ip++;
		if ((length = (token >> ML_BITS)) == RUN_MASK) {
			int s = 255;
			while ((ip < iend) && (s == 255)) {
				s = *ip++;
				length += s;
			}
		}
		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - COPYLENGTH) ||
		    (ip + length > iend - COPYLENGTH)) {
			if (cpy > oend)
				/* Error: writes beyond output buffer */
				goto _output_error;
			if (ip + length != iend)
				/*
				 * Error: LZ4 format requires to consume all
				 * input at this stage
				 */
				goto _output_error;
			(void) memcpy(op, ip, length);
			op += length;
			/* Necessarily EOF, due to parsing restrictions */
			break;
		}
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;
		if (ref < (BYTE * const) dest)
			/*
			 * Error: offset creates reference outside of
			 * destination buffer
			 */
			goto _output_error;

		/* get matchlength */
		if ((length = (token & ML_MASK)) == ML_MASK) {
			while (ip < iend) {
				int s = *ip++;
				length += s;
				if (s == 255)
					continue;
				break;
			}
		}
		/* copy repeated sequence */
		if (unlikely(op - ref < STEPSIZE)) {
#if LZ4_ARCH64
			size_t dec64 = dec64table[op - ref];
#else
			const int dec64 = 0;
#endif
			op[0] = ref[0];
			op[1] = ref[1];
			op[2] = ref[2];
			op[3] = ref[3];
			op += 4;
			ref += 4;
			ref -= dec32table[op - ref];
			A32(op) = A32(ref);
			op += STEPSIZE - 4;
			ref -= dec64;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE - 4);
		if (cpy > oend - COPYLENGTH) {
			if (cpy > oend)
				/*
				 * Error: request to write outside of
				 * destination buffer
				 */
				goto _output_error;
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			if (op == oend)
				/*
				 * Check EOF (should never happen, since
				 * last 5 bytes are supposed to be literals)
				 */
				goto _output_error;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy;	/* correction */
	}

	/* end of decoding */
	return (int)(((char *)op) - dest);

	/* write overflow error detected */
	_output_error:
	return (int)(-(((char *)ip) - source));
}
void
lz4_init(void)
{
	lz4_cache = kmem_cache_create("lz4_cache",
	    sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lz4_fini(void)
{
	if (lz4_cache) {
		kmem_cache_destroy(lz4_cache);
		lz4_cache = NULL;
	}
}