]>
git.proxmox.com Git - mirror_zfs.git/blob - module/zfs/lz4.c
2 * LZ4 - Fast LZ compression algorithm
4 * Copyright (C) 2011-2013, Yann Collet.
5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following disclaimer
15 * in the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
32 * - LZ4 source repository : http://code.google.com/p/lz4/
35 #include <sys/zfs_context.h>
37 static int real_LZ4_compress(const char *source
, char *dest
, int isize
,
39 static int LZ4_uncompress_unknownOutputSize(const char *source
, char *dest
,
40 int isize
, int maxOutputSize
);
41 static int LZ4_compressCtx(void *ctx
, const char *source
, char *dest
,
42 int isize
, int osize
);
43 static int LZ4_compress64kCtx(void *ctx
, const char *source
, char *dest
,
44 int isize
, int osize
);
46 static kmem_cache_t
*lz4_cache
;
50 lz4_compress_zfs(void *s_start
, void *d_start
, size_t s_len
,
56 ASSERT(d_len
>= sizeof (bufsiz
));
58 bufsiz
= real_LZ4_compress(s_start
, &dest
[sizeof (bufsiz
)], s_len
,
59 d_len
- sizeof (bufsiz
));
61 /* Signal an error if the compression routine returned zero. */
66 * Encode the compressed buffer size at the start. We'll need this in
67 * decompression to counter the effects of padding which might be
68 * added to the compressed buffer and which, if unhandled, would
69 * confuse the hell out of our decompression function.
71 *(uint32_t *)dest
= BE_32(bufsiz
);
73 return (bufsiz
+ sizeof (bufsiz
));
78 lz4_decompress_zfs(void *s_start
, void *d_start
, size_t s_len
,
81 const char *src
= s_start
;
82 uint32_t bufsiz
= BE_IN32(src
);
84 /* invalid compressed buffer size encoded at start */
85 if (bufsiz
+ sizeof (bufsiz
) > s_len
)
89 * Returns 0 on success (decompression function returned non-negative)
90 * and non-zero on failure (decompression function returned negative).
92 return (LZ4_uncompress_unknownOutputSize(&src
[sizeof (bufsiz
)],
93 d_start
, bufsiz
, d_len
) < 0);
97 * LZ4 API Description:
100 * real_LZ4_compress() :
101 * isize : is the input size. Max supported value is ~1.9GB
102 * return : the number of bytes written in buffer dest
103 * or 0 if the compression fails (if LZ4_COMPRESSMIN is set).
104 * note : destination buffer must be already allocated.
105 * destination buffer must be sized to handle worst cases
106 * situations (input data not compressible) worst case size
107 * evaluation is provided by function LZ4_compressBound().
109 * real_LZ4_uncompress() :
110 * osize : is the output size, therefore the original size
111 * return : the number of bytes read in the source buffer.
112 * If the source stream is malformed, the function will stop
113 * decoding and return a negative result, indicating the byte
114 * position of the faulty instruction. This function never
115 * writes beyond dest + osize, and is therefore protected
116 * against malicious data packets.
117 * note : destination buffer must be already allocated
118 * note : real_LZ4_uncompress() is not used in ZFS so its code
119 * is not present here.
123 * LZ4_compressBound() :
124 * Provides the maximum size that LZ4 may output in a "worst case"
125 * scenario (input data not compressible) primarily useful for memory
126 * allocation of output buffer.
128 * isize : is the input size. Max supported value is ~1.9GB
129 * return : maximum output size in a "worst case" scenario
130 * note : this function is limited by "int" range (2^31-1)
132 * LZ4_uncompress_unknownOutputSize() :
133 * isize : is the input size, therefore the compressed size
134 * maxOutputSize : is the size of the destination buffer (which must be
136 * return : the number of bytes decoded in the destination buffer
137 * (necessarily <= maxOutputSize). If the source stream is
138 * malformed, the function will stop decoding and return a
139 * negative result, indicating the byte position of the faulty
140 * instruction. This function never writes beyond dest +
141 * maxOutputSize, and is therefore protected against malicious
143 * note : Destination buffer must be already allocated.
144 * This version is slightly slower than real_LZ4_uncompress()
146 * LZ4_compressCtx() :
147 * This function explicitly handles the CTX memory structure.
149 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
150 * by the caller (either on the stack or using kmem_cache_alloc). Passing
153 * LZ4_compress64kCtx() :
154 * Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
155 * isize *Must* be <64KB, otherwise the output will be corrupted.
157 * ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
158 * by the caller (either on the stack or using kmem_cache_alloc). Passing
167 * COMPRESSIONLEVEL: Increasing this value improves compression ratio
168 * Lowering this value reduces memory usage. Reduced memory usage
169 * typically improves speed, due to cache effect (ex: L1 32KB for Intel,
170 * L1 64KB for AMD). Memory usage formula : N->2^(N+2) Bytes
171 * (examples : 12 -> 16KB ; 17 -> 512KB)
173 #define COMPRESSIONLEVEL 12
176 * NOTCOMPRESSIBLE_CONFIRMATION: Decreasing this value will make the
177 * algorithm skip faster data segments considered "incompressible".
178 * This may decrease compression ratio dramatically, but will be
179 * faster on incompressible data. Increasing this value will make
180 * the algorithm search more before declaring a segment "incompressible".
181 * This could improve compression a bit, but will be slower on
182 * incompressible data. The default value (6) is recommended.
184 #define NOTCOMPRESSIBLE_CONFIRMATION 6
187 * BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE: This will provide a boost to
188 * performance for big endian cpu, but the resulting compressed stream
189 * will be incompatible with little-endian CPU. You can set this option
190 * to 1 in situations where data will stay within closed environment.
191 * This option is useless on Little_Endian CPU (such as x86).
193 /* #define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1 */
196 * CPU Feature Detection
199 /* 32 or 64 bits ? */
207 * Little Endian or Big Endian?
208 * Note: overwrite the below #define if you know your architecture endianness.
210 #if defined(_BIG_ENDIAN)
211 #define LZ4_BIG_ENDIAN 1
214 * Little Endian assumed. PDP Endian and other very rare endian format
217 #undef LZ4_BIG_ENDIAN
221 * Unaligned memory access is automatically enabled for "common" CPU,
222 * such as x86. For others CPU, the compiler will be more cautious, and
223 * insert extra code to ensure aligned access is respected. If you know
224 * your target CPU supports unaligned memory access, you may want to
225 * force this option manually to improve performance
227 #if defined(__ARM_FEATURE_UNALIGNED)
228 #define LZ4_FORCE_UNALIGNED_ACCESS 1
232 * Illumos : we can't use GCC's __builtin_ctz family of builtins in the
234 * Linux : we can use GCC's __builtin_ctz family of builtins in the
237 #undef LZ4_FORCE_SW_BITCOUNT
239 #define LZ4_FORCE_SW_BITCOUNT
245 /* Disable restrict */
249 * Linux : GCC_VERSION is defined as of 3.9-rc1, so undefine it.
250 * torvalds/linux@3f3f8d2f48acfd8ed3b8e6b7377935da57b27b16
256 #define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
258 #if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
259 #define expect(expr, value) (__builtin_expect((expr), (value)))
261 #define expect(expr, value) (expr)
265 #define likely(expr) expect((expr) != 0, 1)
269 #define unlikely(expr) expect((expr) != 0, 0)
272 #define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
273 (((x) & 0xffu) << 8)))
282 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
286 typedef struct _U16_S
{
289 typedef struct _U32_S
{
292 typedef struct _U64_S
{
296 #ifndef LZ4_FORCE_UNALIGNED_ACCESS
300 #define A64(x) (((U64_S *)(x))->v)
301 #define A32(x) (((U32_S *)(x))->v)
302 #define A16(x) (((U16_S *)(x))->v)
309 #define HASH_LOG COMPRESSIONLEVEL
310 #define HASHTABLESIZE (1 << HASH_LOG)
311 #define HASH_MASK (HASHTABLESIZE - 1)
313 #define SKIPSTRENGTH (NOTCOMPRESSIBLE_CONFIRMATION > 2 ? \
314 NOTCOMPRESSIBLE_CONFIRMATION : 2)
317 #define LASTLITERALS 5
318 #define MFLIMIT (COPYLENGTH + MINMATCH)
319 #define MINLENGTH (MFLIMIT + 1)
322 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
325 #define ML_MASK ((1U<<ML_BITS)-1)
326 #define RUN_BITS (8-ML_BITS)
327 #define RUN_MASK ((1U<<RUN_BITS)-1)
331 * Architecture-specific macros
337 #define LZ4_COPYSTEP(s, d) A64(d) = A64(s); d += 8; s += 8;
338 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
339 #define LZ4_SECURECOPY(s, d, e) if (d < e) LZ4_WILDCOPY(s, d, e)
341 #define INITBASE(base) const BYTE* const base = ip
342 #else /* !LZ4_ARCH64 */
346 #define LZ4_COPYSTEP(s, d) A32(d) = A32(s); d += 4; s += 4;
347 #define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
348 #define LZ4_SECURECOPY LZ4_WILDCOPY
349 #define HTYPE const BYTE *
350 #define INITBASE(base) const int base = 0
351 #endif /* !LZ4_ARCH64 */
353 #if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
354 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
355 { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
356 #define LZ4_WRITE_LITTLEENDIAN_16(p, i) \
357 { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
359 #define LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
360 #define LZ4_WRITE_LITTLEENDIAN_16(p, v) { A16(p) = v; p += 2; }
364 /* Local structures */
366 HTYPE hashTable
[HASHTABLESIZE
];
371 #define LZ4_HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH * 8) - \
373 #define LZ4_HASH_VALUE(p) LZ4_HASH_FUNCTION(A32(p))
374 #define LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
375 #define LZ4_BLINDCOPY(s, d, l) { BYTE* e = (d) + l; LZ4_WILDCOPY(s, d, e); \
379 /* Private functions */
383 LZ4_NbCommonBytes(register U64 val
)
385 #if defined(LZ4_BIG_ENDIAN)
386 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
387 !defined(LZ4_FORCE_SW_BITCOUNT)
388 return (__builtin_clzll(val
) >> 3);
407 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
408 !defined(LZ4_FORCE_SW_BITCOUNT)
409 return (__builtin_ctzll(val
) >> 3);
411 static const int DeBruijnBytePos
[64] =
412 { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5,
413 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5,
414 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4,
415 4, 5, 7, 2, 6, 5, 7, 6, 7, 7
417 return DeBruijnBytePos
[((U64
) ((val
& -val
) * 0x0218A392CDABBD3F)) >>
426 LZ4_NbCommonBytes(register U32 val
)
428 #if defined(LZ4_BIG_ENDIAN)
429 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
430 !defined(LZ4_FORCE_SW_BITCOUNT)
431 return (__builtin_clz(val
) >> 3);
445 #if defined(__GNUC__) && (GCC_VERSION >= 304) && \
446 !defined(LZ4_FORCE_SW_BITCOUNT)
447 return (__builtin_ctz(val
) >> 3);
449 static const int DeBruijnBytePos
[32] = {
450 0, 0, 3, 0, 3, 1, 3, 0,
451 3, 2, 2, 1, 3, 2, 0, 1,
452 3, 3, 1, 2, 2, 2, 2, 0,
453 3, 1, 2, 0, 1, 0, 1, 1
455 return DeBruijnBytePos
[((U32
) ((val
& -(S32
) val
) * 0x077CB531U
)) >>
463 /* Compression functions */
467 LZ4_compressCtx(void *ctx
, const char *source
, char *dest
, int isize
,
470 struct refTables
*srt
= (struct refTables
*)ctx
;
471 HTYPE
*HashTable
= (HTYPE
*) (srt
->hashTable
);
473 const BYTE
*ip
= (BYTE
*) source
;
475 const BYTE
*anchor
= ip
;
476 const BYTE
*const iend
= ip
+ isize
;
477 const BYTE
*const oend
= (BYTE
*) dest
+ osize
;
478 const BYTE
*const mflimit
= iend
- MFLIMIT
;
479 #define matchlimit (iend - LASTLITERALS)
481 BYTE
*op
= (BYTE
*) dest
;
484 const int skipStrength
= SKIPSTRENGTH
;
489 if (isize
< MINLENGTH
)
493 HashTable
[LZ4_HASH_VALUE(ip
)] = ip
- base
;
495 forwardH
= LZ4_HASH_VALUE(ip
);
499 int findMatchAttempts
= (1U << skipStrength
) + 3;
500 const BYTE
*forwardIp
= ip
;
507 int step
= findMatchAttempts
++ >> skipStrength
;
509 forwardIp
= ip
+ step
;
511 if (unlikely(forwardIp
> mflimit
)) {
515 forwardH
= LZ4_HASH_VALUE(forwardIp
);
516 ref
= base
+ HashTable
[h
];
517 HashTable
[h
] = ip
- base
;
519 } while ((ref
< ip
- MAX_DISTANCE
) || (A32(ref
) != A32(ip
)));
522 while ((ip
> anchor
) && (ref
> (BYTE
*) source
) &&
523 unlikely(ip
[-1] == ref
[-1])) {
528 /* Encode Literal length */
529 length
= ip
- anchor
;
532 /* Check output limit */
533 if (unlikely(op
+ length
+ (2 + 1 + LASTLITERALS
) +
534 (length
>> 8) > oend
))
537 if (length
>= (int)RUN_MASK
) {
538 *token
= (RUN_MASK
<< ML_BITS
);
539 len
= length
- RUN_MASK
;
540 for (; len
> 254; len
-= 255)
544 *token
= (length
<< ML_BITS
);
547 LZ4_BLINDCOPY(anchor
, op
, length
);
551 LZ4_WRITE_LITTLEENDIAN_16(op
, ip
- ref
);
555 ref
+= MINMATCH
; /* MinMatch verified */
557 while (likely(ip
< matchlimit
- (STEPSIZE
- 1))) {
558 UARCH diff
= AARCH(ref
) ^ AARCH(ip
);
564 ip
+= LZ4_NbCommonBytes(diff
);
568 if ((ip
< (matchlimit
- 3)) && (A32(ref
) == A32(ip
))) {
573 if ((ip
< (matchlimit
- 1)) && (A16(ref
) == A16(ip
))) {
577 if ((ip
< matchlimit
) && (*ref
== *ip
))
581 /* Encode MatchLength */
583 /* Check output limit */
584 if (unlikely(op
+ (1 + LASTLITERALS
) + (len
>> 8) > oend
))
586 if (len
>= (int)ML_MASK
) {
589 for (; len
> 509; len
-= 510) {
601 /* Test end of chunk */
607 HashTable
[LZ4_HASH_VALUE(ip
- 2)] = ip
- 2 - base
;
609 /* Test next position */
610 ref
= base
+ HashTable
[LZ4_HASH_VALUE(ip
)];
611 HashTable
[LZ4_HASH_VALUE(ip
)] = ip
- base
;
612 if ((ref
> ip
- (MAX_DISTANCE
+ 1)) && (A32(ref
) == A32(ip
))) {
617 /* Prepare next loop */
619 forwardH
= LZ4_HASH_VALUE(ip
);
623 /* Encode Last Literals */
625 int lastRun
= iend
- anchor
;
626 if (op
+ lastRun
+ 1 + ((lastRun
+ 255 - RUN_MASK
) / 255) >
629 if (lastRun
>= (int)RUN_MASK
) {
630 *op
++ = (RUN_MASK
<< ML_BITS
);
632 for (; lastRun
> 254; lastRun
-= 255) {
635 *op
++ = (BYTE
)lastRun
;
637 *op
++ = (lastRun
<< ML_BITS
);
638 (void) memcpy(op
, anchor
, iend
- anchor
);
643 return (int)(((char *)op
) - dest
);
648 /* Note : this function is valid only if isize < LZ4_64KLIMIT */
649 #define LZ4_64KLIMIT ((1 << 16) + (MFLIMIT - 1))
650 #define HASHLOG64K (HASH_LOG + 1)
651 #define HASH64KTABLESIZE (1U << HASHLOG64K)
652 #define LZ4_HASH64K_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8) - \
654 #define LZ4_HASH64K_VALUE(p) LZ4_HASH64K_FUNCTION(A32(p))
658 LZ4_compress64kCtx(void *ctx
, const char *source
, char *dest
, int isize
,
661 struct refTables
*srt
= (struct refTables
*)ctx
;
662 U16
*HashTable
= (U16
*) (srt
->hashTable
);
664 const BYTE
*ip
= (BYTE
*) source
;
665 const BYTE
*anchor
= ip
;
666 const BYTE
*const base
= ip
;
667 const BYTE
*const iend
= ip
+ isize
;
668 const BYTE
*const oend
= (BYTE
*) dest
+ osize
;
669 const BYTE
*const mflimit
= iend
- MFLIMIT
;
670 #define matchlimit (iend - LASTLITERALS)
672 BYTE
*op
= (BYTE
*) dest
;
675 const int skipStrength
= SKIPSTRENGTH
;
679 if (isize
< MINLENGTH
)
684 forwardH
= LZ4_HASH64K_VALUE(ip
);
688 int findMatchAttempts
= (1U << skipStrength
) + 3;
689 const BYTE
*forwardIp
= ip
;
696 int step
= findMatchAttempts
++ >> skipStrength
;
698 forwardIp
= ip
+ step
;
700 if (forwardIp
> mflimit
) {
704 forwardH
= LZ4_HASH64K_VALUE(forwardIp
);
705 ref
= base
+ HashTable
[h
];
706 HashTable
[h
] = ip
- base
;
708 } while (A32(ref
) != A32(ip
));
711 while ((ip
> anchor
) && (ref
> (BYTE
*) source
) &&
712 (ip
[-1] == ref
[-1])) {
717 /* Encode Literal length */
718 length
= ip
- anchor
;
721 /* Check output limit */
722 if (unlikely(op
+ length
+ (2 + 1 + LASTLITERALS
) +
723 (length
>> 8) > oend
))
726 if (length
>= (int)RUN_MASK
) {
727 *token
= (RUN_MASK
<< ML_BITS
);
728 len
= length
- RUN_MASK
;
729 for (; len
> 254; len
-= 255)
733 *token
= (length
<< ML_BITS
);
736 LZ4_BLINDCOPY(anchor
, op
, length
);
740 LZ4_WRITE_LITTLEENDIAN_16(op
, ip
- ref
);
744 ref
+= MINMATCH
; /* MinMatch verified */
746 while (ip
< matchlimit
- (STEPSIZE
- 1)) {
747 UARCH diff
= AARCH(ref
) ^ AARCH(ip
);
753 ip
+= LZ4_NbCommonBytes(diff
);
757 if ((ip
< (matchlimit
- 3)) && (A32(ref
) == A32(ip
))) {
762 if ((ip
< (matchlimit
- 1)) && (A16(ref
) == A16(ip
))) {
766 if ((ip
< matchlimit
) && (*ref
== *ip
))
770 /* Encode MatchLength */
772 /* Check output limit */
773 if (unlikely(op
+ (1 + LASTLITERALS
) + (len
>> 8) > oend
))
775 if (len
>= (int)ML_MASK
) {
778 for (; len
> 509; len
-= 510) {
790 /* Test end of chunk */
796 HashTable
[LZ4_HASH64K_VALUE(ip
- 2)] = ip
- 2 - base
;
798 /* Test next position */
799 ref
= base
+ HashTable
[LZ4_HASH64K_VALUE(ip
)];
800 HashTable
[LZ4_HASH64K_VALUE(ip
)] = ip
- base
;
801 if (A32(ref
) == A32(ip
)) {
806 /* Prepare next loop */
808 forwardH
= LZ4_HASH64K_VALUE(ip
);
812 /* Encode Last Literals */
814 int lastRun
= iend
- anchor
;
815 if (op
+ lastRun
+ 1 + ((lastRun
+ 255 - RUN_MASK
) / 255) >
818 if (lastRun
>= (int)RUN_MASK
) {
819 *op
++ = (RUN_MASK
<< ML_BITS
);
821 for (; lastRun
> 254; lastRun
-= 255)
823 *op
++ = (BYTE
)lastRun
;
825 *op
++ = (lastRun
<< ML_BITS
);
826 (void) memcpy(op
, anchor
, iend
- anchor
);
831 return (int)(((char *)op
) - dest
);
835 real_LZ4_compress(const char *source
, char *dest
, int isize
, int osize
)
840 ASSERT(lz4_cache
!= NULL
);
841 ctx
= kmem_cache_alloc(lz4_cache
, KM_SLEEP
);
844 * out of kernel memory, gently fall through - this will disable
845 * compression in zio_compress_data
850 memset(ctx
, 0, sizeof (struct refTables
));
852 if (isize
< LZ4_64KLIMIT
)
853 result
= LZ4_compress64kCtx(ctx
, source
, dest
, isize
, osize
);
855 result
= LZ4_compressCtx(ctx
, source
, dest
, isize
, osize
);
857 kmem_cache_free(lz4_cache
, ctx
);
861 /* Decompression functions */
864 * Note: The decoding functions real_LZ4_uncompress() and
865 * LZ4_uncompress_unknownOutputSize() are safe against "buffer overflow"
866 * attack type. They will never write nor read outside of the provided
867 * output buffers. LZ4_uncompress_unknownOutputSize() also insures that
868 * it will never read outside of the input buffer. A corrupted input
869 * will produce an error result, a negative int, indicating the position
870 * of the error within input stream.
872 * Note[2]: real_LZ4_uncompress(), referred to above, is not used in ZFS so
873 * its code is not present here.
/*
 * Fixup tables for copying overlapping match sequences during
 * decompression (indexed by the op - ref distance when it is smaller
 * than the machine copy step).  Initializer values are taken verbatim
 * from the extracted source.
 */
static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
882 LZ4_uncompress_unknownOutputSize(const char *source
, char *dest
, int isize
,
885 /* Local Variables */
886 const BYTE
*restrict ip
= (const BYTE
*) source
;
887 const BYTE
*const iend
= ip
+ isize
;
890 BYTE
*op
= (BYTE
*) dest
;
891 BYTE
*const oend
= op
+ maxOutputSize
;
901 if ((length
= (token
>> ML_BITS
)) == RUN_MASK
) {
903 while ((ip
< iend
) && (s
== 255)) {
905 if (unlikely(length
> (size_t)(length
+ s
)))
912 /* CORNER-CASE: cpy might overflow. */
914 goto _output_error
; /* cpy was overflowed, bail! */
915 if ((cpy
> oend
- COPYLENGTH
) ||
916 (ip
+ length
> iend
- COPYLENGTH
)) {
918 /* Error: writes beyond output buffer */
920 if (ip
+ length
!= iend
)
922 * Error: LZ4 format requires to consume all
923 * input at this stage
926 (void) memcpy(op
, ip
, length
);
928 /* Necessarily EOF, due to parsing restrictions */
931 LZ4_WILDCOPY(ip
, op
, cpy
);
936 LZ4_READ_LITTLEENDIAN_16(ref
, cpy
, ip
);
938 if (ref
< (BYTE
* const) dest
)
940 * Error: offset creates reference outside of
945 /* get matchlength */
946 if ((length
= (token
& ML_MASK
)) == ML_MASK
) {
949 if (unlikely(length
> (size_t)(length
+ s
)))
957 /* copy repeated sequence */
958 if (unlikely(op
- ref
< STEPSIZE
)) {
960 int dec64
= dec64table
[op
- ref
];
970 ref
-= dec32table
[op
- ref
];
975 LZ4_COPYSTEP(ref
, op
);
977 cpy
= op
+ length
- (STEPSIZE
- 4);
978 if (cpy
> oend
- COPYLENGTH
) {
981 * Error: request to write outside of
986 if ((ref
+ COPYLENGTH
) > oend
)
988 if ((ref
+ COPYLENGTH
) > oend
||
989 (op
+ COPYLENGTH
) > oend
)
992 LZ4_SECURECOPY(ref
, op
, (oend
- COPYLENGTH
));
998 * Check EOF (should never happen, since
999 * last 5 bytes are supposed to be literals)
1004 LZ4_SECURECOPY(ref
, op
, cpy
);
1005 op
= cpy
; /* correction */
1008 /* end of decoding */
1009 return (int)(((char *)op
) - dest
);
1011 /* write overflow error detected */
1019 lz4_cache
= kmem_cache_create("lz4_cache",
1020 sizeof (struct refTables
), 0, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
1027 kmem_cache_destroy(lz4_cache
);