/*ARGSUSED*/
size_t
-lz4_compress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
+lz4_compress_zfs(void *s_start, void *d_start, size_t s_len,
+ size_t d_len, int n)
{
uint32_t bufsiz;
char *dest = d_start;
return (s_len);
/*
- * Encode the compresed buffer size at the start. We'll need this in
+ * Encode the compressed buffer size at the start. We'll need this in
* decompression to counter the effects of padding which might be
* added to the compressed buffer and which, if unhandled, would
* confuse the hell out of our decompression function.
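 * (Sketch, not part of this change: the 32-bit size is stored big-endian
 * at the start of dest and read back with BE_IN32() in lz4_decompress_zfs
 * below, so the value returned to the caller is bufsiz + sizeof (bufsiz).)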
/*ARGSUSED*/
int
-lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len, size_t d_len, int n)
+lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
+ size_t d_len, int n)
{
const char *src = s_start;
uint32_t bufsiz = BE_IN32(src);
/*
* Returns 0 on success (decompression function returned non-negative)
- * and non-zero on failure (decompression function returned negative.
+ * and non-zero on failure (decompression function returned negative).
*/
return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
d_start, bufsiz, d_len) < 0);
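/*
 * Note that bufsiz, recovered from the header above, bounds the read here,
 * so any padding appended after the compressed stream is ignored; the
 * "< 0" comparison then folds the decompressor's negative error codes into
 * the 0 == success / non-zero == failure contract described above.
 */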
* writes beyond dest + osize, and is therefore protected
* against malicious data packets.
* note : destination buffer must be already allocated
+ * note : real_LZ4_uncompress() is not used in ZFS so its code
+ * is not present here.
*
* Advanced Functions
*
* This function explicitly handles the CTX memory structure.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
- * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
- * isn't valid.
+ * by the caller (either on the stack or using kmem_cache_alloc). Passing
+ * NULL isn't valid.
*
* LZ4_compress64kCtx() :
* Same as LZ4_compressCtx(), but specific to small inputs (<64KB).
* isize *Must* be <64KB, otherwise the output will be corrupted.
*
* ILLUMOS CHANGES: the CTX memory structure must be explicitly allocated
- * by the caller (either on the stack or using kmem_cache_alloc). Passing NULL
- * isn't valid.
+ * by the caller (either on the stack or using kmem_cache_alloc). Passing
+ * NULL isn't valid.
*/
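/*
 * For illustration only (not part of this change), a caller honoring the
 * rule above would do roughly the following instead of passing NULL:
 *
 *	void *ctx = kmem_cache_alloc(lz4_cache, KM_SLEEP);
 *	if (ctx != NULL) {
 *		result = LZ4_compressCtx(ctx, src, dst, isize, osize);
 *		kmem_cache_free(lz4_cache, ctx);
 *	}
 */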
/*
*/
/* 32 or 64 bits ? */
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
- defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
- defined(__LP64__) || defined(_LP64))
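+/*
+ * (_LP64 is defined by the compilation environment on every 64-bit target
+ * this file is built for, so the long compiler/architecture list above
+ * collapses to this single test.)
+ */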
+#if defined(_LP64)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
/*
* Little Endian or Big Endian?
- * Note: overwrite the below #define if you know your architecture endianess.
+ * Note: overwrite the below #define if you know your architecture endianness.
*/
-#if (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || \
- defined(_BIG_ENDIAN) || defined(_ARCH_PPC) || defined(__PPC__) || \
- defined(__PPC) || defined(PPC) || defined(__powerpc__) || \
- defined(__powerpc) || defined(powerpc) || \
- ((defined(__BYTE_ORDER__)&&(__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))))
+#if defined(_BIG_ENDIAN)
#define LZ4_BIG_ENDIAN 1
#else
/*
* Little Endian assumed. PDP Endian and other very rare endian format
* are unsupported.
*/
+#undef LZ4_BIG_ENDIAN
#endif
/*
* kernel
*/
#undef LZ4_FORCE_SW_BITCOUNT
+#if defined(__sparc)
+#define LZ4_FORCE_SW_BITCOUNT
+#endif
/*
* Compiler Options
#define unlikely(expr) expect((expr) != 0, 0)
#endif
-#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
+#define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | \
(((x) & 0xffu) << 8)))
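/* For example, lz4_bswap16(0x1234) evaluates to 0x3412. */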
/* Basic types */
int result;
ASSERT(lz4_cache != NULL);
- ctx = kmem_cache_alloc(lz4_cache, KM_PUSHPAGE);
+ ctx = kmem_cache_alloc(lz4_cache, KM_SLEEP);
/*
* out of kernel memory, gently fall through - this will disable
* it will never read outside of the input buffer. A corrupted input
* will produce an error result, a negative int, indicating the position
* of the error within input stream.
+ *
+ * Note[2]: real_LZ4_uncompress(), referred to above, is not used in ZFS so
+ * its code is not present here.
*/
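+/*
+ * Offset-correction tables for overlapping match copies: when the match
+ * distance (op - ref) is smaller than the copy step, the first four bytes
+ * are copied one at a time and these values then pull ref back so that the
+ * wider copies which follow only read bytes that have already been written.
+ */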
+static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
+#if LZ4_ARCH64
+static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
+#endif
+
static int
LZ4_uncompress_unknownOutputSize(const char *source, char *dest, int isize,
int maxOutputSize)
BYTE *const oend = op + maxOutputSize;
BYTE *cpy;
- size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
- size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
-#endif
-
/* Main Loop */
while (ip < iend) {
unsigned token;
int s = 255;
while ((ip < iend) && (s == 255)) {
s = *ip++;
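+ /* If adding s overflows, the sum wraps and compares smaller than length. */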
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
}
}
/* copy literals */
cpy = op + length;
+ /* CORNER-CASE: cpy might overflow. */
+ if (cpy < op)
+ goto _output_error; /* cpy was overflowed, bail! */
if ((cpy > oend - COPYLENGTH) ||
(ip + length > iend - COPYLENGTH)) {
if (cpy > oend)
if ((length = (token & ML_MASK)) == ML_MASK) {
while (ip < iend) {
int s = *ip++;
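+ /* Same wraparound guard as in the literal-length loop above. */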
+ if (unlikely(length > (size_t)(length + s)))
+ goto _output_error;
length += s;
if (s == 255)
continue;
/* copy repeated sequence */
if (unlikely(op - ref < STEPSIZE)) {
#if LZ4_ARCH64
- size_t dec64 = dec64table[op-ref];
+ int dec64 = dec64table[op - ref];
#else
const int dec64 = 0;
#endif
op[3] = ref[3];
op += 4;
ref += 4;
- ref -= dec32table[op-ref];
+ ref -= dec32table[op - ref];
A32(op) = A32(ref);
op += STEPSIZE - 4;
ref -= dec64;
* destination buffer
*/
goto _output_error;
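+ /*
+ * Bound the LZ4_SECURECOPY below: ref must not run past oend, and on
+ * 32-bit builds (where LZ4_SECURECOPY has no destination guard of its
+ * own) neither may op.
+ */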
+#if LZ4_ARCH64
+ if ((ref + COPYLENGTH) > oend)
+#else
+ if ((ref + COPYLENGTH) > oend ||
+ (op + COPYLENGTH) > oend)
+#endif
+ goto _output_error;
LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
while (op < cpy)
*op++ = *ref++;
/* write overflow error detected */
_output_error:
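+ /*
+ * lz4_decompress_zfs(), the caller in this file, only tests for a
+ * negative result, so the exact error offset is not reported.
+ */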
- return (int)(-(((char *)ip) - source));
+ return (-1);
}
void
lz4_init(void)
{
lz4_cache = kmem_cache_create("lz4_cache",
- sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
+ sizeof (struct refTables), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
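/*
 * (The cache created above backs the kmem_cache_alloc() call in the
 * compression path earlier in this file; each object is one struct
 * refTables worth of scratch space.)
 */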
void
lz4_cache = NULL;
}
}
-